
Adapt to mulle_allocator 2.0, which necessitates some small signature changes.

Nat! authored on 06/11/2016 21:29:40
Showing 7 changed files
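For orientation, a minimal caller-side sketch of what the "small signature changes" amount to. The caller code below is hypothetical and the include is an assumption; the void signatures of the underscore functions and the int-returning checked wrapper are taken from the hunks that follow.

// hypothetical caller adapted to mulle-concurrent 1.1.0 (umbrella include assumed)
#include <mulle_concurrent/mulle_concurrent.h>
#include <stdlib.h>

static void   example( struct mulle_concurrent_pointerarray *array,
                       struct mulle_allocator *allocator)
{
   // 1.0.x: if( _mulle_concurrent_pointerarray_init( array, 32, allocator)) ...
   // 1.1.0: the underscore variant returns void, so there is nothing to check
   _mulle_concurrent_pointerarray_init( array, 32, allocator);

   // the checked wrapper keeps its int return and reports EINVAL for bad parameters
   if( mulle_concurrent_pointerarray_add( array, (void *) 0x1848))
      abort();
}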
new file mode 100644
@@ -0,0 +1,33 @@
+This is a BSD-3 style license
+-----------------------------
+
+Copyright (c) 2015 Nat! - Mulle kybernetiK
+Copyright © 2015 Codeon GmbH.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+Neither the name of Mulle kybernetiK nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
@@ -1,3 +1,11 @@
+1.1.0
+===
+
+* recompile because `mulle_allocator` changed dramatically in layout
+* change some int-returning functions to void. Will probably redo the error
+handling here. Does this warrant a major version? Hmm...
+
+
 1.0.1-1.0.8
 ===
 
@@ -3,7 +3,7 @@
 
 ## What you get
 
-* `libmulle_concurrent.a` the mulle-concurrent static libraries along with a
+* `libmulle_concurrent.a` the mulle-concurrent static library along with a
 bunch of headers.
 
 
@@ -12,11 +12,13 @@ bunch of headers.
 #### mulle-aba
 
 [mulle-aba](//www.mulle-kybernetik.com/software/git/mulle-aba/) provides the
-ABA safe freeing of resources.
+ABA safe freeing of resources. Or use your own ABA free routine and place it
+in the allocator.
 
 #### mulle-allocator
 
-[mulle-allocator](//www.mulle-kybernetik.com/software/git/mulle-allocator/) contains the memory-allocation scheme, that mulle-concurrent uses.
+[mulle-allocator](//www.mulle-kybernetik.com/software/git/mulle-allocator/)
+contains the memory-allocation scheme, that mulle-concurrent uses.
 
 #### mulle-c11
 
@@ -59,7 +61,7 @@ exist in `./tests`, if you want to run tests.
 ### Windows: Installing further prerequisites
 
 Check the [mulle-build README.md](//www.mulle-kybernetik.com/software/git/mulle-build/README.md)
-for instrutions how to get the "Git for Windows" bash going.
+for instructions how to get the "Git for Windows" bash going.
 
 
 ### OSX: Install mulle-build using homebrew
@@ -78,8 +78,6 @@ static struct _mulle_concurrent_hashmapstorage *
 
    p = _mulle_allocator_calloc( allocator, 1, sizeof( struct _mulle_concurrent_hashvaluepair) * (n - 1) +
                              sizeof( struct _mulle_concurrent_hashmapstorage));
-   if( ! p)
-      return( p);
 
    p->mask = n - 1;
 
@@ -35,7 +35,7 @@
 #define mulle_concurrent_h__
 
 
-#define MULLE_CONCURRENT_VERSION  ((1 << 20) | (0 << 8) | 8)
+#define MULLE_CONCURRENT_VERSION  ((1 << 20) | (1 << 8) | 0)
 
 #include <mulle_thread/mulle_thread.h>
 #include <mulle_allocator/mulle_allocator.h>
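As a side note, a small sketch of how the bumped version constant decodes: the 20/8 bit positions come from the #define above; the field widths, the checking function, and the include are assumptions.

#include <assert.h>
#include <mulle_concurrent/mulle_concurrent.h>   // include assumed

static void   check_mulle_concurrent_version( void)
{
   unsigned int   version;

   version = MULLE_CONCURRENT_VERSION;       // now 1.1.0
   assert( (version >> 20) == 1);            // major
   assert( ((version >> 8) & 0xFFF) == 1);   // minor (12 bit field assumed)
   assert( (version & 0xFF) == 0);           // patch (8 bit field assumed)
}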
@@ -75,9 +75,7 @@ static struct _mulle_concurrent_pointerarraystorage *
       n = 8;
 
    p = _mulle_allocator_calloc( allocator, 1, sizeof( void *) * (n - 1) +
-                             sizeof( struct _mulle_concurrent_pointerarraystorage));
-   if( ! p)
-      return( p);
+                                sizeof( struct _mulle_concurrent_pointerarraystorage));
    p->size = n;
 
    /*
@@ -171,9 +169,9 @@ static void   _mulle_concurrent_pointerarraystorage_copy( struct _mulle_concurre
 #pragma mark -
 #pragma mark _mulle_concurrent_pointerarray
 
-int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
-                                          unsigned int size,
-                                          struct mulle_allocator *allocator)
+void  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
+                                           unsigned int size,
+                                           struct mulle_allocator *allocator)
 {
    struct _mulle_concurrent_pointerarraystorage   *storage;
 
@@ -182,19 +180,11 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
 
    assert( allocator->abafree && allocator->abafree != (int (*)()) abort);
 
-   if( ! allocator->abafree)
-      return( EINVAL);
-
    array->allocator = allocator;
    storage          = _mulle_concurrent_alloc_pointerarraystorage( size, allocator);
 
-   if( ! storage)
-      return( ENOMEM);
-
    _mulle_atomic_pointer_nonatomic_write( &array->storage.pointer, storage);
    _mulle_atomic_pointer_nonatomic_write( &array->next_storage.pointer, storage);
-
-   return( 0);
 }
 
 
@@ -239,7 +229,7 @@ unsigned int   _mulle_concurrent_pointerarray_get_count( struct mulle_concurrent
 # pragma mark -
 # pragma mark multi-threaded
 
-static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurrent_pointerarray *array,
+static void  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurrent_pointerarray *array,
                                                       struct _mulle_concurrent_pointerarraystorage *p)
 {
 
@@ -258,8 +248,6 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
    if( q == p)
    {
       alloced = _mulle_concurrent_alloc_pointerarraystorage( (unsigned int) p->size * 2, array->allocator);
-      if( ! alloced)
-         return( ENOMEM);
 
       // make this the next world, assume that's still set to 'p' (SIC)
       q = __mulle_atomic_pointer_compare_and_swap( &array->next_storage.pointer, alloced, p);
@@ -283,8 +271,6 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
    // already gone
    if( previous == p)
       _mulle_allocator_abafree( array->allocator, previous);
-
-   return( 0);
 }
 
 
@@ -292,22 +278,21 @@ void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray
                                            unsigned int index)
 {
    struct _mulle_concurrent_pointerarraystorage   *p;
-   void                                     *value;
+   void                                           *value;
 
 retry:
    p     = _mulle_atomic_pointer_read( &array->storage.pointer);
    value = _mulle_concurrent_pointerarraystorage_get( p, index);
    if( value == REDIRECT_VALUE)
    {
-      if( _mulle_concurrent_pointerarray_migrate_storage( array, p))
-         return( MULLE_CONCURRENT_NO_POINTER);
+      _mulle_concurrent_pointerarray_migrate_storage( array, p);
       goto retry;
    }
    return( value);
 }
 
 
-int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
+void  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
                                          void *value)
 {
    struct _mulle_concurrent_pointerarraystorage   *p;
@@ -321,12 +306,9 @@ retry:
    {
    case EBUSY   :
    case ENOSPC  :
-      if( _mulle_concurrent_pointerarray_migrate_storage( array, p))
-         return( ENOMEM);
+      _mulle_concurrent_pointerarray_migrate_storage( array, p);
       goto retry;
    }
-
-   return( 0);
 }
 
 
@@ -337,7 +319,9 @@ int  mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *ar
       return( EINVAL);
    if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
       return( EINVAL);
-   return( _mulle_concurrent_pointerarray_add( array, value));
+
+   _mulle_concurrent_pointerarray_add( array, value);
+   return( 0);
 }
 
 
@@ -70,12 +70,14 @@ static inline int  mulle_concurrent_pointerarray_init( struct mulle_concurrent_p
                                                        unsigned int size,
                                                        struct mulle_allocator *allocator)
 {
-   int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
-                                            unsigned int size,
-                                            struct mulle_allocator *allocator);
+   void  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
+                                              unsigned int size,
+                                              struct mulle_allocator *allocator);
    if( ! array)
      return( EINVAL);
-   return( _mulle_concurrent_pointerarray_init( array, size, allocator));
+
+   _mulle_concurrent_pointerarray_init( array, size, allocator);
+   return( 0);
 }
 
 
@@ -211,7 +213,7 @@ int   mulle_concurrent_pointerarray_map( struct mulle_concurrent_pointerarray *l
 #pragma mark -
 #pragma mark various functions, no parameter checks
 
-int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
+void  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
                                           unsigned int size,
                                           struct mulle_allocator *allocator);
 void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *array);
@@ -219,7 +221,7 @@ void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray
 unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array);
 unsigned int  _mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array);
 
-int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
+void  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
                                          void *value);
 
 void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *array,