
Improve by avoiding definitely superfluous mallocs during migration. Rename some copy/paste variable names for better readability.

Nat! authored on 07-03-2016 19:29:29
Showing 3 changed files
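
Both migrate functions change in the same way: instead of unconditionally allocating a candidate "next world" and freeing it again whenever the compare-and-swap loses, the code now reads next_storage first and only pays for the allocation when this thread still has a chance to install its candidate. A minimal sketch of that pattern, using the hashmap identifiers from the first hunk below (not a verbatim excerpt; error handling trimmed for brevity):

   // old: allocate first, CAS, free the allocation again if another thread won
   alloced = _mulle_concurrent_alloc_hashmapstorage( (p->mask + 1) * 2, map->allocator);
   q       = __mulle_atomic_pointer_compare_and_swap( &map->next_storage, alloced, p);
   if( q != p)
      _mulle_allocator_free( map->allocator, alloced);   // superfluous malloc/free

   // new: read next_storage first; allocate only if it is still 'p'
   q = _mulle_atomic_pointer_read( &map->next_storage);
   if( q == p)
   {
      alloced = _mulle_concurrent_alloc_hashmapstorage( (p->mask + 1) * 2, map->allocator);
      q       = __mulle_atomic_pointer_compare_and_swap( &map->next_storage, alloced, p);
      if( q != p)
         _mulle_allocator_free( map->allocator, alloced);   // lost the race after all
      else
         q = alloced;
   }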
... ...
@@ -396,22 +396,28 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
    struct mulle_concurrent_hashmapstorage   *previous;
 
    assert( p);
-   
-   // acquire new storage
-   alloced = _mulle_concurrent_alloc_hashmapstorage( (p->mask + 1) * 2, map->allocator);
-   if( ! alloced)
-      return( -1);
 
-   // make this the next world, assume that's still set to 'p' (SIC)
-   q = __mulle_atomic_pointer_compare_and_swap( &map->next_storage, alloced, p);
-   if( q != p)
+   // check if we have a chance to succeed
+   alloced = NULL;
+   q       = _mulle_atomic_pointer_read( &map->next_storage);
+   if( q == p)
    {
-      // someone else produced a next world, use that and get rid of 'alloced'
-      _mulle_allocator_free( map->allocator, alloced);  // ABA!!
-      alloced = NULL;
+      // acquire new storage
+      alloced = _mulle_concurrent_alloc_hashmapstorage( (p->mask + 1) * 2, map->allocator);
+      if( ! alloced)
+         return( -1);
+      
+      // make this the next world, assume that's still set to 'p' (SIC)
+      q = __mulle_atomic_pointer_compare_and_swap( &map->next_storage, alloced, p);
+      if( q != p)
+      {
+         // someone else produced a next world, use that and get rid of 'alloced'
+         _mulle_allocator_free( map->allocator, alloced);  // ABA!!
+         alloced = NULL;
+      }
+      else
+         q = alloced;
    }
-   else
-      q = alloced;
    
    // this thread can partake in copying
    _mulle_concurrent_hashmapstorage_copy( q, p);
... ...
@@ -167,7 +167,7 @@ static void   _mulle_concurrent_pointerarraystorage_copy( struct mulle_concurren
 #pragma mark -
 #pragma mark _mulle_concurrent_pointerarray
 
-int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *map,
+int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
                                           unsigned int size,
                                           struct mulle_allocator *allocator)
 {
... ...
@@ -180,11 +180,11 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
       return( -1);
    }
    
-   map->allocator    = allocator;
-   map->storage      = _mulle_concurrent_alloc_pointerarraystorage( size, allocator);
-   map->next_storage = map->storage;
+   array->allocator    = allocator;
+   array->storage      = _mulle_concurrent_alloc_pointerarraystorage( size, allocator);
+   array->next_storage = array->storage;
    
-   if( ! map->storage)
+   if( ! array->storage)
       return( -1);
    return( 0);
 }
... ...
@@ -193,15 +193,15 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
 //
 // this is called when you know, no other threads are accessing it anymore
 //
-void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *map)
+void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *array)
 {
-   _mulle_allocator_free( map->allocator, map->storage);
-   if( map->storage != map->next_storage)
-      _mulle_allocator_free( map->allocator, map->next_storage);
+   _mulle_allocator_free( array->allocator, array->storage);
+   if( array->storage != array->next_storage)
+      _mulle_allocator_free( array->allocator, array->next_storage);
 }
 
 
-static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurrent_pointerarray *map,
+static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurrent_pointerarray *array,
                                                       struct mulle_concurrent_pointerarraystorage *p)
 {
 
... ...
@@ -212,48 +212,53 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
    assert( p);
    
    // acquire new storage
-   alloced = _mulle_concurrent_alloc_pointerarraystorage( p->size * 2, map->allocator);
-   if( ! alloced)
-      return( -1);
-
-   // make this the next world, assume that's still set to 'p' (SIC)
-   q = __mulle_atomic_pointer_compare_and_swap( &map->next_storage, alloced, p);
-   if( q != p)
+   alloced = NULL;
+   q       = _mulle_atomic_pointer_read( &array->next_storage);
+   if( q == p)
    {
-      // someone else produced a next world, use that and get rid of 'alloced'
-      _mulle_allocator_free( map->allocator, alloced);
-      alloced = NULL;
+      alloced = _mulle_concurrent_alloc_pointerarraystorage( p->size * 2, array->allocator);
+      if( ! alloced)
+         return( -1);
+
+      // make this the next world, assume that's still set to 'p' (SIC)
+      q = __mulle_atomic_pointer_compare_and_swap( &array->next_storage, alloced, p);
+      if( q != p)
+      {
+         // someone else produced a next world, use that and get rid of 'alloced'
+         _mulle_allocator_free( array->allocator, alloced);
+         alloced = NULL;
+      }
+      else
+         q = alloced;
    }
-   else
-      q = alloced;
    
    // this thread can partake in copying
    _mulle_concurrent_pointerarraystorage_copy( q, p);
    
    // now update world, giving it the same value as 'next_world'
-   previous = __mulle_atomic_pointer_compare_and_swap( &map->storage, q, p);
+   previous = __mulle_atomic_pointer_compare_and_swap( &array->storage, q, p);
 
    // ok, if we succeed free old, if we fail alloced is
    // already gone
    if( previous == p)
-      _mulle_allocator_free( map->allocator, previous);
+      _mulle_allocator_free( array->allocator, previous);
    
    return( 0);
 }
 
 
-void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *map,
-                                     unsigned int index)
+void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *array,
+                                           unsigned int index)
 {
    struct mulle_concurrent_pointerarraystorage   *p;
    void                                     *value;
    
 retry:
-   p     = _mulle_atomic_pointer_read( &map->storage);
+   p     = _mulle_atomic_pointer_read( &array->storage);
    value = _mulle_concurrent_pointerarraystorage_get( p, index);
    if( value == REDIRECT_VALUE)
    {
-      if( _mulle_concurrent_pointerarray_migrate_storage( map, p))
+      if( _mulle_concurrent_pointerarray_migrate_storage( array, p))
         return( MULLE_CONCURRENT_NO_POINTER);
      goto retry;
    }
... ...
@@ -261,8 +266,8 @@ retry:
 }
 
 
-int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *map,
-                                   void *value)
+int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
+                                         void *value)
 {
    struct mulle_concurrent_pointerarraystorage   *p;
 
... ...
@@ -270,12 +275,12 @@ int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *m
    assert( value != REDIRECT_VALUE);
    
 retry:
-   p = _mulle_atomic_pointer_read( &map->storage);
+   p = _mulle_atomic_pointer_read( &array->storage);
    switch( _mulle_concurrent_pointerarraystorage_add( p, value))
    {
    case EBUSY   :
    case ENOSPC  :
-      if( _mulle_concurrent_pointerarray_migrate_storage( map, p))
+      if( _mulle_concurrent_pointerarray_migrate_storage( array, p))
         return( -1);
      goto retry;
    }
... ...
@@ -284,11 +289,11 @@ retry:
 }
 
 
-unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *map)
+unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
 {
    struct mulle_concurrent_pointerarraystorage   *p;
    
-   p = _mulle_atomic_pointer_read( &map->storage);
+   p = _mulle_atomic_pointer_read( &array->storage);
    return( p->size);
 }
 
... ...
@@ -296,14 +301,14 @@ unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_p
 //
 // obviously just a snapshot at some recent point in time
 //
-unsigned int  mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *map)
+unsigned int  mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array)
 {
    struct mulle_concurrent_pointerarraystorage   *p;
    
-   if( ! map)
+   if( ! array)
       return( 0);
    
-   p = _mulle_atomic_pointer_read( &map->storage);
+   p = _mulle_atomic_pointer_read( &array->storage);
    return( (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &p->n));
 }
 
... ...
@@ -317,11 +322,11 @@ int  _mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_poin
    void           *value;
    unsigned int   n;
    
-   n = mulle_concurrent_pointerarray_get_count( rover->map);
+   n = mulle_concurrent_pointerarray_get_count( rover->array);
    if( rover->index >= n)
       return( 0);
    
-   value = _mulle_concurrent_pointerarray_get( rover->map, rover->index);
+   value = _mulle_concurrent_pointerarray_get( rover->array, rover->index);
    if( value == MULLE_CONCURRENT_NO_POINTER)
       return( -1);
 
... ...
@@ -341,7 +346,7 @@ int  _mulle_concurrent_pointerarrayreverseenumerator_next( struct mulle_concurre
    if( ! rover->index)
       return( 0);
    
-   value = _mulle_concurrent_pointerarray_get( rover->map, --rover->index);
+   value = _mulle_concurrent_pointerarray_get( rover->array, --rover->index);
    if( value == MULLE_CONCURRENT_NO_POINTER)
       return( -1);
 
... ...
@@ -353,15 +358,15 @@ int  _mulle_concurrent_pointerarrayreverseenumerator_next( struct mulle_concurre
 
 
 
-int   _mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *map,
+int   _mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *array,
                                        void *search)
 {
    struct mulle_concurrent_pointerarrayenumerator   rover;
-   int                                         found;
-   void                                        *value;
+   int                                              found;
+   void                                             *value;
    
    found = 0;
-   rover = mulle_concurrent_pointerarray_enumerate( map);
+   rover = mulle_concurrent_pointerarray_enumerate( array);
    while( _mulle_concurrent_pointerarrayenumerator_next( &rover, (void **) &value) == 1)
    {
       if( value == search)
... ...
@@ -47,24 +47,24 @@ struct mulle_concurrent_pointerarray
    struct mulle_allocator   *allocator;
 };
 
-int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *map,
+int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
                                           unsigned int size,
                                           struct mulle_allocator *allocator);
-void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *map);
+void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *array);
 
 
-int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *map,
+int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
                                          void *value);
 
-void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *map,
+void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *array,
                                            unsigned int n);
 
-int  _mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *map,
+int  _mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *array,
                                             void *value);
 
 
-unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *map);
-unsigned int  mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *map);
+unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array);
+unsigned int  mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array);
 
 
 #pragma mark -
... ...
@@ -72,38 +72,38 @@ unsigned int  mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_p
 
 struct mulle_concurrent_pointerarrayenumerator
 {
-   struct mulle_concurrent_pointerarray   *map;
+   struct mulle_concurrent_pointerarray   *array;
    unsigned int                            index;
 };
 
 struct mulle_concurrent_pointerarrayreverseenumerator
 {
-   struct mulle_concurrent_pointerarray   *map;
+   struct mulle_concurrent_pointerarray   *array;
    unsigned int                           index;
 };
 
 //
 // the specific retuned enumerator is only useable for the calling thread
-// if you remove stuff from the map, the enumerator will be unhappy and
-// stop (but will tell you). If the map grows, the rover is equally unhappy.
+// if you remove stuff from the array, the enumerator will be unhappy and
+// stop (but will tell you). If the array grows, the rover is equally unhappy.
 //
 static inline struct mulle_concurrent_pointerarrayenumerator
-   mulle_concurrent_pointerarray_enumerate( struct mulle_concurrent_pointerarray *map)
+   mulle_concurrent_pointerarray_enumerate( struct mulle_concurrent_pointerarray *array)
 {
    struct mulle_concurrent_pointerarrayenumerator   rover;
    
-   rover.map   = map;
-   rover.index = map ? 0 : (unsigned int) -1;
+   rover.array   = array;
+   rover.index = array ? 0 : (unsigned int) -1;
    
    return( rover);
 }
 
 static inline struct mulle_concurrent_pointerarrayreverseenumerator
-   _mulle_concurrent_pointerarray_reverseenumerate( struct mulle_concurrent_pointerarray *map, unsigned int n)
+   _mulle_concurrent_pointerarray_reverseenumerate( struct mulle_concurrent_pointerarray *array, unsigned int n)
 {
    struct mulle_concurrent_pointerarrayreverseenumerator   rover;
    
-   rover.map   = map;
+   rover.array   = array;
    rover.index = n;
    
    return( rover);
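
After the rename, the enumerator structs carry an array member instead of map; the loop in _mulle_concurrent_pointerarray_find above shows the intended usage. A minimal usage sketch, assuming 'array' was already set up with _mulle_concurrent_pointerarray_init:

   struct mulle_concurrent_pointerarrayenumerator   rover;
   void                                             *value;

   rover = mulle_concurrent_pointerarray_enumerate( array);
   while( _mulle_concurrent_pointerarrayenumerator_next( &rover, (void **) &value) == 1)
   {
      // 'value' is a pointer previously stored with _mulle_concurrent_pointerarray_add
   }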