
Improve opaque atomic pointers for better debuggability by using a union.

Nat! authored on 12-04-2016 14:42:41
Showing 4 changed files
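
The pattern behind the diff, as a minimal sketch with hypothetical stand-in names (the real types live in mulle-thread and the headers below): an opaque atomic pointer renders in a debugger as a bare address, but overlaying it in a union with a typed struct pointer lets the debugger show the same bits fully typed, while all code keeps going through the atomic member.

   /* hypothetical stand-ins, for illustration only */
   typedef struct
   {
      void   *volatile   _v;            /* opaque: debuggers print a bare address */
   } my_atomic_pointer_t;

   struct my_storage
   {
      unsigned int   mask;              /* fields a debugger can now expand */
   };

   union my_atomic_storage
   {
      struct my_storage     *storage;   /* typed view, only for debugging */
      my_atomic_pointer_t   pointer;    /* the member all code actually uses */
   };

Both union members are pointer-sized, so they overlay exactly and the generated code does not change.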
... ...
@@ -369,8 +369,8 @@ int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
    map->allocator = allocator;
    storage        = _mulle_concurrent_alloc_hashmapstorage( size, allocator);
 
-   _mulle_atomic_pointer_nonatomic_write( &map->storage, storage);
-   _mulle_atomic_pointer_nonatomic_write( &map->next_storage, storage);
+   _mulle_atomic_pointer_nonatomic_write( &map->storage.pointer, storage);
+   _mulle_atomic_pointer_nonatomic_write( &map->next_storage.pointer, storage);
 
    if( ! storage)
       return( -1);
... ...
@@ -387,8 +387,8 @@ void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
    struct _mulle_concurrent_hashmapstorage   *next_storage;
    // ABA!
 
-   storage      = _mulle_atomic_pointer_nonatomic_read( &map->storage);
-   next_storage = _mulle_atomic_pointer_nonatomic_read( &map->next_storage);
+   storage      = _mulle_atomic_pointer_nonatomic_read( &map->storage.pointer);
+   next_storage = _mulle_atomic_pointer_nonatomic_read( &map->next_storage.pointer);
 
    _mulle_allocator_abafree( map->allocator, storage);
    if( storage != next_storage)
... ...
@@ -408,7 +408,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
 
    // check if we have a chance to succeed
    alloced = NULL;
-   q       = _mulle_atomic_pointer_read( &map->next_storage);
+   q       = _mulle_atomic_pointer_read( &map->next_storage.pointer);
    if( q == p)
    {
       // acquire new storage
... ...
@@ -417,7 +417,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
          return( -1);
       
       // make this the next world, assume that's still set to 'p' (SIC)
-      q = __mulle_atomic_pointer_compare_and_swap( &map->next_storage, alloced, p);
+      q = __mulle_atomic_pointer_compare_and_swap( &map->next_storage.pointer, alloced, p);
       if( q != p)
       {
          // someone else produced a next world, use that and get rid of 'alloced'
... ...
@@ -432,7 +432,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
    _mulle_concurrent_hashmapstorage_copy( q, p);
    
    // now update world, giving it the same value as 'next_world'
-   previous = __mulle_atomic_pointer_compare_and_swap( &map->storage, q, p);
+   previous = __mulle_atomic_pointer_compare_and_swap( &map->storage.pointer, q, p);
 
    // ok, if we succeed free old, if we fail alloced is
    // already gone. this must be an ABA free 
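
For orientation: the migration that the three hunks above touch is a two-step world swap. One compare-and-swap publishes a candidate next_storage, a second one flips storage itself; note that __mulle_atomic_pointer_compare_and_swap returns the previous value, not a success flag. A compressed, self-contained analogy using C11 atomics, with hypothetical names, not the library's code:

   /* hypothetical sketch of the two-CAS migration above, in C11 atomics */
   #include <stdatomic.h>
   #include <stdlib.h>

   struct world { int contents; };

   static _Atomic( struct world *)   storage;
   static _Atomic( struct world *)   next_storage;

   static struct world   *migrate( struct world *p)
   {
      struct world   *alloced;
      struct world   *expect;
      struct world   *q;

      alloced = NULL;
      q       = atomic_load( &next_storage);
      if( q == p)
      {
         // acquire new storage
         alloced = calloc( 1, sizeof( struct world));
         if( ! alloced)
            return( NULL);

         // make this the next world, assume next_storage is still 'p'
         expect = p;
         if( atomic_compare_exchange_strong( &next_storage, &expect, alloced))
            q = alloced;
         else
         {
            // someone else produced a next world, use that and drop 'alloced'
            q = expect;
            free( alloced);
         }
      }

      /* ... copy the contents of 'p' into 'q' here ... */

      // now update the world, giving it the same value as the next world
      expect = p;
      if( atomic_compare_exchange_strong( &storage, &expect, q))
         free( p);   // winner retires the old world; the real code must ABA-free it

      return( q);
   }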
... ...
@@ -450,7 +450,7 @@ void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
    void                                    *value;
    
 retry:
-   p     = _mulle_atomic_pointer_read( &map->storage);
+   p     = _mulle_atomic_pointer_read( &map->storage.pointer);
    value = _mulle_concurrent_hashmapstorage_lookup( p, hash);
    if( value == REDIRECT_VALUE)
    {
... ...
@@ -475,7 +475,7 @@ static int   _mulle_concurrent_hashmap_search_next( struct mulle_concurrent_hash
    void                                     *value;
    
 retry:
-   p = _mulle_atomic_pointer_read( &map->storage);
+   p = _mulle_atomic_pointer_read( &map->storage.pointer);
    if( *expect_mask && p->mask != *expect_mask)
    {
       errno = ECANCELED;
... ...
@@ -525,7 +525,7 @@ int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
    assert( value != MULLE_CONCURRENT_NO_POINTER && value != MULLE_CONCURRENT_INVALID_POINTER);
    
 retry:
-   p = _mulle_atomic_pointer_read( &map->storage);
+   p = _mulle_atomic_pointer_read( &map->storage.pointer);
    assert( p);
 
    max = _mulle_concurrent_hashmapstorage_get_max_n_hashs( p);
... ...
@@ -561,7 +561,7 @@ int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
    struct _mulle_concurrent_hashmapstorage   *p;
    
 retry:
-   p = _mulle_atomic_pointer_read( &map->storage);
+   p = _mulle_atomic_pointer_read( &map->storage.pointer);
    switch( _mulle_concurrent_hashmapstorage_remove( p, hash, value))
    {
    case EBUSY  :
... ...
@@ -577,7 +577,7 @@ unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashma
 {
    struct _mulle_concurrent_hashmapstorage   *p;
    
-   p = _mulle_atomic_pointer_read( &map->storage);
+   p = _mulle_atomic_pointer_read( &map->storage.pointer);
    return( p->mask + 1);
 }
 
... ...
@@ -38,15 +38,24 @@
 #include <mulle_allocator/mulle_allocator.h>
 
 
+struct _mulle_concurrent_hashmapstorage;
+
+
+union mulle_concurrent_atomichashmapstorage_t
+{
+   struct _mulle_concurrent_hashmapstorage  *storage;
+   mulle_atomic_pointer_t                   pointer;
+};
+
 //
 // basically does: http://preshing.com/20160222/a-resizable-concurrent-map/
 // but is wait-free
 //
 struct mulle_concurrent_hashmap
 {
-   mulle_atomic_pointer_t   storage;
-   mulle_atomic_pointer_t   next_storage;
-   struct mulle_allocator   *allocator;
+   union mulle_concurrent_atomichashmapstorage_t   storage;
+   union mulle_concurrent_atomichashmapstorage_t   next_storage;
+   struct mulle_allocator                          *allocator;
 };
 
 int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
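
What the union buys in practice: the concurrency is untouched, since every read and compare-and-swap in the implementation goes through &map->storage.pointer exactly as before, just one member deeper. Inspection is what improves. A hypothetical debugger exchange (lldb shown; the second command is the new, typed view):

   (lldb) p map->storage.pointer
   (lldb) p *map->storage.storage

The first still prints the opaque wrapper around a raw address; the second now displays a fully typed struct _mulle_concurrent_hashmapstorage whose fields, mask for example, the debugger can expand.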
... ...
@@ -188,8 +188,8 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
    array->allocator = allocator;
    storage          = _mulle_concurrent_alloc_pointerarraystorage( size, allocator);
    
-   _mulle_atomic_pointer_nonatomic_write( &array->storage, storage);
-   _mulle_atomic_pointer_nonatomic_write( &array->next_storage, storage);
+   _mulle_atomic_pointer_nonatomic_write( &array->storage.pointer, storage);
+   _mulle_atomic_pointer_nonatomic_write( &array->next_storage.pointer, storage);
    
    if( ! storage)
       return( -1);
... ...
@@ -205,8 +205,8 @@ void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray
    struct _mulle_concurrent_pointerarraystorage   *storage;
    struct _mulle_concurrent_pointerarraystorage   *next_storage;
    
-   storage      = _mulle_atomic_pointer_nonatomic_read( &array->storage);
-   next_storage = _mulle_atomic_pointer_nonatomic_read( &array->next_storage);
+   storage      = _mulle_atomic_pointer_nonatomic_read( &array->storage.pointer);
+   next_storage = _mulle_atomic_pointer_nonatomic_read( &array->next_storage.pointer);
    
    _mulle_allocator_abafree( array->allocator, storage);
    if( storage != next_storage)
... ...
@@ -226,7 +226,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
    
    // acquire new storage
    alloced = NULL;
-   q       = _mulle_atomic_pointer_read( &array->next_storage);
+   q       = _mulle_atomic_pointer_read( &array->next_storage.pointer);
 
    assert( q);
    
... ...
@@ -237,7 +237,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
          return( -1);
 
       // make this the next world, assume that's still set to 'p' (SIC)
-      q = __mulle_atomic_pointer_compare_and_swap( &array->next_storage, alloced, p);
+      q = __mulle_atomic_pointer_compare_and_swap( &array->next_storage.pointer, alloced, p);
       if( q != p)
       {
          // someone else produced a next world, use that and get rid of 'alloced'
... ...
@@ -252,7 +252,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
    _mulle_concurrent_pointerarraystorage_copy( q, p);
    
    // now update world, giving it the same value as 'next_world'
-   previous = __mulle_atomic_pointer_compare_and_swap( &array->storage, q, p);
+   previous = __mulle_atomic_pointer_compare_and_swap( &array->storage.pointer, q, p);
 
    // ok, if we succeed free old, if we fail alloced is
    // already gone
... ...
@@ -270,7 +270,7 @@ void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray
    void                                     *value;
    
 retry:
-   p     = _mulle_atomic_pointer_read( &array->storage);
+   p     = _mulle_atomic_pointer_read( &array->storage.pointer);
    value = _mulle_concurrent_pointerarraystorage_get( p, index);
    if( value == REDIRECT_VALUE)
    {
... ...
@@ -291,7 +291,7 @@ int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *a
    assert( value != REDIRECT_VALUE);
    
 retry:
-   p = _mulle_atomic_pointer_read( &array->storage);
+   p = _mulle_atomic_pointer_read( &array->storage.pointer);
    switch( _mulle_concurrent_pointerarraystorage_add( p, value))
    {
    case EBUSY   :
... ...
@@ -309,7 +309,7 @@ unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_p
 {
    struct _mulle_concurrent_pointerarraystorage   *p;
    
-   p = _mulle_atomic_pointer_read( &array->storage);
+   p = _mulle_atomic_pointer_read( &array->storage.pointer);
    return( p->size);
 }
 
... ...
@@ -324,7 +324,7 @@ unsigned int  mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_p
    if( ! array)
       return( 0);
    
-   p = _mulle_atomic_pointer_read( &array->storage);
+   p = _mulle_atomic_pointer_read( &array->storage.pointer);
    return( (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &p->n));
 }
 
... ...
@@ -38,11 +38,21 @@
 #include <mulle_allocator/mulle_allocator.h>
 
 
+struct _mulle_concurrent_pointerarraystorage;
+
+
+union mulle_concurrent_atomicpointerarraystorage_t
+{
+   struct _mulle_concurrent_pointerarraystorage  *storage;
+   mulle_atomic_pointer_t                        pointer;
+};
+
+
 struct mulle_concurrent_pointerarray
 {
-   mulle_atomic_pointer_t   storage;
-   mulle_atomic_pointer_t   next_storage;
-   struct mulle_allocator   *allocator;
+   union mulle_concurrent_atomicpointerarraystorage_t   storage;
+   union mulle_concurrent_atomicpointerarraystorage_t   next_storage;
+   struct mulle_allocator                               *allocator;
 };
 
 int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,