
Remove superfluous whitespace. Avoid some analyzer warnings by guarding assert-only variables with #ifndef NDEBUG

Nat! authored on 31-03-2017 18:33:58
Showing 3 changed files
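
The #ifndef NDEBUG changes below all follow one pattern: sentinel is computed only so that an assert() can check it, so once NDEBUG disables assertions the variable is assigned but never read, and analyzers flag the dead store or unused variable. Guarding both the declaration and the assignment removes the code entirely in release builds. A minimal sketch of the pattern, using a made-up function (probe_sketch and its arguments are for illustration only, not part of the library):

#include <assert.h>

// Sketch only: mirrors the guarded-sentinel pattern from the commit below.
// With NDEBUG defined, the declaration, the assignment and the assert all
// disappear, leaving nothing for an unused-variable or dead-store check.
static unsigned int   probe_sketch( unsigned int hash, unsigned int mask)
{
   unsigned int   index;
#ifndef NDEBUG
   unsigned int   sentinel;

   sentinel = hash + mask + 1;   // read only by the assert in the loop
#endif

   index = hash;
   for(;;)
   {
      if( ! (index & mask))        // stand-in for "found a usable slot"
         return( index);
      ++index;
      assert( index != sentinel);  // must not scan past one full table
   }
}

The alternative of keeping the assignment and adding a (void) sentinel; cast would silence unused-variable warnings, but the computation would still remain in the source of release builds; the #ifndef NDEBUG guard removes the variable and the work together.
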
@@ -53,7 +53,7 @@ struct _mulle_concurrent_hashmapstorage
 {
    mulle_atomic_pointer_t   n_hashs;  // with possibly empty values
    uintptr_t                mask;     // easier to read from debugger if void * size
-   
+
    struct _mulle_concurrent_hashvaluepair  entries[ 1];
 };
 
@@ -70,17 +70,17 @@ static struct _mulle_concurrent_hashmapstorage *
                                            struct mulle_allocator *allocator)
 {
    struct _mulle_concurrent_hashmapstorage  *p;
-   
+
    assert( (~(n - 1) & n) == n);
-   
+
    if( n < 4)
       n = 4;
-   
+
    p = _mulle_allocator_calloc( allocator, 1, sizeof( struct _mulle_concurrent_hashvaluepair) * (n - 1) +
                              sizeof( struct _mulle_concurrent_hashmapstorage));
-   
+
    p->mask = n - 1;
-   
+
    /*
     * in theory, one should be able to use different values for NO_POINTER and
     * INVALID_POINTER
@@ -89,7 +89,7 @@ static struct _mulle_concurrent_hashmapstorage *
    {
       struct _mulle_concurrent_hashvaluepair   *q;
       struct _mulle_concurrent_hashvaluepair   *sentinel;
-      
+
       q        = p->entries;
       sentinel = &p->entries[ (unsigned int) p->mask];
       while( q <= sentinel)
@@ -99,7 +99,7 @@ static struct _mulle_concurrent_hashmapstorage *
          ++q;
       }
    }
-   
+
    return( p);
 }
 
@@ -109,7 +109,7 @@ static unsigned int
 {
    unsigned int   size;
    unsigned int   max;
-   
+
    size = (unsigned int) p->mask + 1;
    max  = size - (size >> 1);
    return( max);
@@ -121,10 +121,12 @@ static void   *_mulle_concurrent_hashmapstorage_lookup( struct _mulle_concurrent
 {
    struct _mulle_concurrent_hashvaluepair   *entry;
    unsigned int                             index;
+#ifndef NDEBUG
    unsigned int                             sentinel;
-   
+
+   sentinel = (unsigned int) hash + (unsigned int) p->mask + 1;
+#endif
    index    = (unsigned int) hash;
-   sentinel = index + (unsigned int) p->mask + 1;
 
    for(;;)
    {
@@ -132,10 +134,10 @@ static void   *_mulle_concurrent_hashmapstorage_lookup( struct _mulle_concurrent
 
       if( entry->hash == MULLE_CONCURRENT_NO_HASH)
         return( MULLE_CONCURRENT_NO_POINTER);
-      
+
      if( entry->hash == hash)
         return( _mulle_atomic_pointer_read( &entry->value));
-      
+
      ++index;
      assert( index != sentinel);  // can't happen we always leave space
   }
@@ -148,7 +150,7 @@ static struct _mulle_concurrent_hashvaluepair  *
 {
    struct _mulle_concurrent_hashvaluepair   *entry;
    struct _mulle_concurrent_hashvaluepair   *sentinel;
-   
+
    entry    = &p->entries[ *index];
    sentinel = &p->entries[ (unsigned int) p->mask + 1];
 
@@ -159,7 +161,7 @@ static struct _mulle_concurrent_hashvaluepair  *
          ++entry;
          continue;
       }
-      
+
       *index = (unsigned int) (entry - p->entries) + 1;
       return( entry);
    }
@@ -181,14 +183,17 @@ static int   _mulle_concurrent_hashmapstorage_insert( struct _mulle_concurrent_h
    struct _mulle_concurrent_hashvaluepair   *entry;
    void                               *found;
    unsigned int                       index;
+#ifndef NDEBUG
    unsigned int                       sentinel;
-   
+
+   sentinel = (unsigned int) hash + (unsigned int) p->mask + 1;
+#endif
+
    assert( hash != MULLE_CONCURRENT_NO_HASH);
    assert( value != MULLE_CONCURRENT_NO_POINTER && value != MULLE_CONCURRENT_INVALID_POINTER);
 
-   index    = (unsigned int) hash;
-   sentinel = (unsigned int) (index + (unsigned int) p->mask + 1);
-   
+   index = (unsigned int) hash;
+
    for(;;)
    {
       entry = &p->entries[ index & (unsigned int) p->mask];
@@ -208,10 +213,10 @@ static int   _mulle_concurrent_hashmapstorage_insert( struct _mulle_concurrent_h
             _mulle_atomic_pointer_increment( &p->n_hashs);
             entry->hash = hash;
          }
-         
+
          return( 0);
       }
-      
+
       ++index;
       assert( index != sentinel);  // can't happen we always leave space
    }
@@ -226,13 +231,16 @@ static int   _mulle_concurrent_hashmapstorage_put( struct _mulle_concurrent_hash
    void                                     *found;
    void                                     *expect;
    unsigned int                             index;
+#ifndef NDEBUG
    unsigned int                             sentinel;
-   
+
+   sentinel = (unsigned int) hash + (unsigned int) p->mask + 1;
+#endif
+
    assert( value);
 
-   index    = (unsigned int) hash;
-   sentinel = (unsigned int) (index + (unsigned int) p->mask + 1);
-   
+   index = (unsigned int) hash;
+
    for(;;)
    {
       entry = &p->entries[ index & (unsigned int) p->mask];
@@ -250,7 +258,7 @@ static int   _mulle_concurrent_hashmapstorage_put( struct _mulle_concurrent_hash
             expect = found;
          }
       }
-      
+
       if( entry->hash == MULLE_CONCURRENT_NO_HASH)
       {
          found = __mulle_atomic_pointer_compare_and_swap( &entry->value, value, MULLE_CONCURRENT_NO_POINTER);
@@ -263,10 +271,10 @@ static int   _mulle_concurrent_hashmapstorage_put( struct _mulle_concurrent_hash
 
          _mulle_atomic_pointer_increment( &p->n_hashs);
          entry->hash = hash;
-         
+
          return( 0);
       }
-   
+
       ++index;
       assert( index != sentinel);  // can't happen we always leave space
    }
@@ -280,10 +288,13 @@ static int   _mulle_concurrent_hashmapstorage_remove( struct _mulle_concurrent_h
    struct _mulle_concurrent_hashvaluepair   *entry;
    void                                     *found;
    unsigned int                             index;
+#ifndef NDEBUG
    unsigned int                             sentinel;
-   
-   index    = (unsigned int) hash;
-   sentinel = index + (unsigned int) p->mask + 1;
+
+   sentinel = (unsigned int) hash + (unsigned int) p->mask + 1;
+#endif
+
+   index = (unsigned int) hash;
    for(;;)
    {
       entry  = &p->entries[ index & (unsigned int) p->mask];
@@ -295,10 +306,10 @@ static int   _mulle_concurrent_hashmapstorage_remove( struct _mulle_concurrent_h
             return( EBUSY);
          return( found == value ? 0 : ENOENT);
       }
-      
+
       if( entry->hash == MULLE_CONCURRENT_NO_HASH)
         return( ENOENT);
-      
+
       ++index;
       assert( index != sentinel);  // can't happen we always leave space
    }
@@ -312,7 +323,7 @@ static void   _mulle_concurrent_hashmapstorage_copy( struct _mulle_concurrent_ha
    struct _mulle_concurrent_hashvaluepair   *p_last;
    void                                     *actual;
    void                                     *value;
-   
+
    p      = src->entries;
    p_last = &src->entries[ src->mask];
 
@@ -328,15 +339,15 @@ static void   _mulle_concurrent_hashmapstorage_copy( struct _mulle_concurrent_ha
             break;
          if( value == REDIRECT_VALUE)
            break;
-         
+
          // it's important that we copy over first so
         // No One Gets Left Behind
         _mulle_concurrent_hashmapstorage_put( dst, p->hash, value);
-         
+
         actual = __mulle_atomic_pointer_compare_and_swap( &p->value, REDIRECT_VALUE, value);
         if( actual == value)
            break;
-         
+
         value = actual;
      }
   }
@@ -351,10 +362,10 @@ int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
                                      struct mulle_allocator *allocator)
 {
    struct _mulle_concurrent_hashmapstorage   *storage;
-   
+
    if( ! allocator)
       allocator = &mulle_default_allocator;
-   
+
    assert( allocator->abafree && allocator->abafree != (int (*)()) abort);
    if( ! allocator->abafree || allocator->abafree == (int (*)()) abort)
       return( EINVAL);
@@ -367,7 +378,7 @@ int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
 
    _mulle_atomic_pointer_nonatomic_write( &map->storage.pointer, storage);
    _mulle_atomic_pointer_nonatomic_write( &map->next_storage.pointer, storage);
-   
+
    return( 0);
 }
 
@@ -393,7 +404,7 @@ void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
 unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map)
 {
    struct _mulle_concurrent_hashmapstorage   *p;
-   
+
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
    return( (unsigned int) p->mask + 1);
 }
@@ -418,7 +429,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
       alloced = _mulle_concurrent_alloc_hashmapstorage( ((unsigned int) p->mask + 1) * 2, map->allocator);
       if( ! alloced)
          return( ENOMEM);
-      
+
       // make this the next world, assume that's still set to 'p' (SIC)
       q = __mulle_atomic_pointer_compare_and_swap( &map->next_storage.pointer, alloced, p);
       if( q != p)
@@ -430,18 +441,18 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
       else
          q = alloced;
    }
-   
+
    // this thread can partake in copying
    _mulle_concurrent_hashmapstorage_copy( q, p);
-   
+
    // now update world, giving it the same value as 'next_world'
    previous = __mulle_atomic_pointer_compare_and_swap( &map->storage.pointer, q, p);
 
    // ok, if we succeed free old, if we fail alloced is
-   // already gone. this must be an ABA free 
+   // already gone. this must be an ABA free
    if( previous == p)
      _mulle_allocator_abafree( map->allocator, previous); // ABA!!
-   
+
    return( 0);
 }
 
@@ -451,7 +462,7 @@ void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
 {
    struct _mulle_concurrent_hashmapstorage   *p;
    void                                      *value;
-   
+
    // won't find invalid hash anyway
 retry:
    p     = _mulle_atomic_pointer_read( &map->storage.pointer);
@@ -475,18 +486,18 @@ static int   _mulle_concurrent_hashmap_search_next( struct mulle_concurrent_hash
    struct _mulle_concurrent_hashmapstorage   *p;
    struct _mulle_concurrent_hashvaluepair    *entry;
    void                                      *value;
-   
+
 retry:
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
    if( *expect_mask && (unsigned int) p->mask != *expect_mask)
       return( ECANCELED);
-   
+
    for(;;)
    {
       entry = _mulle_concurrent_hashmapstorage_next_pair( p, index);
       if( ! entry)
         return( 0);
-      
+
       value = _mulle_atomic_pointer_read( &entry->value);
       if( value == REDIRECT_VALUE)
       {
@@ -498,15 +509,15 @@ retry:
       if( value != MULLE_CONCURRENT_NO_POINTER)
         break;
    }
-   
+
    if( p_hash)
      *p_hash = entry->hash;
    if( p_value)
      *p_value = value;
-   
+
    if( ! *expect_mask)
      *expect_mask = (unsigned int) p->mask;
-   
+
    return( 1);
 }
 
@@ -528,21 +539,21 @@ int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
    unsigned int                              max;
 
    assert_hash_value( hash, value);
-   
+
 retry:
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
    assert( p);
 
    max = _mulle_concurrent_hashmapstorage_get_max_n_hashs( p);
    n   = (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &p->n_hashs);
-   
+
    if( n >= max)
    {
      if( _mulle_concurrent_hashmap_migrate_storage( map, p))
        return( ENOMEM);
     goto retry;
   }
-   
+
    switch( _mulle_concurrent_hashmapstorage_insert( p, hash, value))
    {
    case EEXIST :
@@ -579,16 +590,16 @@ int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
                                        void *value)
 {
    struct _mulle_concurrent_hashmapstorage   *p;
-   
+
    assert_hash_value( hash, value);
-   
+
 retry:
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
    switch( _mulle_concurrent_hashmapstorage_remove( p, hash, value))
    {
    case ENOENT :
     return( ENOENT);
-         
+
    case EBUSY  :
      if( _mulle_concurrent_hashmap_migrate_storage( map, p))
        return( ENOMEM);
@@ -623,12 +634,12 @@ int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapen
    int        rval;
    void       *value;
    intptr_t   hash;
-   
+
    rval = _mulle_concurrent_hashmap_search_next( rover->map, &rover->mask, &rover->index, &hash, &value);
-   
+
    if( rval != 1)
      return( rval);
-   
+
    if( p_hash)
      *p_hash = hash;
    if( p_value)
@@ -649,10 +660,10 @@ unsigned int  mulle_concurrent_hashmap_count( struct mulle_concurrent_hashmap *m
    unsigned int                                count;
    int                                         rval;
    struct mulle_concurrent_hashmapenumerator   rover;
-   
+
 retry:
    count = 0;
-   
+
    rover = mulle_concurrent_hashmap_enumerate( map);
    for(;;)
    {
@@ -665,11 +676,11 @@ retry:
 
       if( ! rval)
         break;
-   
+
       mulle_concurrent_hashmapenumerator_done( &rover);
       goto retry;
    }
-   
+
    mulle_concurrent_hashmapenumerator_done( &rover);
    return( count);
 }
@@ -679,12 +690,12 @@ void  *mulle_concurrent_hashmap_lookup_any( struct mulle_concurrent_hashmap *map
 {
    struct mulle_concurrent_hashmapenumerator  rover;
    void  *any;
-   
+
    any   = NULL;
-   
+
    rover = mulle_concurrent_hashmap_enumerate( map);
    _mulle_concurrent_hashmapenumerator_next( &rover, NULL, &any);
    mulle_concurrent_hashmapenumerator_done( &rover);
-   
+
    return( any);
 }
@@ -94,7 +94,7 @@ static inline void  mulle_concurrent_hashmap_done( struct mulle_concurrent_hashm
 static inline unsigned int  mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map)
 {
    unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map);
-   
+
    if( ! map)
      return( 0);
    return( _mulle_concurrent_hashmap_get_size( map));
@@ -164,11 +164,11 @@ struct mulle_concurrent_hashmapenumerator
 static inline struct mulle_concurrent_hashmapenumerator  mulle_concurrent_hashmap_enumerate( struct mulle_concurrent_hashmap *map)
 {
    struct mulle_concurrent_hashmapenumerator   rover;
-   
+
    rover.map   = map;
    rover.index = map ? 0 : (unsigned int) -1;
    rover.mask  = 0;
-   
+
    return( rover);
 }
 
@@ -93,7 +93,7 @@ static inline void  mulle_concurrent_pointerarray_done( struct mulle_concurrent_
 static inline unsigned int  mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
 {
    unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array);
-   
+
    if( ! array)
      return( 0);
    return( _mulle_concurrent_pointerarray_get_size( array));
@@ -149,10 +149,10 @@ static inline struct mulle_concurrent_pointerarrayenumerator
    mulle_concurrent_pointerarray_enumerate( struct mulle_concurrent_pointerarray *array)
 {
    struct mulle_concurrent_pointerarrayenumerator   rover;
-   
+
    rover.array = array;
    rover.index = array ? 0 : (unsigned int) -1;
-   
+
    return( rover);
 }
 
@@ -161,10 +161,10 @@ static inline struct mulle_concurrent_pointerarrayreverseenumerator
    mulle_concurrent_pointerarray_reverseenumerate( struct mulle_concurrent_pointerarray *array, unsigned int n)
 {
    struct mulle_concurrent_pointerarrayreverseenumerator   rover;
-   
+
    rover.array = array;
    rover.index = n;
-   
+
    return( rover);
 }
 