Browse code

Adapt to changes in mulle_allocator.

Nat! authored on 07-03-2016 23:51:09
Showing 11 changed files
... ...
@@ -12,7 +12,7 @@ include_directories( SYSTEM
12 12
 dependencies/include
13 13
 )
14 14
 
15
-#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -g -DDEBUG -DMULLE_ABA_TRACE_LIST -DMULLE_ABA_TRACE_SWAP")
15
+#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -g -DDEBUG ")
16 16
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3 -g -DNDEBUG")
17 17
 
18 18
 set( HEADERS
... ...
@@ -20,7 +20,7 @@ src/pointerarray/mulle_concurrent_pointerarray.h
20 20
 src/hashmap/mulle_concurrent_hashmap.h
21 21
 )
22 22
 
23
-add_library( mulle_container
23
+add_library( mulle_concurrent
24 24
 src/pointerarray/mulle_concurrent_pointerarray.c
25 25
 src/hashmap/mulle_concurrent_hashmap.c
26 26
 )
... ...
@@ -1,8 +1,9 @@
1 1
 ## `mulle_concurrent_pointerlist`
2 2
 
3
-> Taking the definitions here from [concurrencyfreaks.blogspot.de](http://concurrencyfreaks.blogspot.de/2013/05/lock-free-and-wait-free-definition-and.html)
3
+> Most of the ideas are taken from [Preshing on Programming](http://preshing.com/20160222/a-resizable-concurrent-map/). 
4
+> The definition of concurrent and wait-free are from [concurrencyfreaks.blogspot.de](http://concurrencyfreaks.blogspot.de/2013/05/lock-free-and-wait-free-definition-and.html)
4 5
 
5
-A growing array of pointers, that is wait-free. 
6
+A growing array of pointers, that is **wait-free**. 
6 7
 
7 8
 Here is a simple usage example:
8 9
 ```
... ...
@@ -16,7 +17,7 @@ static void   test( void)
16 17
    unsigned int                                     i;
17 18
    void                                             *value;
18 19
 
19
-   _mulle_concurrent_pointerarray_init( &map, 0, mulle_aba_as_allocator());
20
+   _mulle_concurrent_pointerarray_init( &map, 0, NULL);
20 21
 
21 22
    value = (void *) 0x1848;
22 23
 
... ...
@@ -31,7 +32,7 @@ static void   test( void)
31 32
    }
32 33
    _mulle_concurrent_pointerarrayenumerator_done( &rover);
33 34
 
34
-   _mulle_concurrent_pointerarray_free( &map);
35
+   _mulle_concurrent_pointerarray_done( &map);
35 36
 }
36 37
 
37 38
 
... ...
@@ -51,7 +52,7 @@ int   main( void)
51 52
 
52 53
 ## `mulle_concurrent_hashmap`
53 54
 
54
-A mutable map of pointers, indexed by a hash, that is wait-free.
55
+A mutable map of pointers, indexed by a hash, that is **wait-free**.
55 56
 
56 57
 Here is also a simple usage example:
57 58
 
... ...
@@ -60,13 +61,13 @@ Here is a also a simple usage example:
60 61
 
61 62
 static void  test( void)
62 63
 {
63
-   intptr_t                                     hash;
64
+   intptr_t                                    hash;
64 65
    struct mulle_concurrent_hashmap             map;
65 66
    struct mulle_concurrent_hashmapenumerator   rover;
66 67
    unsigned int                                i;
67 68
    void                                        *value;
68
-
69
-   _mulle_concurrent_hashmap_init( &map, 0, mulle_aba_as_allocator());
69
+   
70
+   _mulle_concurrent_hashmap_init( &map, 0, NULL);
70 71
    {
71 72
       _mulle_concurrent_hashmap_insert( &map, 100000, (void *) 0x1848);
72 73
       value =  _mulle_concurrent_hashmap_lookup( &map, 100000);
... ...
@@ -88,13 +89,13 @@ static void  test( void)
88 89
       value =  _mulle_concurrent_hashmap_lookup( &map, 100000);
89 90
       printf( "%s\n", value == (void *) 0x1848 ? "unexpected" : "expected");
90 91
    }
91
-   _mulle_concurrent_hashmap_free( &map);
92
+   _mulle_concurrent_hashmap_done( &map);
92 93
 }
93 94
 
94 95
 
95 96
 int   main( void)
96 97
 {
97
-   mulle_aba_init( &mulle_default_allocator);
98
+   mulle_aba_init( NULL);
98 99
    mulle_aba_register();
99 100
 
100 101
    test();
... ...
@@ -108,53 +109,11 @@ int   main( void)
108 109
 
109 110
 ## ABA
110 111
 
111
-This library assumes that the allocator you give it is smart enough to solve
112
-the ABA problem when freeing memory.
113
-
114
-If you use `mulle_aba` you can ask it to act as an allocator (call 
115
-`mulle_aba_as_allocator()`). If you don't want to use `mulle_aba`, create an 
116
-allocator like this for your own scheme:
117
-
118
-
119
-```
120
-struct my_aba_allocator
121
-{
122
-   struct mulle_allocator  allocator;
123
-   void                    *my_aba;
124
-};
125
-
126
-
127
-static void  *my_calloc( struct mulle_allocator *allocator,
128
-                         size_t  n,
129
-                         size_t  size)
130
-{
131
-   return( calloc( n, size));
132
-}
112
+This library assumes that the allocator you give it has a vector installed
113
+for 'abafree' that is smart enough to solve the ABA problem when freeing memory.
133 114
 
115
+> Hint: Use [mulle-aba](https://www.mulle-kybernetik.com/weblog/2015/mulle_aba_release.html) for that.
134 116
 
135
-static void  *my_realloc( struct mulle_allocator *allocator,
136
-                          void  *block,
137
-                          size_t  size)
138
-{
139
-   return( realloc( block, size));
140
-}
141
-
142
-
143
-static void  my_free( struct mulle_allocator *allocator,
144
-                      void *pointer)
145
-{
146
-   struct my_aba_allocator   *p;
147
-   
148
-   p = (struct my_aba_allocator *) allocator;
149
-   clever_free( p->my_aba, pointer);
150
-}
151
-
152
-struct my_aba_allocator    my_aba_allocator = 
153
-{
154
-   { my_calloc, my_realloc, my_free, 1 }, &clever_struct
155
-};
156
-
157
-```
158 117
 
159 118
 ## Dependencies
160 119
 
... ...
@@ -1,7 +1,11 @@
1
+# v0.2
2
+
3
+* Adapt to changes in `mulle_allocator` and `mulle_aba`
4
+
1 5
 # v0.1
2 6
 
3
-Remove dependency on `mulle_aba` for the pure library.
4
-Rename _free to _done.
7
+* Remove dependency on `mulle_aba` for the pure library.
8
+* Rename _free to _done.
5 9
 
6 10
 # v0.0
7 11
 
... ...
@@ -7,6 +7,18 @@
7 7
 	objects = {
8 8
 
9 9
 /* Begin PBXAggregateTarget section */
10
+		41AD75F51C8E33E600B18C35 /* Tests */ = {
11
+			isa = PBXAggregateTarget;
12
+			buildConfigurationList = 41AD75F81C8E33E600B18C35 /* Build configuration list for PBXAggregateTarget "Tests" */;
13
+			buildPhases = (
14
+			);
15
+			dependencies = (
16
+				41AD75FA1C8E33F700B18C35 /* PBXTargetDependency */,
17
+				41AD75FC1C8E33F700B18C35 /* PBXTargetDependency */,
18
+			);
19
+			name = Tests;
20
+			productName = Tests;
21
+		};
10 22
 		41CAEAC81C8D95A6003C2C7B /* Libraries */ = {
11 23
 			isa = PBXAggregateTarget;
12 24
 			buildConfigurationList = 41CAEAC91C8D95A6003C2C7B /* Build configuration list for PBXAggregateTarget "Libraries" */;
... ...
@@ -44,6 +56,20 @@
44 56
 /* End PBXBuildFile section */
45 57
 
46 58
 /* Begin PBXContainerItemProxy section */
59
+		41AD75F91C8E33F700B18C35 /* PBXContainerItemProxy */ = {
60
+			isa = PBXContainerItemProxy;
61
+			containerPortal = 414785241ABAF290002DBAE4 /* Project object */;
62
+			proxyType = 1;
63
+			remoteGlobalIDString = 41CAEAE61C8D9FF4003C2C7B;
64
+			remoteInfo = "test-hashmap";
65
+		};
66
+		41AD75FB1C8E33F700B18C35 /* PBXContainerItemProxy */ = {
67
+			isa = PBXContainerItemProxy;
68
+			containerPortal = 414785241ABAF290002DBAE4 /* Project object */;
69
+			proxyType = 1;
70
+			remoteGlobalIDString = 41CAEAF61C8DA20F003C2C7B;
71
+			remoteInfo = "test-pointerarray";
72
+		};
47 73
 		41CAEACC1C8D95B0003C2C7B /* PBXContainerItemProxy */ = {
48 74
 			isa = PBXContainerItemProxy;
49 75
 			containerPortal = 414785241ABAF290002DBAE4 /* Project object */;
... ...
@@ -353,6 +379,9 @@
353 379
 				LastUpgradeCheck = 0640;
354 380
 				ORGANIZATIONNAME = "Mulle kybernetiK";
355 381
 				TargetAttributes = {
382
+					41AD75F51C8E33E600B18C35 = {
383
+						CreatedOnToolsVersion = 7.2.1;
384
+					};
356 385
 					41CAEAC31C8D9599003C2C7B = {
357 386
 						CreatedOnToolsVersion = 7.2.1;
358 387
 					};
... ...
@@ -386,6 +415,7 @@
386 415
 				41CAEAC81C8D95A6003C2C7B /* Libraries */,
387 416
 				41F14FE51B68F098002189F1 /* mulle_concurrent */,
388 417
 				41CAEAC31C8D9599003C2C7B /* mulle_standalone_concurrent */,
418
+				41AD75F51C8E33E600B18C35 /* Tests */,
389 419
 				41CAEAE61C8D9FF4003C2C7B /* test-hashmap */,
390 420
 				41CAEAF61C8DA20F003C2C7B /* test-pointerarray */,
391 421
 			);
... ...
@@ -429,6 +459,16 @@
429 459
 /* End PBXSourcesBuildPhase section */
430 460
 
431 461
 /* Begin PBXTargetDependency section */
462
+		41AD75FA1C8E33F700B18C35 /* PBXTargetDependency */ = {
463
+			isa = PBXTargetDependency;
464
+			target = 41CAEAE61C8D9FF4003C2C7B /* test-hashmap */;
465
+			targetProxy = 41AD75F91C8E33F700B18C35 /* PBXContainerItemProxy */;
466
+		};
467
+		41AD75FC1C8E33F700B18C35 /* PBXTargetDependency */ = {
468
+			isa = PBXTargetDependency;
469
+			target = 41CAEAF61C8DA20F003C2C7B /* test-pointerarray */;
470
+			targetProxy = 41AD75FB1C8E33F700B18C35 /* PBXContainerItemProxy */;
471
+		};
432 472
 		41CAEACD1C8D95B0003C2C7B /* PBXTargetDependency */ = {
433 473
 			isa = PBXTargetDependency;
434 474
 			target = 41F14FE51B68F098002189F1 /* mulle_concurrent */;
... ...
@@ -461,7 +501,7 @@
461 501
 			isa = XCBuildConfiguration;
462 502
 			baseConfigurationReference = 417C02651BD8404E005A3751 /* Debug.xcconfig */;
463 503
 			buildSettings = {
464
-				CURRENT_PROJECT_VERSION = 0.1;
504
+				CURRENT_PROJECT_VERSION = 0.2;
465 505
 			};
466 506
 			name = Debug;
467 507
 		};
... ...
@@ -469,7 +509,21 @@
469 509
 			isa = XCBuildConfiguration;
470 510
 			baseConfigurationReference = 417C02681BD8404E005A3751 /* Release.xcconfig */;
471 511
 			buildSettings = {
472
-				CURRENT_PROJECT_VERSION = 0.1;
512
+				CURRENT_PROJECT_VERSION = 0.2;
513
+			};
514
+			name = Release;
515
+		};
516
+		41AD75F61C8E33E600B18C35 /* Debug */ = {
517
+			isa = XCBuildConfiguration;
518
+			buildSettings = {
519
+				PRODUCT_NAME = "$(TARGET_NAME)";
520
+			};
521
+			name = Debug;
522
+		};
523
+		41AD75F71C8E33E600B18C35 /* Release */ = {
524
+			isa = XCBuildConfiguration;
525
+			buildSettings = {
526
+				PRODUCT_NAME = "$(TARGET_NAME)";
473 527
 			};
474 528
 			name = Release;
475 529
 		};
... ...
@@ -485,6 +539,8 @@
485 539
 					"-force_load",
486 540
 					"$(DEPENDENCIES_DIR)/lib/$(CONFIGURATION)/libmulle_test_allocator.a",
487 541
 					"-force_load",
542
+					"$(DEPENDENCIES_DIR)/lib/$(CONFIGURATION)/libmulle_aba.a",
543
+					"-force_load",
488 544
 					"$(CONFIGURATION_BUILD_DIR)/libmulle_concurrent.a",
489 545
 				);
490 546
 			};
... ...
@@ -502,6 +558,8 @@
502 558
 					"-force_load",
503 559
 					"$(DEPENDENCIES_DIR)/lib/$(CONFIGURATION)/libmulle_test_allocator.a",
504 560
 					"-force_load",
561
+					"$(DEPENDENCIES_DIR)/lib/$(CONFIGURATION)/libmulle_aba.a",
562
+					"-force_load",
505 563
 					"$(CONFIGURATION_BUILD_DIR)/libmulle_concurrent.a",
506 564
 				);
507 565
 			};
... ...
@@ -589,6 +647,15 @@
589 647
 			defaultConfigurationIsVisible = 0;
590 648
 			defaultConfigurationName = Release;
591 649
 		};
650
+		41AD75F81C8E33E600B18C35 /* Build configuration list for PBXAggregateTarget "Tests" */ = {
651
+			isa = XCConfigurationList;
652
+			buildConfigurations = (
653
+				41AD75F61C8E33E600B18C35 /* Debug */,
654
+				41AD75F71C8E33E600B18C35 /* Release */,
655
+			);
656
+			defaultConfigurationIsVisible = 0;
657
+			defaultConfigurationName = Release;
658
+		};
592 659
 		41CAEAC51C8D9599003C2C7B /* Build configuration list for PBXNativeTarget "mulle_standalone_concurrent" */ = {
593 660
 			isa = XCConfigurationList;
594 661
 			buildConfigurations = (
... ...
@@ -37,6 +37,7 @@
37 37
 #include <assert.h>
38 38
 #include <errno.h>
39 39
 #include <stdint.h>
40
+#include <stdlib.h>
40 41
 
41 42
 
42 43
 struct _mulle_concurrent_hashvaluepair
... ...
@@ -46,7 +47,7 @@ struct _mulle_concurrent_hashvaluepair
46 47
 };
47 48
 
48 49
 
49
-struct mulle_concurrent_hashmapstorage
50
+struct _mulle_concurrent_hashmapstorage
50 51
 {
51 52
    mulle_atomic_pointer_t   n_hashs;  // with possibly empty values
52 53
    unsigned int             mask;
... ...
@@ -62,17 +63,17 @@ struct mulle_concurrent_hashmapstorage
62 63
 
63 64
 
64 65
 // n must be a power of 2
65
-static struct mulle_concurrent_hashmapstorage *
66
+static struct _mulle_concurrent_hashmapstorage *
66 67
    _mulle_concurrent_alloc_hashmapstorage( unsigned int n,
67 68
                                            struct mulle_allocator *allocator)
68 69
 {
69
-   struct mulle_concurrent_hashmapstorage  *p;
70
+   struct _mulle_concurrent_hashmapstorage  *p;
70 71
    
71 72
    if( n < 8)
72 73
       n = 8;
73 74
    
74 75
    p = _mulle_allocator_calloc( allocator, 1, sizeof( struct _mulle_concurrent_hashvaluepair) * (n - 1) +
75
-                             sizeof( struct mulle_concurrent_hashmapstorage));
76
+                             sizeof( struct _mulle_concurrent_hashmapstorage));
76 77
    if( ! p)
77 78
       return( p);
78 79
    
... ...
@@ -102,7 +103,7 @@ static struct mulle_concurrent_hashmapstorage *
102 103
 
103 104
 
104 105
 static unsigned int
105
-   _mulle_concurrent_hashmapstorage_get_max_n_hashs( struct mulle_concurrent_hashmapstorage *p)
106
+   _mulle_concurrent_hashmapstorage_get_max_n_hashs( struct _mulle_concurrent_hashmapstorage *p)
106 107
 {
107 108
    unsigned int   size;
108 109
    unsigned int   max;
... ...
@@ -113,7 +114,7 @@ static unsigned int
113 114
 }
114 115
 
115 116
 
116
-static void   *_mulle_concurrent_hashmapstorage_lookup( struct mulle_concurrent_hashmapstorage *p,
117
+static void   *_mulle_concurrent_hashmapstorage_lookup( struct _mulle_concurrent_hashmapstorage *p,
117 118
                                                         intptr_t hash)
118 119
 {
119 120
    struct _mulle_concurrent_hashvaluepair   *entry;
... ...
@@ -140,7 +141,7 @@ static void   *_mulle_concurrent_hashmapstorage_lookup( struct mulle_concurrent_
140 141
 
141 142
 
142 143
 static struct _mulle_concurrent_hashvaluepair  *
143
-    _mulle_concurrent_hashmapstorage_next_pair( struct mulle_concurrent_hashmapstorage *p,
144
+    _mulle_concurrent_hashmapstorage_next_pair( struct _mulle_concurrent_hashmapstorage *p,
144 145
                                                 unsigned int *index)
145 146
 {
146 147
    struct _mulle_concurrent_hashvaluepair   *entry;
... ...
@@ -171,7 +172,7 @@ static struct _mulle_concurrent_hashvaluepair  *
171 172
 //  EEXIST : key already exists (can't replace currently)
172 173
 //  EBUSY  : this storage can't be written to
173 174
 //
174
-static int   _mulle_concurrent_hashmapstorage_insert( struct mulle_concurrent_hashmapstorage *p,
175
+static int   _mulle_concurrent_hashmapstorage_insert( struct _mulle_concurrent_hashmapstorage *p,
175 176
                                                       intptr_t hash,
176 177
                                                       void *value)
177 178
 {
... ...
@@ -215,7 +216,7 @@ static int   _mulle_concurrent_hashmapstorage_insert( struct mulle_concurrent_ha
215 216
 }
216 217
 
217 218
 
218
-static int   _mulle_concurrent_hashmapstorage_put( struct mulle_concurrent_hashmapstorage *p,
219
+static int   _mulle_concurrent_hashmapstorage_put( struct _mulle_concurrent_hashmapstorage *p,
219 220
                                                    intptr_t hash,
220 221
                                                    void *value)
221 222
 {
... ...
@@ -270,7 +271,7 @@ static int   _mulle_concurrent_hashmapstorage_put( struct mulle_concurrent_hashm
270 271
 }
271 272
 
272 273
 
273
-static int   _mulle_concurrent_hashmapstorage_remove( struct mulle_concurrent_hashmapstorage *p,
274
+static int   _mulle_concurrent_hashmapstorage_remove( struct _mulle_concurrent_hashmapstorage *p,
274 275
                                                       intptr_t hash,
275 276
                                                       void *value)
276 277
 {
... ...
@@ -308,8 +309,8 @@ static int   _mulle_concurrent_hashmapstorage_remove( struct mulle_concurrent_ha
308 309
 }
309 310
 
310 311
 
311
-static void   _mulle_concurrent_hashmapstorage_copy( struct mulle_concurrent_hashmapstorage *dst,
312
-                                                     struct mulle_concurrent_hashmapstorage *src)
312
+static void   _mulle_concurrent_hashmapstorage_copy( struct _mulle_concurrent_hashmapstorage *dst,
313
+                                                     struct _mulle_concurrent_hashmapstorage *src)
313 314
 {
314 315
    struct _mulle_concurrent_hashvaluepair   *p;
315 316
    struct _mulle_concurrent_hashvaluepair   *p_last;
... ...
@@ -356,7 +357,8 @@ int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
356 357
    if( ! allocator)
357 358
       allocator = &mulle_default_allocator;
358 359
    
359
-   if( ! allocator->mode)
360
+   assert( allocator->abafree && allocator->abafree != (void *) abort);
361
+   if( ! allocator->abafree || allocator->abafree == (void *) abort)
360 362
    {
361 363
       errno = ENXIO;
362 364
       return( -1);
... ...
@@ -381,19 +383,19 @@ int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
381 383
 void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
382 384
 {
383 385
    // ABA!
384
-   _mulle_allocator_free( map->allocator, map->storage);
386
+   _mulle_allocator_abafree( map->allocator, map->storage);
385 387
    if( map->storage != map->next_storage)
386
-      _mulle_allocator_free( map->allocator, map->next_storage);
388
+      _mulle_allocator_abafree( map->allocator, map->next_storage);
387 389
 }
388 390
 
389 391
 
390 392
 static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_hashmap *map,
391
-                                                       struct mulle_concurrent_hashmapstorage *p)
393
+                                                       struct _mulle_concurrent_hashmapstorage *p)
392 394
 {
393 395
 
394
-   struct mulle_concurrent_hashmapstorage   *q;
395
-   struct mulle_concurrent_hashmapstorage   *alloced;
396
-   struct mulle_concurrent_hashmapstorage   *previous;
396
+   struct _mulle_concurrent_hashmapstorage   *q;
397
+   struct _mulle_concurrent_hashmapstorage   *alloced;
398
+   struct _mulle_concurrent_hashmapstorage   *previous;
397 399
 
398 400
    assert( p);
399 401
 
... ...
@@ -412,7 +414,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
412 414
       if( q != p)
413 415
       {
414 416
          // someone else produced a next world, use that and get rid of 'alloced'
415
-         _mulle_allocator_free( map->allocator, alloced);  // ABA!!
417
+         _mulle_allocator_abafree( map->allocator, alloced);  // ABA!!
416 418
          alloced = NULL;
417 419
       }
418 420
       else
... ...
@@ -428,7 +430,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
428 430
    // ok, if we succeed free old, if we fail alloced is
429 431
    // already gone. this must be an ABA free (use mulle_aba as allocator)
430 432
    if( previous == p)
431
-      _mulle_allocator_free( map->allocator, previous); // ABA!!
433
+      _mulle_allocator_abafree( map->allocator, previous); // ABA!!
432 434
    
433 435
    return( 0);
434 436
 }
... ...
@@ -437,7 +439,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
437 439
 void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
438 440
                                          intptr_t hash)
439 441
 {
440
-   struct mulle_concurrent_hashmapstorage   *p;
442
+   struct _mulle_concurrent_hashmapstorage   *p;
441 443
    void                                    *value;
442 444
    
443 445
 retry:
... ...
@@ -459,7 +461,7 @@ static int   _mulle_concurrent_hashmap_search_next( struct mulle_concurrent_hash
459 461
                                                     intptr_t *p_hash,
460 462
                                                     void **p_value)
461 463
 {
462
-   struct mulle_concurrent_hashmapstorage   *p;
464
+   struct _mulle_concurrent_hashmapstorage   *p;
463 465
    struct _mulle_concurrent_hashvaluepair   *entry;
464 466
    void                                     *value;
465 467
    
... ...
@@ -505,7 +507,7 @@ int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
505 507
                                        intptr_t hash,
506 508
                                        void *value)
507 509
 {
508
-   struct mulle_concurrent_hashmapstorage   *p;
510
+   struct _mulle_concurrent_hashmapstorage   *p;
509 511
    unsigned int                              n;
510 512
    unsigned int                              max;
511 513
 
... ...
@@ -547,7 +549,7 @@ int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
547 549
                                        intptr_t hash,
548 550
                                        void *value)
549 551
 {
550
-   struct mulle_concurrent_hashmapstorage   *p;
552
+   struct _mulle_concurrent_hashmapstorage   *p;
551 553
    
552 554
 retry:
553 555
    p = _mulle_atomic_pointer_read( &map->storage);
... ...
@@ -564,7 +566,7 @@ retry:
564 566
 
565 567
 unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map)
566 568
 {
567
-   struct mulle_concurrent_hashmapstorage   *p;
569
+   struct _mulle_concurrent_hashmapstorage   *p;
568 570
    
569 571
    p = _mulle_atomic_pointer_read( &map->storage);
570 572
    return( p->mask + 1);
... ...
@@ -35,7 +35,7 @@
35 35
 #define mulle_concurrent_h__
36 36
 
37 37
 
38
-#define MULLE_CONCURRENT_VERSION  ((0 << 20) | (1 << 8) | 0)
38
+#define MULLE_CONCURRENT_VERSION  ((0 << 20) | (2 << 8) | 0)
39 39
 
40 40
 
41 41
 #include "mulle_concurrent_hashmap.h"
... ...
@@ -37,9 +37,10 @@
37 37
 #include <assert.h>
38 38
 #include <errno.h>
39 39
 #include <stdint.h>
40
+#include <stdlib.h>
40 41
 
41 42
 
42
-struct mulle_concurrent_pointerarraystorage
43
+struct _mulle_concurrent_pointerarraystorage
43 44
 {
44 45
    mulle_atomic_pointer_t   n;
45 46
    unsigned int             size;
... ...
@@ -62,17 +63,17 @@ struct mulle_concurrent_pointerarraystorage
62 63
 
63 64
 
64 65
 // n must be a power of 2
65
-static struct mulle_concurrent_pointerarraystorage *
66
+static struct _mulle_concurrent_pointerarraystorage *
66 67
    _mulle_concurrent_alloc_pointerarraystorage( unsigned int n,
67 68
                                                 struct mulle_allocator *allocator)
68 69
 {
69
-   struct mulle_concurrent_pointerarraystorage  *p;
70
+   struct _mulle_concurrent_pointerarraystorage  *p;
70 71
    
71 72
    if( n < 8)
72 73
       n = 8;
73 74
    
74 75
    p = _mulle_allocator_calloc( allocator, 1, sizeof( void *) * (n - 1) +
75
-                             sizeof( struct mulle_concurrent_pointerarraystorage));
76
+                             sizeof( struct _mulle_concurrent_pointerarraystorage));
76 77
    if( ! p)
77 78
       return( p);
78 79
    p->size = n;
... ...
@@ -99,7 +100,7 @@ static struct mulle_concurrent_pointerarraystorage *
99 100
 }
100 101
 
101 102
 
102
-static void   *_mulle_concurrent_pointerarraystorage_get( struct mulle_concurrent_pointerarraystorage *p,
103
+static void   *_mulle_concurrent_pointerarraystorage_get( struct _mulle_concurrent_pointerarraystorage *p,
103 104
                                                     unsigned int i)
104 105
 {
105 106
    assert( i < (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &p->n));
... ...
@@ -114,7 +115,7 @@ static void   *_mulle_concurrent_pointerarraystorage_get( struct mulle_concurren
114 115
 //  EBUSY  : this storage can't be written to
115 116
 //  ENOSPC : storage is full
116 117
 //
117
-static int   _mulle_concurrent_pointerarraystorage_add( struct mulle_concurrent_pointerarraystorage *p,
118
+static int   _mulle_concurrent_pointerarraystorage_add( struct _mulle_concurrent_pointerarraystorage *p,
118 119
                                                   void *value)
119 120
 {
120 121
    void           *found;
... ...
@@ -142,8 +143,8 @@ static int   _mulle_concurrent_pointerarraystorage_add( struct mulle_concurrent_
142 143
 }
143 144
 
144 145
 
145
-static void   _mulle_concurrent_pointerarraystorage_copy( struct mulle_concurrent_pointerarraystorage *dst,
146
-                                                    struct mulle_concurrent_pointerarraystorage *src)
146
+static void   _mulle_concurrent_pointerarraystorage_copy( struct _mulle_concurrent_pointerarraystorage *dst,
147
+                                                    struct _mulle_concurrent_pointerarraystorage *src)
147 148
 {
148 149
    mulle_atomic_pointer_t   *p;
149 150
    mulle_atomic_pointer_t   *p_last;
... ...
@@ -174,7 +175,9 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
174 175
    if( ! allocator)
175 176
       allocator = &mulle_default_allocator;
176 177
 
177
-   if( ! allocator->mode)
178
+   assert( allocator->abafree && allocator->abafree != (void *) abort);
179
+   
180
+   if( ! allocator->abafree || allocator->abafree == (void *) abort)
178 181
    {
179 182
       errno = ENXIO;
180 183
       return( -1);
... ...
@@ -195,25 +198,28 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
195 198
 //
196 199
 void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *array)
197 200
 {
198
-   _mulle_allocator_free( array->allocator, array->storage);
201
+   _mulle_allocator_abafree( array->allocator, array->storage);
199 202
    if( array->storage != array->next_storage)
200
-      _mulle_allocator_free( array->allocator, array->next_storage);
203
+      _mulle_allocator_abafree( array->allocator, array->next_storage);
201 204
 }
202 205
 
203 206
 
204 207
 static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurrent_pointerarray *array,
205
-                                                      struct mulle_concurrent_pointerarraystorage *p)
208
+                                                      struct _mulle_concurrent_pointerarraystorage *p)
206 209
 {
207 210
 
208
-   struct mulle_concurrent_pointerarraystorage   *q;
209
-   struct mulle_concurrent_pointerarraystorage   *alloced;
210
-   struct mulle_concurrent_pointerarraystorage   *previous;
211
+   struct _mulle_concurrent_pointerarraystorage   *q;
212
+   struct _mulle_concurrent_pointerarraystorage   *alloced;
213
+   struct _mulle_concurrent_pointerarraystorage   *previous;
211 214
 
212 215
    assert( p);
213 216
    
214 217
    // acquire new storage
215 218
    alloced = NULL;
216 219
    q       = _mulle_atomic_pointer_read( &array->next_storage);
220
+
221
+   assert( q);
222
+   
217 223
    if( q == p)
218 224
    {
219 225
       alloced = _mulle_concurrent_alloc_pointerarraystorage( p->size * 2, array->allocator);
... ...
@@ -225,7 +231,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
225 231
       if( q != p)
226 232
       {
227 233
          // someone else produced a next world, use that and get rid of 'alloced'
228
-         _mulle_allocator_free( array->allocator, alloced);
234
+         _mulle_allocator_abafree( array->allocator, alloced);
229 235
          alloced = NULL;
230 236
       }
231 237
       else
... ...
@@ -241,7 +247,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
241 247
    // ok, if we succeed free old, if we fail alloced is
242 248
    // already gone
243 249
    if( previous == p)
244
-      _mulle_allocator_free( array->allocator, previous);
250
+      _mulle_allocator_abafree( array->allocator, previous);
245 251
    
246 252
    return( 0);
247 253
 }
... ...
@@ -250,7 +256,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
250 256
 void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *array,
251 257
                                            unsigned int index)
252 258
 {
253
-   struct mulle_concurrent_pointerarraystorage   *p;
259
+   struct _mulle_concurrent_pointerarraystorage   *p;
254 260
    void                                     *value;
255 261
    
256 262
 retry:
... ...
@@ -269,7 +275,7 @@ retry:
269 275
 int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
270 276
                                          void *value)
271 277
 {
272
-   struct mulle_concurrent_pointerarraystorage   *p;
278
+   struct _mulle_concurrent_pointerarraystorage   *p;
273 279
 
274 280
    assert( value);
275 281
    assert( value != REDIRECT_VALUE);
... ...
@@ -291,7 +297,7 @@ retry:
291 297
 
292 298
 unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
293 299
 {
294
-   struct mulle_concurrent_pointerarraystorage   *p;
300
+   struct _mulle_concurrent_pointerarraystorage   *p;
295 301
    
296 302
    p = _mulle_atomic_pointer_read( &array->storage);
297 303
    return( p->size);
... ...
@@ -303,7 +309,7 @@ unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_p
303 309
 //
304 310
 unsigned int  mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array)
305 311
 {
306
-   struct mulle_concurrent_pointerarraystorage   *p;
312
+   struct _mulle_concurrent_pointerarraystorage   *p;
307 313
    
308 314
    if( ! array)
309 315
       return( 0);
... ...
@@ -5,7 +5,32 @@
5 5
 //  Created by Nat! on 04.03.16.
6 6
 //  Copyright © 2016 Mulle kybernetiK. All rights reserved.
7 7
 //
8
-
8
+//  Redistribution and use in source and binary forms, with or without
9
+//  modification, are permitted provided that the following conditions are met:
10
+//
11
+//  Redistributions of source code must retain the above copyright notice, this
12
+//  list of conditions and the following disclaimer.
13
+//
14
+//  Redistributions in binary form must reproduce the above copyright notice,
15
+//  this list of conditions and the following disclaimer in the documentation
16
+//  and/or other materials provided with the distribution.
17
+//
18
+//  Neither the name of Mulle kybernetiK nor the names of its contributors
19
+//  may be used to endorse or promote products derived from this software
20
+//  without specific prior written permission.
21
+//
22
+//  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23
+//  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24
+//  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25
+//  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
26
+//  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27
+//  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28
+//  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29
+//  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30
+//  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31
+//  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32
+//  POSSIBILITY OF SUCH DAMAGE.
33
+//
9 34
 #include <mulle_standalone_concurrent/mulle_standalone_concurrent.h>
10 35
 
11 36
 #include <mulle_test_allocator/mulle_test_allocator.h>
... ...
@@ -144,8 +169,9 @@ static void  multi_threaded_test( unsigned int n_threads)
144 169
    assert( n_threads <= 32);
145 170
 
146 171
    mulle_aba_init( &mulle_test_allocator);
172
+   mulle_allocator_set_aba( &mulle_test_allocator, mulle_aba_get_global(), (void *) _mulle_aba_free);
147 173
 
148
-   _mulle_concurrent_hashmap_init( &map, 0, mulle_aba_as_allocator());
174
+   _mulle_concurrent_hashmap_init( &map, 0, &mulle_test_allocator);
149 175
 
150 176
    {
151 177
       for( i = 0; i < n_threads; i++)
... ...
@@ -165,6 +191,7 @@ static void  multi_threaded_test( unsigned int n_threads)
165 191
    _mulle_concurrent_hashmap_done( &map);
166 192
    mulle_aba_unregister();
167 193
 
194
+   mulle_allocator_set_aba( &mulle_test_allocator, NULL, NULL);
168 195
    mulle_aba_done();
169 196
 }
170 197
 
... ...
@@ -178,9 +205,12 @@ static void  single_threaded_test( void)
178 205
    void                                        *value;
179 206
 
180 207
    mulle_aba_init( &mulle_test_allocator);
208
+
209
+   mulle_allocator_set_aba( &mulle_test_allocator,  mulle_aba_get_global(), (void *) _mulle_aba_free);
210
+   
181 211
    mulle_aba_register();
182 212
 
183
-   _mulle_concurrent_hashmap_init( &map, 0, mulle_aba_as_allocator());
213
+   _mulle_concurrent_hashmap_init( &map, 0, &mulle_test_allocator);
184 214
    {
185 215
       for( i = 1; i <= 100; i++)
186 216
       {
... ...
@@ -227,6 +257,9 @@ static void  single_threaded_test( void)
227 257
    _mulle_concurrent_hashmap_done( &map);
228 258
 
229 259
    mulle_aba_unregister();
260
+   
261
+   mulle_allocator_set_aba( &mulle_test_allocator, NULL, NULL);
262
+   
230 263
    mulle_aba_done();
231 264
 }
232 265
 
... ...
@@ -1,14 +1,15 @@
1 1
 #include <mulle_standalone_concurrent/mulle_standalone_concurrent.h>
2 2
 
3
-static void  test( void)
3
+
4
+static void   test( void)
4 5
 {
5
-   intptr_t                                     hash;
6
+   intptr_t                                    hash;
6 7
    struct mulle_concurrent_hashmap             map;
7 8
    struct mulle_concurrent_hashmapenumerator   rover;
8 9
    unsigned int                                i;
9 10
    void                                        *value;
10 11
 
11
-   _mulle_concurrent_hashmap_init( &map, 0, NULL, NULL);
12
+   _mulle_concurrent_hashmap_init( &map, 0, NULL);
12 13
    {
13 14
       _mulle_concurrent_hashmap_insert( &map, 100000, (void *) 0x1848);
14 15
       value =  _mulle_concurrent_hashmap_lookup( &map, 100000);
... ...
@@ -27,17 +28,16 @@ static void  test( void)
27 28
 
28 29
       _mulle_concurrent_hashmap_remove( &map, 100000, (void *) 0x1848);
29 30
 
30
-      value =  _mulle_concurrent_hashmap_lookup( &map, 100000);
31
+      value = _mulle_concurrent_hashmap_lookup( &map, 100000);
31 32
       printf( "%s\n", value == (void *) 0x1848 ? "unexpected" : "expected");
32 33
    }
33
-   _mulle_concurrent_hashmap_free( &map);
34
+   _mulle_concurrent_hashmap_done( &map);
34 35
 }
35 36
 
36 37
 
37
-
38 38
 int   main( void)
39 39
 {
40
-   mulle_aba_init( &mulle_default_allocator);
40
+   mulle_aba_init( NULL);
41 41
    mulle_aba_register();
42 42
 
43 43
    test();
... ...
@@ -5,7 +5,32 @@
5 5
 //  Created by Nat! on 06.03.16.
6 6
 //  Copyright © 2016 Mulle kybernetiK. All rights reserved.
7 7
 //
8
-
8
+//  Redistribution and use in source and binary forms, with or without
9
+//  modification, are permitted provided that the following conditions are met:
10
+//
11
+//  Redistributions of source code must retain the above copyright notice, this
12
+//  list of conditions and the following disclaimer.
13
+//
14
+//  Redistributions in binary form must reproduce the above copyright notice,
15
+//  this list of conditions and the following disclaimer in the documentation
16
+//  and/or other materials provided with the distribution.
17
+//
18
+//  Neither the name of Mulle kybernetiK nor the names of its contributors
19
+//  may be used to endorse or promote products derived from this software
20
+//  without specific prior written permission.
21
+//
22
+//  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23
+//  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24
+//  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25
+//  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
26
+//  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27
+//  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28
+//  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29
+//  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30
+//  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31
+//  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32
+//  POSSIBILITY OF SUCH DAMAGE.
33
+//
9 34
 #include <mulle_standalone_concurrent/mulle_standalone_concurrent.h>
10 35
 
11 36
 #include <mulle_test_allocator/mulle_test_allocator.h>
... ...
@@ -116,15 +141,15 @@ static void  tester( struct mulle_concurrent_pointerarray *map)
116 141
 static void  multi_threaded_test( unsigned int n_threads)
117 142
 {
118 143
    struct mulle_concurrent_pointerarray   map;
119
-   mulle_thread_t                             threads[ 32];
120
-   unsigned int                               i;
121
-
144
+   mulle_thread_t                         threads[ 32];
145
+   unsigned int                           i;
122 146
 
123 147
    assert( n_threads <= 32);
124 148
 
125 149
    mulle_aba_init( &mulle_test_allocator);
126
-
127
-   _mulle_concurrent_pointerarray_init( &map, 0, mulle_aba_as_allocator());
150
+   mulle_allocator_set_aba( &mulle_test_allocator, mulle_aba_get_global(), (void *) _mulle_aba_free);
151
+   
152
+   _mulle_concurrent_pointerarray_init( &map, 0, &mulle_test_allocator);
128 153
 
129 154
    {
130 155
       for( i = 0; i < n_threads; i++)
... ...
@@ -144,6 +169,8 @@ static void  multi_threaded_test( unsigned int n_threads)
144 169
    _mulle_concurrent_pointerarray_done( &map);
145 170
    mulle_aba_unregister();
146 171
 
172
+   mulle_allocator_set_aba( &mulle_test_allocator, NULL, NULL);
173
+
147 174
    mulle_aba_done();
148 175
 }
149 176
 
... ...
@@ -152,13 +179,16 @@ static void  single_threaded_test( void)
152 179
 {
153 180
    struct mulle_concurrent_pointerarray             map;
154 181
    struct mulle_concurrent_pointerarrayenumerator   rover;
155
-   unsigned int                                i;
156
-   void                                        *value;
182
+   unsigned int                                     i;
183
+   void                                             *value;
157 184
 
158 185
    mulle_aba_init( &mulle_test_allocator);
186
+   
187
+   mulle_allocator_set_aba( &mulle_test_allocator,  mulle_aba_get_global(), (void *) _mulle_aba_free);
188
+   
159 189
    mulle_aba_register();
160 190
 
161
-   _mulle_concurrent_pointerarray_init( &map, 0, mulle_aba_as_allocator());
191
+   _mulle_concurrent_pointerarray_init( &map, 0, &mulle_test_allocator);
162 192
    {
163 193
       for( i = 1; i <= 100; i++)
164 194
       {
... ...
@@ -188,6 +218,9 @@ static void  single_threaded_test( void)
188 218
    _mulle_concurrent_pointerarray_done( &map);
189 219
 
190 220
    mulle_aba_unregister();
221
+   
222
+   mulle_allocator_set_aba( &mulle_test_allocator, NULL, NULL);
223
+
191 224
    mulle_aba_done();
192 225
 }
193 226
 
... ...
@@ -8,7 +8,7 @@ static void   test( void)
8 8
    unsigned int                                     i;
9 9
    void                                             *value;
10 10
 
11
-   _mulle_concurrent_pointerarray_init( &map, 0, NULL, NULL);
11
+   _mulle_concurrent_pointerarray_init( &map, 0, NULL);
12 12
    {
13 13
       value = (void *) 0x1848;
14 14
 
... ...
@@ -23,13 +23,13 @@ static void   test( void)
23 23
       }
24 24
       _mulle_concurrent_pointerarrayenumerator_done( &rover);
25 25
    }
26
-   _mulle_concurrent_pointerarray_free( &map);
26
+   _mulle_concurrent_pointerarray_done( &map);
27 27
 }
28 28
 
29 29
 
30 30
 int   main( void)
31 31
 {
32
-   mulle_aba_init( &mulle_default_allocator);
32
+   mulle_aba_init( NULL);
33 33
    mulle_aba_register();
34 34
 
35 35
    test();