Browse code

Remove dependency on aba, but make the allocator more clever. In the end mulle_aba is just a more clever kind of free. Rename some functions to be compatible with container naming.

Nat! authored on 07-03-2016 16:33:39
Showing 10 changed files
... ...
@@ -1,6 +1,8 @@
1 1
 ## `mulle_concurrent_pointerlist`
2 2
 
3
-A growing array of pointers. 
3
+> Taking the definitions here from [concurrencyfreaks.blogspot.de](http://concurrencyfreaks.blogspot.de/2013/05/lock-free-and-wait-free-definition-and.html)
4
+
5
+A growing array of pointers that is wait-free.
4 6
 
5 7
 Here is a simple usage example:
6 8
 ```
... ...
@@ -14,7 +16,7 @@ static void   test( void)
14 16
    unsigned int                                     i;
15 17
    void                                             *value;
16 18
 
17
-   _mulle_concurrent_pointerarray_init( &map, 0, NULL, NULL);
19
+   _mulle_concurrent_pointerarray_init( &map, 0, mulle_aba_as_allocator());
18 20
 
19 21
    value = (void *) 0x1848;
20 22
 
... ...
@@ -49,7 +51,7 @@ int   main( void)
49 51
 
50 52
 ## `mulle_concurrent_hashmap`
51 53
 
52
-A mutable map of pointers, indexed by a hash.
54
+A mutable map of pointers, indexed by a hash, that is wait-free.
53 55
 
54 56
 Here is also a simple usage example:
55 57
 
... ...
@@ -64,7 +66,7 @@ static void  test( void)
64 66
    unsigned int                                i;
65 67
    void                                        *value;
66 68
 
67
-   _mulle_concurrent_hashmap_init( &map, 0, NULL, NULL);
69
+   _mulle_concurrent_hashmap_init( &map, 0, mulle_aba_as_allocator());
68 70
    {
69 71
       _mulle_concurrent_hashmap_insert( &map, 100000, (void *) 0x1848);
70 72
       value =  _mulle_concurrent_hashmap_lookup( &map, 100000);
... ...
@@ -90,7 +92,6 @@ static void  test( void)
90 92
 }
91 93
 
92 94
 
93
-
94 95
 int   main( void)
95 96
 {
96 97
    mulle_aba_init( &mulle_default_allocator);
... ...
@@ -105,11 +106,61 @@ int   main( void)
105 106
 }
106 107
 ```
107 108
 
109
+## ABA
110
+
111
+This library assumes that the allocator you give it is smart enough to solve
112
+the ABA problem when freeing memory.
113
+
114
+If you use `mulle_aba` you can ask it to act as an allocator (call 
115
+`mulle_aba_as_allocator()`). If you don't want to use `mulle_aba`, create an 
116
+allocator like this for your own scheme:
117
+
118
+
119
+```
120
+struct my_aba_allocator
121
+{
122
+   struct mulle_allocator  allocator;
123
+   void                    *my_aba;
124
+};
125
+
126
+
127
+static void  *my_calloc( struct mulle_allocator *allocator,
128
+                         size_t  n,
129
+                         size_t  size)
130
+{
131
+   return( calloc( n, size));
132
+}
133
+
134
+
135
+static void  *my_realloc( struct mulle_allocator *allocator,
136
+                          void  *block,
137
+                          size_t  size)
138
+{
139
+   return( realloc( block, size));
140
+}
141
+
142
+
143
+static void  my_free( struct mulle_allocator *allocator,
144
+                      void *pointer)
145
+{
146
+   struct my_aba_allocator   *p;
147
+   
148
+   p = (struct my_aba_allocator *) allocator;
149
+   clever_free( p->my_aba, pointer);
150
+}
151
+
152
+struct my_aba_allocator    my_aba_allocator = 
153
+{
154
+   { my_calloc, my_realloc, my_free, 1 }, &clever_struct
155
+};
156
+
157
+```
158
+
108 159
 ## Dependencies
109 160
 
110 161
 * mulle_allocator
111 162
 * mulle_thread
112
-* mulle_aba
163
+* mulle_aba (for testing)
113 164
 
114 165
 ## How to build on OS X
115 166
 
116 167
new file mode 100644
... ...
@@ -0,0 +1,8 @@
1
+# v0.1
2
+
3
+Remove dependency on `mulle_aba` for the pure library.
4
+Rename _free to _done.
5
+
6
+# v0.0
7
+
8
+* Merciful Release
... ...
@@ -22,8 +22,9 @@
22 22
 /* End PBXAggregateTarget section */
23 23
 
24 24
 /* Begin PBXBuildFile section */
25
+		4184C47A1C8DCA56004CED2F /* libmulle_aba.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 41CAEAD31C8D95D4003C2C7B /* libmulle_aba.a */; };
26
+		4184C47B1C8DCA5A004CED2F /* libmulle_aba.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 41CAEAD31C8D95D4003C2C7B /* libmulle_aba.a */; };
25 27
 		41CAEAD21C8D95BB003C2C7B /* libmulle_concurrent.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 41F14FE61B68F098002189F1 /* libmulle_concurrent.a */; };
26
-		41CAEAD61C8D95D4003C2C7B /* libmulle_aba.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 41CAEAD31C8D95D4003C2C7B /* libmulle_aba.a */; };
27 28
 		41CAEAD81C8D95D4003C2C7B /* libmulle_allocator.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 41CAEAD51C8D95D4003C2C7B /* libmulle_allocator.a */; };
28 29
 		41CAEADA1C8D95ED003C2C7B /* mulle_concurrent.h in Headers */ = {isa = PBXBuildFile; fileRef = 41CAEAD91C8D95ED003C2C7B /* mulle_concurrent.h */; settings = {ATTRIBUTES = (Public, ); }; };
29 30
 		41CAEADC1C8D9601003C2C7B /* mulle_standalone_concurrent.h in Headers */ = {isa = PBXBuildFile; fileRef = 41CAEADB1C8D9601003C2C7B /* mulle_standalone_concurrent.h */; settings = {ATTRIBUTES = (Public, ); }; };
... ...
@@ -102,6 +103,7 @@
102 103
 		41CAEB031C8DA251003C2C7B /* pointerarray.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pointerarray.c; sourceTree = "<group>"; };
103 104
 		41CAEB051C8DA326003C2C7B /* libmulle_test_allocator.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libmulle_test_allocator.a; path = dependencies/lib/Debug/libmulle_test_allocator.a; sourceTree = "<group>"; };
104 105
 		41CAEB091C8DB97F003C2C7B /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = "<group>"; };
106
+		41D04AB71C8DD69000CC8F11 /* RELEASENOTES.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = RELEASENOTES.md; sourceTree = "<group>"; };
105 107
 		41D8BA7D1C8D92DE00186ECE /* mulle_concurrent_hashmap.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mulle_concurrent_hashmap.c; sourceTree = "<group>"; };
106 108
 		41D8BA7E1C8D92DE00186ECE /* mulle_concurrent_hashmap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mulle_concurrent_hashmap.h; sourceTree = "<group>"; };
107 109
 		41D8BA801C8D92DE00186ECE /* mulle_concurrent_pointerarray.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mulle_concurrent_pointerarray.c; sourceTree = "<group>"; };
... ...
@@ -114,7 +116,6 @@
114 116
 			isa = PBXFrameworksBuildPhase;
115 117
 			buildActionMask = 2147483647;
116 118
 			files = (
117
-				41CAEAD61C8D95D4003C2C7B /* libmulle_aba.a in Frameworks */,
118 119
 				41CAEAD81C8D95D4003C2C7B /* libmulle_allocator.a in Frameworks */,
119 120
 				41CAEB061C8DA326003C2C7B /* libmulle_test_allocator.a in Frameworks */,
120 121
 				41CAEAD21C8D95BB003C2C7B /* libmulle_concurrent.a in Frameworks */,
... ...
@@ -125,8 +126,9 @@
125 126
 			isa = PBXFrameworksBuildPhase;
126 127
 			buildActionMask = 2147483647;
127 128
 			files = (
128
-				41CAEB071C8DA518003C2C7B /* libmulle_test_allocator.a in Frameworks */,
129 129
 				41CAEAF21C8DA09C003C2C7B /* libmulle_standalone_concurrent.dylib in Frameworks */,
130
+				4184C47A1C8DCA56004CED2F /* libmulle_aba.a in Frameworks */,
131
+				41CAEB071C8DA518003C2C7B /* libmulle_test_allocator.a in Frameworks */,
130 132
 			);
131 133
 			runOnlyForDeploymentPostprocessing = 0;
132 134
 		};
... ...
@@ -136,6 +138,7 @@
136 138
 			files = (
137 139
 				41CAEB001C8DA224003C2C7B /* libmulle_standalone_concurrent.dylib in Frameworks */,
138 140
 				41CAEB081C8DA51E003C2C7B /* libmulle_test_allocator.a in Frameworks */,
141
+				4184C47B1C8DCA5A004CED2F /* libmulle_aba.a in Frameworks */,
139 142
 			);
140 143
 			runOnlyForDeploymentPostprocessing = 0;
141 144
 		};
... ...
@@ -153,6 +156,7 @@
153 156
 			isa = PBXGroup;
154 157
 			children = (
155 158
 				41CAEB091C8DB97F003C2C7B /* README.md */,
159
+				41D04AB71C8DD69000CC8F11 /* RELEASENOTES.md */,
156 160
 				411A28C11BCD446E00D39FF7 /* CMakeLists.txt */,
157 161
 				417C02631BD8404E005A3751 /* mulle-configuration */,
158 162
 				4147852E1ABAF290002DBAE4 /* src */,
... ...
@@ -457,6 +461,7 @@
457 461
 			isa = XCBuildConfiguration;
458 462
 			baseConfigurationReference = 417C02651BD8404E005A3751 /* Debug.xcconfig */;
459 463
 			buildSettings = {
464
+				CURRENT_PROJECT_VERSION = 0.1;
460 465
 			};
461 466
 			name = Debug;
462 467
 		};
... ...
@@ -464,6 +469,7 @@
464 469
 			isa = XCBuildConfiguration;
465 470
 			baseConfigurationReference = 417C02681BD8404E005A3751 /* Release.xcconfig */;
466 471
 			buildSettings = {
472
+				CURRENT_PROJECT_VERSION = 0.1;
467 473
 			};
468 474
 			name = Release;
469 475
 		};
... ...
@@ -504,14 +510,12 @@
504 510
 		41CAEACA1C8D95A6003C2C7B /* Debug */ = {
505 511
 			isa = XCBuildConfiguration;
506 512
 			buildSettings = {
507
-				PRODUCT_NAME = "$(TARGET_NAME)";
508 513
 			};
509 514
 			name = Debug;
510 515
 		};
511 516
 		41CAEACB1C8D95A6003C2C7B /* Release */ = {
512 517
 			isa = XCBuildConfiguration;
513 518
 			buildSettings = {
514
-				PRODUCT_NAME = "$(TARGET_NAME)";
515 519
 			};
516 520
 			name = Release;
517 521
 		};
... ...
@@ -592,6 +596,7 @@
592 596
 				41CAEAC71C8D9599003C2C7B /* Release */,
593 597
 			);
594 598
 			defaultConfigurationIsVisible = 0;
599
+			defaultConfigurationName = Release;
595 600
 		};
596 601
 		41CAEAC91C8D95A6003C2C7B /* Build configuration list for PBXAggregateTarget "Libraries" */ = {
597 602
 			isa = XCConfigurationList;
... ...
@@ -600,6 +605,7 @@
600 605
 				41CAEACB1C8D95A6003C2C7B /* Release */,
601 606
 			);
602 607
 			defaultConfigurationIsVisible = 0;
608
+			defaultConfigurationName = Release;
603 609
 		};
604 610
 		41CAEAEB1C8D9FF4003C2C7B /* Build configuration list for PBXNativeTarget "test-hashmap" */ = {
605 611
 			isa = XCConfigurationList;
... ...
@@ -608,6 +614,7 @@
608 614
 				41CAEAED1C8D9FF4003C2C7B /* Release */,
609 615
 			);
610 616
 			defaultConfigurationIsVisible = 0;
617
+			defaultConfigurationName = Release;
611 618
 		};
612 619
 		41CAEAFB1C8DA20F003C2C7B /* Build configuration list for PBXNativeTarget "test-pointerarray" */ = {
613 620
 			isa = XCConfigurationList;
... ...
@@ -616,6 +623,7 @@
616 623
 				41CAEAFD1C8DA20F003C2C7B /* Release */,
617 624
 			);
618 625
 			defaultConfigurationIsVisible = 0;
626
+			defaultConfigurationName = Release;
619 627
 		};
620 628
 		41F14FE71B68F098002189F1 /* Build configuration list for PBXNativeTarget "mulle_concurrent" */ = {
621 629
 			isa = XCConfigurationList;
... ...
@@ -34,7 +34,6 @@
34 34
 #include "mulle_concurrent_hashmap.h"
35 35
 
36 36
 #include "mulle_concurrent_types.h"
37
-#include <mulle_aba/mulle_aba.h>
38 37
 #include <assert.h>
39 38
 #include <errno.h>
40 39
 #include <stdint.h>
... ...
@@ -72,7 +71,7 @@ static struct mulle_concurrent_hashmapstorage *
72 71
    if( n < 8)
73 72
       n = 8;
74 73
    
75
-   p = allocator->calloc( 1, sizeof( struct _mulle_concurrent_hashvaluepair) * (n - 1) +
74
+   p = _mulle_allocator_calloc( allocator, 1, sizeof( struct _mulle_concurrent_hashvaluepair) * (n - 1) +
76 75
                              sizeof( struct mulle_concurrent_hashmapstorage));
77 76
    if( ! p)
78 77
       return( p);
... ...
@@ -314,8 +313,8 @@ static void   _mulle_concurrent_hashmapstorage_copy( struct mulle_concurrent_has
314 313
 {
315 314
    struct _mulle_concurrent_hashvaluepair   *p;
316 315
    struct _mulle_concurrent_hashvaluepair   *p_last;
317
-   void                                   *actual;
318
-   void                                   *value;
316
+   void                                     *actual;
317
+   void                                     *value;
319 318
    
320 319
    p      = src->entries;
321 320
    p_last = &src->entries[ src->mask];
... ...
@@ -352,16 +351,21 @@ static void   _mulle_concurrent_hashmapstorage_copy( struct mulle_concurrent_has
352 351
 
353 352
 int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
354 353
                                      unsigned int size,
355
-                                     struct mulle_allocator *allocator,
356
-                                     struct mulle_aba *aba)
354
+                                     struct mulle_allocator *allocator)
357 355
 {
358 356
    if( ! allocator)
359 357
       allocator = &mulle_default_allocator;
360
-   if( ! aba)
361
-      aba = mulle_aba_get_global();
358
+   
359
+   if( ! allocator->mode)
360
+   {
361
+      errno = ENXIO;
362
+      return( -1);
363
+   }
364
+
365
+   // use a smart mode allocator that supports ABA free. Preferably use
366
+   // mulle_aba_as_allocator()
362 367
    
363 368
    map->allocator    = allocator;
364
-   map->aba          = aba;
365 369
    map->storage      = _mulle_concurrent_alloc_hashmapstorage( size, allocator);
366 370
    map->next_storage = map->storage;
367 371
    
... ...
@@ -374,11 +378,12 @@ int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
374 378
 //
375 379
 // this is called when you know, no other threads are accessing it anymore
376 380
 //
377
-void  _mulle_concurrent_hashmap_free( struct mulle_concurrent_hashmap *map)
381
+void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
378 382
 {
379
-   _mulle_aba_free( map->aba, map->allocator->free, map->storage);
383
+   // ABA!
384
+   _mulle_allocator_free( map->allocator, map->storage);
380 385
    if( map->storage != map->next_storage)
381
-      _mulle_aba_free( map->aba, map->allocator->free, map->next_storage);
386
+      _mulle_allocator_free( map->allocator, map->next_storage);
382 387
 }
383 388
 
384 389
 
... ...
@@ -402,7 +407,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
402 407
    if( q != p)
403 408
    {
404 409
       // someone else produced a next world, use that and get rid of 'alloced'
405
-      _mulle_aba_free( map->aba, map->allocator->free, alloced);
410
+      _mulle_allocator_free( map->allocator, alloced);  // ABA!!
406 411
       alloced = NULL;
407 412
    }
408 413
    else
... ...
@@ -415,9 +420,9 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
415 420
    previous = __mulle_atomic_pointer_compare_and_swap( &map->storage, q, p);
416 421
 
417 422
    // ok, if we succeed free old, if we fail alloced is
418
-   // already gone
423
+   // already gone. this must be an ABA free (use mulle_aba as allocator)
419 424
    if( previous == p)
420
-      _mulle_aba_free( map->aba, map->allocator->free, previous);
425
+      _mulle_allocator_free( map->allocator, previous); // ABA!!
421 426
    
422 427
    return( 0);
423 428
 }
... ...
@@ -617,5 +622,3 @@ int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapen
617 622
 
618 623
    return( 1);
619 624
 }
620
-
621
-
... ...
@@ -38,26 +38,21 @@
38 38
 #include <mulle_allocator/mulle_allocator.h>
39 39
 
40 40
 
41
-struct mulle_aba;
42
-
43
-
44 41
 //
45 42
 // basically does: http://preshing.com/20160222/a-resizable-concurrent-map/
46
-// but is truely concurrent :)
43
+// but is wait-free
47 44
 //
48 45
 struct mulle_concurrent_hashmap
49 46
 {
50 47
    mulle_atomic_pointer_t   storage;
51 48
    mulle_atomic_pointer_t   next_storage;
52 49
    struct mulle_allocator   *allocator;
53
-   struct mulle_aba         *aba;
54 50
 };
55 51
 
56 52
 int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
57 53
                                      unsigned int size,
58
-                                     struct mulle_allocator *allocator,
59
-                                     struct mulle_aba *aba);
60
-void  _mulle_concurrent_hashmap_free( struct mulle_concurrent_hashmap *map);
54
+                                     struct mulle_allocator *allocator);
55
+void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map);
61 56
 
62 57
 
63 58
 int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
... ...
@@ -86,6 +81,7 @@ struct mulle_concurrent_hashmapenumerator
86 81
    unsigned int                      mask;
87 82
 };
88 83
 
84
+
89 85
 //
90 86
 // the specific retuned enumerator is only useable for the calling thread
91 87
 // if you remove stuff from the map, the enumerator will be unhappy and
... ...
@@ -35,13 +35,13 @@
35 35
 #define mulle_concurrent_h__
36 36
 
37 37
 
38
-#define MULLE_CONCURRENT_VERSION  ((0 << 20) | (0 << 8) | 0)
38
+#define MULLE_CONCURRENT_VERSION  ((0 << 20) | (1 << 8) | 0)
39 39
 
40 40
 
41 41
 #include "mulle_concurrent_hashmap.h"
42 42
 #include "mulle_concurrent_pointerarray.h"
43 43
 
44
-#if MULLE_ALLOCATOR_VERSION < ((0 << 20) | (1 << 8) | 0)
44
+#if MULLE_ALLOCATOR_VERSION < ((1 << 20) | (0 << 8) | 0)
45 45
 # error "mulle_allocator is too old"
46 46
 #endif
47 47
 #if MULLE_THREAD_VERSION < ((1 << 20) | (0 << 8) | 0)
... ...
@@ -34,17 +34,11 @@
34 34
 #include "mulle_concurrent_pointerarray.h"
35 35
 
36 36
 #include "mulle_concurrent_types.h"
37
-#include <mulle_aba/mulle_aba.h>
38 37
 #include <assert.h>
39 38
 #include <errno.h>
40 39
 #include <stdint.h>
41 40
 
42 41
 
43
-#if MULLE_ABA_VERSION < ((1 << 20) | (0 << 8) | 0)
44
-# error "mulle_aba is too old"
45
-#endif
46
-
47
-
48 42
 struct mulle_concurrent_pointerarraystorage
49 43
 {
50 44
    mulle_atomic_pointer_t   n;
... ...
@@ -70,14 +64,14 @@ struct mulle_concurrent_pointerarraystorage
70 64
 // n must be a power of 2
71 65
 static struct mulle_concurrent_pointerarraystorage *
72 66
    _mulle_concurrent_alloc_pointerarraystorage( unsigned int n,
73
-                                          struct mulle_allocator *allocator)
67
+                                                struct mulle_allocator *allocator)
74 68
 {
75 69
    struct mulle_concurrent_pointerarraystorage  *p;
76 70
    
77 71
    if( n < 8)
78 72
       n = 8;
79 73
    
80
-   p = allocator->calloc( 1, sizeof( void *) * (n - 1) +
74
+   p = _mulle_allocator_calloc( allocator, 1, sizeof( void *) * (n - 1) +
81 75
                              sizeof( struct mulle_concurrent_pointerarraystorage));
82 76
    if( ! p)
83 77
       return( p);
... ...
@@ -174,17 +168,19 @@ static void   _mulle_concurrent_pointerarraystorage_copy( struct mulle_concurren
174 168
 #pragma mark _mulle_concurrent_pointerarray
175 169
 
176 170
 int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *map,
177
-                                    unsigned int size,
178
-                                    struct mulle_allocator *allocator,
179
-                                    struct mulle_aba *aba)
171
+                                          unsigned int size,
172
+                                          struct mulle_allocator *allocator)
180 173
 {
181 174
    if( ! allocator)
182 175
       allocator = &mulle_default_allocator;
183
-   if( ! aba)
184
-      aba = mulle_aba_get_global();
176
+
177
+   if( ! allocator->mode)
178
+   {
179
+      errno = ENXIO;
180
+      return( -1);
181
+   }
185 182
    
186 183
    map->allocator    = allocator;
187
-   map->aba          = aba;
188 184
    map->storage      = _mulle_concurrent_alloc_pointerarraystorage( size, allocator);
189 185
    map->next_storage = map->storage;
190 186
    
... ...
@@ -197,11 +193,11 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
197 193
 //
198 194
 // this is called when you know, no other threads are accessing it anymore
199 195
 //
200
-void  _mulle_concurrent_pointerarray_free( struct mulle_concurrent_pointerarray *map)
196
+void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *map)
201 197
 {
202
-   _mulle_aba_free( map->aba, map->allocator->free, map->storage);
198
+   _mulle_allocator_free( map->allocator, map->storage);
203 199
    if( map->storage != map->next_storage)
204
-      _mulle_aba_free( map->aba, map->allocator->free, map->next_storage);
200
+      _mulle_allocator_free( map->allocator, map->next_storage);
205 201
 }
206 202
 
207 203
 
... ...
@@ -225,7 +221,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
225 221
    if( q != p)
226 222
    {
227 223
       // someone else produced a next world, use that and get rid of 'alloced'
228
-      _mulle_aba_free( map->aba, map->allocator->free, alloced);
224
+      _mulle_allocator_free( map->allocator, alloced);
229 225
       alloced = NULL;
230 226
    }
231 227
    else
... ...
@@ -240,7 +236,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
240 236
    // ok, if we succeed free old, if we fail alloced is
241 237
    // already gone
242 238
    if( previous == p)
243
-      _mulle_aba_free( map->aba, map->allocator->free, previous);
239
+      _mulle_allocator_free( map->allocator, previous);
244 240
    
245 241
    return( 0);
246 242
 }
... ...
@@ -357,7 +353,7 @@ int  _mulle_concurrent_pointerarrayreverseenumerator_next( struct mulle_concurre
357 353
 
358 354
 
359 355
 
360
-int   _mulle_concurrent_pointerarray_search( struct mulle_concurrent_pointerarray *map,
356
+int   _mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *map,
361 357
                                        void *search)
362 358
 {
363 359
    struct mulle_concurrent_pointerarrayenumerator   rover;
... ...
@@ -45,24 +45,22 @@ struct mulle_concurrent_pointerarray
45 45
    mulle_atomic_pointer_t   storage;
46 46
    mulle_atomic_pointer_t   next_storage;
47 47
    struct mulle_allocator   *allocator;
48
-   struct mulle_aba         *aba;
49 48
 };
50 49
 
51 50
 int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *map,
52
-                                    unsigned int size,
53
-                                    struct mulle_allocator *allocator,
54
-                                    struct mulle_aba *aba);
55
-void  _mulle_concurrent_pointerarray_free( struct mulle_concurrent_pointerarray *map);
51
+                                          unsigned int size,
52
+                                          struct mulle_allocator *allocator);
53
+void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *map);
56 54
 
57 55
 
58 56
 int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *map,
59
-                                   void *value);
57
+                                         void *value);
60 58
 
61 59
 void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *map,
62
-                                   unsigned int n);
60
+                                           unsigned int n);
63 61
 
64
-int  _mulle_concurrent_pointerarray_search( struct mulle_concurrent_pointerarray *map,
65
-                                       void *value);
62
+int  _mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *map,
63
+                                            void *value);
66 64
 
67 65
 
68 66
 unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *map);
... ...
@@ -112,7 +110,6 @@ static inline struct mulle_concurrent_pointerarrayreverseenumerator
112 110
 }
113 111
 
114 112
 
115
-
116 113
 //  1 : OK
117 114
 //  0 : nothing left
118 115
 // -1 : failed to enumerate further
... ...
@@ -143,9 +143,9 @@ static void  multi_threaded_test( unsigned int n_threads)
143 143
 
144 144
    assert( n_threads <= 32);
145 145
 
146
-   mulle_aba_init( &mulle_default_allocator);
146
+   mulle_aba_init( &mulle_test_allocator);
147 147
 
148
-   _mulle_concurrent_hashmap_init( &map, 0, &mulle_test_allocator, mulle_aba_get_global());
148
+   _mulle_concurrent_hashmap_init( &map, 0, mulle_aba_as_allocator());
149 149
 
150 150
    {
151 151
       for( i = 0; i < n_threads; i++)
... ...
@@ -162,7 +162,7 @@ static void  multi_threaded_test( unsigned int n_threads)
162 162
    }
163 163
 
164 164
    mulle_aba_register();
165
-   _mulle_concurrent_hashmap_free( &map);
165
+   _mulle_concurrent_hashmap_done( &map);
166 166
    mulle_aba_unregister();
167 167
 
168 168
    mulle_aba_done();
... ...
@@ -177,10 +177,10 @@ static void  single_threaded_test( void)
177 177
    unsigned int                                i;
178 178
    void                                        *value;
179 179
 
180
-   mulle_aba_init( &mulle_default_allocator);
180
+   mulle_aba_init( &mulle_test_allocator);
181 181
    mulle_aba_register();
182 182
 
183
-   _mulle_concurrent_hashmap_init( &map, 0, &mulle_test_allocator, mulle_aba_get_global());
183
+   _mulle_concurrent_hashmap_init( &map, 0, mulle_aba_as_allocator());
184 184
    {
185 185
       for( i = 1; i <= 100; i++)
186 186
       {
... ...
@@ -224,7 +224,7 @@ static void  single_threaded_test( void)
224 224
       _mulle_concurrent_hashmapenumerator_done( &rover);
225 225
       assert( i == 99);
226 226
    }
227
-   _mulle_concurrent_hashmap_free( &map);
227
+   _mulle_concurrent_hashmap_done( &map);
228 228
 
229 229
    mulle_aba_unregister();
230 230
    mulle_aba_done();
... ...
@@ -122,9 +122,9 @@ static void  multi_threaded_test( unsigned int n_threads)
122 122
 
123 123
    assert( n_threads <= 32);
124 124
 
125
-   mulle_aba_init( &mulle_default_allocator);
125
+   mulle_aba_init( &mulle_test_allocator);
126 126
 
127
-   _mulle_concurrent_pointerarray_init( &map, 0, &mulle_test_allocator, mulle_aba_get_global());
127
+   _mulle_concurrent_pointerarray_init( &map, 0, mulle_aba_as_allocator());
128 128
 
129 129
    {
130 130
       for( i = 0; i < n_threads; i++)
... ...
@@ -141,7 +141,7 @@ static void  multi_threaded_test( unsigned int n_threads)
141 141
    }
142 142
 
143 143
    mulle_aba_register();
144
-   _mulle_concurrent_pointerarray_free( &map);
144
+   _mulle_concurrent_pointerarray_done( &map);
145 145
    mulle_aba_unregister();
146 146
 
147 147
    mulle_aba_done();
... ...
@@ -155,10 +155,10 @@ static void  single_threaded_test( void)
155 155
    unsigned int                                i;
156 156
    void                                        *value;
157 157
 
158
-   mulle_aba_init( &mulle_default_allocator);
158
+   mulle_aba_init( &mulle_test_allocator);
159 159
    mulle_aba_register();
160 160
 
161
-   _mulle_concurrent_pointerarray_init( &map, 0, &mulle_test_allocator, mulle_aba_get_global());
161
+   _mulle_concurrent_pointerarray_init( &map, 0, mulle_aba_as_allocator());
162 162
    {
163 163
       for( i = 1; i <= 100; i++)
164 164
       {
... ...
@@ -185,7 +185,7 @@ static void  single_threaded_test( void)
185 185
       _mulle_concurrent_pointerarrayenumerator_done( &rover);
186 186
       assert( i == 101);
187 187
    }
188
-   _mulle_concurrent_pointerarray_free( &map);
188
+   _mulle_concurrent_pointerarray_done( &map);
189 189
 
190 190
    mulle_aba_unregister();
191 191
    mulle_aba_done();