Browse code

Improve header organization and add two examples

Nat! authored on 18-10-2016 18:32:06
Showing 26 changed files
1 1
new file mode 100644
... ...
@@ -0,0 +1 @@
1
+https://github.com/
0 2
new file mode 100644
... ...
@@ -0,0 +1 @@
1
+https://mulle-kybernetik.com/repositories/
... ...
@@ -1,3 +1,8 @@
1
-https://www.mulle-kybernetik.com/repositories/mulle-tests;tests/mulle-tests;${MULLE_TESTS_BRANCH:-release}
2
-https://www.mulle-kybernetik.com/repositories/mulle-homebrew;bin/mulle-homebrew;${MULLE_HOMEBREW_BRANCH:-release}
3
-https://www.mulle-kybernetik.com/repositories/mulle-configuration;;${MULLE_CONFIGURATION_BRANCH:-release}
1
+#
2
+# MULLE_REPOSITORIES is defined in mulle-bootstrap 2.3
3
+# override the branches with e.g.
4
+# `echo "master" > .bootstrap.local/MULLE_TESTS_BRANCH`
5
+
6
+${MULLE_REPOSITORIES}mulle-tests;tests/mulle-tests;${MULLE_TESTS_BRANCH:-release}
7
+${MULLE_REPOSITORIES}mulle-homebrew;bin/mulle-homebrew;${MULLE_HOMEBREW_BRANCH:-release}
8
+${MULLE_REPOSITORIES}mulle-configuration;;${MULLE_CONFIGURATION_BRANCH:-release}
... ...
@@ -1,3 +1,8 @@
1
-https://www.mulle-kybernetik.com/repositories/mulle-thread
2
-https://www.mulle-kybernetik.com/repositories/mulle-allocator
3
-https://www.mulle-kybernetik.com/repositories/mulle-aba
1
+#
2
+# MULLE_REPOSITORIES is defined in mulle-bootstrap 2.3
3
+# override the branches with e.g.
4
+# `echo "master" > .bootstrap.local/MULLE_THREAD_BRANCH`
5
+
6
+${MULLE_REPOSITORIES}mulle-thread;;${MULLE_THREAD_BRANCH:-release}
7
+${MULLE_REPOSITORIES}mulle-allocator;;${MULLE_ALLOCATOR_BRANCH:-release}
8
+${MULLE_REPOSITORIES}mulle-aba;;${MULLE_ABA_BRANCH:-release}
... ...
@@ -27,3 +27,12 @@ mulle-tests/
27 27
 /tests/pointerarray/simple
28 28
 /tests/hashmap/hashmap
29 29
 /tests/hashmap/othersimple
30
+
31
+/tests/mulle-tests/
32
+tests/array/example
33
+tests/array/pointerarray
34
+tests/array/simple
35
+tests/hashmap/example
36
+tests/array/example.debug
37
+tests/hashmap/example.debug
38
+tests/hashmap/hashmap.debug
30 39
\ No newline at end of file
... ...
@@ -7,10 +7,10 @@ environments.
7 7
 
8 8
 ## Data structures
9 9
 
10
-Name                            | Description
11
-`mulle_concurrent_hashmap`      | A growing, mutable map of pointers, indexed by a hash
12
-`mulle_concurrent_pointerarray` | A growing array of pointers
10
+Name                            | Description                            | Example
11
+--------------------------------|----------------------------------------|-------------------
12
+`mulle_concurrent_hashmap`      | A growing, mutable map of pointers, indexed by a hash. A.k.a. hashtable, dictionary, maptable                                          | [Example](tests/hashmap/example.c)
13
+`mulle_concurrent_pointerarray` | A growing array of pointers            | [Example](tests/array/example.c)
13 14
 
14 15
 
15 16
 
... ...
@@ -4,6 +4,9 @@
4 4
 * renamed `_mulle_concurrent_hashmap_get_count` to `mulle_concurrent_hashmap_count`,
5 5
 since it's safe to pass NULL and it's not a get operation.'
6 6
 * improved the documentation
7
+* added some more "safe API" routines for release
8
+* improved the headers for readability
9
+* clarified return codes of `mulle_concurrent_hashmap_remove`.
7 10
 
8 11
 # v0.5
9 12
 
... ...
@@ -31,4 +34,4 @@ since it's safe to pass NULL and it's not a get operation.'
31 34
 
32 35
 # v0.0
33 36
 
34
-* Merycful Release
37
+* Merciful Release
... ...
@@ -8,71 +8,66 @@ in time.
8 8
 
9 9
 The following operations should be executed in single-threaded fashion:
10 10
 
11
-* `_mulle_concurrent_hashmap_init`
12
-* `_mulle_concurrent_hashmap_done`
13
-* `_mulle_concurrent_hashmap_get_size`
11
+* `mulle_concurrent_hashmap_init`
12
+* `mulle_concurrent_hashmap_done`
13
+* `mulle_concurrent_hashmap_get_size`
14 14
 
15 15
 The following operations are fine in multi-threaded environments:
16 16
 
17
-* `_mulle_concurrent_hashmap_insert`
18
-* `_mulle_concurrent_hashmap_remove`
19
-* `_mulle_concurrent_hashmap_lookup`
17
+* `mulle_concurrent_hashmap_insert`
18
+* `mulle_concurrent_hashmap_remove`
19
+* `mulle_concurrent_hashmap_lookup`
20 20
 
21
-
22
-The following operations work in multi-threaded environments, but should be approached with caution:
21
+The following operations work in multi-threaded environments, but should be
22
+approached with caution:
23 23
 
24 24
 * `mulle_concurrent_hashmap_enumerate`
25
-* `_mulle_concurrent_hashmapenumerator_next`
26
-* `_mulle_concurrent_hashmapenumerator_done`
25
+* `mulle_concurrent_hashmapenumerator_next`
26
+* `mulle_concurrent_hashmapenumerator_done`
27 27
 * `mulle_concurrent_hashmap_lookup_any`
28 28
 * `mulle_concurrent_hashmap_count`
29 29
 
30
+
30 31
 ## single-threaded
31 32
 
32 33
 
33
-### `_mulle_concurrent_hashmap_init`
34
+### `mulle_concurrent_hashmap_init`
34 35
 
35 36
 ```
36
-void   _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
37
-                                       unsigned int size,
38
-                                       struct mulle_allocator *allocator)
37
+int   mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
38
+                                     unsigned int size,
39
+                                     struct mulle_allocator *allocator)
39 40
 ```
40 41
 
41 42
 Initialize `map`, with a starting `size` of elements. `allocator` will be
42 43
 used to allocate and free memory during the lifetime of `map`.  You can pass in
43 44
 NULL for `allocator` to use the default. Call this in single-threaded fashion.
44 45
 
46
+Return Values:
47
+   0      : OK
48
+   EINVAL : invalid argument
49
+   ENOMEM : out of memory
50
+
45 51
 
46
-### `void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)`
52
+### `void  mulle_concurrent_hashmap_done`
47 53
 
48 54
 ```
49
-void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
55
+void  mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
50 56
 ```
51 57
 
52 58
 This will free all allocated resources `map`. It will not **free** `map` itself
53 59
 though. `map` must be a valid pointer. Call this in single-threaded fashion.
54 60
 
55 61
 
56
-### `_mulle_concurrent_hashmap_get_size`
57
-
58
-```
59
-unsigned int   mulle_concurrent_hashmap_get_count( struct mulle_concurrent_hashmap *map);
60
-```
61
-
62
-This gives you the current number of hash/value entries of `map`. The returned
63
-number is close to meaningless, when the map is accessed in multi-threaded
64
-fashion. Call this in single-threaded fashion.
65
-
66
-
67 62
 ## multi-threaded
68 63
 
69 64
 
70
-### `_mulle_concurrent_hashmap_insert`
65
+### `mulle_concurrent_hashmap_insert`
71 66
 
72 67
 ```
73
-int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
74
-                                       intptr_t hash,
75
-                                       void *value)
68
+int  mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
69
+                                      intptr_t hash,
70
+                                      void *value)
76 71
 ```
77 72
 
78 73
 Insert a `hash`, `value` pair.
... ...
@@ -102,16 +97,16 @@ Return Values:
102 97
    ENOMEM : out of memory
103 98
 
104 99
 
105
-### `_mulle_concurrent_hashmap_remove` - remove a hash/value pair
100
+### `mulle_concurrent_hashmap_remove`
106 101
 
107 102
 ```
108
-int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
103
+int  mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
109 104
                                        intptr_t hash,
110 105
                                        void *value)
111 106
 ```
112 107
 
113 108
 Remove a `hash`, `value` pair. Read the description of
114
-`_mulle_concurrent_hashmap_insert` for information about restrictions
109
+`mulle_concurrent_hashmap_insert` for information about restrictions
115 110
 pertaining to both.
116 111
 
117 112
 Return Values:
... ...
@@ -120,10 +115,10 @@ Return Values:
120 115
    ENOMEM : out of memory
121 116
 
122 117
 
123
-### `_mulle_concurrent_hashmap_lookup` - search for a value by hash
118
+### `mulle_concurrent_hashmap_lookup`
124 119
 
125 120
 ```
126
-void   *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
121
+void   *mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
127 122
                                           intptr_t hash)
128 123
 ```
129 124
 
... ...
@@ -135,6 +130,18 @@ Return Values:
135 130
 
136 131
 ---
137 132
 
133
+
134
+### `mulle_concurrent_hashmap_get_size`
135
+
136
+```
137
+unsigned int   mulle_concurrent_hashmap_get_count( struct mulle_concurrent_hashmap *map);
138
+```
139
+
140
+This gives you the current number of hash/value entries of `map`. The returned
141
+number is close to meaningless, when the map is accessed in multi-threaded
142
+fashion.
143
+
144
+
138 145
 # `mulle_concurrent_hashmapenumerator`
139 146
 
140 147
 ```
... ...
@@ -156,17 +163,17 @@ Here is a simple usage example:
156 163
    void                                        *value;
157 164
 
158 165
    rover = mulle_concurrent_hashmap_enumerate( map);
159
-   while( _mulle_concurrent_hashmapenumerator_next( &rover, &hash, &value) == 1)
166
+   while( mulle_concurrent_hashmapenumerator_next( &rover, &hash, &value) == 1)
160 167
    {
161 168
       printf( "%ld %p\n", hash, value);
162 169
    }
163
-   _mulle_concurrent_hashmapenumerator_done( &rover);
170
+   mulle_concurrent_hashmapenumerator_done( &rover);
164 171
 ```
165 172
 
166
-### `_mulle_concurrent_hashmapenumerator_next`
173
+### `mulle_concurrent_hashmapenumerator_next`
167 174
 
168 175
 ```
169
-int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
176
+int  mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
170 177
                                                intptr_t *hash,
171 178
                                                void **value)
172 179
 ```
... ...
@@ -174,16 +181,16 @@ int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapen
174 181
 Get the next `hash`, `value` pair from the enumerator.
175 182
 
176 183
 Return Values:
177
-   1           : OK
178
-   0           : nothing left
179
-   -ECANCELLED : hashtable was mutated (Note: **negative errno value**!)
180
-   -ENOMEM     : out of memory         (Note: **negative errno value**!)
184
+   1          : OK
185
+   0          : nothing left
186
+   ECANCELLED : hashtable was mutated
187
+   ENOMEM     : out of memory
181 188
 
182 189
 
183
-### `_mulle_concurrent_hashmapenumerator_done`
190
+### `mulle_concurrent_hashmapenumerator_done`
184 191
 
185 192
 ```
186
-void  _mulle_concurrent_hashmapenumerator_done( struct mulle_concurrent_hashmapenumerator *rover)
193
+void  mulle_concurrent_hashmapenumerator_done( struct mulle_concurrent_hashmapenumerator *rover)
187 194
 ```
188 195
 
189 196
 It's a mere conventional function. It may be left out.
... ...
@@ -195,8 +202,10 @@ It's a mere conventional function. It may be left out.
195 202
 unsigned int   mulle_concurrent_hashmap_count( struct mulle_concurrent_hashmap *map);
196 203
 ```
197 204
 
198
-This gives you the current number of hash/value entries of `map`. It is implemented as an iterator loop, that counts the number of values.
199
-The returned number is close to meaningless, when the map is accessed in multi-threaded fashion.
205
+This gives you the current number of hash/value entries of `map`. It is
206
+implemented as an iterator loop, that counts the number of values.
207
+The returned number may be close to meaningless, when the map is accessed in
208
+multi-threaded fashion.
200 209
 
201 210
 
202 211
 ### `mulle_concurrent_hashmap_lookup_any` - get a value from the hashmap
... ...
@@ -8,35 +8,38 @@ handling very simple.
8 8
 
9 9
 The following operations should be executed in single-threaded fashion:
10 10
 
11
-* `_mulle_concurrent_pointerarray_init`
12
-* `_mulle_concurrent_pointerarray_done`
13
-* `_mulle_concurrent_pointerarray_get_size`
11
+* `mulle_concurrent_pointerarray_init`
12
+* `mulle_concurrent_pointerarray_done`
14 13
 
15 14
 The following operations are fine in multi-threaded environments:
16 15
 
17
-* `_mulle_concurrent_pointerarray_add`
18
-* `_mulle_concurrent_pointerarray_get`
16
+* `mulle_concurrent_pointerarray_add`
17
+* `mulle_concurrent_pointerarray_get`
19 18
 
20 19
 The following operations work in multi-threaded environments,
21 20
 but should be approached with caution:
22 21
 
23 22
 * `mulle_concurrent_pointerarray_enumerate`
24
-* `_mulle_concurrent_pointerarrayenumerator_next`
25
-* `_mulle_concurrent_pointerarrayenumerator_done`
26
-* `_mulle_concurrent_pointerarray_reverseenumerate`
27
-* `_mulle_concurrent_pointerarrayreverseenumerator_next`
28
-* `_mulle_concurrent_pointerarrayreverseenumerator_done`
23
+* `mulle_concurrent_pointerarrayenumerator_next`
24
+* `mulle_concurrent_pointerarrayenumerator_done`
25
+* `mulle_concurrent_pointerarray_reverseenumerate`
26
+* `mulle_concurrent_pointerarrayreverseenumerator_next`
27
+* `mulle_concurrent_pointerarrayreverseenumerator_done`
29 28
 * `mulle_concurrent_pointerarray_map`
30
-* `_mulle_concurrent_pointerarray_find`
29
+* `mulle_concurrent_pointerarray_find`
31 30
 * `mulle_concurrent_pointerarray_get_count`
31
+* `mulle_concurrent_pointerarray_get_size`
32 32
 
33 33
 
34
-### `_mulle_concurrent_pointerarray_init` - initialize pointerarray
34
+## single-threaded
35
+
36
+
37
+### `mulle_concurrent_pointerarray_init`
35 38
 
36 39
 ```
37
-void   _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
38
-                                       unsigned int size,
39
-                                       struct mulle_allocator *allocator)
40
+int   mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
41
+                                          unsigned int size,
42
+                                          struct mulle_allocator *allocator)
40 43
 ```
41 44
 
42 45
 Initialize `array`, with a starting `size` of elements. `allocator` will be
... ...
@@ -44,10 +47,10 @@ used to allocate and free memory during the lifetime of `array`.  You can pass i
44 47
 for `allocator` to use the default. Call this in single-threaded fashion.
45 48
 
46 49
 
47
-### `_mulle_concurrent_pointerarray_done` - free pointerarray resources
50
+### `mulle_concurrent_pointerarray_done`
48 51
 
49 52
 ```
50
-void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *array)
53
+void  mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *array)
51 54
 ```
52 55
 
53 56
 This will free all allocated resources `array`. It will not **free** `array`
... ...
@@ -55,104 +58,72 @@ itself though. `array` must be a valid pointer. Call this in single-threaded
55 58
 fashion.
56 59
 
57 60
 
58
-### `_mulle_concurrent_pointerarray_insert` - insert a hash/value pair
61
+## multi-threaded
59 62
 
60
-```
61
-int  _mulle_concurrent_pointerarray_insert( struct mulle_concurrent_pointerarray *array,
62
-                                       intptr_t hash,
63
-                                       void *value)
64
-```
65 63
 
66
-Insert a `hash`, `value` pair.
67
-`hash` must not be zero. It should be a unique integer key, suitably treated to
68
-be a good hash value. Here is an example of an avalance function for simple
69
-integer keys (1-...)
64
+### `mulle_concurrent_pointerarray_add`
70 65
 
71 66
 ```
72
-static inline uint64_t   mulle_hash_avalanche64(uint64_t h)
73
-{
74
-   h ^= h >> 33;
75
-   h *= 0xff51afd7ed558ccd;
76
-   h ^= h >> 33;
77
-   h *= 0xc4ceb9fe1a85ec53;
78
-   h ^= h >> 33;
79
-   return h;
80
-}
67
+int  mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
68
+                                        void *value)
81 69
 ```
82 70
 
83
-`value` can be any `void *` except `NULL` or `(void *) INTPTR_MAX`.  It will
71
+Add value to the end of the array.
72
+`value` can be any `void *` except `NULL` or `(void *) INTPTR_MAX`. It will
84 73
 not get dereferenced by the pointerarray.
85 74
 
86 75
 
87 76
 Return Values:
88 77
    0      : OK
89
-   EEXIST : duplicate
90
-   ENOMEM : out of memory
91
-
92
-
93
-### `_mulle_concurrent_pointerarray_remove` - remove a hash/value pair
94
-
95
-```
96
-int  _mulle_concurrent_pointerarray_remove( struct mulle_concurrent_pointerarray *array,
97
-                                       intptr_t hash,
98
-                                       void *value)
99
-```
100
-
101
-Remove a `hash`, `value` pair. Read the description of
102
-`_mulle_concurrent_pointerarray_insert` for information about restrictions
103
-pertaining to both.
104
-
105
-Return Values:
106
-   0      : OK
107
-   ENOENT : not found
78
+   EINVAL : invalid argument
108 79
    ENOMEM : out of memory
109 80
 
110 81
 
111
-### `_mulle_concurrent_pointerarray_lookup` - search for a value by hash
82
+### `mulle_concurrent_pointerarray_get`
112 83
 
113 84
 ```
114
-void   *_mulle_concurrent_pointerarray_lookup( struct mulle_concurrent_pointerarray *array,
115
-                                          intptr_t hash)
85
+void   *mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *array,
86
+                                           unsigned int index)
116 87
 ```
117 88
 
118
-Looks up a value by its hash.
89
+Get value at `index` of array.
119 90
 
120 91
 Return Values:
121
-   NULL  : not found
122
-   otherwise the value for this hash
92
+   NULL  : not found (invalid argument)
93
+   otherwise the value
123 94
 
124 95
 
125
-### `_mulle_concurrent_pointerarray_get_size` - get size of pointerarray
96
+### `mulle_concurrent_pointerarray_get_size`
126 97
 
127 98
 ```
128
-unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
99
+unsigned int  mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
129 100
 ```
130 101
 
131 102
 This gives you the capacity of `array`. This value is close to
132 103
 meaningless, when the array is accessed in multi-threaded fashion.
133 104
 
134 105
 
135
-### `_mulle_concurrent_pointerarray_get_size` - get number of entries of pointerarray
106
+### `mulle_concurrent_pointerarray_get_count`
136 107
 
137 108
 ```
138 109
 unsigned int   mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array);
139 110
 ```
140 111
 
141
-This gives you the current number of hash/value entries of `array`. The returned
142
-number is close to meaningless, when the array is accessed in multi-threaded
143
-fashion.
112
+This gives you the current number of entries in `array`. As the array can only
113
+grow, this value is useful, but may already be outdated.
114
+
144 115
 
116
+## `mulle_concurrent_pointerarrayenumerator`
145 117
 
146
-## `mulle_concurrent_pointerarrayenumerator` - enumerator interface
118
+### `mulle_concurrent_pointerarray_enumerate`
147 119
 
148 120
 ```
149 121
 struct mulle_concurrent_pointerarrayenumerator  mulle_concurrent_pointerarray_enumerate( struct mulle_concurrent_pointerarray *array)
150 122
 ```
151 123
 
152
-Enumerate a hashtable. This works reliably if `array` is accessed in
153
-single-threaded fashion, which it probably will NOT be. In multi-threaded
154
-environments, the enumeration may be interrupted by mutations of the hashtable
155
-by other threads. The enumerator itself should not be shared accessed by other threads.
124
+Enumerate a pointerarray. This works reliably even in multi-threaded
125
+environments. The enumerator itself should not be shared with other
126
+threads though.
156 127
 
157 128
 Here is a simple usage example:
158 129
 
... ...
@@ -160,41 +131,37 @@ Here is a simple usage example:
160 131
 ```
161 132
    struct mulle_concurrent_pointerarray             *array;
162 133
    struct mulle_concurrent_pointerarrayenumerator   rover;
163
-   intptr_t                                    hash;
164
-   void                                        *value;
134
+   void                                             *value;
165 135
 
166 136
    rover = mulle_concurrent_pointerarray_enumerate( array);
167
-   while( _mulle_concurrent_pointerarrayenumerator_next( &rover, &hash, &value) == 1)
137
+   while( value = mulle_concurrent_pointerarrayenumerator_next( &rover))
168 138
    {
169
-      printf( "%ld %p\n", hash, value);
139
+      printf( "%p\n", value);
170 140
    }
171
-   _mulle_concurrent_pointerarrayenumerator_done( &rover);
141
+   mulle_concurrent_pointerarrayenumerator_done( &rover);
172 142
 ```
173 143
 
174
-### `_mulle_concurrent_pointerarrayenumerator_next` - get next hash/value pair
144
+### `mulle_concurrent_pointerarrayenumerator_next`
175 145
 
176 146
 ```
177
-int  _mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_pointerarrayenumerator *rover,
178
-                                               intptr_t *hash,
179
-                                               void **value)
147
+void   *mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_pointerarrayenumerator *rover)
180 148
 ```
181 149
 
182
-Get the next `hash`, `value` pair from the enumerator.
150
+Get the next value from the enumerator.
183 151
 
184 152
 Return Values:
185
-   1           : OK
186
-   0           : nothing left
187
-   -ECANCELLED : hashtable was mutated (Note: **negative errno value**!)
188
-   -ENOMEM     : out of memory         (Note: **negative errno value**!)
153
+   NULL  : nothing left
154
+   otherwise the value
189 155
 
190 156
 
191
-### `_mulle_concurrent_pointerarrayenumerator_done` - mark the end of the enumerator
157
+### `mulle_concurrent_pointerarrayenumerator_done`
192 158
 
193 159
 ```
194
-void  _mulle_concurrent_pointerarrayenumerator_done( struct mulle_concurrent_pointerarrayenumerator *rover)
160
+void   mulle_concurrent_pointerarrayenumerator_done( struct mulle_concurrent_pointerarrayenumerator *rover)
195 161
 ```
196 162
 
197
-It's a mere conventional function. It may be left out.
163
+Mark the end of the enumerator lifetime. It's a mere conventional function.
164
+It may be left out.
198 165
 
199 166
 
200 167
 
... ...
@@ -124,7 +124,7 @@
124 124
 		41CAEAE71C8D9FF4003C2C7B /* test-hashmap */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "test-hashmap"; sourceTree = BUILT_PRODUCTS_DIR; };
125 125
 		41CAEAEE1C8DA00F003C2C7B /* hashmap.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = hashmap.c; path = tests/hashmap/hashmap.c; sourceTree = SOURCE_ROOT; };
126 126
 		41CAEAF71C8DA20F003C2C7B /* test-pointerarray */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "test-pointerarray"; sourceTree = BUILT_PRODUCTS_DIR; };
127
-		41CAEB031C8DA251003C2C7B /* pointerarray.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pointerarray.c; sourceTree = "<group>"; };
127
+		41CAEB031C8DA251003C2C7B /* pointerarray.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = pointerarray.c; path = ../array/pointerarray.c; sourceTree = "<group>"; };
128 128
 		41CAEB051C8DA326003C2C7B /* libmulle_test_allocator.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libmulle_test_allocator.a; path = dependencies/lib/Debug/libmulle_test_allocator.a; sourceTree = "<group>"; };
129 129
 		41CAEB091C8DB97F003C2C7B /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = "<group>"; };
130 130
 		41D04AB71C8DD69000CC8F11 /* RELEASENOTES.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = RELEASENOTES.md; sourceTree = "<group>"; };
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 04.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -254,7 +256,7 @@ static int   _mulle_concurrent_hashmapstorage_put( struct _mulle_concurrent_hash
254 256
       if( entry->hash == MULLE_CONCURRENT_NO_HASH)
255 257
       {
256 258
          found = __mulle_atomic_pointer_compare_and_swap( &entry->value, value, MULLE_CONCURRENT_NO_POINTER);
257
-         if( found)
259
+         if( found != MULLE_CONCURRENT_NO_POINTER)
258 260
          {
259 261
             if( found == REDIRECT_VALUE)
260 262
                return( EBUSY);
... ...
@@ -291,15 +293,9 @@ static int   _mulle_concurrent_hashmapstorage_remove( struct _mulle_concurrent_h
291 293
       if( entry->hash == hash)
292 294
       {
293 295
          found = __mulle_atomic_pointer_compare_and_swap( &entry->value, MULLE_CONCURRENT_NO_POINTER, value);
294
-         if( found != MULLE_CONCURRENT_NO_POINTER)
295
-         {
296
-            if( found == REDIRECT_VALUE)
297
-               return( EBUSY);
298
-
299
-            // once a entry->hash it must not be erased (except
300
-            // during migration)
301
-         }
302
-         return( 0);
296
+         if( found == REDIRECT_VALUE)
297
+            return( EBUSY);
298
+         return( found == value ? 0 : ENOENT);
303 299
       }
304 300
       
305 301
       if( entry->hash == MULLE_CONCURRENT_NO_HASH)
... ...
@@ -361,8 +357,8 @@ int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
361 357
    if( ! allocator)
362 358
       allocator = &mulle_default_allocator;
363 359
    
364
-   assert( allocator->abafree && allocator->abafree != (void *) abort);
365
-   if( ! allocator->abafree || allocator->abafree == (void *) abort)
360
+   assert( allocator->abafree && allocator->abafree != (int (*)()) abort);
361
+   if( ! allocator->abafree || allocator->abafree == (int (*)()) abort)
366 362
       return( EINVAL);
367 363
 
368 364
    map->allocator = allocator;
... ...
@@ -423,7 +419,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
423 419
       // acquire new storage
424 420
       alloced = _mulle_concurrent_alloc_hashmapstorage( ((unsigned int) p->mask + 1) * 2, map->allocator);
425 421
       if( ! alloced)
426
-         return( -1);
422
+         return( ENOMEM);
427 423
       
428 424
       // make this the next world, assume that's still set to 'p' (SIC)
429 425
       q = __mulle_atomic_pointer_compare_and_swap( &map->next_storage.pointer, alloced, p);
... ...
@@ -458,6 +454,7 @@ void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
458 454
    struct _mulle_concurrent_hashmapstorage   *p;
459 455
    void                                      *value;
460 456
    
457
+   // won't find invalid hash anyway
461 458
 retry:
462 459
    p     = _mulle_atomic_pointer_read( &map->storage.pointer);
463 460
    value = _mulle_concurrent_hashmapstorage_lookup( p, hash);
... ...
@@ -470,6 +467,7 @@ retry:
470 467
    return( value);
471 468
 }
472 469
 
470
+
473 471
 static int   _mulle_concurrent_hashmap_search_next( struct mulle_concurrent_hashmap *map,
474 472
                                                     unsigned int  *expect_mask,
475 473
                                                     unsigned int  *index,
... ...
@@ -483,7 +481,7 @@ static int   _mulle_concurrent_hashmap_search_next( struct mulle_concurrent_hash
483 481
 retry:
484 482
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
485 483
    if( *expect_mask && (unsigned int) p->mask != *expect_mask)
486
-      return( -ECANCELED);
484
+      return( ECANCELED);
487 485
    
488 486
    for(;;)
489 487
    {
... ...
@@ -492,15 +490,15 @@ retry:
492 490
          return( 0);
493 491
       
494 492
       value = _mulle_atomic_pointer_read( &entry->value);
495
-      if( value != MULLE_CONCURRENT_NO_POINTER)
496
-         break;
497
-
498 493
       if( value == REDIRECT_VALUE)
499 494
       {
500 495
          if( _mulle_concurrent_hashmap_migrate_storage( map, p))
501
-            return( -ENOMEM);
496
+            return( ENOMEM);
502 497
          goto retry;
503 498
       }
499
+
500
+      if( value != MULLE_CONCURRENT_NO_POINTER)
501
+         break;
504 502
    }
505 503
    
506 504
    if( p_hash)
... ...
@@ -515,6 +513,14 @@ retry:
515 513
 }
516 514
 
517 515
 
516
+static inline void   assert_hash_value( intptr_t hash, void *value)
517
+{
518
+   assert( hash != MULLE_CONCURRENT_NO_HASH);
519
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
520
+   assert( value != MULLE_CONCURRENT_INVALID_POINTER);
521
+}
522
+
523
+
518 524
 int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
519 525
                                        intptr_t hash,
520 526
                                        void *value)
... ...
@@ -523,9 +529,7 @@ int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
523 529
    unsigned int                              n;
524 530
    unsigned int                              max;
525 531
 
526
-   assert( hash != MULLE_CONCURRENT_NO_HASH);
527
-   assert( value);
528
-   assert( value != MULLE_CONCURRENT_NO_POINTER && value != MULLE_CONCURRENT_INVALID_POINTER);
532
+   assert_hash_value( hash, value);
529 533
    
530 534
 retry:
531 535
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
... ...
@@ -556,12 +560,30 @@ retry:
556 560
 }
557 561
 
558 562
 
563
+int  mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
564
+                                      intptr_t hash,
565
+                                      void *value)
566
+{
567
+   if( ! map)
568
+      return( EINVAL);
569
+   if( hash == MULLE_CONCURRENT_NO_HASH)
570
+      return( EINVAL);
571
+   if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
572
+      return( EINVAL);
573
+
574
+   return( _mulle_concurrent_hashmap_insert( map, hash, value));
575
+}
576
+
577
+
578
+
559 579
 int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
560 580
                                        intptr_t hash,
561 581
                                        void *value)
562 582
 {
563 583
    struct _mulle_concurrent_hashmapstorage   *p;
564 584
    
585
+   assert_hash_value( hash, value);
586
+   
565 587
 retry:
566 588
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
567 589
    switch( _mulle_concurrent_hashmapstorage_remove( p, hash, value))
... ...
@@ -578,6 +600,20 @@ retry:
578 600
 }
579 601
 
580 602
 
603
+int  mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
604
+                                      intptr_t hash,
605
+                                      void *value)
606
+{
607
+   if( ! map)
608
+      return( EINVAL);
609
+   if( hash == MULLE_CONCURRENT_NO_HASH)
610
+      return( EINVAL);
611
+   if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
612
+      return( EINVAL);
613
+
614
+   return( _mulle_concurrent_hashmap_remove( map, hash, value));
615
+}
616
+
581 617
 
582 618
 #pragma mark -
583 619
 #pragma mark not so concurrent enumerator
... ...
@@ -592,7 +628,7 @@ int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapen
592 628
    
593 629
    rval = _mulle_concurrent_hashmap_search_next( rover->map, &rover->mask, &rover->index, &hash, &value);
594 630
    
595
-   if( rval <= 0)
631
+   if( rval != 1)
596 632
       return( rval);
597 633
    
598 634
    if( p_hash)
... ...
@@ -623,17 +659,20 @@ retry:
623 659
    for(;;)
624 660
    {
625 661
       rval = _mulle_concurrent_hashmapenumerator_next( &rover, NULL, NULL);
626
-      if( ! rval)
627
-         break;
628
-      if( rval < 0)
662
+      if( rval == 1)
629 663
       {
630
-         _mulle_concurrent_hashmapenumerator_done( &rover);
631
-         goto retry;
664
+         ++count;
665
+         continue;
632 666
       }
633
-      ++count;
667
+
668
+      if( ! rval)
669
+         break;
670
+   
671
+      mulle_concurrent_hashmapenumerator_done( &rover);
672
+      goto retry;
634 673
    }
635 674
    
636
-   _mulle_concurrent_hashmapenumerator_done( &rover);
675
+   mulle_concurrent_hashmapenumerator_done( &rover);
637 676
    return( count);
638 677
 }
639 678
 
... ...
@@ -647,7 +686,7 @@ void  *mulle_concurrent_hashmap_lookup_any( struct mulle_concurrent_hashmap *map
647 686
    
648 687
    rover = mulle_concurrent_hashmap_enumerate( map);
649 688
    _mulle_concurrent_hashmapenumerator_next( &rover, NULL, &any);
650
-   _mulle_concurrent_hashmapenumerator_done( &rover);
689
+   mulle_concurrent_hashmapenumerator_done( &rover);
651 690
    
652 691
    return( any);
653 692
 }
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 04.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -61,33 +63,87 @@ struct mulle_concurrent_hashmap
61 63
 #pragma mark -
62 64
 #pragma mark single-threaded
63 65
 
64
-int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
65
-                                     unsigned int size,
66
-                                     struct mulle_allocator *allocator);
67
-void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map);
68 66
 
69
-unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map);
67
+// Returns:
68
+//   0      : OK
69
+//   EINVAL : invalid argument
70
+//   ENOMEM : out of memory
71
+//
72
//
// Checked front end for _mulle_concurrent_hashmap_init.
// Returns 0 on success, EINVAL for a NULL map, ENOMEM on allocation
// failure (see the worker's contract above).
//
static inline int  mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
                                                  unsigned int size,
                                                  struct mulle_allocator *allocator)
{
   // local prototype keeps the unchecked worker out of the header's top
   int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
                                        unsigned int size,
                                        struct mulle_allocator *allocator);

   if( ! map)
      return( EINVAL);
   return( _mulle_concurrent_hashmap_init( map, size, allocator));
}
83
+
84
+
85
//
// Checked front end for _mulle_concurrent_hashmap_done.
// A NULL map is silently ignored, mirroring free( NULL) semantics.
//
static inline void  mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
{
   void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map);

   if( ! map)
      return;
   _mulle_concurrent_hashmap_done( map);
}
92
+
93
+
94
//
// Checked front end for _mulle_concurrent_hashmap_get_size.
// A NULL map reports size 0.
//
static inline unsigned int  mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map)
{
   unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map);

   return( map ? _mulle_concurrent_hashmap_get_size( map) : 0);
}
102
+
70 103
 
71 104
 #pragma mark -
72 105
 #pragma mark multi-threaded
73 106
 
74
-// if rval == 0, inserted
75
-// rval == EEXIST, detected duplicate
76
-// rval == ENOMEM, must be out of memory
77
-int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
107
+// Return value (rval):
108
+//   0      : OK, inserted
109
+//   EEXIST : detected duplicate
110
+//   EINVAL : invalid argument
111
+//   ENOMEM : must be out of memory
112
+//
113
+// Do not use hash=0
114
+// Do not use value=0 or value=INTPTR_MAX
115
+//
116
+int   mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
78 117
                                        intptr_t hash,
79 118
                                        void *value);
80 119
 
120
+
121
+// if rval == NULL, not found
122
+
123
//
// Checked front end for _mulle_concurrent_hashmap_lookup.
// Returns NULL when map is NULL or when nothing is stored for `hash`.
//
static inline void  *mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
                                                      intptr_t hash)
{
   void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
                                            intptr_t hash);

   return( map ? _mulle_concurrent_hashmap_lookup( map, hash) : NULL);
}
133
+
134
+
81 135
 // if rval == 0, removed
82
-// rval == ENOENT, not found
136
+// rval == ENOENT, not found (hash/value pair does not exist (anymore))
137
+// rval == EINVAL, parameter has invalid value
83 138
 // rval == ENOMEM, must be out of memory
84
-//
85
-int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
139
+
140
+int   mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
86 141
                                        intptr_t hash,
87 142
                                        void *value);
88 143
 
89
-void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
90
-                                         intptr_t hash);
144
+
145
+
146
+
91 147
 
92 148
 #pragma mark -
93 149
 #pragma mark limited multi-threaded
... ...
@@ -110,28 +166,69 @@ static inline struct mulle_concurrent_hashmapenumerator  mulle_concurrent_hashma
110 166
    struct mulle_concurrent_hashmapenumerator   rover;
111 167
    
112 168
    rover.map   = map;
113
-   rover.index = map ? 0 : -1;
169
+   rover.index = map ? 0 : (unsigned int) -1;
114 170
    rover.mask  = 0;
115 171
    
116 172
    return( rover);
117 173
 }
118 174
 
119 175
 
120
-//  1 : OK
121
-//  0 : nothing left
122
-// -ECANCELLED: mutation alert
123
-// -ENOMEM    : out of memory
124
-int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
176
//  1          : OK
//  0          : nothing left
//  ECANCELLED : mutation alert
//  ENOMEM     : out of memory
//  EINVAL     : wrong parameter value

static inline int  mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
                                                            intptr_t *hash,
                                                            void **value)
{
   int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
                                                  intptr_t *hash,
                                                  void **value);

   // fix: was `return( -EINVAL)` — error codes in this API are positive
   // errno values (see contract above and the other checked wrappers)
   if( ! rover)
      return( EINVAL);
   return( _mulle_concurrent_hashmapenumerator_next( rover, hash, value));
}
193
+
127 194
 
128
-static inline void  _mulle_concurrent_hashmapenumerator_done( struct mulle_concurrent_hashmapenumerator *rover)
195
//
// Intentionally a no-op: the enumerator owns no resources. The call
// exists for API symmetry, so callers can pair enumerate/done.
//
static inline void  mulle_concurrent_hashmapenumerator_done( struct mulle_concurrent_hashmapenumerator *rover)
{
}
131 198
 
132
-// conveniences using the enumerator
133 199
 
134
-void   *mulle_concurrent_hashmap_lookup_any( struct mulle_concurrent_hashmap *map);
135
-unsigned int  mulle_concurrent_hashmap_count( struct mulle_concurrent_hashmap *map);
200
+#pragma mark -
201
+#pragma mark enumerator conveniences
202
+
203
+void           *mulle_concurrent_hashmap_lookup_any( struct mulle_concurrent_hashmap *map);
204
+unsigned int   mulle_concurrent_hashmap_count( struct mulle_concurrent_hashmap *map);
205
+
206
+
207
+#pragma mark -
208
+#pragma mark various functions, no parameter checks
209
+
210
+int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
211
+                                     unsigned int size,
212
+                                     struct mulle_allocator *allocator);
213
+void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map);
214
+
215
+unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map);
216
+
217
+
218
+int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
219
+                                       intptr_t hash,
220
+                                       void *value);
221
+
222
+void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
223
+                                         intptr_t hash);
224
+
225
+int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
226
+                                       intptr_t hash,
227
+                                       void *value);
228
+
229
+
230
+int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
231
+                                               intptr_t *hash,
232
+                                               void **value);
136 233
 
137 234
 #endif /* mulle_concurrent_hashmap_h */
... ...
@@ -35,7 +35,7 @@
35 35
 #define mulle_concurrent_h__
36 36
 
37 37
 
38
-#define MULLE_CONCURRENT_VERSION  ((0 << 20) | (5 << 8) | 0)
38
+#define MULLE_CONCURRENT_VERSION  ((1 << 20) | (0 << 8) | 0)
39 39
 
40 40
 #include <mulle_thread/mulle_thread.h>
41 41
 #include <mulle_allocator/mulle_allocator.h>
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 07.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 07.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -38,6 +40,7 @@
38 40
 
39 41
 //
40 42
 // if you don't like those values, you 'should' be able to redefine those
43
+// this file is not exposed (yet)
41 44
 //
42 45
 #define MULLE_CONCURRENT_NO_HASH           0
43 46
 
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 06.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -44,7 +46,7 @@ struct _mulle_concurrent_pointerarraystorage
44 46
 {
45 47
    mulle_atomic_pointer_t   n;
46 48
    uintptr_t                size;
47
-   
49
+
48 50
    mulle_atomic_pointer_t   entries[ 1];
49 51
 };
50 52
 
... ...
@@ -68,16 +70,16 @@ static struct _mulle_concurrent_pointerarraystorage *
68 70
                                                 struct mulle_allocator *allocator)
69 71
 {
70 72
    struct _mulle_concurrent_pointerarraystorage  *p;
71
-   
73
+
72 74
    if( n < 8)
73 75
       n = 8;
74
-   
76
+
75 77
    p = _mulle_allocator_calloc( allocator, 1, sizeof( void *) * (n - 1) +
76 78
                              sizeof( struct _mulle_concurrent_pointerarraystorage));
77 79
    if( ! p)
78 80
       return( p);
79 81
    p->size = n;
80
-   
82
+
81 83
    /*
82 84
     * in theory, one should be able to use different values for NO_POINTER and
83 85
     * INVALID_POINTER
... ...
@@ -86,7 +88,7 @@ static struct _mulle_concurrent_pointerarraystorage *
86 88
    {
87 89
       mulle_atomic_pointer_t   *q;
88 90
       mulle_atomic_pointer_t   *sentinel;
89
-      
91
+
90 92
       q        = p->entries;
91 93
       sentinel = &p->entries[ (unsigned int) p->size];
92 94
       while( q < sentinel)
... ...
@@ -95,7 +97,7 @@ static struct _mulle_concurrent_pointerarraystorage *
95 97
          ++q;
96 98
       }
97 99
    }
98
-   
100
+
99 101
    return( p);
100 102
 }
101 103
 
... ...
@@ -120,9 +122,10 @@ static int   _mulle_concurrent_pointerarraystorage_add( struct _mulle_concurrent
120 122
 {
121 123
    void           *found;
122 124
    unsigned int   i;
123
-   
125
+
124 126
    assert( p);
125
-   assert( value != MULLE_CONCURRENT_NO_POINTER && value != MULLE_CONCURRENT_INVALID_POINTER);
127
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
128
+   assert( value != MULLE_CONCURRENT_INVALID_POINTER);
126 129
 
127 130
    for(;;)
128 131
    {
... ...
@@ -136,7 +139,7 @@ static int   _mulle_concurrent_pointerarraystorage_add( struct _mulle_concurrent
136 139
          _mulle_atomic_pointer_increment( &p->n);
137 140
          return( 0);
138 141
       }
139
-      
142
+
140 143
       if( found == REDIRECT_VALUE)
141 144
          return( EBUSY);
142 145
    }
... ...
@@ -151,7 +154,7 @@ static void   _mulle_concurrent_pointerarraystorage_copy( struct _mulle_concurre
151 154
    void                     *value;
152 155
    unsigned int             i;
153 156
    unsigned int             n;
154
-   
157
+
155 158
    n      = (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &dst->n);
156 159
    p      = &src->entries[ n];
157 160
    p_last = &src->entries[ src->size];
... ...
@@ -173,15 +176,15 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
173 176
                                           struct mulle_allocator *allocator)
174 177
 {
175 178
    struct _mulle_concurrent_pointerarraystorage   *storage;
176
-   
179
+
177 180
    if( ! allocator)
178 181
       allocator = &mulle_default_allocator;
179 182
 
180
-   assert( allocator->abafree && allocator->abafree != (void *) abort);
181
-   
182
-   if( ! allocator->abafree || allocator->abafree == (void *) abort)
183
+   assert( allocator->abafree && allocator->abafree != (int (*)()) abort);
184
+
185
+   if( ! allocator->abafree)
183 186
       return( EINVAL);
184
-   
187
+
185 188
    array->allocator = allocator;
186 189
    storage          = _mulle_concurrent_alloc_pointerarraystorage( size, allocator);
187 190
 
... ...
@@ -190,7 +193,7 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
190 193
 
191 194
    _mulle_atomic_pointer_nonatomic_write( &array->storage.pointer, storage);
192 195
    _mulle_atomic_pointer_nonatomic_write( &array->next_storage.pointer, storage);
193
-   
196
+
194 197
    return( 0);
195 198
 }
196 199
 
... ...
@@ -202,16 +205,40 @@ void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray
202 205
 {
203 206
    struct _mulle_concurrent_pointerarraystorage   *storage;
204 207
    struct _mulle_concurrent_pointerarraystorage   *next_storage;
205
-   
208
+
206 209
    storage      = _mulle_atomic_pointer_nonatomic_read( &array->storage.pointer);
207 210
    next_storage = _mulle_atomic_pointer_nonatomic_read( &array->next_storage.pointer);
208
-   
211
+
209 212
    _mulle_allocator_abafree( array->allocator, storage);
210 213
    if( storage != next_storage)
211 214
       _mulle_allocator_abafree( array->allocator, next_storage);
212 215
 }
213 216
 
214 217
 
218
+unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
219
+{
220
+   struct _mulle_concurrent_pointerarraystorage   *p;
221
+
222
+   p = _mulle_atomic_pointer_read( &array->storage.pointer);
223
+   return( (unsigned int) p->size);
224
+}
225
+
226
+
227
+//
228
+// obviously just a snapshot at some recent point in time
229
+//
230
+unsigned int   _mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array)
231
+{
232
+   struct _mulle_concurrent_pointerarraystorage   *p;
233
+
234
+   p = _mulle_atomic_pointer_read( &array->storage.pointer);
235
+   return( (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &p->n));
236
+}
237
+
238
+
239
+# pragma mark -
240
+# pragma mark multi-threaded
241
+
215 242
 static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurrent_pointerarray *array,
216 243
                                                       struct _mulle_concurrent_pointerarraystorage *p)
217 244
 {
... ...
@@ -221,18 +248,18 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
221 248
    struct _mulle_concurrent_pointerarraystorage   *previous;
222 249
 
223 250
    assert( p);
224
-   
251
+
225 252
    // acquire new storage
226 253
    alloced = NULL;
227 254
    q       = _mulle_atomic_pointer_read( &array->next_storage.pointer);
228 255
 
229 256
    assert( q);
230
-   
257
+
231 258
    if( q == p)
232 259
    {
233 260
       alloced = _mulle_concurrent_alloc_pointerarraystorage( (unsigned int) p->size * 2, array->allocator);
234 261
       if( ! alloced)
235
-         return( -1);
262
+         return( ENOMEM);
236 263
 
237 264
       // make this the next world, assume that's still set to 'p' (SIC)
238 265
       q = __mulle_atomic_pointer_compare_and_swap( &array->next_storage.pointer, alloced, p);
... ...
@@ -245,10 +272,10 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
245 272
       else
246 273
          q = alloced;
247 274
    }
248
-   
275
+
249 276
    // this thread can partake in copying
250 277
    _mulle_concurrent_pointerarraystorage_copy( q, p);
251
-   
278
+
252 279
    // now update world, giving it the same value as 'next_world'
253 280
    previous = __mulle_atomic_pointer_compare_and_swap( &array->storage.pointer, q, p);
254 281
 
... ...
@@ -256,7 +283,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
256 283
    // already gone
257 284
    if( previous == p)
258 285
       _mulle_allocator_abafree( array->allocator, previous);
259
-   
286
+
260 287
    return( 0);
261 288
 }
262 289
 
... ...
@@ -266,7 +293,7 @@ void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray
266 293
 {
267 294
    struct _mulle_concurrent_pointerarraystorage   *p;
268 295
    void                                     *value;
269
-   
296
+
270 297
 retry:
271 298
    p     = _mulle_atomic_pointer_read( &array->storage.pointer);
272 299
    value = _mulle_concurrent_pointerarraystorage_get( p, index);
... ...
@@ -285,9 +312,9 @@ int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *a
285 312
 {
286 313
    struct _mulle_concurrent_pointerarraystorage   *p;
287 314
 
288
-   assert( value);
315
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
289 316
    assert( value != REDIRECT_VALUE);
290
-   
317
+
291 318
 retry:
292 319
    p = _mulle_atomic_pointer_read( &array->storage.pointer);
293 320
    switch( _mulle_concurrent_pointerarraystorage_add( p, value))
... ...
@@ -295,7 +322,7 @@ retry:
295 322
    case EBUSY   :
296 323
    case ENOSPC  :
297 324
       if( _mulle_concurrent_pointerarray_migrate_storage( array, p))
298
-         return( -1);
325
+         return( ENOMEM);
299 326
       goto retry;
300 327
    }
301 328
 
... ...
@@ -303,114 +330,104 @@ retry:
303 330
 }
304 331
 
305 332
 
306
-unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
333
+int  mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
334
+                                        void *value)
307 335
 {
308
-   struct _mulle_concurrent_pointerarraystorage   *p;
309
-   
310
-   p = _mulle_atomic_pointer_read( &array->storage.pointer);
311
-   return( (unsigned int) p->size);
336
+   if( ! array)
337
+      return( EINVAL);
338
+   if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
339
+      return( EINVAL);
340
+   return( _mulle_concurrent_pointerarray_add( array, value));
312 341
 }
313 342
 
314 343
 
315
-//
316
-// obviously just a snapshot at some recent point in time
317
-//
318
-unsigned int   mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array)
344
//
// Checked front end for _mulle_concurrent_pointerarray_get.
// A NULL array reads as NULL.
//
void  *mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *array,
                                          unsigned int i)
{
   return( array ? _mulle_concurrent_pointerarray_get( array, i) : NULL);
}
351
+
352
+
353
+
354
+int  mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *array,
355
+                                         void *value)
356
+{
357
+   if( ! array)
358
+      return( EINVAL);
359
+   if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
360
+      return( EINVAL);
361
+   return( _mulle_concurrent_pointerarray_find( array, value));
327 362
 }
328 363
 
329 364
 
330 365
 #pragma mark -
331 366
 #pragma mark not so concurrent enumerator
332 367
 
333
-int  _mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_pointerarrayenumerator *rover,
334
-                                              void **p_value)
368
+void  *_mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_pointerarrayenumerator *rover)
335 369
 {
336 370
    void           *value;
337 371
    unsigned int   n;
338
-   
372
+
339 373
    n = mulle_concurrent_pointerarray_get_count( rover->array);
340 374
    if( rover->index >= n)
341
-      return( 0);
342
-   
375
+      return( MULLE_CONCURRENT_NO_POINTER);
376
+
343 377
    value = _mulle_concurrent_pointerarray_get( rover->array, rover->index);
344
-   if( value == MULLE_CONCURRENT_NO_POINTER)
345
-      return( -1);
378
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
346 379
 
347 380
    ++rover->index;
348
-   if( p_value)
349
-      *p_value = value;
350
-
351
-   return( 1);
381
+   return( value);
352 382
 }
353 383
 
354 384
 
355
-int  _mulle_concurrent_pointerarrayreverseenumerator_next( struct mulle_concurrent_pointerarrayreverseenumerator *rover,
356
-                                                     void **p_value)
385
+void   *_mulle_concurrent_pointerarrayreverseenumerator_next( struct mulle_concurrent_pointerarrayreverseenumerator *rover)
357 386
 {
358 387
    void   *value;
359
-   
388
+
360 389
    if( ! rover->index)
361
-      return( 0);
362
-   
363
-   value = _mulle_concurrent_pointerarray_get( rover->array, --rover->index);
364
-   if( value == MULLE_CONCURRENT_NO_POINTER)
365
-      return( -1);
390
+      return( MULLE_CONCURRENT_NO_POINTER);
366 391
 
367
-   if( p_value)
368
-      *p_value = value;
392
+   value = _mulle_concurrent_pointerarray_get( rover->array, --rover->index);
393
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
369 394
 
370
-   return( 1);
395
+   return( value);
371 396
 }
372 397
 
373 398
 
374 399
 int   _mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *array,
375
-                                       void *search)
400
+                                           void *search)
376 401
 {
377 402
    struct mulle_concurrent_pointerarrayenumerator   rover;
378 403
    int                                              found;
379 404
    void                                             *value;
380
-   
405
+
381 406
    found = 0;
382 407
    rover = mulle_concurrent_pointerarray_enumerate( array);
383
-   while( _mulle_concurrent_pointerarrayenumerator_next( &rover, (void **) &value) == 1)
384
-   {
408
+   while( value = _mulle_concurrent_pointerarrayenumerator_next( &rover))
385 409
       if( value == search)
386 410
       {
387 411
          found = 1;
388 412
          break;
389 413
       }
390
-   }
391
-   _mulle_concurrent_pointerarrayenumerator_done( &rover);
392
-   
414
+   mulle_concurrent_pointerarrayenumerator_done( &rover);
415
+
393 416
    return( found);
394 417
 }
395 418
 
396 419
 
397 420
 int   mulle_concurrent_pointerarray_map( struct mulle_concurrent_pointerarray *list,
398
-                                                void (*f)( void *, void *),
399
-                                                void *userinfo)
421
+                                         void (*f)( void *, void *),
422
+                                         void *userinfo)
400 423
 {
401 424
    struct mulle_concurrent_pointerarrayenumerator  rover;
402 425
    void                                            *value;
403
-   
426
+
404 427
    rover = mulle_concurrent_pointerarray_enumerate( list);
405
-   for(;;)
406
-   {
407
-      switch( _mulle_concurrent_pointerarrayenumerator_next( &rover, &value))
408
-      {
409
-      case -1 : return( -1);
410
-      case  1 : (*f)( value, userinfo); continue;
411
-      }
412
-      break;
413
-   }
414
-   _mulle_concurrent_pointerarrayenumerator_done( &rover);
428
+   while( value = _mulle_concurrent_pointerarrayenumerator_next( &rover))
429
+      (*f)( value, userinfo);
430
+   mulle_concurrent_pointerarrayenumerator_done( &rover);
431
+
415 432
    return( 0);
416 433
 }
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 06.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -56,28 +58,75 @@ struct mulle_concurrent_pointerarray
56 58
 };
57 59
 
58 60
 
59
-int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,