Browse code

Improve header organization and add two examples

Nat! authored on 18/10/2016 18:32:06
Showing 26 changed files
1 1
new file mode 100644
... ...
@@ -0,0 +1 @@
0
+https://github.com/
0 1
new file mode 100644
... ...
@@ -0,0 +1 @@
0
+https://mulle-kybernetik.com/repositories/
... ...
@@ -1,3 +1,8 @@
1
-https://www.mulle-kybernetik.com/repositories/mulle-tests;tests/mulle-tests;${MULLE_TESTS_BRANCH:-release}
2
-https://www.mulle-kybernetik.com/repositories/mulle-homebrew;bin/mulle-homebrew;${MULLE_HOMEBREW_BRANCH:-release}
3
-https://www.mulle-kybernetik.com/repositories/mulle-configuration;;${MULLE_CONFIGURATION_BRANCH:-release}
1
+#
2
+# MULLE_REPOSITORIES is defined in mulle-bootstrap 2.3
3
+# override the branches with e.g.
4
+# `echo "master" > .bootstrap.local/MULLE_TESTS_BRANCH`
5
+
6
+${MULLE_REPOSITORIES}mulle-tests;tests/mulle-tests;${MULLE_TESTS_BRANCH:-release}
7
+${MULLE_REPOSITORIES}mulle-homebrew;bin/mulle-homebrew;${MULLE_HOMEBREW_BRANCH:-release}
8
+${MULLE_REPOSITORIES}mulle-configuration;;${MULLE_CONFIGURATION_BRANCH:-release}
... ...
@@ -1,3 +1,8 @@
1
-https://www.mulle-kybernetik.com/repositories/mulle-thread
2
-https://www.mulle-kybernetik.com/repositories/mulle-allocator
3
-https://www.mulle-kybernetik.com/repositories/mulle-aba
1
+#
2
+# MULLE_REPOSITORIES is defined in mulle-bootstrap 2.3
3
+# override the branches with e.g.
4
+# `echo "master" > .bootstrap.local/MULLE_THREAD_BRANCH`
5
+
6
+${MULLE_REPOSITORIES}mulle-thread;;${MULLE_THREAD_BRANCH:-release}
7
+${MULLE_REPOSITORIES}mulle-allocator;;${MULLE_ALLOCATOR_BRANCH:-release}
8
+${MULLE_REPOSITORIES}mulle-aba;;${MULLE_ABA_BRANCH:-release}
... ...
@@ -27,3 +27,12 @@ mulle-tests/
27 27
 /tests/pointerarray/simple
28 28
 /tests/hashmap/hashmap
29 29
 /tests/hashmap/othersimple
30
+
31
+/tests/mulle-tests/
32
+tests/array/example
33
+tests/array/pointerarray
34
+tests/array/simple
35
+tests/hashmap/example
36
+tests/array/example.debug
37
+tests/hashmap/example.debug
38
+tests/hashmap/hashmap.debug
30 39
\ No newline at end of file
... ...
@@ -7,10 +7,10 @@ environments.
7 7
 
8 8
 ## Data structures
9 9
 
10
-Name                            | Description
11
-`mulle_concurrent_hashmap`      | A growing, mutable map of pointers, indexed by a hash
12
-`mulle_concurrent_pointerarray` | A growing array of pointers
10
+Name                            | Description                            | Example
11
+--------------------------------|----------------------------------------|-------------------
12
+`mulle_concurrent_hashmap`      | A growing, mutable map of pointers, indexed by a hash. A.k.a. hashtable, dictionary, maptable                                          | [Example](tests/hashmap/example.c)
13
+`mulle_concurrent_pointerarray` | A growing array of pointers            | [Example](tests/array/example.c)
13 14
 
14 15
 
15 16
 
... ...
@@ -4,6 +4,9 @@
4 4
 * renamed `_mulle_concurrent_hashmap_get_count` to `mulle_concurrent_hashmap_count`,
5 5
 since it's safe to pass NULL and it's not a get operation.
6 6
 * improved the documentation
7
+* added some more "safe API" routines for release
8
+* improved the headers for readability
9
+* clarified return codes of `mulle_concurrent_hashmap_remove`.
7 10
 
8 11
 # v0.5
9 12
 
... ...
@@ -31,4 +34,4 @@ since it's safe to pass NULL and it's not a get operation.'
31 31
 
32 32
 # v0.0
33 33
 
34
-* Merycful Release
34
+* Merciful Release
... ...
@@ -8,71 +8,66 @@ in time.
8 8
 
9 9
 The following operations should be executed in single-threaded fashion:
10 10
 
11
-* `_mulle_concurrent_hashmap_init`
12
-* `_mulle_concurrent_hashmap_done`
13
-* `_mulle_concurrent_hashmap_get_size`
11
+* `mulle_concurrent_hashmap_init`
12
+* `mulle_concurrent_hashmap_done`
13
+* `mulle_concurrent_hashmap_get_size`
14 14
 
15 15
 The following operations are fine in multi-threaded environments:
16 16
 
17
-* `_mulle_concurrent_hashmap_insert`
18
-* `_mulle_concurrent_hashmap_remove`
19
-* `_mulle_concurrent_hashmap_lookup`
17
+* `mulle_concurrent_hashmap_insert`
18
+* `mulle_concurrent_hashmap_remove`
19
+* `mulle_concurrent_hashmap_lookup`
20 20
 
21
-
22
-The following operations work in multi-threaded environments, but should be approached with caution:
21
+The following operations work in multi-threaded environments, but should be
22
+approached with caution:
23 23
 
24 24
 * `mulle_concurrent_hashmap_enumerate`
25
-* `_mulle_concurrent_hashmapenumerator_next`
26
-* `_mulle_concurrent_hashmapenumerator_done`
25
+* `mulle_concurrent_hashmapenumerator_next`
26
+* `mulle_concurrent_hashmapenumerator_done`
27 27
 * `mulle_concurrent_hashmap_lookup_any`
28 28
 * `mulle_concurrent_hashmap_count`
29 29
 
30
+
30 31
 ## single-threaded
31 32
 
32 33
 
33
-### `_mulle_concurrent_hashmap_init`
34
+### `mulle_concurrent_hashmap_init`
34 35
 
35 36
 ```
36
-void   _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
37
-                                       unsigned int size,
38
-                                       struct mulle_allocator *allocator)
37
+int   mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
38
+                                     unsigned int size,
39
+                                     struct mulle_allocator *allocator)
39 40
 ```
40 41
 
41 42
 Initialize `map`, with a starting `size` of elements. `allocator` will be
42 43
 used to allocate and free memory during the lifetime of `map`.  You can pass in
43 44
 NULL for `allocator` to use the default. Call this in single-threaded fashion.
44 45
 
46
+Return Values:
47
+   0      : OK
48
+   EINVAL : invalid argument
49
+   ENOMEM : out of memory
50
+
45 51
 
46
-### `void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)`
52
+### `void  mulle_concurrent_hashmap_done`
47 53
 
48 54
 ```
49
-void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
55
+void  mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
50 56
 ```
51 57
 
52 58
 This will free all allocated resources `map`. It will not **free** `map` itself
53 59
 though. `map` must be a valid pointer. Call this in single-threaded fashion.
54 60
 
55 61
 
56
-### `_mulle_concurrent_hashmap_get_size`
57
-
58
-```
59
-unsigned int   mulle_concurrent_hashmap_get_count( struct mulle_concurrent_hashmap *map);
60
-```
61
-
62
-This gives you the current number of hash/value entries of `map`. The returned
63
-number is close to meaningless, when the map is accessed in multi-threaded
64
-fashion. Call this in single-threaded fashion.
65
-
66
-
67 62
 ## multi-threaded
68 63
 
69 64
 
70
-### `_mulle_concurrent_hashmap_insert`
65
+### `mulle_concurrent_hashmap_insert`
71 66
 
72 67
 ```
73
-int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
74
-                                       intptr_t hash,
75
-                                       void *value)
68
+int  mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
69
+                                      intptr_t hash,
70
+                                      void *value)
76 71
 ```
77 72
 
78 73
 Insert a `hash`, `value` pair.
... ...
@@ -102,16 +97,16 @@ Return Values:
102 102
    ENOMEM : out of memory
103 103
 
104 104
 
105
-### `_mulle_concurrent_hashmap_remove` - remove a hash/value pair
105
+### `mulle_concurrent_hashmap_remove`
106 106
 
107 107
 ```
108
-int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
108
+int  mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
109 109
                                        intptr_t hash,
110 110
                                        void *value)
111 111
 ```
112 112
 
113 113
 Remove a `hash`, `value` pair. Read the description of
114
-`_mulle_concurrent_hashmap_insert` for information about restrictions
114
+`mulle_concurrent_hashmap_insert` for information about restrictions
115 115
 pertaining to both.
116 116
 
117 117
 Return Values:
... ...
@@ -120,10 +115,10 @@ Return Values:
120 120
    ENOMEM : out of memory
121 121
 
122 122
 
123
-### `_mulle_concurrent_hashmap_lookup` - search for a value by hash
123
+### `mulle_concurrent_hashmap_lookup`
124 124
 
125 125
 ```
126
-void   *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
126
+void   *mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
127 127
                                           intptr_t hash)
128 128
 ```
129 129
 
... ...
@@ -135,6 +130,18 @@ Return Values:
135 135
 
136 136
 ---
137 137
 
138
+
139
+### `mulle_concurrent_hashmap_get_size`
140
+
141
+```
142
+unsigned int   mulle_concurrent_hashmap_get_count( struct mulle_concurrent_hashmap *map);
143
+```
144
+
145
+This gives you the current number of hash/value entries of `map`. The returned
146
+number is close to meaningless, when the map is accessed in multi-threaded
147
+fashion.
148
+
149
+
138 150
 # `mulle_concurrent_hashmapenumerator`
139 151
 
140 152
 ```
... ...
@@ -156,17 +163,17 @@ Here is a simple usage example:
156 156
    void                                        *value;
157 157
 
158 158
    rover = mulle_concurrent_hashmap_enumerate( map);
159
-   while( _mulle_concurrent_hashmapenumerator_next( &rover, &hash, &value) == 1)
159
+   while( mulle_concurrent_hashmapenumerator_next( &rover, &hash, &value) == 1)
160 160
    {
161 161
       printf( "%ld %p\n", hash, value);
162 162
    }
163
-   _mulle_concurrent_hashmapenumerator_done( &rover);
163
+   mulle_concurrent_hashmapenumerator_done( &rover);
164 164
 ```
165 165
 
166
-### `_mulle_concurrent_hashmapenumerator_next`
166
+### `mulle_concurrent_hashmapenumerator_next`
167 167
 
168 168
 ```
169
-int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
169
+int  mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
170 170
                                                intptr_t *hash,
171 171
                                                void **value)
172 172
 ```
... ...
@@ -174,16 +181,16 @@ int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapen
174 174
 Get the next `hash`, `value` pair from the enumerator.
175 175
 
176 176
 Return Values:
177
-   1           : OK
178
-   0           : nothing left
179
-   -ECANCELLED : hashtable was mutated (Note: **negative errno value**!)
180
-   -ENOMEM     : out of memory         (Note: **negative errno value**!)
177
+   1          : OK
178
+   0          : nothing left
179
+   ECANCELLED : hashtable was mutated
180
+   ENOMEM     : out of memory
181 181
 
182 182
 
183
-### `_mulle_concurrent_hashmapenumerator_done`
183
+### `mulle_concurrent_hashmapenumerator_done`
184 184
 
185 185
 ```
186
-void  _mulle_concurrent_hashmapenumerator_done( struct mulle_concurrent_hashmapenumerator *rover)
186
+void  mulle_concurrent_hashmapenumerator_done( struct mulle_concurrent_hashmapenumerator *rover)
187 187
 ```
188 188
 
189 189
 It's a mere conventional function. It may be left out.
... ...
@@ -195,8 +202,10 @@ It's a mere conventional function. It may be left out.
195 195
 unsigned int   mulle_concurrent_hashmap_count( struct mulle_concurrent_hashmap *map);
196 196
 ```
197 197
 
198
-This gives you the current number of hash/value entries of `map`. It is implemented as an iterator loop, that counts the number of values.
199
-The returned number is close to meaningless, when the map is accessed in multi-threaded fashion.
198
+This gives you the current number of hash/value entries of `map`. It is
199
+implemented as an iterator loop, that counts the number of values.
200
+The returned number may be close to meaningless, when the map is accessed in
201
+multi-threaded fashion.
200 202
 
201 203
 
202 204
 ### `mulle_concurrent_hashmap_lookup_any` - get a value from the hashmap
... ...
@@ -8,35 +8,38 @@ handling very simple.
8 8
 
9 9
 The following operations should be executed in single-threaded fashion:
10 10
 
11
-* `_mulle_concurrent_pointerarray_init`
12
-* `_mulle_concurrent_pointerarray_done`
13
-* `_mulle_concurrent_pointerarray_get_size`
11
+* `mulle_concurrent_pointerarray_init`
12
+* `mulle_concurrent_pointerarray_done`
14 13
 
15 14
 The following operations are fine in multi-threaded environments:
16 15
 
17
-* `_mulle_concurrent_pointerarray_add`
18
-* `_mulle_concurrent_pointerarray_get`
16
+* `mulle_concurrent_pointerarray_add`
17
+* `mulle_concurrent_pointerarray_get`
19 18
 
20 19
 The following operations work in multi-threaded environments,
21 20
 but should be approached with caution:
22 21
 
23 22
 * `mulle_concurrent_pointerarray_enumerate`
24
-* `_mulle_concurrent_pointerarrayenumerator_next`
25
-* `_mulle_concurrent_pointerarrayenumerator_done`
26
-* `_mulle_concurrent_pointerarray_reverseenumerate`
27
-* `_mulle_concurrent_pointerarrayreverseenumerator_next`
28
-* `_mulle_concurrent_pointerarrayreverseenumerator_done`
23
+* `mulle_concurrent_pointerarrayenumerator_next`
24
+* `mulle_concurrent_pointerarrayenumerator_done`
25
+* `mulle_concurrent_pointerarray_reverseenumerate`
26
+* `mulle_concurrent_pointerarrayreverseenumerator_next`
27
+* `mulle_concurrent_pointerarrayreverseenumerator_done`
29 28
 * `mulle_concurrent_pointerarray_map`
30
-* `_mulle_concurrent_pointerarray_find`
29
+* `mulle_concurrent_pointerarray_find`
31 30
 * `mulle_concurrent_pointerarray_get_count`
31
+* `mulle_concurrent_pointerarray_get_size`
32 32
 
33 33
 
34
-### `_mulle_concurrent_pointerarray_init` - initialize pointerarray
34
+## single-threaded
35
+
36
+
37
+### `mulle_concurrent_pointerarray_init`
35 38
 
36 39
 ```
37
-void   _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
38
-                                       unsigned int size,
39
-                                       struct mulle_allocator *allocator)
40
+int   mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,
41
+                                          unsigned int size,
42
+                                          struct mulle_allocator *allocator)
40 43
 ```
41 44
 
42 45
 Initialize `array`, with a starting `size` of elements. `allocator` will be
... ...
@@ -44,10 +47,10 @@ used to allocate and free memory during the lifetime of `array`.  You can pass i
44 44
 NULL for `allocator` to use the default. Call this in single-threaded fashion.
45 45
 
46 46
 
47
-### `_mulle_concurrent_pointerarray_done` - free pointerarray resources
47
+### `mulle_concurrent_pointerarray_done`
48 48
 
49 49
 ```
50
-void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *array)
50
+void  mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray *array)
51 51
 ```
52 52
 
53 53
 This will free all allocated resources `array`. It will not **free** `array`
... ...
@@ -55,104 +58,72 @@ itself though. `array` must be a valid pointer. Call this in single-threaded
55 55
 fashion.
56 56
 
57 57
 
58
-### `_mulle_concurrent_pointerarray_insert` - insert a hash/value pair
58
+## multi-threaded
59 59
 
60
-```
61
-int  _mulle_concurrent_pointerarray_insert( struct mulle_concurrent_pointerarray *array,
62
-                                       intptr_t hash,
63
-                                       void *value)
64
-```
65 60
 
66
-Insert a `hash`, `value` pair.
67
-`hash` must not be zero. It should be a unique integer key, suitably treated to
68
-be a good hash value. Here is an example of an avalance function for simple
69
-integer keys (1-...)
61
+### `mulle_concurrent_pointerarray_add`
70 62
 
71 63
 ```
72
-static inline uint64_t   mulle_hash_avalanche64(uint64_t h)
73
-{
74
-   h ^= h >> 33;
75
-   h *= 0xff51afd7ed558ccd;
76
-   h ^= h >> 33;
77
-   h *= 0xc4ceb9fe1a85ec53;
78
-   h ^= h >> 33;
79
-   return h;
80
-}
64
+int  mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
65
+                                        void *value)
81 66
 ```
82 67
 
83
-`value` can be any `void *` except `NULL` or `(void *) INTPTR_MAX`.  It will
68
+Add value to the end of the array.
69
+`value` can be any `void *` except `NULL` or `(void *) INTPTR_MAX`. It will
84 70
 not get dereferenced by the pointerarray.
85 71
 
86 72
 
87 73
 Return Values:
88 74
    0      : OK
89
-   EEXIST : duplicate
90
-   ENOMEM : out of memory
91
-
92
-
93
-### `_mulle_concurrent_pointerarray_remove` - remove a hash/value pair
94
-
95
-```
96
-int  _mulle_concurrent_pointerarray_remove( struct mulle_concurrent_pointerarray *array,
97
-                                       intptr_t hash,
98
-                                       void *value)
99
-```
100
-
101
-Remove a `hash`, `value` pair. Read the description of
102
-`_mulle_concurrent_pointerarray_insert` for information about restrictions
103
-pertaining to both.
104
-
105
-Return Values:
106
-   0      : OK
107
-   ENOENT : not found
75
+   EINVAL : invalid argument
108 76
    ENOMEM : out of memory
109 77
 
110 78
 
111
-### `_mulle_concurrent_pointerarray_lookup` - search for a value by hash
79
+### `mulle_concurrent_pointerarray_get`
112 80
 
113 81
 ```
114
-void   *_mulle_concurrent_pointerarray_lookup( struct mulle_concurrent_pointerarray *array,
115
-                                          intptr_t hash)
82
+void   *mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *array,
83
+                                           unsigned int index)
116 84
 ```
117 85
 
118
-Looks up a value by its hash.
86
+Get value at `index` of array.
119 87
 
120 88
 Return Values:
121
-   NULL  : not found
122
-   otherwise the value for this hash
89
+   NULL  : not found (invalid argument)
90
+   otherwise the value
123 91
 
124 92
 
125
-### `_mulle_concurrent_pointerarray_get_size` - get size of pointerarray
93
+### `mulle_concurrent_pointerarray_get_size`
126 94
 
127 95
 ```
128
-unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
96
+unsigned int  mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
129 97
 ```
130 98
 
131 99
 This gives you the capacity of `array`. This value is close to
132 100
 meaningless, when the array is accessed in multi-threaded fashion.
133 101
 
134 102
 
135
-### `_mulle_concurrent_pointerarray_get_size` - get number of entries of pointerarray
103
+### `mulle_concurrent_pointerarray_get_count`
136 104
 
137 105
 ```
138 106
 unsigned int   mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array);
139 107
 ```
140 108
 
141
-This gives you the current number of hash/value entries of `array`. The returned
142
-number is close to meaningless, when the array is accessed in multi-threaded
143
-fashion.
109
+This gives you the current number of entries in `array`. As the array can only
110
+grow this value is useful, but maybe already outdated.
111
+
144 112
 
113
+## `mulle_concurrent_pointerarrayenumerator`
145 114
 
146
-## `mulle_concurrent_pointerarrayenumerator` - enumerator interface
115
+### `mulle_concurrent_pointerarray_enumerate`
147 116
 
148 117
 ```
149 118
 struct mulle_concurrent_pointerarrayenumerator  mulle_concurrent_pointerarray_enumerate( struct mulle_concurrent_pointerarray *array)
150 119
 ```
151 120
 
152
-Enumerate a hashtable. This works reliably if `array` is accessed in
153
-single-threaded fashion, which it probably will NOT be. In multi-threaded
154
-environments, the enumeration may be interrupted by mutations of the hashtable
155
-by other threads. The enumerator itself should not be shared accessed by other threads.
121
+Enumerate a pointerarray. This works reliably even in multi-threaded
122
+environments. The enumerator itself should not be shared with other
123
+threads though.
156 124
 
157 125
 Here is a simple usage example:
158 126
 
... ...
@@ -160,41 +131,37 @@ Here is a simple usage example:
160 160
 ```
161 161
    struct mulle_concurrent_pointerarray             *array;
162 162
    struct mulle_concurrent_pointerarrayenumerator   rover;
163
-   intptr_t                                    hash;
164
-   void                                        *value;
163
+   void                                             *value;
165 164
 
166 165
    rover = mulle_concurrent_pointerarray_enumerate( array);
167
-   while( _mulle_concurrent_pointerarrayenumerator_next( &rover, &hash, &value) == 1)
166
+   while( value = mulle_concurrent_pointerarrayenumerator_next( &rover))
168 167
    {
169
-      printf( "%ld %p\n", hash, value);
168
+      printf( "%p\n", value);
170 169
    }
171
-   _mulle_concurrent_pointerarrayenumerator_done( &rover);
170
+   mulle_concurrent_pointerarrayenumerator_done( &rover);
172 171
 ```
173 172
 
174
-### `_mulle_concurrent_pointerarrayenumerator_next` - get next hash/value pair
173
+### `mulle_concurrent_pointerarrayenumerator_next`
175 174
 
176 175
 ```
177
-int  _mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_pointerarrayenumerator *rover,
178
-                                               intptr_t *hash,
179
-                                               void **value)
176
+void   *mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_pointerarrayenumerator *rover)
180 177
 ```
181 178
 
182
-Get the next `hash`, `value` pair from the enumerator.
179
+Get the next value from the enumerator.
183 180
 
184 181
 Return Values:
185
-   1           : OK
186
-   0           : nothing left
187
-   -ECANCELLED : hashtable was mutated (Note: **negative errno value**!)
188
-   -ENOMEM     : out of memory         (Note: **negative errno value**!)
182
+   NULL  : nothing left
183
+   otherwise the value
189 184
 
190 185
 
191
-### `_mulle_concurrent_pointerarrayenumerator_done` - mark the end of the enumerator
186
+### `mulle_concurrent_pointerarrayenumerator_done`
192 187
 
193 188
 ```
194
-void  _mulle_concurrent_pointerarrayenumerator_done( struct mulle_concurrent_pointerarrayenumerator *rover)
189
+void   mulle_concurrent_pointerarrayenumerator_done( struct mulle_concurrent_pointerarrayenumerator *rover)
195 190
 ```
196 191
 
197
-It's a mere conventional function. It may be left out.
192
+Mark the end of the enumerator lifetime. It's a mere conventional function.
193
+It may be left out.
198 194
 
199 195
 
200 196
 
... ...
@@ -124,7 +124,7 @@
124 124
 		41CAEAE71C8D9FF4003C2C7B /* test-hashmap */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "test-hashmap"; sourceTree = BUILT_PRODUCTS_DIR; };
125 125
 		41CAEAEE1C8DA00F003C2C7B /* hashmap.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = hashmap.c; path = tests/hashmap/hashmap.c; sourceTree = SOURCE_ROOT; };
126 126
 		41CAEAF71C8DA20F003C2C7B /* test-pointerarray */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "test-pointerarray"; sourceTree = BUILT_PRODUCTS_DIR; };
127
-		41CAEB031C8DA251003C2C7B /* pointerarray.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pointerarray.c; sourceTree = "<group>"; };
127
+		41CAEB031C8DA251003C2C7B /* pointerarray.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = pointerarray.c; path = ../array/pointerarray.c; sourceTree = "<group>"; };
128 128
 		41CAEB051C8DA326003C2C7B /* libmulle_test_allocator.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libmulle_test_allocator.a; path = dependencies/lib/Debug/libmulle_test_allocator.a; sourceTree = "<group>"; };
129 129
 		41CAEB091C8DB97F003C2C7B /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = "<group>"; };
130 130
 		41D04AB71C8DD69000CC8F11 /* RELEASENOTES.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = RELEASENOTES.md; sourceTree = "<group>"; };
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 04.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -254,7 +256,7 @@ static int   _mulle_concurrent_hashmapstorage_put( struct _mulle_concurrent_hash
254 254
       if( entry->hash == MULLE_CONCURRENT_NO_HASH)
255 255
       {
256 256
          found = __mulle_atomic_pointer_compare_and_swap( &entry->value, value, MULLE_CONCURRENT_NO_POINTER);
257
-         if( found)
257
+         if( found != MULLE_CONCURRENT_NO_POINTER)
258 258
          {
259 259
             if( found == REDIRECT_VALUE)
260 260
                return( EBUSY);
... ...
@@ -291,15 +293,9 @@ static int   _mulle_concurrent_hashmapstorage_remove( struct _mulle_concurrent_h
291 291
       if( entry->hash == hash)
292 292
       {
293 293
          found = __mulle_atomic_pointer_compare_and_swap( &entry->value, MULLE_CONCURRENT_NO_POINTER, value);
294
-         if( found != MULLE_CONCURRENT_NO_POINTER)
295
-         {
296
-            if( found == REDIRECT_VALUE)
297
-               return( EBUSY);
298
-
299
-            // once a entry->hash it must not be erased (except
300
-            // during migration)
301
-         }
302
-         return( 0);
294
+         if( found == REDIRECT_VALUE)
295
+            return( EBUSY);
296
+         return( found == value ? 0 : ENOENT);
303 297
       }
304 298
       
305 299
       if( entry->hash == MULLE_CONCURRENT_NO_HASH)
... ...
@@ -361,8 +357,8 @@ int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
361 361
    if( ! allocator)
362 362
       allocator = &mulle_default_allocator;
363 363
    
364
-   assert( allocator->abafree && allocator->abafree != (void *) abort);
365
-   if( ! allocator->abafree || allocator->abafree == (void *) abort)
364
+   assert( allocator->abafree && allocator->abafree != (int (*)()) abort);
365
+   if( ! allocator->abafree || allocator->abafree == (int (*)()) abort)
366 366
       return( EINVAL);
367 367
 
368 368
    map->allocator = allocator;
... ...
@@ -423,7 +419,7 @@ static int  _mulle_concurrent_hashmap_migrate_storage( struct mulle_concurrent_h
423 423
       // acquire new storage
424 424
       alloced = _mulle_concurrent_alloc_hashmapstorage( ((unsigned int) p->mask + 1) * 2, map->allocator);
425 425
       if( ! alloced)
426
-         return( -1);
426
+         return( ENOMEM);
427 427
       
428 428
       // make this the next world, assume that's still set to 'p' (SIC)
429 429
       q = __mulle_atomic_pointer_compare_and_swap( &map->next_storage.pointer, alloced, p);
... ...
@@ -458,6 +454,7 @@ void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
458 458
    struct _mulle_concurrent_hashmapstorage   *p;
459 459
    void                                      *value;
460 460
    
461
+   // won't find invalid hash anyway
461 462
 retry:
462 463
    p     = _mulle_atomic_pointer_read( &map->storage.pointer);
463 464
    value = _mulle_concurrent_hashmapstorage_lookup( p, hash);
... ...
@@ -470,6 +467,7 @@ retry:
470 470
    return( value);
471 471
 }
472 472
 
473
+
473 474
 static int   _mulle_concurrent_hashmap_search_next( struct mulle_concurrent_hashmap *map,
474 475
                                                     unsigned int  *expect_mask,
475 476
                                                     unsigned int  *index,
... ...
@@ -483,7 +481,7 @@ static int   _mulle_concurrent_hashmap_search_next( struct mulle_concurrent_hash
483 483
 retry:
484 484
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
485 485
    if( *expect_mask && (unsigned int) p->mask != *expect_mask)
486
-      return( -ECANCELED);
486
+      return( ECANCELED);
487 487
    
488 488
    for(;;)
489 489
    {
... ...
@@ -492,15 +490,15 @@ retry:
492 492
          return( 0);
493 493
       
494 494
       value = _mulle_atomic_pointer_read( &entry->value);
495
-      if( value != MULLE_CONCURRENT_NO_POINTER)
496
-         break;
497
-
498 495
       if( value == REDIRECT_VALUE)
499 496
       {
500 497
          if( _mulle_concurrent_hashmap_migrate_storage( map, p))
501
-            return( -ENOMEM);
498
+            return( ENOMEM);
502 499
          goto retry;
503 500
       }
501
+
502
+      if( value != MULLE_CONCURRENT_NO_POINTER)
503
+         break;
504 504
    }
505 505
    
506 506
    if( p_hash)
... ...
@@ -515,6 +513,14 @@ retry:
515 515
 }
516 516
 
517 517
 
518
+static inline void   assert_hash_value( intptr_t hash, void *value)
519
+{
520
+   assert( hash != MULLE_CONCURRENT_NO_HASH);
521
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
522
+   assert( value != MULLE_CONCURRENT_INVALID_POINTER);
523
+}
524
+
525
+
518 526
 int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
519 527
                                        intptr_t hash,
520 528
                                        void *value)
... ...
@@ -523,9 +529,7 @@ int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
523 523
    unsigned int                              n;
524 524
    unsigned int                              max;
525 525
 
526
-   assert( hash != MULLE_CONCURRENT_NO_HASH);
527
-   assert( value);
528
-   assert( value != MULLE_CONCURRENT_NO_POINTER && value != MULLE_CONCURRENT_INVALID_POINTER);
526
+   assert_hash_value( hash, value);
529 527
    
530 528
 retry:
531 529
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
... ...
@@ -556,12 +560,30 @@ retry:
556 556
 }
557 557
 
558 558
 
559
+int  mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
560
+                                      intptr_t hash,
561
+                                      void *value)
562
+{
563
+   if( ! map)
564
+      return( EINVAL);
565
+   if( hash == MULLE_CONCURRENT_NO_HASH)
566
+      return( EINVAL);
567
+   if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
568
+      return( EINVAL);
569
+
570
+   return( _mulle_concurrent_hashmap_insert( map, hash, value));
571
+}
572
+
573
+
574
+
559 575
 int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
560 576
                                        intptr_t hash,
561 577
                                        void *value)
562 578
 {
563 579
    struct _mulle_concurrent_hashmapstorage   *p;
564 580
    
581
+   assert_hash_value( hash, value);
582
+   
565 583
 retry:
566 584
    p = _mulle_atomic_pointer_read( &map->storage.pointer);
567 585
    switch( _mulle_concurrent_hashmapstorage_remove( p, hash, value))
... ...
@@ -578,6 +600,20 @@ retry:
578 578
 }
579 579
 
580 580
 
581
+int  mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
582
+                                      intptr_t hash,
583
+                                      void *value)
584
+{
585
+   if( ! map)
586
+      return( EINVAL);
587
+   if( hash == MULLE_CONCURRENT_NO_HASH)
588
+      return( EINVAL);
589
+   if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
590
+      return( EINVAL);
591
+
592
+   return( _mulle_concurrent_hashmap_remove( map, hash, value));
593
+}
594
+
581 595
 
582 596
 #pragma mark -
583 597
 #pragma mark not so concurrent enumerator
... ...
@@ -592,7 +628,7 @@ int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapen
592 592
    
593 593
    rval = _mulle_concurrent_hashmap_search_next( rover->map, &rover->mask, &rover->index, &hash, &value);
594 594
    
595
-   if( rval <= 0)
595
+   if( rval != 1)
596 596
       return( rval);
597 597
    
598 598
    if( p_hash)
... ...
@@ -623,17 +659,20 @@ retry:
623 623
    for(;;)
624 624
    {
625 625
       rval = _mulle_concurrent_hashmapenumerator_next( &rover, NULL, NULL);
626
-      if( ! rval)
627
-         break;
628
-      if( rval < 0)
626
+      if( rval == 1)
629 627
       {
630
-         _mulle_concurrent_hashmapenumerator_done( &rover);
631
-         goto retry;
628
+         ++count;
629
+         continue;
632 630
       }
633
-      ++count;
631
+
632
+      if( ! rval)
633
+         break;
634
+   
635
+      mulle_concurrent_hashmapenumerator_done( &rover);
636
+      goto retry;
634 637
    }
635 638
    
636
-   _mulle_concurrent_hashmapenumerator_done( &rover);
639
+   mulle_concurrent_hashmapenumerator_done( &rover);
637 640
    return( count);
638 641
 }
639 642
 
... ...
@@ -647,7 +686,7 @@ void  *mulle_concurrent_hashmap_lookup_any( struct mulle_concurrent_hashmap *map
647 647
    
648 648
    rover = mulle_concurrent_hashmap_enumerate( map);
649 649
    _mulle_concurrent_hashmapenumerator_next( &rover, NULL, &any);
650
-   _mulle_concurrent_hashmapenumerator_done( &rover);
650
+   mulle_concurrent_hashmapenumerator_done( &rover);
651 651
    
652 652
    return( any);
653 653
 }
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 04.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -61,33 +63,87 @@ struct mulle_concurrent_hashmap
61 61
 #pragma mark -
62 62
 #pragma mark single-threaded
63 63
 
64
-int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
65
-                                     unsigned int size,
66
-                                     struct mulle_allocator *allocator);
67
-void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map);
68 64
 
69
-unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map);
65
+// Returns:
66
+//   0      : OK
67
+//   EINVAL : invalid argument
68
+//   ENOMEM : out of memory
69
+//
70
+static inline int  mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
71
+                                                  unsigned int size,
72
+                                                  struct mulle_allocator *allocator)
73
+{
74
+   int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
75
+                                       unsigned int size,
76
+                                       struct mulle_allocator *allocator);
77
+   if( ! map)
78
+      return( EINVAL);
79
+   return( _mulle_concurrent_hashmap_init( map, size, allocator));
80
+}
81
+
82
+
83
+static inline void  mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map)
84
+{
85
+   void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map);
86
+
87
+   if( map)
88
+      _mulle_concurrent_hashmap_done( map);
89
+}
90
+
91
+
92
+static inline unsigned int  mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map)
93
+{
94
+   unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map);
95
+   
96
+   if( ! map)
97
+      return( 0);
98
+   return( _mulle_concurrent_hashmap_get_size( map));
99
+}
100
+
70 101
 
71 102
 #pragma mark -
72 103
 #pragma mark multi-threaded
73 104
 
74
-// if rval == 0, inserted
75
-// rval == EEXIST, detected duplicate
76
-// rval == ENOMEM, must be out of memory
77
-int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
105
+// Return value (rval):
106
+//   0      : OK, inserted
107
+//   EEXIST : detected duplicate
108
+//   EINVAL : invalid argument
109
+//   ENOMEM : must be out of memory
110
+//
111
+// Do not use hash=0
112
+// Do not use value=0 or value=INTPTR_MAX
113
+//
114
+int   mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
78 115
                                        intptr_t hash,
79 116
                                        void *value);
80 117
 
118
+
119
+// if rval == NULL, not found
120
+
121
+static inline void  *mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
122
+                                                     intptr_t hash)
123
+{
124
+   void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
125
+                                           intptr_t hash);
126
+
127
+   if( ! map)
128
+      return( NULL);
129
+   return( _mulle_concurrent_hashmap_lookup( map, hash));
130
+}
131
+
132
+
81 133
 // if rval == 0, removed
82
-// rval == ENOENT, not found
134
+// rval == ENOENT, not found (hash/value pair does not exist (anymore))
135
+// rval == EINVAL, parameter has invalid value
83 136
 // rval == ENOMEM, must be out of memory
84
-//
85
-int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
137
+
138
+int   mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
86 139
                                        intptr_t hash,
87 140
                                        void *value);
88 141
 
89
-void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
90
-                                         intptr_t hash);
142
+
143
+
144
+
91 145
 
92 146
 #pragma mark -
93 147
 #pragma mark limited multi-threaded
... ...
@@ -110,28 +166,69 @@ static inline struct mulle_concurrent_hashmapenumerator  mulle_concurrent_hashma
110 110
    struct mulle_concurrent_hashmapenumerator   rover;
111 111
    
112 112
    rover.map   = map;
113
-   rover.index = map ? 0 : -1;
113
+   rover.index = map ? 0 : (unsigned int) -1;
114 114
    rover.mask  = 0;
115 115
    
116 116
    return( rover);
117 117
 }
118 118
 
119 119
 
120
-//  1 : OK
121
-//  0 : nothing left
122
-// -ECANCELLED: mutation alert
123
-// -ENOMEM    : out of memory
124
-int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
120
+//  1         : OK
121
+//  0         : nothing left
122
+// ECANCELLED : mutation alert
123
+// ENOMEM     : out of memory
124
+// EINVAL     : wrong parameter value
125
+
126
+static inline int  mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
125 127
                                                intptr_t *hash,
126
-                                               void **value);
128
+                                               void **value)
129
+{
130
+   int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
131
+                                                 intptr_t *hash,
132
+                                                 void **value);
133
+   if( ! rover)
134
+      return( -EINVAL);
135
+   return( _mulle_concurrent_hashmapenumerator_next( rover, hash, value));
136
+}
137
+
127 138
 
128
-static inline void  _mulle_concurrent_hashmapenumerator_done( struct mulle_concurrent_hashmapenumerator *rover)
139
+static inline void  mulle_concurrent_hashmapenumerator_done( struct mulle_concurrent_hashmapenumerator *rover)
129 140
 {
130 141
 }
131 142
 
132
-// conveniences using the enumerator
133 143
 
134
-void   *mulle_concurrent_hashmap_lookup_any( struct mulle_concurrent_hashmap *map);
135
-unsigned int  mulle_concurrent_hashmap_count( struct mulle_concurrent_hashmap *map);
144
+#pragma mark -
145
+#pragma mark enumerator conveniences
146
+
147
+void           *mulle_concurrent_hashmap_lookup_any( struct mulle_concurrent_hashmap *map);
148
+unsigned int   mulle_concurrent_hashmap_count( struct mulle_concurrent_hashmap *map);
149
+
150
+
151
+#pragma mark -
152
+#pragma mark various functions, no parameter checks
153
+
154
+int  _mulle_concurrent_hashmap_init( struct mulle_concurrent_hashmap *map,
155
+                                     unsigned int size,
156
+                                     struct mulle_allocator *allocator);
157
+void  _mulle_concurrent_hashmap_done( struct mulle_concurrent_hashmap *map);
158
+
159
+unsigned int  _mulle_concurrent_hashmap_get_size( struct mulle_concurrent_hashmap *map);
160
+
161
+
162
+int  _mulle_concurrent_hashmap_insert( struct mulle_concurrent_hashmap *map,
163
+                                       intptr_t hash,
164
+                                       void *value);
165
+
166
+void  *_mulle_concurrent_hashmap_lookup( struct mulle_concurrent_hashmap *map,
167
+                                         intptr_t hash);
168
+
169
+int  _mulle_concurrent_hashmap_remove( struct mulle_concurrent_hashmap *map,
170
+                                       intptr_t hash,
171
+                                       void *value);
172
+
173
+
174
+int  _mulle_concurrent_hashmapenumerator_next( struct mulle_concurrent_hashmapenumerator *rover,
175
+                                               intptr_t *hash,
176
+                                               void **value);
136 177
 
137 178
 #endif /* mulle_concurrent_hashmap_h */
... ...
@@ -35,7 +35,7 @@
35 35
 #define mulle_concurrent_h__
36 36
 
37 37
 
38
-#define MULLE_CONCURRENT_VERSION  ((0 << 20) | (5 << 8) | 0)
38
+#define MULLE_CONCURRENT_VERSION  ((1 << 20) | (0 << 8) | 0)
39 39
 
40 40
 #include <mulle_thread/mulle_thread.h>
41 41
 #include <mulle_allocator/mulle_allocator.h>
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 07.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 07.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -38,6 +40,7 @@
38 38
 
39 39
 //
40 40
 // if you don't like those values, you 'should' be able to redefine those
41
+// this file is not exposed (yet)
41 42
 //
42 43
 #define MULLE_CONCURRENT_NO_HASH           0
43 44
 
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 06.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -44,7 +46,7 @@ struct _mulle_concurrent_pointerarraystorage
44 44
 {
45 45
    mulle_atomic_pointer_t   n;
46 46
    uintptr_t                size;
47
-   
47
+
48 48
    mulle_atomic_pointer_t   entries[ 1];
49 49
 };
50 50
 
... ...
@@ -68,16 +70,16 @@ static struct _mulle_concurrent_pointerarraystorage *
68 68
                                                 struct mulle_allocator *allocator)
69 69
 {
70 70
    struct _mulle_concurrent_pointerarraystorage  *p;
71
-   
71
+
72 72
    if( n < 8)
73 73
       n = 8;
74
-   
74
+
75 75
    p = _mulle_allocator_calloc( allocator, 1, sizeof( void *) * (n - 1) +
76 76
                              sizeof( struct _mulle_concurrent_pointerarraystorage));
77 77
    if( ! p)
78 78
       return( p);
79 79
    p->size = n;
80
-   
80
+
81 81
    /*
82 82
     * in theory, one should be able to use different values for NO_POINTER and
83 83
     * INVALID_POINTER
... ...
@@ -86,7 +88,7 @@ static struct _mulle_concurrent_pointerarraystorage *
86 86
    {
87 87
       mulle_atomic_pointer_t   *q;
88 88
       mulle_atomic_pointer_t   *sentinel;
89
-      
89
+
90 90
       q        = p->entries;
91 91
       sentinel = &p->entries[ (unsigned int) p->size];
92 92
       while( q < sentinel)
... ...
@@ -95,7 +97,7 @@ static struct _mulle_concurrent_pointerarraystorage *
95 95
          ++q;
96 96
       }
97 97
    }
98
-   
98
+
99 99
    return( p);
100 100
 }
101 101
 
... ...
@@ -120,9 +122,10 @@ static int   _mulle_concurrent_pointerarraystorage_add( struct _mulle_concurrent
120 120
 {
121 121
    void           *found;
122 122
    unsigned int   i;
123
-   
123
+
124 124
    assert( p);
125
-   assert( value != MULLE_CONCURRENT_NO_POINTER && value != MULLE_CONCURRENT_INVALID_POINTER);
125
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
126
+   assert( value != MULLE_CONCURRENT_INVALID_POINTER);
126 127
 
127 128
    for(;;)
128 129
    {
... ...
@@ -136,7 +139,7 @@ static int   _mulle_concurrent_pointerarraystorage_add( struct _mulle_concurrent
136 136
          _mulle_atomic_pointer_increment( &p->n);
137 137
          return( 0);
138 138
       }
139
-      
139
+
140 140
       if( found == REDIRECT_VALUE)
141 141
          return( EBUSY);
142 142
    }
... ...
@@ -151,7 +154,7 @@ static void   _mulle_concurrent_pointerarraystorage_copy( struct _mulle_concurre
151 151
    void                     *value;
152 152
    unsigned int             i;
153 153
    unsigned int             n;
154
-   
154
+
155 155
    n      = (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &dst->n);
156 156
    p      = &src->entries[ n];
157 157
    p_last = &src->entries[ src->size];
... ...
@@ -173,15 +176,15 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
173 173
                                           struct mulle_allocator *allocator)
174 174
 {
175 175
    struct _mulle_concurrent_pointerarraystorage   *storage;
176
-   
176
+
177 177
    if( ! allocator)
178 178
       allocator = &mulle_default_allocator;
179 179
 
180
-   assert( allocator->abafree && allocator->abafree != (void *) abort);
181
-   
182
-   if( ! allocator->abafree || allocator->abafree == (void *) abort)
180
+   assert( allocator->abafree && allocator->abafree != (int (*)()) abort);
181
+
182
+   if( ! allocator->abafree)
183 183
       return( EINVAL);
184
-   
184
+
185 185
    array->allocator = allocator;
186 186
    storage          = _mulle_concurrent_alloc_pointerarraystorage( size, allocator);
187 187
 
... ...
@@ -190,7 +193,7 @@ int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *
190 190
 
191 191
    _mulle_atomic_pointer_nonatomic_write( &array->storage.pointer, storage);
192 192
    _mulle_atomic_pointer_nonatomic_write( &array->next_storage.pointer, storage);
193
-   
193
+
194 194
    return( 0);
195 195
 }
196 196
 
... ...
@@ -202,16 +205,40 @@ void  _mulle_concurrent_pointerarray_done( struct mulle_concurrent_pointerarray
202 202
 {
203 203
    struct _mulle_concurrent_pointerarraystorage   *storage;
204 204
    struct _mulle_concurrent_pointerarraystorage   *next_storage;
205
-   
205
+
206 206
    storage      = _mulle_atomic_pointer_nonatomic_read( &array->storage.pointer);
207 207
    next_storage = _mulle_atomic_pointer_nonatomic_read( &array->next_storage.pointer);
208
-   
208
+
209 209
    _mulle_allocator_abafree( array->allocator, storage);
210 210
    if( storage != next_storage)
211 211
       _mulle_allocator_abafree( array->allocator, next_storage);
212 212
 }
213 213
 
214 214
 
215
+unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
216
+{
217
+   struct _mulle_concurrent_pointerarraystorage   *p;
218
+
219
+   p = _mulle_atomic_pointer_read( &array->storage.pointer);
220
+   return( (unsigned int) p->size);
221
+}
222
+
223
+
224
+//
225
+// obviously just a snapshot at some recent point in time
226
+//
227
+unsigned int   _mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array)
228
+{
229
+   struct _mulle_concurrent_pointerarraystorage   *p;
230
+
231
+   p = _mulle_atomic_pointer_read( &array->storage.pointer);
232
+   return( (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &p->n));
233
+}
234
+
235
+
236
+# pragma mark -
237
+# pragma mark multi-threaded
238
+
215 239
 static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurrent_pointerarray *array,
216 240
                                                       struct _mulle_concurrent_pointerarraystorage *p)
217 241
 {
... ...
@@ -221,18 +248,18 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
221 221
    struct _mulle_concurrent_pointerarraystorage   *previous;
222 222
 
223 223
    assert( p);
224
-   
224
+
225 225
    // acquire new storage
226 226
    alloced = NULL;
227 227
    q       = _mulle_atomic_pointer_read( &array->next_storage.pointer);
228 228
 
229 229
    assert( q);
230
-   
230
+
231 231
    if( q == p)
232 232
    {
233 233
       alloced = _mulle_concurrent_alloc_pointerarraystorage( (unsigned int) p->size * 2, array->allocator);
234 234
       if( ! alloced)
235
-         return( -1);
235
+         return( ENOMEM);
236 236
 
237 237
       // make this the next world, assume that's still set to 'p' (SIC)
238 238
       q = __mulle_atomic_pointer_compare_and_swap( &array->next_storage.pointer, alloced, p);
... ...
@@ -245,10 +272,10 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
245 245
       else
246 246
          q = alloced;
247 247
    }
248
-   
248
+
249 249
    // this thread can partake in copying
250 250
    _mulle_concurrent_pointerarraystorage_copy( q, p);
251
-   
251
+
252 252
    // now update world, giving it the same value as 'next_world'
253 253
    previous = __mulle_atomic_pointer_compare_and_swap( &array->storage.pointer, q, p);
254 254
 
... ...
@@ -256,7 +283,7 @@ static int  _mulle_concurrent_pointerarray_migrate_storage( struct mulle_concurr
256 256
    // already gone
257 257
    if( previous == p)
258 258
       _mulle_allocator_abafree( array->allocator, previous);
259
-   
259
+
260 260
    return( 0);
261 261
 }
262 262
 
... ...
@@ -266,7 +293,7 @@ void  *_mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray
266 266
 {
267 267
    struct _mulle_concurrent_pointerarraystorage   *p;
268 268
    void                                     *value;
269
-   
269
+
270 270
 retry:
271 271
    p     = _mulle_atomic_pointer_read( &array->storage.pointer);
272 272
    value = _mulle_concurrent_pointerarraystorage_get( p, index);
... ...
@@ -285,9 +312,9 @@ int  _mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *a
285 285
 {
286 286
    struct _mulle_concurrent_pointerarraystorage   *p;
287 287
 
288
-   assert( value);
288
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
289 289
    assert( value != REDIRECT_VALUE);
290
-   
290
+
291 291
 retry:
292 292
    p = _mulle_atomic_pointer_read( &array->storage.pointer);
293 293
    switch( _mulle_concurrent_pointerarraystorage_add( p, value))
... ...
@@ -295,7 +322,7 @@ retry:
295 295
    case EBUSY   :
296 296
    case ENOSPC  :
297 297
       if( _mulle_concurrent_pointerarray_migrate_storage( array, p))
298
-         return( -1);
298
+         return( ENOMEM);
299 299
       goto retry;
300 300
    }
301 301
 
... ...
@@ -303,114 +330,104 @@ retry:
303 303
 }
304 304
 
305 305
 
306
-unsigned int  _mulle_concurrent_pointerarray_get_size( struct mulle_concurrent_pointerarray *array)
306
+int  mulle_concurrent_pointerarray_add( struct mulle_concurrent_pointerarray *array,
307
+                                        void *value)
307 308
 {
308
-   struct _mulle_concurrent_pointerarraystorage   *p;
309
-   
310
-   p = _mulle_atomic_pointer_read( &array->storage.pointer);
311
-   return( (unsigned int) p->size);
309
+   if( ! array)
310
+      return( EINVAL);
311
+   if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
312
+      return( EINVAL);
313
+   return( _mulle_concurrent_pointerarray_add( array, value));
312 314
 }
313 315
 
314 316
 
315
-//
316
-// obviously just a snapshot at some recent point in time
317
-//
318
-unsigned int   mulle_concurrent_pointerarray_get_count( struct mulle_concurrent_pointerarray *array)
317
+void  *mulle_concurrent_pointerarray_get( struct mulle_concurrent_pointerarray *array,
318
+                                          unsigned int i)
319 319
 {
320
-   struct _mulle_concurrent_pointerarraystorage   *p;
321
-   
322 320
    if( ! array)
323
-      return( 0);
324
-   
325
-   p = _mulle_atomic_pointer_read( &array->storage.pointer);
326
-   return( (unsigned int) (uintptr_t) _mulle_atomic_pointer_read( &p->n));
321
+      return( NULL);
322
+   return( _mulle_concurrent_pointerarray_get( array, i));
323
+}
324
+
325
+
326
+
327
+int  mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *array,
328
+                                         void *value)
329
+{
330
+   if( ! array)
331
+      return( EINVAL);
332
+   if( value == MULLE_CONCURRENT_NO_POINTER || value == MULLE_CONCURRENT_INVALID_POINTER)
333
+      return( EINVAL);
334
+   return( _mulle_concurrent_pointerarray_find( array, value));
327 335
 }
328 336
 
329 337
 
330 338
 #pragma mark -
331 339
 #pragma mark not so concurrent enumerator
332 340
 
333
-int  _mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_pointerarrayenumerator *rover,
334
-                                              void **p_value)
341
+void  *_mulle_concurrent_pointerarrayenumerator_next( struct mulle_concurrent_pointerarrayenumerator *rover)
335 342
 {
336 343
    void           *value;
337 344
    unsigned int   n;
338
-   
345
+
339 346
    n = mulle_concurrent_pointerarray_get_count( rover->array);
340 347
    if( rover->index >= n)
341
-      return( 0);
342
-   
348
+      return( MULLE_CONCURRENT_NO_POINTER);
349
+
343 350
    value = _mulle_concurrent_pointerarray_get( rover->array, rover->index);
344
-   if( value == MULLE_CONCURRENT_NO_POINTER)
345
-      return( -1);
351
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
346 352
 
347 353
    ++rover->index;
348
-   if( p_value)
349
-      *p_value = value;
350
-
351
-   return( 1);
354
+   return( value);
352 355
 }
353 356
 
354 357
 
355
-int  _mulle_concurrent_pointerarrayreverseenumerator_next( struct mulle_concurrent_pointerarrayreverseenumerator *rover,
356
-                                                     void **p_value)
358
+void   *_mulle_concurrent_pointerarrayreverseenumerator_next( struct mulle_concurrent_pointerarrayreverseenumerator *rover)
357 359
 {
358 360
    void   *value;
359
-   
361
+
360 362
    if( ! rover->index)
361
-      return( 0);
362
-   
363
-   value = _mulle_concurrent_pointerarray_get( rover->array, --rover->index);
364
-   if( value == MULLE_CONCURRENT_NO_POINTER)
365
-      return( -1);
363
+      return( MULLE_CONCURRENT_NO_POINTER);
366 364
 
367
-   if( p_value)
368
-      *p_value = value;
365
+   value = _mulle_concurrent_pointerarray_get( rover->array, --rover->index);
366
+   assert( value != MULLE_CONCURRENT_NO_POINTER);
369 367
 
370
-   return( 1);
368
+   return( value);
371 369
 }
372 370
 
373 371
 
374 372
 int   _mulle_concurrent_pointerarray_find( struct mulle_concurrent_pointerarray *array,
375
-                                       void *search)
373
+                                           void *search)
376 374
 {
377 375
    struct mulle_concurrent_pointerarrayenumerator   rover;
378 376
    int                                              found;
379 377
    void                                             *value;
380
-   
378
+
381 379
    found = 0;
382 380
    rover = mulle_concurrent_pointerarray_enumerate( array);
383
-   while( _mulle_concurrent_pointerarrayenumerator_next( &rover, (void **) &value) == 1)
384
-   {
381
+   while( value = _mulle_concurrent_pointerarrayenumerator_next( &rover))
385 382
       if( value == search)
386 383
       {
387 384
          found = 1;
388 385
          break;
389 386
       }
390
-   }
391
-   _mulle_concurrent_pointerarrayenumerator_done( &rover);
392
-   
387
+   mulle_concurrent_pointerarrayenumerator_done( &rover);
388
+
393 389
    return( found);
394 390
 }
395 391
 
396 392
 
397 393
 int   mulle_concurrent_pointerarray_map( struct mulle_concurrent_pointerarray *list,
398
-                                                void (*f)( void *, void *),
399
-                                                void *userinfo)
394
+                                         void (*f)( void *, void *),
395
+                                         void *userinfo)
400 396
 {
401 397
    struct mulle_concurrent_pointerarrayenumerator  rover;
402 398
    void                                            *value;
403
-   
399
+
404 400
    rover = mulle_concurrent_pointerarray_enumerate( list);
405
-   for(;;)
406
-   {
407
-      switch( _mulle_concurrent_pointerarrayenumerator_next( &rover, &value))
408
-      {
409
-      case -1 : return( -1);
410
-      case  1 : (*f)( value, userinfo); continue;
411
-      }
412
-      break;
413
-   }
414
-   _mulle_concurrent_pointerarrayenumerator_done( &rover);
401
+   while( value = _mulle_concurrent_pointerarrayenumerator_next( &rover))
402
+      (*f)( value, userinfo);
403
+   mulle_concurrent_pointerarrayenumerator_done( &rover);
404
+
415 405
    return( 0);
416 406
 }
... ...
@@ -3,7 +3,9 @@
3 3
 //  mulle-concurrent
4 4
 //
5 5
 //  Created by Nat! on 06.03.16.
6
-//  Copyright © 2016 Mulle kybernetiK. All rights reserved.
6
+//  Copyright © 2016 Nat! for Mulle kybernetiK.
7
+//  Copyright © 2016 Codeon GmbH.
8
+//  All rights reserved.
7 9
 //
8 10
 //  Redistribution and use in source and binary forms, with or without
9 11
 //  modification, are permitted provided that the following conditions are met:
... ...
@@ -56,28 +58,75 @@ struct mulle_concurrent_pointerarray
56 56
 };
57 57
 
58 58
 
59
-int  _mulle_concurrent_pointerarray_init( struct mulle_concurrent_pointerarray *array,