author | Lorenz <12514511+RincewindsHat@users.noreply.github.com> | 2023-03-11 10:34:47 (GMT)
---|---|---
committer | GitHub <noreply@github.com> | 2023-03-11 10:34:47 (GMT)
commit | 75342b53853a898bdd93c7d3da9374115bab6b6d (patch) |
tree | 0c0af2b796bcdd8ddd42187beb5badb420db8dea /gl/windows-rwlock.c |
parent | 6bbe0b7b0f609ecab831dec9be7690842bf0a0fc (diff) |
parent | 357787868b5201ec3e874e7a225b1c944cbbdb4d (diff) |
download | monitoring-plugins-75342b53853a898bdd93c7d3da9374115bab6b6d.tar.gz |
Merge branch 'master' into patch-1 (refs/pull/1842/head)
Diffstat (limited to 'gl/windows-rwlock.c')
-rw-r--r-- | gl/windows-rwlock.c | 377
1 file changed, 377 insertions(+), 0 deletions(-)
diff --git a/gl/windows-rwlock.c b/gl/windows-rwlock.c
new file mode 100644
index 0000000..7cbd7bb
--- /dev/null
+++ b/gl/windows-rwlock.c
@@ -0,0 +1,377 @@
/* Read-write locks (native Windows implementation).
   Copyright (C) 2005-2023 Free Software Foundation, Inc.

   This file is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   This file is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program. If not, see <https://www.gnu.org/licenses/>. */

/* Written by Bruno Haible <bruno@clisp.org>, 2005.
   Based on GCC's gthr-win32.h. */

#include <config.h>

/* Specification. */
#include "windows-rwlock.h"

#include <errno.h>
#include <stdlib.h>

/* Don't assume that UNICODE is not defined. */
#undef CreateEvent
#define CreateEvent CreateEventA

/* In this file, the waitqueues are implemented as circular arrays. */
#define glwthread_waitqueue_t glwthread_carray_waitqueue_t

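For orientation, here is a sketch of the shape of `glwthread_carray_waitqueue_t` that this file relies on. The field list below is inferred from how the code uses it; the authoritative definition lives in windows-rwlock.h:

```c
/* Sketch (assumption inferred from usage, not copied from the header). */
typedef struct
{
  HANDLE *array;        /* circular buffer of per-waiter event handles */
  unsigned int count;   /* number of threads currently waiting */
  unsigned int alloc;   /* allocated capacity of array */
  unsigned int offset;  /* index of the first (oldest) waiter */
}
glwthread_carray_waitqueue_t;
```
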
static void
glwthread_waitqueue_init (glwthread_waitqueue_t *wq)
{
  wq->array = NULL;
  wq->count = 0;
  wq->alloc = 0;
  wq->offset = 0;
}

/* Enqueues the current thread, represented by an event, in a wait queue.
   Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */
static HANDLE
glwthread_waitqueue_add (glwthread_waitqueue_t *wq)
{
  HANDLE event;
  unsigned int index;

  if (wq->count == wq->alloc)
    {
      unsigned int new_alloc = 2 * wq->alloc + 1;
      HANDLE *new_array =
        (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
      if (new_array == NULL)
        /* No more memory. */
        return INVALID_HANDLE_VALUE;
      /* Now is a good opportunity to rotate the array so that its contents
         start at offset 0. */
      if (wq->offset > 0)
        {
          unsigned int old_count = wq->count;
          unsigned int old_alloc = wq->alloc;
          unsigned int old_offset = wq->offset;
          unsigned int i;
          if (old_offset + old_count > old_alloc)
            {
              unsigned int limit = old_offset + old_count - old_alloc;
              for (i = 0; i < limit; i++)
                new_array[old_alloc + i] = new_array[i];
            }
          for (i = 0; i < old_count; i++)
            new_array[i] = new_array[old_offset + i];
          wq->offset = 0;
        }
      wq->array = new_array;
      wq->alloc = new_alloc;
    }
  /* Whether the created event is a manual-reset one or an auto-reset one
     does not matter, since we will wait on it only once. */
  event = CreateEvent (NULL, TRUE, FALSE, NULL);
  if (event == INVALID_HANDLE_VALUE)
    /* No way to allocate an event. */
    return INVALID_HANDLE_VALUE;
  index = wq->offset + wq->count;
  if (index >= wq->alloc)
    index -= wq->alloc;
  wq->array[index] = event;
  wq->count++;
  return event;
}

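To make the rotation step in `glwthread_waitqueue_add` concrete, here is a worked example with assumed numbers (not taken from the source):

```c
/* Worked example of the rotation, with assumed values:
   old_alloc = 4, old_offset = 3, old_count = 3, so the live entries wrap
   around: they sit at indices 3, 0, 1. realloc grows the array to
   new_alloc = 2*4 + 1 = 9. Since old_offset + old_count = 6 > old_alloc = 4,
   limit = 2, and the wrapped entries are first copied past the old end:
       new_array[4] = new_array[0];   new_array[5] = new_array[1];
   Then the run is moved down to start at index 0:
       new_array[0] = new_array[3];
       new_array[1] = new_array[4];   (the old index-0 entry)
       new_array[2] = new_array[5];   (the old index-1 entry)
   leaving the three waiters contiguous at indices 0..2 with offset = 0. */
```
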
/* Notifies the first thread from a wait queue and dequeues it. */
static void
glwthread_waitqueue_notify_first (glwthread_waitqueue_t *wq)
{
  SetEvent (wq->array[wq->offset + 0]);
  wq->offset++;
  wq->count--;
  if (wq->count == 0 || wq->offset == wq->alloc)
    wq->offset = 0;
}

/* Notifies all threads from a wait queue and dequeues them all. */
static void
glwthread_waitqueue_notify_all (glwthread_waitqueue_t *wq)
{
  unsigned int i;

  for (i = 0; i < wq->count; i++)
    {
      unsigned int index = wq->offset + i;
      if (index >= wq->alloc)
        index -= wq->alloc;
      SetEvent (wq->array[index]);
    }
  wq->count = 0;
  wq->offset = 0;
}

void
glwthread_rwlock_init (glwthread_rwlock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  glwthread_waitqueue_init (&lock->waiting_readers);
  glwthread_waitqueue_init (&lock->waiting_writers);
  lock->runcount = 0;
  lock->guard.done = 1;
}

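A minimal sketch of setting up a lock with this API. The `GLWTHREAD_RWLOCK_INIT` static initializer is an assumption here, modeled on other gnulib windows-* modules; check windows-rwlock.h for the actual macro name:

```c
#include "windows-rwlock.h"

/* Either initialize explicitly once, before any other thread can touch
   the lock ... */
static glwthread_rwlock_t lock_a;

static void
init_locks (void)
{
  glwthread_rwlock_init (&lock_a);
}

/* ... or rely on the guard-based lazy initialization performed by the
   locking functions below (the macro name is an assumption; see the
   header): */
static glwthread_rwlock_t lock_b = GLWTHREAD_RWLOCK_INIT;
```
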
int
glwthread_rwlock_rdlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock. Initialize it. */
        glwthread_rwlock_init (lock);
      else
        {
          /* Don't let lock->guard.started grow and wrap around. */
          InterlockedDecrement (&lock->guard.started);
          /* Yield the CPU while waiting for another thread to finish
             initializing this lock. */
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting. The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation". */
  if (!(lock->runcount + 1 > 0 && lock->waiting_writers.count == 0))
    {
      /* This thread has to wait for a while. Enqueue it among the
         waiting_readers. */
      HANDLE event = glwthread_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event. */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount. */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure. Weird. */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

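A compact summary of the `runcount` state encoding, inferred from the checks in this function and its siblings below:

```c
/* lock->runcount state encoding, as used throughout this file:
     runcount == -1   : exactly one writer holds the lock
     runcount ==  0   : the lock is free
     runcount ==  n>0 : n readers hold the lock
   Hence the admission test above, runcount + 1 > 0, means "no writer is
   active, and incrementing the reader count cannot overflow"; the extra
   waiting_writers.count == 0 condition implements the POSIX writer
   preference quoted in the comment. */
```
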
int
glwthread_rwlock_wrlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock. Initialize it. */
        glwthread_rwlock_init (lock);
      else
        {
          /* Don't let lock->guard.started grow and wrap around. */
          InterlockedDecrement (&lock->guard.started);
          /* Yield the CPU while waiting for another thread to finish
             initializing this lock. */
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running. */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while. Enqueue it among the
         waiting_writers. */
      HANDLE event = glwthread_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event. */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1. */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure. Weird. */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

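Each locking entry point repeats the same lazy-initialization preamble. The subtle point, stated here as an assumption about the header: `guard.started` must start at -1 in a statically initialized lock, so that exactly one thread's `InterlockedIncrement` returns 0 and performs the one-time init while the others spin on `guard.done`:

```c
/* The guard pattern in isolation (sketch; assumes guard.started == -1
   initially, per the static initializer in windows-rwlock.h): */
if (!lock->guard.done)
  {
    if (InterlockedIncrement (&lock->guard.started) == 0)
      glwthread_rwlock_init (lock);   /* sole winner runs the init */
    else
      {
        /* Undo the increment so started cannot creep toward wrap-around,
           then busy-yield until the winner sets guard.done. */
        InterlockedDecrement (&lock->guard.started);
        while (!lock->guard.done)
          Sleep (0);
      }
  }
```
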
int
glwthread_rwlock_tryrdlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock. Initialize it. */
        glwthread_rwlock_init (lock);
      else
        {
          /* Don't let lock->guard.started grow and wrap around. */
          InterlockedDecrement (&lock->guard.started);
          /* Yield the CPU while waiting for another thread to finish
             initializing this lock. */
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  /* It's OK to wait for this critical section, because it is never taken for a
     long time. */
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow, and whether no writer is waiting. The latter
     condition is because POSIX recommends that "write locks shall take
     precedence over read locks", to avoid "writer starvation". */
  if (!(lock->runcount + 1 > 0 && lock->waiting_writers.count == 0))
    {
      /* This thread would have to wait for a while. Return instead. */
      LeaveCriticalSection (&lock->lock);
      return EBUSY;
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glwthread_rwlock_trywrlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock. Initialize it. */
        glwthread_rwlock_init (lock);
      else
        {
          /* Don't let lock->guard.started grow and wrap around. */
          InterlockedDecrement (&lock->guard.started);
          /* Yield the CPU while waiting for another thread to finish
             initializing this lock. */
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  /* It's OK to wait for this critical section, because it is never taken for a
     long time. */
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running. */
  if (!(lock->runcount == 0))
    {
      /* This thread would have to wait for a while. Return instead. */
      LeaveCriticalSection (&lock->lock);
      return EBUSY;
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

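The try* variants never block; they return 0 on success and EBUSY when the lock cannot be taken immediately. A small usage sketch (cache_lock, cache_value, and the function name are hypothetical):

```c
#include <errno.h>
#include "windows-rwlock.h"

static glwthread_rwlock_t cache_lock;  /* hypothetical, initialized elsewhere */
static int cache_value;                /* data protected by cache_lock */

static int
try_update_cache (int v)
{
  if (glwthread_rwlock_trywrlock (&cache_lock) == EBUSY)
    return 0;                /* contended: skip this round, try again later */
  cache_value = v;           /* exclusive access: runcount is -1 here */
  return glwthread_rwlock_unlock (&cache_lock);
}
```
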
int
glwthread_rwlock_unlock (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock. */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock. */
      if (!(lock->runcount > 0))
        {
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation". */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers. */
          lock->runcount--;
          glwthread_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers. */
          lock->runcount += lock->waiting_readers.count;
          glwthread_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glwthread_rwlock_destroy (glwthread_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  if (lock->runcount != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  if (lock->waiting_readers.array != NULL)
    free (lock->waiting_readers.array);
  if (lock->waiting_writers.array != NULL)
    free (lock->waiting_writers.array);
  lock->guard.done = 0;
  return 0;
}
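
Putting the whole API together, a minimal reader/writer sketch (all names below are hypothetical; error handling elided for brevity):

```c
#include "windows-rwlock.h"

static glwthread_rwlock_t table_lock;  /* protects table_value */
static int table_value;

static void
setup (void)                /* run once, before any other thread starts */
{
  glwthread_rwlock_init (&table_lock);
}

static int
read_value (void)           /* many readers may hold the lock at once */
{
  int v;
  glwthread_rwlock_rdlock (&table_lock);
  v = table_value;
  glwthread_rwlock_unlock (&table_lock);
  return v;
}

static void
write_value (int v)         /* writers get exclusive access */
{
  glwthread_rwlock_wrlock (&table_lock);
  table_value = v;
  glwthread_rwlock_unlock (&table_lock);
}

static void
teardown (void)             /* only legal once the lock is no longer held */
{
  glwthread_rwlock_destroy (&table_lock);
}
```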