author:    Andreas Baumann <mail@andreasbaumann.cc>  2023-02-11 06:20:24 (GMT)
committer: Andreas Baumann <mail@andreasbaumann.cc>  2023-02-11 06:20:24 (GMT)
commit:    f867d7b44080fa9716deeff4476275f9a489879f
tree:      bc964662fc3300dc626fb6d833d8ed5c8f46eca7 /gl/glthread/lock.c
parent:    9734c439cba0a02b087e50789e94ec9b07754608
parent:    c07206f2ccc2356aa74bc6813a94c2190017d44e
download:  monitoring-plugins-f867d7b44080fa9716deeff4476275f9a489879f.tar.gz

Merge branch 'master' into curlfixes
Diffstat (limited to 'gl/glthread/lock.c')
-rw-r--r--  gl/glthread/lock.c  866
1 file changed, 279 insertions(+), 587 deletions(-)
diff --git a/gl/glthread/lock.c b/gl/glthread/lock.c
index f62aa30..82fb755 100644
--- a/gl/glthread/lock.c
+++ b/gl/glthread/lock.c
@@ -1,22 +1,21 @@
 /* Locking in multithreaded situations.
-   Copyright (C) 2005-2013 Free Software Foundation, Inc.
+   Copyright (C) 2005-2023 Free Software Foundation, Inc.
 
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 3, or (at your option)
-   any later version.
+   This file is free software: you can redistribute it and/or modify
+   it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
 
-   This program is distributed in the hope that it will be useful,
+   This file is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
+   GNU Lesser General Public License for more details.
 
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, see <http://www.gnu.org/licenses/>.  */
+   You should have received a copy of the GNU Lesser General Public License
+   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */
 
 /* Written by Bruno Haible <bruno@clisp.org>, 2005.
-   Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
-   gthr-win32.h.  */
+   Based on GCC's gthr-posix.h, gthr-posix95.h.  */
 
 #include <config.h>
 
@@ -24,15 +23,267 @@
 
 /* ========================================================================= */
 
+#if USE_ISOC_THREADS || USE_ISOC_AND_POSIX_THREADS
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+int
+glthread_lock_init (gl_lock_t *lock)
+{
+  if (mtx_init (&lock->mutex, mtx_plain) != thrd_success)
+    return ENOMEM;
+  lock->init_needed = 0;
+  return 0;
+}
+
+int
+glthread_lock_lock (gl_lock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  if (mtx_lock (&lock->mutex) != thrd_success)
+    return EAGAIN;
+  return 0;
+}
+
+int
+glthread_lock_unlock (gl_lock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  if (mtx_unlock (&lock->mutex) != thrd_success)
+    return EINVAL;
+  return 0;
+}
+
+int
+glthread_lock_destroy (gl_lock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  mtx_destroy (&lock->mutex);
+  return 0;
+}
+
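
The gl_lock_t branch added above is a thin wrapper over ISO C <threads.h>. As a reference point, a minimal, self-contained C11 sketch using the same mtx_* calls directly (the counter/worker names are illustrative, not part of this file):

    #include <stdio.h>
    #include <threads.h>

    static mtx_t counter_mutex;   /* plain, non-recursive mutex */
    static long counter;

    static int
    worker (void *arg)
    {
      (void) arg;
      for (int i = 0; i < 100000; i++)
        {
          mtx_lock (&counter_mutex);     /* same call glthread_lock_lock uses */
          counter++;
          mtx_unlock (&counter_mutex);
        }
      return 0;
    }

    int
    main (void)
    {
      thrd_t t1, t2;
      if (mtx_init (&counter_mutex, mtx_plain) != thrd_success)
        return 1;
      thrd_create (&t1, worker, NULL);
      thrd_create (&t2, worker, NULL);
      thrd_join (t1, NULL);
      thrd_join (t2, NULL);
      mtx_destroy (&counter_mutex);
      printf ("%ld\n", counter);         /* always 200000 */
      return 0;
    }
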
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+int
+glthread_rwlock_init (gl_rwlock_t *lock)
+{
+  if (mtx_init (&lock->lock, mtx_plain) != thrd_success
+      || cnd_init (&lock->waiting_readers) != thrd_success
+      || cnd_init (&lock->waiting_writers) != thrd_success)
+    return ENOMEM;
+  lock->waiting_writers_count = 0;
+  lock->runcount = 0;
+  lock->init_needed = 0;
+  return 0;
+}
+
+int
+glthread_rwlock_rdlock (gl_rwlock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  if (mtx_lock (&lock->lock) != thrd_success)
+    return EAGAIN;
+  /* Test whether only readers are currently running, and whether the runcount
+     field will not overflow, and whether no writer is waiting.  The latter
+     condition is because POSIX recommends that "write locks shall take
+     precedence over read locks", to avoid "writer starvation".  */
+  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
+    {
+      /* This thread has to wait for a while.  Enqueue it among the
+         waiting_readers.  */
+      if (cnd_wait (&lock->waiting_readers, &lock->lock) != thrd_success)
+        {
+          mtx_unlock (&lock->lock);
+          return EINVAL;
+        }
+    }
+  lock->runcount++;
+  if (mtx_unlock (&lock->lock) != thrd_success)
+    return EINVAL;
+  return 0;
+}
+
+int
+glthread_rwlock_wrlock (gl_rwlock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  if (mtx_lock (&lock->lock) != thrd_success)
+    return EAGAIN;
+  /* Test whether no readers or writers are currently running.  */
+  while (!(lock->runcount == 0))
+    {
+      /* This thread has to wait for a while.  Enqueue it among the
+         waiting_writers.  */
+      lock->waiting_writers_count++;
+      if (cnd_wait (&lock->waiting_writers, &lock->lock) != thrd_success)
+        {
+          lock->waiting_writers_count--;
+          mtx_unlock (&lock->lock);
+          return EINVAL;
+        }
+      lock->waiting_writers_count--;
+    }
+  lock->runcount--; /* runcount becomes -1 */
+  if (mtx_unlock (&lock->lock) != thrd_success)
+    return EINVAL;
+  return 0;
+}
+
+int
+glthread_rwlock_unlock (gl_rwlock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  if (mtx_lock (&lock->lock) != thrd_success)
+    return EAGAIN;
+  if (lock->runcount < 0)
+    {
+      /* Drop a writer lock.  */
+      if (!(lock->runcount == -1))
+        {
+          mtx_unlock (&lock->lock);
+          return EINVAL;
+        }
+      lock->runcount = 0;
+    }
+  else
+    {
+      /* Drop a reader lock.  */
+      if (!(lock->runcount > 0))
+        {
+          mtx_unlock (&lock->lock);
+          return EINVAL;
+        }
+      lock->runcount--;
+    }
+  if (lock->runcount == 0)
+    {
+      /* POSIX recommends that "write locks shall take precedence over read
+         locks", to avoid "writer starvation".  */
+      if (lock->waiting_writers_count > 0)
+        {
+          /* Wake up one of the waiting writers.  */
+          if (cnd_signal (&lock->waiting_writers) != thrd_success)
+            {
+              mtx_unlock (&lock->lock);
+              return EINVAL;
+            }
+        }
+      else
+        {
+          /* Wake up all waiting readers.  */
+          if (cnd_broadcast (&lock->waiting_readers) != thrd_success)
+            {
+              mtx_unlock (&lock->lock);
+              return EINVAL;
+            }
+        }
+    }
+  if (mtx_unlock (&lock->lock) != thrd_success)
+    return EINVAL;
+  return 0;
+}
+
+int
+glthread_rwlock_destroy (gl_rwlock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  mtx_destroy (&lock->lock);
+  cnd_destroy (&lock->waiting_readers);
+  cnd_destroy (&lock->waiting_writers);
+  return 0;
+}
+
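
The read-write lock above keeps a single runcount (the number of active readers, or -1 while a writer holds the lock) plus waiting_writers_count, so new readers block as soon as a writer is queued. A hedged usage sketch built only on the functions defined above; the cache example and its names are invented for illustration:

    #include <stdlib.h>
    #include "glthread/lock.h"

    static gl_rwlock_t cache_lock;   /* initialized once, before threads start */
    static int cache_value;

    void
    cache_setup (void)
    {
      if (glthread_rwlock_init (&cache_lock))
        abort ();
    }

    int
    cache_read (void)
    {
      int v;
      if (glthread_rwlock_rdlock (&cache_lock))   /* shared: many readers at once */
        abort ();
      v = cache_value;
      if (glthread_rwlock_unlock (&cache_lock))
        abort ();
      return v;
    }

    void
    cache_write (int v)
    {
      if (glthread_rwlock_wrlock (&cache_lock))   /* exclusive: waits for runcount == 0 */
        abort ();
      cache_value = v;
      if (glthread_rwlock_unlock (&cache_lock))
        abort ();
    }
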
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+int
+glthread_recursive_lock_init (gl_recursive_lock_t *lock)
+{
+  if (mtx_init (&lock->mutex, mtx_plain | mtx_recursive) != thrd_success)
+    return ENOMEM;
+  lock->init_needed = 0;
+  return 0;
+}
+
+int
+glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  if (mtx_lock (&lock->mutex) != thrd_success)
+    return EAGAIN;
+  return 0;
+}
+
+int
+glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  if (mtx_unlock (&lock->mutex) != thrd_success)
+    return EINVAL;
+  return 0;
+}
+
+int
+glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
+{
+  if (lock->init_needed)
+    call_once (&lock->init_once, lock->init_func);
+  mtx_destroy (&lock->mutex);
+  return 0;
+}
+
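
The recursive lock above simply asks C11 for a recursive mutex (mtx_plain | mtx_recursive) instead of tracking owner and depth by hand. A small standalone sketch of what that flag buys, with illustrative names:

    #include <threads.h>

    static mtx_t log_mutex;

    void
    log_setup (void)
    {
      /* mtx_recursive lets the owning thread lock the mutex again without
         deadlocking, which is exactly what gl_recursive_lock_t needs.  */
      mtx_init (&log_mutex, mtx_plain | mtx_recursive);
    }

    static void
    log_line (const char *msg)
    {
      mtx_lock (&log_mutex);
      /* ... write msg somewhere ... */
      (void) msg;
      mtx_unlock (&log_mutex);
    }

    void
    log_two_lines (const char *a, const char *b)
    {
      mtx_lock (&log_mutex);    /* outer acquisition */
      log_line (a);             /* inner mtx_lock on the same mutex succeeds */
      log_line (b);
      mtx_unlock (&log_mutex);
    }
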
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+#endif
+
+/* ========================================================================= */
+
 #if USE_POSIX_THREADS
 
 /* -------------------------- gl_lock_t datatype -------------------------- */
 
 /* ------------------------- gl_rwlock_t datatype ------------------------- */
 
-# if HAVE_PTHREAD_RWLOCK
+# if HAVE_PTHREAD_RWLOCK && (HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER || (defined PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP && (__GNU_LIBRARY__ > 1)))
+
+#  if defined PTHREAD_RWLOCK_INITIALIZER || defined PTHREAD_RWLOCK_INITIALIZER_NP
+
+#   if !HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER
+     /* glibc with bug https://sourceware.org/bugzilla/show_bug.cgi?id=13701 */
+
+int
+glthread_rwlock_init_for_glibc (pthread_rwlock_t *lock)
+{
+  pthread_rwlockattr_t attributes;
+  int err;
 
-#  if !defined PTHREAD_RWLOCK_INITIALIZER
+  err = pthread_rwlockattr_init (&attributes);
+  if (err != 0)
+    return err;
+  /* Note: PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP is the only value that
+     causes the writer to be preferred.  PTHREAD_RWLOCK_PREFER_WRITER_NP does not
+     do this; see
+     http://man7.org/linux/man-pages/man3/pthread_rwlockattr_setkind_np.3.html */
+  err = pthread_rwlockattr_setkind_np (&attributes,
+                                       PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+  if (err == 0)
+    err = pthread_rwlock_init(lock, &attributes);
+  /* pthread_rwlockattr_destroy always returns 0.  It cannot influence the
+     return value.  */
+  pthread_rwlockattr_destroy (&attributes);
+  return err;
+}
+
+#   endif
+#  else
 
 int
 glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
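
glthread_rwlock_init_for_glibc above works around glibc bug 13701 by forcing the writer-preferring lock kind. For orientation, here is the same attribute dance as a standalone sketch, guarded by the same glibc-only initializer macro the new #if test checks for; the function name is invented:

    #define _GNU_SOURCE
    #include <pthread.h>

    int
    init_writer_preferring_rwlock (pthread_rwlock_t *rwlock)
    {
    #ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
      pthread_rwlockattr_t attr;
      int err = pthread_rwlockattr_init (&attr);
      if (err != 0)
        return err;
      /* Readers that arrive while a writer is queued now block, so a steady
         stream of readers can no longer starve the writer.  */
      err = pthread_rwlockattr_setkind_np (&attr,
                                           PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
      if (err == 0)
        err = pthread_rwlock_init (rwlock, &attr);
      pthread_rwlockattr_destroy (&attr);
      return err;
    #else
      /* Non-glibc platforms: fall back to the default lock kind.  */
      return pthread_rwlock_init (rwlock, NULL);
    #endif
    }
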
@@ -152,11 +403,9 @@ glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
   if (err != 0)
     return err;
   /* Test whether only readers are currently running, and whether the runcount
-     field will not overflow.  */
-  /* POSIX says: "It is implementation-defined whether the calling thread
-     acquires the lock when a writer does not hold the lock and there are
-     writers blocked on the lock." Let's say, no: give the writers a higher
-     priority.  */
+     field will not overflow, and whether no writer is waiting.  The latter
+     condition is because POSIX recommends that "write locks shall take
+     precedence over read locks", to avoid "writer starvation".  */
   while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
     {
       /* This thread has to wait for a while.  Enqueue it among the
@@ -469,161 +718,25 @@ glthread_once_singlethreaded (pthread_once_t *once_control)
   return 0;
 }
 
-#endif
-
-/* ========================================================================= */
-
-#if USE_PTH_THREADS
-
-/* Use the GNU Pth threads library.  */
-
-/* -------------------------- gl_lock_t datatype -------------------------- */
-
-/* ------------------------- gl_rwlock_t datatype ------------------------- */
-
-/* --------------------- gl_recursive_lock_t datatype --------------------- */
-
-/* -------------------------- gl_once_t datatype -------------------------- */
-
-static void
-glthread_once_call (void *arg)
-{
-  void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
-  void (*initfunction) (void) = *gl_once_temp_addr;
-  initfunction ();
-}
-
-int
-glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
-{
-  void (*temp) (void) = initfunction;
-  return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
-}
-
-int
-glthread_once_singlethreaded (pth_once_t *once_control)
-{
-  /* We know that pth_once_t is an integer type.  */
-  if (*once_control == PTH_ONCE_INIT)
-    {
-      /* First time use of once_control.  Invert the marker.  */
-      *once_control = ~ PTH_ONCE_INIT;
-      return 1;
-    }
-  else
-    return 0;
-}
-
-#endif
-
-/* ========================================================================= */
-
-#if USE_SOLARIS_THREADS
-
-/* Use the old Solaris threads library.  */
-
-/* -------------------------- gl_lock_t datatype -------------------------- */
-
-/* ------------------------- gl_rwlock_t datatype ------------------------- */
-
-/* --------------------- gl_recursive_lock_t datatype --------------------- */
+# if !(PTHREAD_IN_USE_DETECTION_HARD || USE_POSIX_THREADS_WEAK)
 
 int
-glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
+glthread_once_multithreaded (pthread_once_t *once_control,
+                             void (*init_function) (void))
 {
-  int err;
-
-  err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
-  if (err != 0)
-    return err;
-  lock->owner = (thread_t) 0;
-  lock->depth = 0;
-  return 0;
-}
-
-int
-glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
-{
-  thread_t self = thr_self ();
-  if (lock->owner != self)
+  int err = pthread_once (once_control, init_function);
+  if (err == ENOSYS)
     {
-      int err;
-
-      err = mutex_lock (&lock->mutex);
-      if (err != 0)
-        return err;
-      lock->owner = self;
+      /* This happens on FreeBSD 11: The pthread_once function in libc returns
+         ENOSYS.  */
+      if (glthread_once_singlethreaded (once_control))
+        init_function ();
+      return 0;
     }
-  if (++(lock->depth) == 0) /* wraparound? */
-    {
-      lock->depth--;
-      return EAGAIN;
-    }
-  return 0;
+  return err;
 }
 
-int
-glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
-{
-  if (lock->owner != thr_self ())
-    return EPERM;
-  if (lock->depth == 0)
-    return EINVAL;
-  if (--(lock->depth) == 0)
-    {
-      lock->owner = (thread_t) 0;
-      return mutex_unlock (&lock->mutex);
-    }
-  else
-    return 0;
-}
-
-int
-glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
-{
-  if (lock->owner != (thread_t) 0)
-    return EBUSY;
-  return mutex_destroy (&lock->mutex);
-}
-
-/* -------------------------- gl_once_t datatype -------------------------- */
-
-int
-glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
-{
-  if (!once_control->inited)
-    {
-      int err;
-
-      /* Use the mutex to guarantee that if another thread is already calling
-         the initfunction, this thread waits until it's finished.  */
-      err = mutex_lock (&once_control->mutex);
-      if (err != 0)
-        return err;
-      if (!once_control->inited)
-        {
-          once_control->inited = 1;
-          initfunction ();
-        }
-      return mutex_unlock (&once_control->mutex);
-    }
-  else
-    return 0;
-}
-
-int
-glthread_once_singlethreaded (gl_once_t *once_control)
-{
-  /* We know that gl_once_t contains an integer type.  */
-  if (!once_control->inited)
-    {
-      /* First time use of once_control.  Invert the marker.  */
-      once_control->inited = ~ 0;
-      return 1;
-    }
-  else
-    return 0;
-}
+# endif
 
 #endif
 
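
The hunk above drops the GNU Pth and old Solaris back ends and adds a pthread_once wrapper whose only extra job is the FreeBSD 11 ENOSYS fallback. For orientation, plain pthread_once usage looks like this (the buffer names are invented for illustration):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_once_t buffer_once = PTHREAD_ONCE_INIT;
    static char *shared_buffer;

    static void
    init_buffer (void)
    {
      shared_buffer = malloc (4096);
    }

    char *
    get_buffer (void)
    {
      /* Any number of threads may call this; init_buffer runs exactly once.  */
      pthread_once (&buffer_once, init_buffer);
      return shared_buffer;
    }
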
@@ -631,427 +744,6 @@ glthread_once_singlethreaded (gl_once_t *once_control)

 #if USE_WINDOWS_THREADS
 
-/* -------------------------- gl_lock_t datatype -------------------------- */
-
-void
-glthread_lock_init_func (gl_lock_t *lock)
-{
-  InitializeCriticalSection (&lock->lock);
-  lock->guard.done = 1;
-}
-
-int
-glthread_lock_lock_func (gl_lock_t *lock)
-{
-  if (!lock->guard.done)
-    {
-      if (InterlockedIncrement (&lock->guard.started) == 0)
-        /* This thread is the first one to need this lock.  Initialize it.  */
-        glthread_lock_init (lock);
-      else
-        /* Yield the CPU while waiting for another thread to finish
-           initializing this lock.  */
-        while (!lock->guard.done)
-          Sleep (0);
-    }
-  EnterCriticalSection (&lock->lock);
-  return 0;
-}
-
-int
-glthread_lock_unlock_func (gl_lock_t *lock)
-{
-  if (!lock->guard.done)
-    return EINVAL;
-  LeaveCriticalSection (&lock->lock);
-  return 0;
-}
-
-int
-glthread_lock_destroy_func (gl_lock_t *lock)
-{
-  if (!lock->guard.done)
-    return EINVAL;
-  DeleteCriticalSection (&lock->lock);
-  lock->guard.done = 0;
-  return 0;
-}
-
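
The removed Windows code lazily initializes statically allocated locks through a small guard: the first thread to bump guard.started (which the static initializer presumably leaves at -1) sees 0 and performs the initialization; every other thread spins on guard.done with Sleep (0). A sketch of that pattern in isolation, with invented names:

    #include <windows.h>

    struct lazy_guard
    {
      volatile int done;       /* becomes 1 once initialization is published */
      volatile LONG started;   /* -1 until the first thread increments it */
    };

    static struct lazy_guard guard = { 0, -1 };
    static CRITICAL_SECTION cs;

    static void
    ensure_initialized (void)
    {
      if (!guard.done)
        {
          if (InterlockedIncrement (&guard.started) == 0)
            {
              /* This thread won the race: initialize, then publish.  */
              InitializeCriticalSection (&cs);
              guard.done = 1;
            }
          else
            /* Another thread is initializing; yield until it finishes.  */
            while (!guard.done)
              Sleep (0);
        }
    }
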
-/* ------------------------- gl_rwlock_t datatype ------------------------- */
-
-/* In this file, the waitqueues are implemented as circular arrays.  */
-#define gl_waitqueue_t gl_carray_waitqueue_t
-
-static void
-gl_waitqueue_init (gl_waitqueue_t *wq)
-{
-  wq->array = NULL;
-  wq->count = 0;
-  wq->alloc = 0;
-  wq->offset = 0;
-}
-
-/* Enqueues the current thread, represented by an event, in a wait queue.
-   Returns INVALID_HANDLE_VALUE if an allocation failure occurs.  */
-static HANDLE
-gl_waitqueue_add (gl_waitqueue_t *wq)
-{
-  HANDLE event;
-  unsigned int index;
-
-  if (wq->count == wq->alloc)
-    {
-      unsigned int new_alloc = 2 * wq->alloc + 1;
-      HANDLE *new_array =
-        (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
-      if (new_array == NULL)
-        /* No more memory.  */
-        return INVALID_HANDLE_VALUE;
-      /* Now is a good opportunity to rotate the array so that its contents
-         starts at offset 0.  */
-      if (wq->offset > 0)
-        {
-          unsigned int old_count = wq->count;
-          unsigned int old_alloc = wq->alloc;
-          unsigned int old_offset = wq->offset;
-          unsigned int i;
-          if (old_offset + old_count > old_alloc)
-            {
-              unsigned int limit = old_offset + old_count - old_alloc;
-              for (i = 0; i < limit; i++)
-                new_array[old_alloc + i] = new_array[i];
-            }
-          for (i = 0; i < old_count; i++)
-            new_array[i] = new_array[old_offset + i];
-          wq->offset = 0;
-        }
-      wq->array = new_array;
-      wq->alloc = new_alloc;
-    }
-  /* Whether the created event is a manual-reset one or an auto-reset one,
-     does not matter, since we will wait on it only once.  */
-  event = CreateEvent (NULL, TRUE, FALSE, NULL);
-  if (event == INVALID_HANDLE_VALUE)
-    /* No way to allocate an event.  */
-    return INVALID_HANDLE_VALUE;
-  index = wq->offset + wq->count;
-  if (index >= wq->alloc)
-    index -= wq->alloc;
-  wq->array[index] = event;
-  wq->count++;
-  return event;
-}
-
-/* Notifies the first thread from a wait queue and dequeues it.  */
-static void
-gl_waitqueue_notify_first (gl_waitqueue_t *wq)
-{
-  SetEvent (wq->array[wq->offset + 0]);
-  wq->offset++;
-  wq->count--;
-  if (wq->count == 0 || wq->offset == wq->alloc)
-    wq->offset = 0;
-}
-
-/* Notifies all threads from a wait queue and dequeues them all.  */
-static void
-gl_waitqueue_notify_all (gl_waitqueue_t *wq)
-{
-  unsigned int i;
-
-  for (i = 0; i < wq->count; i++)
-    {
-      unsigned int index = wq->offset + i;
-      if (index >= wq->alloc)
-        index -= wq->alloc;
-      SetEvent (wq->array[index]);
-    }
-  wq->count = 0;
-  wq->offset = 0;
-}
-
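
The wait queue being removed is a growable circular array of event handles; when it reallocates, it rotates the contents so the queue starts at index 0 again. The same grow-and-rotate step, reduced to ints so it can be read on its own (all names invented):

    #include <stdlib.h>

    struct carray
    {
      int *array;
      unsigned int count;   /* elements currently queued */
      unsigned int alloc;   /* capacity */
      unsigned int offset;  /* index of the oldest element */
    };

    /* Append VALUE; returns 0 on success, -1 on allocation failure.  */
    static int
    carray_push (struct carray *q, int value)
    {
      if (q->count == q->alloc)
        {
          unsigned int new_alloc = 2 * q->alloc + 1;
          unsigned int i;
          int *new_array = realloc (q->array, new_alloc * sizeof (int));
          if (new_array == NULL)
            return -1;
          if (q->offset > 0)
            {
              /* Un-wrap: copy the wrapped tail past the old end, then slide
                 everything to the front, as gl_waitqueue_add does.  */
              if (q->offset + q->count > q->alloc)
                for (i = 0; i < q->offset + q->count - q->alloc; i++)
                  new_array[q->alloc + i] = new_array[i];
              for (i = 0; i < q->count; i++)
                new_array[i] = new_array[q->offset + i];
              q->offset = 0;
            }
          q->array = new_array;
          q->alloc = new_alloc;
        }
      {
        unsigned int index = q->offset + q->count;
        if (index >= q->alloc)
          index -= q->alloc;
        q->array[index] = value;
      }
      q->count++;
      return 0;
    }
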
-void
-glthread_rwlock_init_func (gl_rwlock_t *lock)
-{
-  InitializeCriticalSection (&lock->lock);
-  gl_waitqueue_init (&lock->waiting_readers);
-  gl_waitqueue_init (&lock->waiting_writers);
-  lock->runcount = 0;
-  lock->guard.done = 1;
-}
-
-int
-glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
-{
-  if (!lock->guard.done)
-    {
-      if (InterlockedIncrement (&lock->guard.started) == 0)
-        /* This thread is the first one to need this lock.  Initialize it.  */
-        glthread_rwlock_init (lock);
-      else
-        /* Yield the CPU while waiting for another thread to finish
-           initializing this lock.  */
-        while (!lock->guard.done)
-          Sleep (0);
-    }
-  EnterCriticalSection (&lock->lock);
-  /* Test whether only readers are currently running, and whether the runcount
-     field will not overflow.  */
-  if (!(lock->runcount + 1 > 0))
-    {
-      /* This thread has to wait for a while.  Enqueue it among the
-         waiting_readers.  */
-      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
-      if (event != INVALID_HANDLE_VALUE)
-        {
-          DWORD result;
-          LeaveCriticalSection (&lock->lock);
-          /* Wait until another thread signals this event.  */
-          result = WaitForSingleObject (event, INFINITE);
-          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
-            abort ();
-          CloseHandle (event);
-          /* The thread which signalled the event already did the bookkeeping:
-             removed us from the waiting_readers, incremented lock->runcount.  */
-          if (!(lock->runcount > 0))
-            abort ();
-          return 0;
-        }
-      else
-        {
-          /* Allocation failure.  Weird.  */
-          do
-            {
-              LeaveCriticalSection (&lock->lock);
-              Sleep (1);
-              EnterCriticalSection (&lock->lock);
-            }
-          while (!(lock->runcount + 1 > 0));
-        }
-    }
-  lock->runcount++;
-  LeaveCriticalSection (&lock->lock);
-  return 0;
-}
-
-int
-glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
-{
-  if (!lock->guard.done)
-    {
-      if (InterlockedIncrement (&lock->guard.started) == 0)
-        /* This thread is the first one to need this lock.  Initialize it.  */
-        glthread_rwlock_init (lock);
-      else
-        /* Yield the CPU while waiting for another thread to finish
-           initializing this lock.  */
-        while (!lock->guard.done)
-          Sleep (0);
-    }
-  EnterCriticalSection (&lock->lock);
-  /* Test whether no readers or writers are currently running.  */
-  if (!(lock->runcount == 0))
-    {
-      /* This thread has to wait for a while.  Enqueue it among the
-         waiting_writers.  */
-      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
-      if (event != INVALID_HANDLE_VALUE)
-        {
-          DWORD result;
-          LeaveCriticalSection (&lock->lock);
-          /* Wait until another thread signals this event.  */
-          result = WaitForSingleObject (event, INFINITE);
-          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
-            abort ();
-          CloseHandle (event);
-          /* The thread which signalled the event already did the bookkeeping:
-             removed us from the waiting_writers, set lock->runcount = -1.  */
-          if (!(lock->runcount == -1))
-            abort ();
-          return 0;
-        }
-      else
-        {
-          /* Allocation failure.  Weird.  */
-          do
-            {
-              LeaveCriticalSection (&lock->lock);
-              Sleep (1);
-              EnterCriticalSection (&lock->lock);
-            }
-          while (!(lock->runcount == 0));
-        }
-    }
-  lock->runcount--; /* runcount becomes -1 */
-  LeaveCriticalSection (&lock->lock);
-  return 0;
-}
-
-int
-glthread_rwlock_unlock_func (gl_rwlock_t *lock)
-{
-  if (!lock->guard.done)
-    return EINVAL;
-  EnterCriticalSection (&lock->lock);
-  if (lock->runcount < 0)
-    {
-      /* Drop a writer lock.  */
-      if (!(lock->runcount == -1))
-        abort ();
-      lock->runcount = 0;
-    }
-  else
-    {
-      /* Drop a reader lock.  */
-      if (!(lock->runcount > 0))
-        {
-          LeaveCriticalSection (&lock->lock);
-          return EPERM;
-        }
-      lock->runcount--;
-    }
-  if (lock->runcount == 0)
-    {
-      /* POSIX recommends that "write locks shall take precedence over read
-         locks", to avoid "writer starvation".  */
-      if (lock->waiting_writers.count > 0)
-        {
-          /* Wake up one of the waiting writers.  */
-          lock->runcount--;
-          gl_waitqueue_notify_first (&lock->waiting_writers);
-        }
-      else
-        {
-          /* Wake up all waiting readers.  */
-          lock->runcount += lock->waiting_readers.count;
-          gl_waitqueue_notify_all (&lock->waiting_readers);
-        }
-    }
-  LeaveCriticalSection (&lock->lock);
-  return 0;
-}
-
-int
-glthread_rwlock_destroy_func (gl_rwlock_t *lock)
-{
-  if (!lock->guard.done)
-    return EINVAL;
-  if (lock->runcount != 0)
-    return EBUSY;
-  DeleteCriticalSection (&lock->lock);
-  if (lock->waiting_readers.array != NULL)
-    free (lock->waiting_readers.array);
-  if (lock->waiting_writers.array != NULL)
-    free (lock->waiting_writers.array);
-  lock->guard.done = 0;
-  return 0;
-}
-
-/* --------------------- gl_recursive_lock_t datatype --------------------- */
-
-void
-glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
-{
-  lock->owner = 0;
-  lock->depth = 0;
-  InitializeCriticalSection (&lock->lock);
-  lock->guard.done = 1;
-}
-
-int
-glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
-{
-  if (!lock->guard.done)
-    {
-      if (InterlockedIncrement (&lock->guard.started) == 0)
-        /* This thread is the first one to need this lock.  Initialize it.  */
-        glthread_recursive_lock_init (lock);
-      else
-        /* Yield the CPU while waiting for another thread to finish
-           initializing this lock.  */
-        while (!lock->guard.done)
-          Sleep (0);
-    }
-  {
-    DWORD self = GetCurrentThreadId ();
-    if (lock->owner != self)
-      {
-        EnterCriticalSection (&lock->lock);
-        lock->owner = self;
-      }
-    if (++(lock->depth) == 0) /* wraparound? */
-      {
-        lock->depth--;
-        return EAGAIN;
-      }
-  }
-  return 0;
-}
-
-int
-glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
-{
-  if (lock->owner != GetCurrentThreadId ())
-    return EPERM;
-  if (lock->depth == 0)
-    return EINVAL;
-  if (--(lock->depth) == 0)
-    {
-      lock->owner = 0;
-      LeaveCriticalSection (&lock->lock);
-    }
-  return 0;
-}
-
-int
-glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
-{
-  if (lock->owner != 0)
-    return EBUSY;
-  DeleteCriticalSection (&lock->lock);
-  lock->guard.done = 0;
-  return 0;
-}
-
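
The removed recursive lock tracks an owner thread id and a nesting depth around a non-recursive CRITICAL_SECTION: only the first acquisition by a thread enters the underlying lock, later ones just bump the depth counter. The same technique expressed with POSIX primitives, as an illustrative sketch (all names invented; like the removed code, it relies on the owner field only ever comparing equal for the thread that stored it):

    #include <errno.h>
    #include <pthread.h>

    struct recursive_lock
    {
      pthread_mutex_t mutex;   /* non-recursive base lock, e.g. PTHREAD_MUTEX_INITIALIZER */
      pthread_t owner;
      int owner_valid;         /* 0 while no thread owns the lock */
      unsigned long depth;
    };

    int
    recursive_lock_acquire (struct recursive_lock *lock)
    {
      if (!(lock->owner_valid && pthread_equal (lock->owner, pthread_self ())))
        {
          /* Not already owned by this thread: take the base mutex first.  */
          int err = pthread_mutex_lock (&lock->mutex);
          if (err != 0)
            return err;
          lock->owner = pthread_self ();
          lock->owner_valid = 1;
        }
      lock->depth++;             /* nested acquisitions only increment the counter */
      return 0;
    }

    int
    recursive_lock_release (struct recursive_lock *lock)
    {
      if (!(lock->owner_valid && pthread_equal (lock->owner, pthread_self ())))
        return EPERM;
      if (lock->depth == 0)
        return EINVAL;
      if (--lock->depth == 0)
        {
          lock->owner_valid = 0;
          return pthread_mutex_unlock (&lock->mutex);
        }
      return 0;
    }
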
-/* -------------------------- gl_once_t datatype -------------------------- */
-
-void
-glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
-{
-  if (once_control->inited <= 0)
-    {
-      if (InterlockedIncrement (&once_control->started) == 0)
-        {
-          /* This thread is the first one to come to this once_control.  */
-          InitializeCriticalSection (&once_control->lock);
-          EnterCriticalSection (&once_control->lock);
-          once_control->inited = 0;
-          initfunction ();
-          once_control->inited = 1;
-          LeaveCriticalSection (&once_control->lock);
-        }
-      else
-        {
-          /* Undo last operation.  */
-          InterlockedDecrement (&once_control->started);
-          /* Some other thread has already started the initialization.
-             Yield the CPU while waiting for the other thread to finish
-             initializing and taking the lock.  */
-          while (once_control->inited < 0)
-            Sleep (0);
-          if (once_control->inited <= 0)
-            {
-              /* Take the lock.  This blocks until the other thread has
-                 finished calling the initfunction.  */
-              EnterCriticalSection (&once_control->lock);
-              LeaveCriticalSection (&once_control->lock);
-              if (!(once_control->inited > 0))
-                abort ();
-            }
-        }
-    }
-}
-
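
The removed glthread_once_func builds once-only initialization out of InterlockedIncrement plus a critical section; in portable C11 the same guarantee comes from call_once. A short illustrative sketch (the table names are invented):

    #include <threads.h>

    static once_flag table_once = ONCE_FLAG_INIT;
    static int table[256];

    static void
    build_table (void)
    {
      for (int i = 0; i < 256; i++)
        table[i] = i * i;
    }

    int
    table_lookup (int i)
    {
      call_once (&table_once, build_table);   /* build_table runs exactly once */
      return table[i & 0xff];
    }
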
 #endif
 
 /* ========================================================================= */