Rename __pthread_spin_unlock back to __pthread_unlock.
[kopensolaris-gnu/glibc.git] / linuxthreads / rwlock.c
/* Read-write lock implementation.
   Copyright (C) 1998, 2000 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Xavier Leroy <Xavier.Leroy@inria.fr>
   and Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "internals.h"
#include "queue.h"
#include "spinlock.h"
#include "restart.h"

/*
 * Check whether the calling thread already owns one or more read locks on the
 * specified lock. If so, return a pointer to the read lock info structure
 * corresponding to that lock.
 */

static pthread_readlock_info *
rwlock_is_in_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info *info;

  for (info = self->p_readlock_list; info != NULL; info = info->pr_next)
    {
      if (info->pr_lock == rwlock)
        return info;
    }

  return NULL;
}

/*
 * Add a new lock to the thread's list of locks for which it has a read lock.
 * A new info node must be allocated for this, which is taken from the thread's
 * free list, or by calling malloc. If malloc fails, a null pointer is
 * returned. Otherwise the lock info structure is initialized and pushed
 * onto the thread's list.
 */

static pthread_readlock_info *
rwlock_add_to_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info *info = self->p_readlock_free;

  if (info != NULL)
    self->p_readlock_free = info->pr_next;
  else
    info = malloc(sizeof *info);

  if (info == NULL)
    return NULL;

  info->pr_lock_count = 1;
  info->pr_lock = rwlock;
  info->pr_next = self->p_readlock_list;
  self->p_readlock_list = info;

  return info;
}

/*
 * If the thread owns a read lock over the given pthread_rwlock_t,
 * and this read lock is tracked in the thread's lock list,
 * this function returns a pointer to the info node in that list.
 * It also decrements the lock count within that node, and if
 * it reaches zero, it removes the node from the list.
 * If nothing is found, it returns a null pointer.
 */

static pthread_readlock_info *
rwlock_remove_from_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info **pinfo;

  for (pinfo = &self->p_readlock_list; *pinfo != NULL; pinfo = &(*pinfo)->pr_next)
    {
      if ((*pinfo)->pr_lock == rwlock)
        {
          pthread_readlock_info *info = *pinfo;
          if (--info->pr_lock_count == 0)
            *pinfo = info->pr_next;
          return info;
        }
    }

  return NULL;
}

/*
 * This function checks whether the conditions are right to place a read lock.
 * It returns 1 if so, otherwise zero. The rwlock's internal lock must be
 * locked upon entry.
 */

static int
rwlock_can_rdlock(pthread_rwlock_t *rwlock, int have_lock_already)
{
  /* Can't readlock; it is write locked. */
  if (rwlock->__rw_writer != NULL)
    return 0;

  /* Lock prefers readers; get it. */
  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
    return 1;

  /* Lock prefers writers, but none are waiting. */
  if (queue_is_empty(&rwlock->__rw_write_waiting))
    return 1;

  /* Writers are waiting, but this thread already has a read lock */
  if (have_lock_already)
    return 1;

  /* Writers are waiting, and this is a new lock */
  return 0;
}

/*
 * This function helps support brain-damaged recursive read locking
 * semantics required by Unix 98, while maintaining write priority.
 * It determines whether this thread already holds a read lock on the
 * given rwlock. It returns 1 if so, otherwise it returns 0.
 *
 * If the thread has any ``untracked read locks'' then it assumes,
 * just to be safe, that this lock is among them and returns 1.
 *
 * Also, if it finds the thread's lock in the list, it sets the pointer
 * referenced by pexisting to refer to the list entry.
 *
 * If the thread has no untracked locks, and the lock is not found
 * in its list, then the lock is added to the list. If this fails,
 * then *pout_of_mem is set to 1.
 */

static int
rwlock_have_already(pthread_descr *pself, pthread_rwlock_t *rwlock,
    pthread_readlock_info **pexisting, int *pout_of_mem)
{
  pthread_readlock_info *existing = NULL;
  int out_of_mem = 0, have_lock_already = 0;
  pthread_descr self = *pself;

  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
    {
      if (!self)
        self = thread_self();

      existing = rwlock_is_in_list(self, rwlock);

      if (existing != NULL || self->p_untracked_readlock_count > 0)
        have_lock_already = 1;
      else
        {
          existing = rwlock_add_to_list(self, rwlock);
          if (existing == NULL)
            out_of_mem = 1;
        }
    }

  *pout_of_mem = out_of_mem;
  *pexisting = existing;
  *pself = self;

  return have_lock_already;
}

int
__pthread_rwlock_init (pthread_rwlock_t *rwlock,
                       const pthread_rwlockattr_t *attr)
{
  __pthread_init_lock(&rwlock->__rw_lock);
  rwlock->__rw_readers = 0;
  rwlock->__rw_writer = NULL;
  rwlock->__rw_read_waiting = NULL;
  rwlock->__rw_write_waiting = NULL;

  if (attr == NULL)
    {
      rwlock->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
      rwlock->__rw_pshared = PTHREAD_PROCESS_PRIVATE;
    }
  else
    {
      rwlock->__rw_kind = attr->__lockkind;
      rwlock->__rw_pshared = attr->__pshared;
    }

  return 0;
}
strong_alias (__pthread_rwlock_init, pthread_rwlock_init)
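
/* Illustrative usage sketch (not part of the library source): a caller
   creates and destroys a lock through the public interfaces defined in
   this file.  The variable and helper names below are hypothetical and
   error handling is abbreviated.  */
#if 0
#include <pthread.h>

static pthread_rwlock_t example_lock;

static int
example_setup (void)
{
  /* A null attribute pointer selects PTHREAD_RWLOCK_DEFAULT_NP and
     PTHREAD_PROCESS_PRIVATE, as implemented above.  */
  return pthread_rwlock_init (&example_lock, NULL);
}

static int
example_teardown (void)
{
  /* Fails with EBUSY while readers or a writer still hold the lock.  */
  return pthread_rwlock_destroy (&example_lock);
}
#endif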


int
__pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
  int readers;
  _pthread_descr writer;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  readers = rwlock->__rw_readers;
  writer = rwlock->__rw_writer;
  __pthread_unlock (&rwlock->__rw_lock);

  if (readers > 0 || writer != NULL)
    return EBUSY;

  return 0;
}
strong_alias (__pthread_rwlock_destroy, pthread_rwlock_destroy)

int
__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;

  have_lock_already = rwlock_have_already(&self, rwlock,
                                          &existing, &out_of_mem);

  for (;;)
    {
      if (self == NULL)
        self = thread_self ();

      __pthread_lock (&rwlock->__rw_lock, self);

      if (rwlock_can_rdlock(rwlock, have_lock_already))
        break;

      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point */
    }

  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
        existing->pr_lock_count++;
      else
        self->p_untracked_readlock_count++;
    }

  return 0;
}
strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
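
/* Illustrative usage sketch (not part of the library source): Unix 98
   allows the same thread to take the read side recursively, which the
   tracking in rwlock_have_already above supports even when the lock
   prefers writers.  Each rdlock call must be balanced by an unlock.
   The helper name is hypothetical.  */
#if 0
static void
example_recursive_read (pthread_rwlock_t *lock)
{
  pthread_rwlock_rdlock (lock);       /* first read lock */
  pthread_rwlock_rdlock (lock);       /* recursive read lock, same thread */
  /* ... read the shared data ... */
  pthread_rwlock_unlock (lock);       /* releases the inner read lock */
  pthread_rwlock_unlock (lock);       /* releases the outer read lock */
}
#endif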

int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self();
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;
  int retval = EBUSY;

  have_lock_already = rwlock_have_already(&self, rwlock,
      &existing, &out_of_mem);

  __pthread_lock (&rwlock->__rw_lock, self);

  /* 0 is passed here instead of have_lock_already.
     This is to meet Single Unix Spec requirements:
     if writers are waiting, pthread_rwlock_tryrdlock
     does not acquire a read lock, even if the caller has
     one or more read locks already. */

  if (rwlock_can_rdlock(rwlock, 0))
    {
      ++rwlock->__rw_readers;
      retval = 0;
    }

  __pthread_unlock (&rwlock->__rw_lock);

  if (retval == 0)
    {
      if (have_lock_already || out_of_mem)
        {
          if (existing != NULL)
            existing->pr_lock_count++;
          else
            self->p_untracked_readlock_count++;
        }
    }

  return retval;
}
strong_alias (__pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock)
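
/* Illustrative usage sketch (not part of the library source): tryrdlock
   never blocks; per the Single Unix Specification note above it returns
   EBUSY whenever the lock is write locked or writers are queued, even if
   the caller already holds a read lock.  The helper name is
   hypothetical.  */
#if 0
#include <errno.h>

static int
example_try_read (pthread_rwlock_t *lock)
{
  int err = pthread_rwlock_tryrdlock (lock);

  if (err == EBUSY)
    return 0;                         /* busy; caller can retry later */

  /* ... read the shared data ... */
  pthread_rwlock_unlock (lock);
  return 1;
}
#endif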


int
__pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self ();

  while (1)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
        {
          rwlock->__rw_writer = self;
          __pthread_unlock (&rwlock->__rw_lock);
          return 0;
        }

      /* Suspend ourselves, then try again */
      enqueue (&rwlock->__rw_write_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point */
    }
}
strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)


int
__pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
    {
      rwlock->__rw_writer = thread_self ();
      result = 0;
    }
  __pthread_unlock (&rwlock->__rw_lock);

  return result;
}
strong_alias (__pthread_rwlock_trywrlock, pthread_rwlock_trywrlock)


int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  pthread_descr torestart;
  pthread_descr th;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_writer != NULL)
    {
      /* Unlocking a write lock.  */
      if (rwlock->__rw_writer != thread_self ())
        {
          __pthread_unlock (&rwlock->__rw_lock);
          return EPERM;
        }
      rwlock->__rw_writer = NULL;

      if ((rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
           && !queue_is_empty(&rwlock->__rw_read_waiting))
          || (th = dequeue(&rwlock->__rw_write_waiting)) == NULL)
        {
          /* Restart all waiting readers.  */
          torestart = rwlock->__rw_read_waiting;
          rwlock->__rw_read_waiting = NULL;
          __pthread_unlock (&rwlock->__rw_lock);
          while ((th = dequeue (&torestart)) != NULL)
            restart (th);
        }
      else
        {
          /* Restart one waiting writer.  */
          __pthread_unlock (&rwlock->__rw_lock);
          restart (th);
        }
    }
  else
    {
      /* Unlocking a read lock.  */
      if (rwlock->__rw_readers == 0)
        {
          __pthread_unlock (&rwlock->__rw_lock);
          return EPERM;
        }

      --rwlock->__rw_readers;
      if (rwlock->__rw_readers == 0)
        /* Restart one waiting writer, if any.  */
        th = dequeue (&rwlock->__rw_write_waiting);
      else
        th = NULL;

      __pthread_unlock (&rwlock->__rw_lock);
      if (th != NULL)
        restart (th);

      /* Recursive lock fixup */

      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
        {
          pthread_descr self = thread_self();
          pthread_readlock_info *victim = rwlock_remove_from_list(self, rwlock);

          if (victim != NULL)
            {
              if (victim->pr_lock_count == 0)
                {
                  victim->pr_next = self->p_readlock_free;
                  self->p_readlock_free = victim;
                }
            }
          else
            {
              if (self->p_untracked_readlock_count > 0)
                self->p_untracked_readlock_count--;
            }
        }
    }

  return 0;
}
strong_alias (__pthread_rwlock_unlock, pthread_rwlock_unlock)
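
/* Illustrative usage sketch (not part of the library source): a writer
   and a reader protecting a shared counter with the interfaces above.
   The thread routines and the counter are hypothetical.  */
#if 0
#include <pthread.h>

static pthread_rwlock_t counter_lock;
static int counter;

static void *
example_writer (void *arg)
{
  pthread_rwlock_wrlock (&counter_lock);
  ++counter;                          /* exclusive access */
  pthread_rwlock_unlock (&counter_lock);
  return NULL;
}

static void *
example_reader (void *arg)
{
  pthread_rwlock_rdlock (&counter_lock);
  /* Any number of readers may hold the lock here concurrently.  */
  pthread_rwlock_unlock (&counter_lock);
  return NULL;
}
#endif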



int
pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
{
  attr->__lockkind = 0;
  attr->__pshared = 0;

  return 0;
}


int
__pthread_rwlockattr_destroy (pthread_rwlockattr_t *attr)
{
  return 0;
}
strong_alias (__pthread_rwlockattr_destroy, pthread_rwlockattr_destroy)


int
pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *attr, int *pshared)
{
  *pshared = attr->__pshared;
  return 0;
}


int
pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
{
  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
    return EINVAL;

  attr->__pshared = pshared;

  return 0;
}


int
pthread_rwlockattr_getkind_np (const pthread_rwlockattr_t *attr, int *pref)
{
  *pref = attr->__lockkind;
  return 0;
}


int
pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *attr, int pref)
{
  if (pref != PTHREAD_RWLOCK_PREFER_READER_NP
      && pref != PTHREAD_RWLOCK_PREFER_WRITER_NP
      && pref != PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
      && pref != PTHREAD_RWLOCK_DEFAULT_NP)
    return EINVAL;

  attr->__lockkind = pref;

  return 0;
}
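
/* Illustrative usage sketch (not part of the library source): request
   writer preference through an attribute object before initializing a
   lock; rwlock_can_rdlock above then refuses new read locks while
   writers are queued.  The helper name is hypothetical.  */
#if 0
static int
example_writer_preferred_init (pthread_rwlock_t *lock)
{
  pthread_rwlockattr_t attr;
  int err;

  pthread_rwlockattr_init (&attr);
  err = pthread_rwlockattr_setkind_np (&attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
  if (err == 0)
    err = pthread_rwlock_init (lock, &attr);
  pthread_rwlockattr_destroy (&attr);

  return err;
}
#endif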