Adjust use of lll_futex_* macros.
[kopensolaris-gnu/glibc.git] / nptl / pthread_mutex_timedlock.c
1 /* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
2    This file is part of the GNU C Library.
3    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5    The GNU C Library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public
7    License as published by the Free Software Foundation; either
8    version 2.1 of the License, or (at your option) any later version.
9
10    The GNU C Library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14
15    You should have received a copy of the GNU Lesser General Public
16    License along with the GNU C Library; if not, write to the Free
17    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18    02111-1307 USA.  */
19
20 #include <assert.h>
21 #include <errno.h>
22 #include <time.h>
23 #include "pthreadP.h"
24 #include <lowlevellock.h>
25 #include <not-cancel.h>
26
27
/* Lock MUTEX, blocking at most until the absolute CLOCK_REALTIME time
   ABSTIME.  Returns 0 on success, or an error code visible below:
   ETIMEDOUT, EINVAL, EDEADLK (error-checking kinds), EAGAIN (recursion
   counter overflow), EOWNERDEAD / ENOTRECOVERABLE (robust kinds).
   Defined with old-style (K&R) parameter declarations, matching the
   file's era.  */
int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  /* Kernel TID of the caller; recorded as the owner and, for the
     robust/PI kinds, stored directly in the futex word.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  goto out;
	}

      /* We have to get the mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);

      if (result != 0)
	goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
	return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* On a uniprocessor spinning cannot help — the owner cannot run
	 while we spin — so block immediately.  */
      if (! __is_smp)
	goto simple;

      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
	{
	  int cnt = 0;
	  /* Spin limit adapts to past contention history (__spins),
	     capped at MAX_ADAPTIVE_COUNT.  */
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  /* Spun long enough without success; fall back to
		     blocking in the kernel.  */
		  result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
		  break;
		}

#ifdef BUSY_WAIT_NOP
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (lll_mutex_trylock (mutex->__data.__lock) != 0);

	  /* Exponential moving average of the observed spin count,
	     used to size the next spin phase.  */
	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      /* Publish the mutex we are about to operate on in
	 list_op_pending before touching the lock word, so the robust
	 cleanup machinery can find it if this thread dies
	 mid-operation.  Cleared again on every exit path below.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex,
		 preserving any waiters bit.  */
	      int newval = id | (oldval & FUTEX_WAITERS);

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);
	      if (newval != oldval)
		{
		  /* CAS lost a race; retry with the freshly read
		     value.  */
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  */
	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      if (mutex->__data.__kind
		  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (mutex->__data.__kind
		  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
					       id);

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_mutex_unlock (mutex->__data.__lock);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }

	  if (result == ETIMEDOUT || result == EINVAL)
	    goto out;

	  /* Apart from the two errors above, RESULT carries the lock
	     word value (as used by the loop condition); retry the
	     whole sequence if the owner died meanwhile.  */
	  oldval = result;
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	/* Fast path: take an uncontended lock by installing our TID in
	   the futex word.  */
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      id, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything: blocking, priority inheritance, and handing
	       the lock over.  ABSTIME is passed to FUTEX_LOCK_PI
	       unchanged — NOTE(review): this relies on the kernel
	       taking an absolute timeout here; no relative conversion
	       is performed, contrary to what an older comment
	       claimed.  */
	    INTERNAL_SYSCALL_DECL (__err);

	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      FUTEX_LOCK_PI, 1, abstime);
	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
	      {
		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
		  return ETIMEDOUT;

		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
		  {
		    /* EDEADLK from the kernel can only be legitimate
		       for kinds that do not detect deadlock in
		       userland above.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
		    /* ESRCH can happen only for non-robust PI mutexes where
		       the owner of the lock died.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
			    || !robust);

		    /* Delay the thread until the timeout is reached.
		       Then return ETIMEDOUT.  */
		    struct timespec reltime;
		    struct timespec now;

		    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
				      &now);
		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		    if (reltime.tv_nsec < 0)
		      {
			reltime.tv_nsec += 1000000000;
			--reltime.tv_sec;
		      }
		    if (reltime.tv_sec >= 0)
		      /* Sleep the remaining time; not a cancellation
			 point, and restarted with the updated REM
			 value if interrupted early.  */
		      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
			continue;

		    return ETIMEDOUT;
		  }

		return INTERNAL_SYSCALL_ERRNO (e, __err);
	      }

	    oldval = mutex->__data.__lock;

	    /* Only a robust mutex may carry the owner-died bit at this
	       point.  */
	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  */
	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  Release the lock we
	       just acquired through the kernel and report failure.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      FUTEX_UNLOCK_PI, 0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
	}
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	/* Priority-protect protocol.  The lock word encodes the
	   ceiling in the PTHREAD_MUTEX_PRIO_CEILING_MASK bits and the
	   lock state in the low bits: CEILVAL = free, CEILVAL | 1 =
	   locked, CEILVAL | 2 = locked with waiters.  The outer loop
	   restarts whenever the ceiling changes under us.  */
	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    /* Locking fails if the caller's priority exceeds the
	       mutex's ceiling.  */
	    if (__pthread_current_priority () > ceiling)
	      {
		result = EINVAL;
	      failpp:
		/* Common failure exit: drop any ceiling boost we
		   applied before returning RESULT.  */
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return result;
	      }

	    /* Apply the ceiling boost to this thread via the TPP
	       machinery.  */
	    result = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (result)
	      return result;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    /* Try the uncontended transition: free -> locked.  */
	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						     ceilval | 1, ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		/* Mark the lock as having waiters before sleeping.  */
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  /* The ceiling changed; restart the outer loop with
		     the new value.  */
		  break;

		if (oldval != ceilval)
		  {
		    /* Reject invalid timeouts.  */
		    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
		      {
			result = EINVAL;
			goto failpp;
		      }

		    struct timeval tv;
		    struct timespec rt;

		    /* Get the current time.  */
		    (void) __gettimeofday (&tv, NULL);

		    /* Compute relative timeout.  */
		    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
		    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
		    if (rt.tv_nsec < 0)
		      {
			rt.tv_nsec += 1000000000;
			--rt.tv_sec;
		      }

		    /* Already timed out?  */
		    if (rt.tv_sec < 0)
		      {
			result = ETIMEDOUT;
			goto failpp;
		      }

		    lll_futex_timed_wait (&mutex->__data.__lock,
					  ceilval | 2, &rt,
					  // XYZ check mutex flag
					  LLL_SHARED);
		  }
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}