Reimplement robust mutex handling.
author: drepper <drepper>
Wed, 15 Feb 2006 16:56:21 +0000 (16:56 +0000)
committer: drepper <drepper>
Wed, 15 Feb 2006 16:56:21 +0000 (16:56 +0000)
nptl/pthread_mutex_lock.c
nptl/pthread_mutex_timedlock.c
nptl/pthread_mutex_trylock.c
nptl/pthread_mutex_unlock.c

index 420711a..dd22567 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -27,6 +27,7 @@
 #ifndef LLL_MUTEX_LOCK
 # define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
 # define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
 #endif
 
 
@@ -36,6 +37,7 @@ __pthread_mutex_lock (mutex)
 {
   assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
 
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
   int retval = 0;
@@ -107,60 +109,83 @@ __pthread_mutex_lock (mutex)
       break;
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-      /* Check whether we already hold the mutex.  */
-      if (abs (mutex->__data.__owner) == id)
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+      oldval = mutex->__data.__lock;
+      do
        {
-         /* Just bump the counter.  */
-         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
-           /* Overflow of the counter.  */
-           return EAGAIN;
-
-         ++mutex->__data.__count;
-
-         return 0;
-       }
-
-      /* We have to get the mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval;
+             while ((newval
+                     = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                            id, oldval))
+                    != oldval)
+               {
+                 if ((newval & FUTEX_OWNER_DIED) == 0)
+                   goto normal;
+                 oldval = newval;
+               }
 
-      mutex->__data.__count = 1;
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+             ENQUEUE_MUTEX (mutex);
+
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  If we are not supposed
+                to increment __nusers we actually have to decrement
+                it here.  */
+#ifdef NO_INCR
+             --mutex->__data.__nusers;
+#endif
 
-      goto robust;
+             return EOWNERDEAD;
+           }
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (abs (mutex->__data.__owner) == id, 0))
-       return EDEADLK;
+       normal:
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
+                               == id, 0))
+           {
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
+               return EDEADLK;
 
-      /* FALLTHROUGH */
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+               {
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+                 ++mutex->__data.__count;
 
-    robust:
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
-       {
-         /* This mutex is now not recoverable.  */
-         mutex->__data.__count = 0;
-         lll_mutex_unlock (mutex->__data.__lock);
-         return ENOTRECOVERABLE;
-       }
+                 return 0;
+               }
+           }
 
-      /* This mutex is either healthy or we can try to recover it.  */
-      assert (mutex->__data.__owner == 0
-             || mutex->__data.__owner == PTHREAD_MUTEX_OWNERDEAD);
+         oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);
 
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_OWNERDEAD, 0))
-       {
-         retval = EOWNERDEAD;
-         /* We signal ownership of a not yet recovered robust mutex
-            by storing the negative thread ID.  */
-         id = -id;
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             lll_mutex_unlock (mutex->__data.__lock);
+             return ENOTRECOVERABLE;
+           }
        }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
 
+      mutex->__data.__count = 1;
       ENQUEUE_MUTEX (mutex);
       break;
 
index bc4ead7..b69caed 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -28,6 +28,7 @@ pthread_mutex_timedlock (mutex, abstime)
      pthread_mutex_t *mutex;
      const struct timespec *abstime;
 {
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
   int result = 0;
 
@@ -103,67 +104,83 @@ pthread_mutex_timedlock (mutex, abstime)
       break;
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-      /* Check whether we already hold the mutex.  */
-      if (abs (mutex->__data.__owner) == id)
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+      oldval = mutex->__data.__lock;
+      do
        {
-         /* Just bump the counter.  */
-         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
-           /* Overflow of the counter.  */
-           return EAGAIN;
-
-         ++mutex->__data.__count;
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval;
+             while ((newval
+                     = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                            id, oldval))
+                    != oldval)
+               {
+                 if ((newval & FUTEX_OWNER_DIED) == 0)
+                   goto normal;
+                 oldval = newval;
+               }
 
-         goto out;
-       }
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
-      /* We have to get the mutex.  */
-      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+             ENQUEUE_MUTEX (mutex);
 
-      if (result != 0)
-       goto out;
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  */
+             return EOWNERDEAD;
+           }
 
-      /* Only locked once so far.  */
-      mutex->__data.__count = 1;
-      goto robust;
+       normal:
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
+                               == id, 0))
+           {
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
+               return EDEADLK;
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (abs (mutex->__data.__owner) == id, 0))
-       return EDEADLK;
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+               {
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
 
-      /* FALLTHROUGH */
+                 ++mutex->__data.__count;
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+                 return 0;
+               }
+           }
 
-      if (result != 0)
-       goto out;
+         result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
+                                              id);
 
-    robust:
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
-       {
-         /* This mutex is now not recoverable.  */
-         mutex->__data.__count = 0;
-         lll_mutex_unlock (mutex->__data.__lock);
-         return ENOTRECOVERABLE;
-       }
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             lll_mutex_unlock (mutex->__data.__lock);
+             return ENOTRECOVERABLE;
+           }
 
-      /* This mutex is either healthy or we can try to recover it.  */
-      assert (mutex->__data.__owner == 0
-             || mutex->__data.__owner == PTHREAD_MUTEX_OWNERDEAD);
+         if (result == ETIMEDOUT || result == EINVAL)
+           goto out;
 
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_OWNERDEAD, 0))
-       {
-         result = EOWNERDEAD;
-         /* We signal ownership of a not yet recovered robust mutex
-            by storing the negative thread ID.  */
-         mutex->__data.__owner = -id;
-         ++mutex->__data.__nusers;
+         oldval = result;
        }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
 
+      mutex->__data.__count = 1;
       ENQUEUE_MUTEX (mutex);
       break;
 
index ae73ecc..5a13ea6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -28,6 +28,7 @@ int
 __pthread_mutex_trylock (mutex)
      pthread_mutex_t *mutex;
 {
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
   switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
@@ -77,73 +78,88 @@ __pthread_mutex_trylock (mutex)
 
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-      /* Check whether we already hold the mutex.  */
-      if (abs (mutex->__data.__owner) == id)
-       {
-         /* Just bump the counter.  */
-         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
-           /* Overflow of the counter.  */
-           return EAGAIN;
-
-         ++mutex->__data.__count;
-
-         return 0;
-       }
-
-      /* We have to get the mutex.  */
-      if (lll_mutex_trylock (mutex->__data.__lock) == 0)
-       {
-         mutex->__data.__count = 1;
-
-         goto robust;
-       }
-
-      break;
-
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (abs (mutex->__data.__owner) == id, 0))
-       return EDEADLK;
-
-      /* FALLTHROUGH */
-
     case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
-       break;
-
-    robust:
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
-       {
-         /* This mutex is now not recoverable.  */
-         mutex->__data.__count = 0;
-         lll_mutex_unlock (mutex->__data.__lock);
-         return ENOTRECOVERABLE;
-       }
-
-      /* This mutex is either healthy or we can try to recover it.  */
-      assert (mutex->__data.__owner == 0
-             || mutex->__data.__owner == PTHREAD_MUTEX_OWNERDEAD);
-
-      /* Record the ownership.  */
-      int retval = 0;
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_OWNERDEAD, 0))
+      oldval = mutex->__data.__lock;
+      do
        {
-         retval = EOWNERDEAD;
-         /* We signal ownership of a not yet recovered robust
-            mutex by storing the negative thread ID.  */
-         id = -id;
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval;
+             while ((newval
+                     = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                            id, oldval))
+                    != oldval)
+               {
+                 if ((newval & FUTEX_OWNER_DIED) == 0)
+                   goto normal;
+                 oldval = newval;
+               }
+
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+             ENQUEUE_MUTEX (mutex);
+
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  */
+             return EOWNERDEAD;
+           }
+
+       normal:
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
+                               == id, 0))
+           {
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
+               return EDEADLK;
+
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+               {
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
+
+                 ++mutex->__data.__count;
+
+                 return 0;
+               }
+           }
+
+         oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
+         if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
+           return EBUSY;
+
+       robust:
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             if (oldval == id)
+               lll_mutex_unlock (mutex->__data.__lock);
+             return ENOTRECOVERABLE;
+           }
        }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
 
       ENQUEUE_MUTEX (mutex);
 
       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
+      mutex->__data.__count = 1;
+
+      return 0;
 
-      return retval
-;
     default:
       /* Correct code cannot set any other type.  */
       return EINVAL;
index 4d87381..d41eefe 100644 (file)
@@ -41,23 +41,32 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
       if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
-      break;
+      goto normal;
 
     case PTHREAD_MUTEX_ERRORCHECK_NP:
       /* Error checking mutex.  */
       if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_mutex_islocked (mutex->__data.__lock))
        return EPERM;
-      break;
+      /* FALLTHROUGH */
 
     case PTHREAD_MUTEX_TIMED_NP:
     case PTHREAD_MUTEX_ADAPTIVE_NP:
-      /* Normal mutex.  Nothing special to do.  */
+      /* Always reset the owner field.  */
+    normal:
+      mutex->__data.__owner = 0;
+      if (decr)
+       /* One less user.  */
+       --mutex->__data.__nusers;
+
+      /* Unlock.  */
+      lll_mutex_unlock (mutex->__data.__lock);
       break;
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
       /* Recursive mutex.  */
-      if (mutex->__data.__owner == -THREAD_GETMEM (THREAD_SELF, tid))
+      if ((mutex->__data.__lock & FUTEX_TID_MASK)
+         == THREAD_GETMEM (THREAD_SELF, tid))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
@@ -78,7 +87,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
     case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      if (abs (mutex->__data.__owner) != THREAD_GETMEM (THREAD_SELF, tid)
+      if ((mutex->__data.__lock & FUTEX_TID_MASK)
+         != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_mutex_islocked (mutex->__data.__lock))
        return EPERM;
 
@@ -86,15 +96,21 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
         making the state consistent, mark the mutex as unrecoverable
         and make all waiters.  */
       if (__builtin_expect (mutex->__data.__owner
-                           == -THREAD_GETMEM (THREAD_SELF, tid)
-                           || (mutex->__data.__owner
-                               == PTHREAD_MUTEX_NOTRECOVERABLE), 0))
+                           == PTHREAD_MUTEX_INCONSISTENT, 0))
       notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
 
     robust:
       /* Remove mutex from the list.  */
       DEQUEUE_MUTEX (mutex);
+
+      mutex->__data.__owner = newowner;
+      if (decr)
+       /* One less user.  */
+       --mutex->__data.__nusers;
+
+      /* Unlock.  */
+      lll_robust_mutex_unlock (mutex->__data.__lock);
       break;
 
     default:
@@ -102,15 +118,6 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
       return EINVAL;
     }
 
-  /* Always reset the owner field.  */
-  mutex->__data.__owner = newowner;
-  if (decr)
-    /* One less user.  */
-    --mutex->__data.__nusers;
-
-  /* Unlock.  */
-  lll_mutex_unlock (mutex->__data.__lock);
-
   return 0;
 }