__pthread_mutex_lock_full

/* Slow-path lock acquisition for non-simple mutex kinds.  The portion
   visible here handles the four robust mutex types: the lock word holds
   the owner's TID plus the FUTEX_WAITERS / FUTEX_OWNER_DIED flag bits,
   and the thread's robust list (robust_head) is maintained so the kernel
   can recover the mutex if the owner dies.
   Visible return values: 0 (locked), EOWNERDEAD (previous owner died and
   we now hold an inconsistent mutex), EDEADLK (errorcheck self-deadlock),
   EAGAIN (recursive counter overflow).
   NOTE(review): this excerpt is truncated — the enclosing switch and the
   function continue past the last line shown here (other PTHREAD_MUTEX_*
   cases and the closing braces are not visible in this fragment).  */
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      /* Record the mutex we are about to operate on in op_pending so the
         kernel/recovery code can see an in-flight operation if we die.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
             &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
     see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
     FUTEX_WAITERS flag with other threads, and therefore need to keep it
     set to avoid lost wake-ups.  We have the same requirement in the
     simple mutex algorithm.
     We start with value zero for a normal mutex, and FUTEX_WAITERS if we
     are building the special case mutexes for use from within condition
     variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
    {
      /* Try to acquire the lock through a CAS from 0 (not acquired) to
         our TID | assume_other_futex_waiters.  */
      if (__glibc_likely (oldval == 0))
        {
          oldval
            = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                id | assume_other_futex_waiters, 0);
          if (__glibc_likely (oldval == 0))
        break;
        }

      if ((oldval & FUTEX_OWNER_DIED) != 0)
        {
          /* The previous owner died.  Try locking the mutex.  */
          int newval = id;
#ifdef NO_INCR
          /* We are not taking assume_other_futex_waiters into account
         here simply because we'll set FUTEX_WAITERS anyway.  */
          newval |= FUTEX_WAITERS;
#else
          newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

          newval
        = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                               newval, oldval);

          /* CAS lost the race: reload and retry the whole loop.  */
          if (newval != oldval)
        {
          oldval = newval;
          continue;
        }

          /* We got the mutex.  */
          mutex->__data.__count = 1;
          /* But it is inconsistent unless marked otherwise.  */
          mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

          /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
          __asm ("" ::: "memory");
          ENQUEUE_MUTEX (mutex);
          /* We need to clear op_pending after we enqueue the mutex.  */
          __asm ("" ::: "memory");
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

          /* Note that we deliberately exit here.  If we fall
         through to the end of the function __nusers would be
         incremented which is not correct because the old
         owner has to be discounted.  If we are not supposed
         to increment __nusers we actually have to decrement
         it here.  */
#ifdef NO_INCR
          --mutex->__data.__nusers;
#endif

          return EOWNERDEAD;
        }

      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
        {
          int kind = PTHREAD_MUTEX_TYPE (mutex);
          if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
        {
          /* We do not need to ensure ordering wrt another memory
             access.  Also see comments at ENQUEUE_MUTEX. */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                 NULL);
          /* Errorcheck kind: relocking by the owner is an error.  */
          return EDEADLK;
        }

          if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
        {
          /* We do not need to ensure ordering wrt another memory
             access.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                 NULL);

          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }
        }

      /* We cannot acquire the mutex nor has its owner died.  Thus, try
         to block using futexes.  Set FUTEX_WAITERS if necessary so that
         other threads are aware that there are potentially threads
         blocked on the futex.  Restart if oldval changed in the
         meantime.  */
      if ((oldval & FUTEX_WAITERS) == 0)
        {
          if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                            oldval | FUTEX_WAITERS,
                            oldval)
          != 0)
        {
          oldval = mutex->__data.__lock;
          continue;
        }
          oldval |= FUTEX_WAITERS;
        }

      /* It is now possible that we share the FUTEX_WAITERS flag with
         another thread; therefore, update assume_other_futex_waiters so
         that we do not forget about this when handling other cases
         above and thus do not cause lost wake-ups.  */
      assume_other_futex_waiters |= FUTEX_WAITERS;

      /* Block using the futex and reload current lock value.  */
      lll_futex_wait (&mutex->__data.__lock, oldval,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
      oldval = mutex->__data.__lock;
    }
      /* NOTE(review): excerpt ends here; the switch statement and the
         function body continue beyond this fragment.  */

/* Block on *FUTEXP while it still contains VAL.  An untimed wait is
   simply a timed wait with a NULL timeout (i.e. wait forever).  */
# define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait (futexp, val, NULL, private)

/* Issue the FUTEX_WAIT operation (4 syscall arguments) on *FUTEXP.
   __lll_private_flag ORs in FUTEX_PRIVATE_FLAG when PRIVATE says the
   futex is process-private, avoiding the shared-futex kernel path.  */
# define lll_futex_timed_wait(futexp, val, timeout, private)     \
  lll_futex_syscall (4, futexp,                                 \
             __lll_private_flag (FUTEX_WAIT, private),  \
             val, timeout)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值