Analysis of qspinlock (only the fast acquisition path is covered here; the remaining part is the standard MCS lock mechanism)

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
 *   queue               :       | ^--'                          |  :
 *                       :       v                               |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
    struct mcs_spinlock *prev, *next, *node;
    u32 old, tail;
    int idx;

    BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

    if (pv_enabled())
        goto queue;

    if (virt_spin_lock(lock))
        return;

The addition of the pending bit gives qspinlock excellent adaptability to three-way contention:
A holds the lock, B is pending, C watches and waits until it times out.

In effect, the pending bit is first of all something to watch: C's job is exactly that watch-and-wait-until-timeout.
If, within the timeout, the pending waiter is promoted and takes the lock, the fast path turns into one huge,
stretched-out atomic_cmpxchg; the exchange fails when someone else already owns the pending bit, or someone is already queued on the tail.
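Before walking through the states, here is a sketch of how the 32-bit lock word is carved up, paraphrased from include/asm-generic/qspinlock_types.h (exact values depend on kernel version and CONFIG_NR_CPUS, so double-check against your tree):

#define _Q_LOCKED_OFFSET        0
#define _Q_LOCKED_BITS          8
#define _Q_LOCKED_MASK          (((1U << _Q_LOCKED_BITS) - 1) << _Q_LOCKED_OFFSET)

#define _Q_PENDING_OFFSET       (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#define _Q_PENDING_BITS         8       /* 1 when CONFIG_NR_CPUS >= 16K */
#define _Q_PENDING_MASK         (((1U << _Q_PENDING_BITS) - 1) << _Q_PENDING_OFFSET)

#define _Q_TAIL_IDX_OFFSET      (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS        2
#define _Q_TAIL_IDX_MASK        (((1U << _Q_TAIL_IDX_BITS) - 1) << _Q_TAIL_IDX_OFFSET)

#define _Q_TAIL_CPU_OFFSET      (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS        (32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK        (((1U << _Q_TAIL_CPU_BITS) - 1) << _Q_TAIL_CPU_OFFSET)

#define _Q_TAIL_MASK            (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
#define _Q_LOCKED_PENDING_MASK  (_Q_LOCKED_MASK | _Q_PENDING_MASK)

#define _Q_LOCKED_VAL           (1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL          (1U << _Q_PENDING_OFFSET)

So a triple such as (n,0,0) in the state diagram above means: tail field n, pending bit clear, locked byte clear; and val & ~_Q_LOCKED_MASK is non-zero exactly when the pending bit or the tail is set.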

1. If only the pending bit is set, wait for the pending state to change
    /*
     * Wait for in-progress pending->locked hand-overs with a bounded
     * number of spins so that we guarantee forward progress.
     *
     * 0,1,0 -> 0,0,1
     */
    if (val == _Q_PENDING_VAL) {
        1.1 This exists to cope with light contention: only the pending bit and the lock itself are being fought over (which covers most three-thread contention).
        1.2 Under such weak contention there is no need to drop into the deeper locking logic.
        1.3 We just spin briefly while watching the pending state, with the implicit expectation that nobody else will join in and we will get the lock shortly (see the sketch of the wait primitive after this block).
        int cnt = _Q_PENDING_LOOPS;
        val = smp_cond_load_acquire(&lock->val.counter,
                           (VAL != _Q_PENDING_VAL) || !cnt--);
    }
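If the VAL in the condition above looks mysterious: smp_cond_load_acquire() is a macro whose generic fallback in asm-generic/barrier.h spins on the location and exposes each freshly loaded value to the condition expression as VAL; architectures such as arm64 override the busy loop with a more efficient wait. A rough, from-memory sketch of the generic form:

#define smp_cond_load_relaxed(ptr, cond_expr) ({          \
    typeof(ptr) __PTR = (ptr);                            \
    typeof(*ptr) VAL;                                     \
    for (;;) {                                            \
        VAL = READ_ONCE(*__PTR);                          \
        if (cond_expr)                                    \
            break;                                        \
        cpu_relax();                                      \
    }                                                     \
    VAL;                                                  \
})

#define smp_cond_load_acquire(ptr, cond_expr) ({          \
    typeof(*ptr) _val;                                    \
    _val = smp_cond_load_relaxed(ptr, cond_expr);         \
    smp_acquire__after_ctrl_dep();                        \
    _val;                                                 \
})

With (VAL != _Q_PENDING_VAL) || !cnt-- as the condition, the loop ends either when the word stops being exactly (0,1,0) or after _Q_PENDING_LOOPS iterations, so the wait stays bounded and forward progress is guaranteed.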
2. The pending state changed, or we timed out
    2.1 Everything is zero -> go and take the lock
    2.2 The lock was taken, but nobody is pending and nobody is queued on the tail -> try to grab the pending bit
    2.3 Still in the pending state after the timeout
    2.4 Somebody took the lock and the word is pending again -> go queue up patiently
    2.5 Somebody is queued on the tail -> the lock and pending bits no longer matter -> go queue up patiently
    /*
     * If we observe any contention; queue.
     */
    Handling of cases 2.3 / 2.4 / 2.5
    if (val & ~_Q_LOCKED_MASK)
        The hope of a fast acquisition is all but gone; except for the case where someone simply keeps the lock held (locked byte only), every other pattern leads to the queue
        goto queue;

    /*
     * trylock || pending
     *
     * 0,0,0 -> 0,0,1 ; trylock
     * 0,0,1 -> 0,1,1 ; pending
     */
    If somebody simply keeps the lock held, we still try to forcibly claim the pending bit
    val = queued_fetch_set_pending_acquire(lock);
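    In its generic form, queued_fetch_set_pending_acquire() is simply an atomic fetch-or of the pending bit with acquire ordering (x86 overrides it with a bit-test-and-set); roughly, from memory:

    static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
    {
        /* Set the pending bit; return the word as it was before the OR. */
        return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
    }

    Note that the value returned is the word before the OR, which is exactly what the "look back" check below inspects.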

    /*
     * If we observe any contention; undo and queue.
     */
    Look back: did we actually win the pending bit?
    if (unlikely(val & ~_Q_LOCKED_MASK)) {
        What?! Somebody grabbed pending ahead of us?! Off to the queue we go
        if (!(val & _Q_PENDING_MASK))
            What?! What?! Somebody is already queued on the tail?! Quickly wipe out the pending bit we just wrote, then off to the queue
            clear_pending(lock);
            Suppose that right as we clear pending here, the queued waiters all suddenly take and fully
            release the lock, a new arrival starts to write pending again, or someone is inspecting
            pending, and our clear wipes it out: would that be a problem?
            No, because this is the last point at which our pending bit has any effect; past here it carries no meaning
        goto queue;
    }
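    For completeness, clear_pending() itself is tiny: with an 8-bit pending field it is a plain byte store, with a 1-bit field it is an atomic andnot. A from-memory sketch of both variants in qspinlock.c:

    #if _Q_PENDING_BITS == 8
    static __always_inline void clear_pending(struct qspinlock *lock)
    {
        /* Pending occupies its own byte; a plain store clears it. */
        WRITE_ONCE(lock->pending, 0);
    }
    #else
    static __always_inline void clear_pending(struct qspinlock *lock)
    {
        /* Pending shares the word with the tail; clear just that bit. */
        atomic_andnot(_Q_PENDING_VAL, &lock->val);
    }
    #endif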

    /*
     * We're pending, wait for the owner to go away.
     *
     * 0,1,1 -> 0,1,0
     *
     * this wait loop must be a load-acquire such that we match the
     * store-release that clears the locked bit and create lock
     * sequentiality; this is because not all
     * clear_pending_set_locked() implementations imply full
     * barriers.
     */
    We successfully grabbed the pending bit; now wait for the lock holder to go away
    if (val & _Q_LOCKED_MASK)
        smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));

    /*
     * take ownership and clear the pending bit.
     *
     * 0,1,0 -> 0,0,1
     */
    
    clear_pending_set_locked(lock);
    return;
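    clear_pending_set_locked() performs the (0,1,0) -> (0,0,1) transition in one shot; with an 8-bit pending field the locked and pending bytes are adjacent and can be written as one 16-bit store, otherwise an atomic add does -_Q_PENDING_VAL +_Q_LOCKED_VAL. A from-memory sketch:

    #if _Q_PENDING_BITS == 8
    static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
    {
        /* One 16-bit store: pending byte -> 0, locked byte -> 1. */
        WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
    }
    #else
    static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
    {
        atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
    }
    #endif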

    /*
     * End of pending bit optimistic spinning and beginning of MCS
     * queuing.
     */
queue:
    node = this_cpu_ptr(&mcs_nodes[0]);
    idx = node->count++;
    tail = encode_tail(smp_processor_id(), idx);

    node += idx;
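    The per-CPU MCS nodes and the tail encoding used here look roughly like the following (MAX_NODES is 4: one node each for task, softirq, hardirq and NMI context); quoted from memory, so check against the qspinlock.c version being discussed:

    #define MAX_NODES   4

    struct mcs_spinlock {
        struct mcs_spinlock *next;
        int locked;     /* 1 if lock acquired */
        int count;      /* nesting count, see qspinlock.c */
    };

    static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);

    static inline __pure u32 encode_tail(int cpu, int idx)
    {
        u32 tail;

        tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;    /* +1 so that tail == 0 means "no queue" */
        tail |= idx << _Q_TAIL_IDX_OFFSET;          /* idx < 4, one node per context */

        return tail;
    }

    static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
    {
        int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
        int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

        return per_cpu_ptr(&mcs_nodes[idx], cpu);
    }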

    /*
     * Ensure that we increment the head node->count before initialising
     * the actual node. If the compiler is kind enough to reorder these
     * stores, then an IRQ could overwrite our assignments.
     */
    barrier();

    node->locked = 0;
    node->next = NULL;
    pv_init_node(node);

    /*
     * We touched a (possibly) cold cacheline in the per-cpu queue node;
     * attempt the trylock once more in the hope someone let go while we
     * weren't watching.
     */
    if (queued_spin_trylock(lock))
        goto release;

    /*
     * We have already touched the queueing cacheline; don't bother with
     * pending stuff.
     *
     * p,*,* -> n,*,*
     *
     * RELEASE, such that the stores to @node must be complete.
     */
    old = xchg_tail(lock, tail);
    next = NULL;
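    xchg_tail() swaps only the upper 16 bits of the word when the tail fits in its own halfword, leaving locked and pending untouched; with a 1-bit pending field it falls back to a cmpxchg loop. A from-memory sketch of the halfword variant:

    #if _Q_PENDING_BITS == 8
    static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
    {
        /*
         * Relaxed semantics are enough here: the caller publishes @node
         * via the smp_store_release() to prev->next below.
         */
        return (u32)xchg_relaxed(&lock->tail,
                     tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
    }
    #endif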

    /*
     * if there was a previous node; link it and wait until reaching the
     * head of the waitqueue.
     */
    if (old & _Q_TAIL_MASK) {
        prev = decode_tail(old);

        /*
         * We must ensure that the stores to @node are observed before
         * the write to prev->next. The address dependency from
         * xchg_tail is not sufficient to ensure this because the read
         * component of xchg_tail is unordered with respect to the
         * initialisation of @node.
         */
        smp_store_release(&prev->next, node);

        pv_wait_node(node, prev);
        arch_mcs_spin_lock_contended(&node->locked);

        /*
         * While waiting for the MCS lock, the next pointer may have
         * been set by another lock waiter. We optimistically load
         * the next pointer & prefetch the cacheline for writing
         * to reduce latency in the upcoming MCS unlock operation.
         */
        next = READ_ONCE(node->next);
        if (next)
            prefetchw(next);
    }
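    The two arch_mcs_* helpers used here and at the hand-off near the end are, in their generic form (kernel/locking/mcs_spinlock.h), just an acquire-spin on node->locked and a release-store of 1; roughly:

    /* Spin until our predecessor sets node->locked to a non-zero value. */
    #define arch_mcs_spin_lock_contended(l)       \
    do {                                          \
        smp_cond_load_acquire(l, VAL);            \
    } while (0)

    /* Hand the MCS lock to the next waiter by setting its node->locked. */
    #define arch_mcs_spin_unlock_contended(l)     \
        smp_store_release((l), 1)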

    /*
     * we're at the head of the waitqueue, wait for the owner & pending to
     * go away.
     *
     * *,x,y -> *,0,0
     *
     * this wait loop must use a load-acquire such that we match the
     * store-release that clears the locked bit and create lock
     * sequentiality; this is because the set_locked() function below
     * does not imply a full barrier.
     *
     * The PV pv_wait_head_or_lock function, if active, will acquire
     * the lock and return a non-zero value. So we have to skip the
     * smp_cond_load_acquire() call. As the next PV queue head hasn't been
     * designated yet, there is no way for the locked value to become
     * _Q_SLOW_VAL. So both the set_locked() and the
     * atomic_cmpxchg_relaxed() calls will be safe.
     *
     * If PV isn't active, 0 will be returned instead.
     *
     */
    if ((val = pv_wait_head_or_lock(lock, node)))
        goto locked;

    val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
    /*
     * claim the lock:
     *
     * n,0,0 -> 0,0,1 : lock, uncontended
     * *,*,0 -> *,*,1 : lock, contended
     *
     * If the queue head is the only one in the queue (lock value == tail)
     * and nobody is pending, clear the tail code and grab the lock.
     * Otherwise, we only need to grab the lock.
     */

    /* In the PV case we might already have _Q_LOCKED_VAL set */
    if ((val & _Q_TAIL_MASK) == tail) {
        /*
         * The smp_cond_load_acquire() call above has provided the
         * necessary acquire semantics required for locking.
         */
        old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
        if (old == val)
            goto release; /* No contention */
    }

    /* Either somebody is queued behind us or _Q_PENDING_VAL is set */
    set_locked(lock);
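    set_locked() only touches the locked byte, which is why the comment above stresses that it does not imply a full barrier; roughly:

    static __always_inline void set_locked(struct qspinlock *lock)
    {
        WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
    }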

    /*
     * contended path; wait for next if not observed yet, release.
     */
    if (!next) {
        while (!(next = READ_ONCE(node->next)))
            cpu_relax();
    }

    arch_mcs_spin_unlock_contended(&next->locked);
    pv_kick_node(lock, next);

release:
    /*
     * release the node
     */
    __this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
