rk3288: implementation of the Linux mutex

This article dissects the mutex structure in the Linux kernel and walks through the fastpath and slowpath implementations of mutex_lock and mutex_unlock, touching on atomic operations, spinlocks, and wait-list management. The focus is on how the fastpath acquires the lock quickly and how the slowpath handles contention.

The mutex kernel structure

// include/linux/mutex.h
struct mutex {
    /* 1: unlocked, 0: locked, negative: locked, possible waiters */
    atomic_t        count;										//1. 1 = unlocked, 0 = locked, negative = locked with waiters queued
    spinlock_t      wait_lock;									//2. the mutex implementation relies on a spinlock internally
    struct list_head    wait_list;								//3. tasks waiting for the mutex are queued here
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
    struct task_struct  *owner;									//4. owner is only for debugging and optimistic spinning; it does not grant unlock rights
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
    void            *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    struct lockdep_map  dep_map;
#endif
};

After a mutex has been initialized, mutex_lock acquires it and mutex_unlock releases it.
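
Before digging into the internals, here is a minimal usage sketch; my_lock, shared_counter and update_counter are illustrative names, not from the kernel source.

// Minimal usage sketch (hypothetical driver snippet)
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);          // statically initialized, count = 1
static int shared_counter;

static void update_counter(void)
{
    mutex_lock(&my_lock);              // may sleep; never call from atomic context
    shared_counter++;
    mutex_unlock(&my_lock);
}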

Implementation of mutex_lock

// kernel/locking/mutex.c
void __sched mutex_lock(struct mutex *lock)
{
    might_sleep();
    /*
     * The locking fastpath is the 1->0 transition from
     * 'unlocked' into 'locked' state.
     */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);			//1. try the fastpath first; fall back to the slowpath on failure
    mutex_set_owner(lock);					//2. by this point the lock is definitely held; record the owner
}

fastpath
arch/arm/include/asm/mutex.h selects the header that provides __mutex_fastpath_lock; different CPU architecture versions pull in different headers.

#if __LINUX_ARM_ARCH__ < 6
#include <asm-generic/mutex-xchg.h>
#else
#include <asm-generic/mutex-dec.h>
#endif

In include/asm-generic/mutex-dec.h:

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    if (unlikely(atomic_dec_return_acquire(count) < 0))			// if count was 1 it is now 0: condition false, mutex acquired
        fail_fn(count);											// new value < 0: take the slowpath and sleep waiting for the mutex
}		// fail_fn is __mutex_lock_slowpath
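
For comparison, ARMv5 and earlier lack the ldrex/strex instructions, so they use the exchange-based variant instead. Its fastpath in include/asm-generic/mutex-xchg.h looks roughly like this (quoted from memory of the same kernel era, so treat the details as approximate):

// include/asm-generic/mutex-xchg.h (approximate)
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    if (unlikely(atomic_xchg(count, 0) != 1))
        /* failed to acquire; mark the lock contended (-1) so the
         * unlock slowpath knows it must wake up waiting tasks */
        if (likely(atomic_xchg_acquire(count, -1) != 1))
            fail_fn(count);
}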

In the common case the mutex counter is 1, so the fastpath acquires the mutex very quickly.
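
To see the counting scheme in isolation, here is a userspace model of both fastpaths, assuming C11 atomics. It only illustrates the arithmetic and is not the kernel code:

// Userspace model of the fastpath arithmetic (C11, illustrative only)
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 1;            // 1: unlocked, 0: locked, <0: waiters

static int fastpath_lock(void)
{
    // atomic_fetch_sub returns the OLD value, so old - 1 is the new value;
    // a new value >= 0 means the lock was taken on the fastpath
    return atomic_fetch_sub(&count, 1) - 1 >= 0;
}

static int fastpath_unlock(void)
{
    // a new value of exactly 1 means nobody is waiting
    return atomic_fetch_add(&count, 1) + 1 == 1;
}

int main(void)
{
    printf("lock:   %s\n", fastpath_lock() ? "fastpath" : "slowpath");   // fastpath
    printf("lock:   %s\n", fastpath_lock() ? "fastpath" : "slowpath");   // slowpath
    printf("unlock: %s\n", fastpath_unlock() ? "fastpath" : "slowpath"); // slowpath
    return 0;
}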

slowpath
If the counter is 0 or negative, __mutex_lock_slowpath takes over, and the caller may have to sleep and wait.

// kernel/locking/mutex.c
__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{   
    struct mutex *lock = container_of(lock_count, struct mutex, count);

    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,						// this is the core function
                NULL, _RET_IP_, NULL, 0);
}
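
Note that the slowpath receives only &lock->count; container_of recovers the enclosing struct mutex from that member pointer. A minimal model of how it works, assuming the usual offsetof-based definition:

// Minimal model of container_of: subtracting the member's offset from the
// member's address yields the address of the enclosing struct.
#include <stddef.h>

#define my_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))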

Below is the core of the slowpath:

static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
            struct lockdep_map *nest_lock, unsigned long ip,
            struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
    struct task_struct *task = current;
    struct mutex_waiter waiter;
    unsigned long flags;
    int ret;

    if (use_ww_ctx) {						// not taken for plain mutexes (use_ww_ctx == 0)
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
            return -EALREADY;
    }

    preempt_disable();						// disable preemption first
    mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

    if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {		// optimization: spin briefly if the owner is running on another CPU
        /* got the lock, yay! */
        preempt_enable();
        return 0;
    }

    spin_lock_mutex(&lock->wait_lock, flags);				// take the spinlock

    /*
     * Once more, try to acquire the lock. Only try-lock the mutex if
     * it is unlocked to reduce unnecessary xchg() operations.
     */
    if (!mutex_is_locked(lock) &&									// check whether the mutex is held; if count == 1 there is no need to wait
        (atomic_xchg_acquire(&lock->count, 0) == 1))				// write 0 (held): an old value of 1 means we just got the lock; <= 0 means it was already held
        goto skip_wait;

    debug_mutex_lock_common(lock, &waiter);
    debug_mutex_add_waiter(lock, &waiter, task);

    /* add waiting tasks to the end of the waitqueue (FIFO): */
    list_add_tail(&waiter.list, &lock->wait_list);
    waiter.task = task;										// put the current task on the mutex's wait_list, a FIFO queue

    lock_contended(&lock->dep_map, ip);

    for (;;) {
        /*
         * Lets try to take the lock again - this is needed even if
         * we get here for the first time (shortly after failing to
         * acquire the lock), to make sure that we get a wakeup once
         * it's unlocked. Later on, if we sleep, this is the
         * operation that gives us the lock. We xchg it to -1, so
         * that when we release the lock, we properly wake up the
         * other waiters. We only attempt the xchg if the count is
         * non-negative in order to avoid unnecessary xchg operations:
         */
        // if count == 1: set it to -1 (locked by us) and break out of the loop (lock acquired)
        // if count == 0: set it to -1 (held by someone else, now marked contended) and fall through
        // if count < 0: it was already locked; skip the xchg and fall through
        if (atomic_read(&lock->count) >= 0 &&
            (atomic_xchg_acquire(&lock->count, -1) == 1))
            break;

        /*
         * got a signal? (This code gets eliminated in the
         * TASK_UNINTERRUPTIBLE case.)
         */
        if (unlikely(signal_pending_state(state, task))) {		// bail out if a signal is pending
            ret = -EINTR;
            goto err;
        }

        if (use_ww_ctx && ww_ctx->acquired > 0) {
            ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
            if (ret)
                goto err;
        }

        __set_task_state(task, state);					// put the current task into a non-RUNNING state

        /* didn't get the lock, go to sleep: */
        spin_unlock_mutex(&lock->wait_lock, flags);		// drop the spinlock
        schedule_preempt_disabled();					// voluntarily schedule away
        spin_lock_mutex(&lock->wait_lock, flags);		// reacquire the spinlock
    }													// we get here woken by a signal or by mutex_unlock; the for loop re-checks
    __set_task_state(task, TASK_RUNNING);				// reaching this point means the lock is ours; set the task back to RUNNING

    mutex_remove_waiter(lock, &waiter, task);			// remove this task from the mutex's wait_list
    /* set it to 0 if there are no waiters left: */
    if (likely(list_empty(&lock->wait_list)))			// if wait_list is empty, nobody else is waiting
        atomic_set(&lock->count, 0);					// set count to 0 (held, no waiters)
    debug_mutex_free_waiter(&waiter);

skip_wait:
    /* got the lock - cleanup and rejoice! */
    lock_acquired(&lock->dep_map, ip);
    mutex_set_owner(lock);										// record the current task as owner

    if (use_ww_ctx) {								// not taken when use_ww_ctx is 0
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        ww_mutex_set_context_slowpath(ww, ww_ctx);
    }

    spin_unlock_mutex(&lock->wait_lock, flags);				// drop the spinlock
    preempt_enable();										// re-enable preemption
    return 0;

err:
    mutex_remove_waiter(lock, &waiter, task);
    spin_unlock_mutex(&lock->wait_lock, flags);
    debug_mutex_free_waiter(&waiter);
    mutex_release(&lock->dep_map, 1, ip);
    preempt_enable();
    return ret;
}
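
The signal check above only matters for interruptible sleeps: mutex_lock() passes TASK_UNINTERRUPTIBLE, so the signal branch is compiled away. The TASK_INTERRUPTIBLE variant is mutex_lock_interruptible(); a usage sketch, reusing the hypothetical my_lock from the earlier example:

// mutex_lock_interruptible() runs the same slowpath with
// state = TASK_INTERRUPTIBLE, so a pending signal aborts the wait
static int do_locked_work(void)
{
    if (mutex_lock_interruptible(&my_lock))
        return -ERESTARTSYS;        // woken by a signal before getting the lock
    /* ... critical section ... */
    mutex_unlock(&my_lock);
    return 0;
}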

Implementation of mutex_unlock

Likewise, mutex_unlock has fastpath and slowpath variants; if the fastpath succeeds, the slowpath is never entered.

// kernel/locking/mutex.c
void __sched mutex_unlock(struct mutex *lock)
{
    /*
     * The unlocking fastpath is the 0->1 transition from 'locked'
     * into 'unlocked' state:
     */
#ifndef CONFIG_DEBUG_MUTEXES
    /*
     * When debugging is enabled we must not clear the owner before time,
     * the slow path will always be taken, and that clears the owner field
     * after verifying that it was indeed current.
     */
    mutex_clear_owner(lock);
#endif
    __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);				// try the fastpath first; fall back to the slowpath on failure
}

fastpath
As with lock, arch/arm/include/asm/mutex.h selects the header by CPU architecture.
The code is broadly similar across architectures:

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{																// if the new value after the increment is 1, nobody is waiting; return immediately
    if (unlikely(atomic_inc_return_release(count) <= 0)) 		// new value <= 0: there are waiters, call the slowpath to wake one up
        fail_fn(count);			// fail_fn is __mutex_unlock_slowpath
}

slowpath
Like its lock counterpart, __mutex_unlock_slowpath funnels down through a couple of wrappers to __mutex_unlock_common_slowpath:

static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
    unsigned long flags;
    WAKE_Q(wake_q);

    /*
     * As a performance measurement, release the lock before doing other
     * wakeup related duties to follow. This allows other tasks to acquire
     * the lock sooner, while still handling cleanups in past unlock calls.
     * This can be done as we do not enforce strict equivalence between the
     * mutex counter and wait_list.
     *
     *
     * Some architectures leave the lock unlocked in the fastpath failure
     * case, others need to leave it locked. In the later case we have to
     * unlock it here - as the lock counter is currently 0 or negative.
     */
    if (__mutex_slowpath_needs_to_unlock())							// where required, set count back to 1 (unlocked) here
        atomic_set(&lock->count, 1);

    spin_lock_mutex(&lock->wait_lock, flags);						// take the spinlock
    mutex_release(&lock->dep_map, nested, _RET_IP_);
    debug_mutex_unlock(lock);

    if (!list_empty(&lock->wait_list)) {							// take the first waiter off the wait_list
        /* get the first entry from the wait-list: */
        struct mutex_waiter *waiter =
                list_entry(lock->wait_list.next,
                       struct mutex_waiter, list);

        debug_mutex_wake_waiter(lock, waiter);
        wake_q_add(&wake_q, waiter->task);
    }

    spin_unlock_mutex(&lock->wait_lock, flags);						// drop the spinlock
    wake_up_q(&wake_q);												// wake the first waiting task
}
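
The wake_q pattern here is worth noting: wake_q_add() only queues the task while wait_lock is held, and the actual wake-up in wake_up_q() happens after the spinlock is dropped, which keeps the critical section short. A sketch of the pattern in isolation (4.x-era API, where the declaration is spelled WAKE_Q; later kernels renamed it DEFINE_WAKE_Q; some_lock and some_task are illustrative):

#include <linux/sched.h>        // wake_q lived here in 4.x kernels
#include <linux/spinlock.h>

static void wake_one(spinlock_t *some_lock, struct task_struct *some_task)
{
    WAKE_Q(wake_q);                     // on-stack list of tasks to wake

    spin_lock(some_lock);
    wake_q_add(&wake_q, some_task);     // just queue the task; no wakeup yet
    spin_unlock(some_lock);

    wake_up_q(&wake_q);                 // real wakeups happen outside the lock
}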

Personal takeaways

mutex_lock()
There are two acquisition paths, fastpath and slowpath:

  • The fastpath simply decrements the count; if that alone acquires the lock, no further work is needed. (This is the common case.)
  • The slowpath is more involved: the fastpath failing means the mutex was already held before mutex_lock was called.
    In that case the task loops, sleeping and retrying, until it finally obtains the mutex.

mutex_unlock()
Again there are fastpath and slowpath cases:

  • The fastpath increments the count; if the new value is greater than 0 (i.e. equal to 1), nothing more needs to be done. If it is less than or equal to 0, other tasks were still waiting for the mutex at the time of release, and the slowpath must run.
  • The slowpath sets the count back to 1, then takes the first task off the wait queue and wakes it. Since that task is looping trying to acquire the mutex, ownership effectively passes to the first waiter (see the timeline sketch below).
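
To make the counting concrete, here is a hypothetical timeline of lock->count for two tasks A and B contending for the mutex, derived from the code above:

/*
 * count = 1                         initial, unlocked
 * A: fastpath dec      -> 0         A owns the lock
 * B: fastpath dec      -> -1        B fails, enters the lock slowpath
 * B: count < 0, so no xchg          B joins wait_list and sleeps
 * A: fastpath inc      -> 0         new value <= 0, A enters the unlock slowpath
 * A: sets count = 1, wakes B
 * B: xchg(count, -1) returns 1      B wins the lock (count is now -1)
 * B: wait_list now empty            B sets count = 0 and proceeds
 */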