A detailed look at the Linux kernel mutex implementation (on ARM processors)

1. The mutex structure (struct mutex)

[Figure: definition of struct mutex; a sketch follows the field list below]

  • count: the lock counter. 1 means unlocked (free to take), 0 means locked (already acquired); the slowpath additionally writes -1 to mark "locked with possible waiters", so that unlock knows it must wake someone.
  • owner: the task (thread/process) that currently holds the mutex. It is used by the optimistic-spinning path (CONFIG_MUTEX_SPIN_ON_OWNER), where a contending task keeps spinning as long as the owner is still running on another CPU, and by the mutex debug code. Priority inheritance, where a high-priority waiter temporarily boosts a low-priority owner to avoid priority inversion, is provided by rt_mutex rather than by this plain mutex.
  • wait_list: the list of tasks waiting to acquire the mutex; when the mutex is released, the task at the head of wait_list is woken up.
  • wait_lock: wait_list is shared between tasks (processes/threads), so it is protected by a spinlock. A spinlock busy-waits in a loop and never blocks the running task, which makes it suitable for very short critical sections.
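
The original figure showed the struct mutex definition. For reference, the pre-4.10 layout from include/linux/mutex.h looks roughly like this (a sketch from memory, not an exact copy of any one kernel version):

struct mutex {
    /* 1: unlocked, 0: locked, negative: locked, possible waiters */
    atomic_t count;
    spinlock_t wait_lock;
    struct list_head wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
    struct task_struct *owner;
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    struct optimistic_spin_queue osq; /* MCS-style queue for optimistic spinners */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
    void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    struct lockdep_map dep_map;
#endif
};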

2. Mutex initialization (__mutex_init)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
    atomic_set(&lock->count, 1); // set the counter to 1: the mutex starts out unlocked
    spin_lock_init(&lock->wait_lock); // initialize the wait_lock spinlock (owner and next both 0; see the spinlock write-up for details)
    INIT_LIST_HEAD(&lock->wait_list); // initialize wait_list (the list of tasks waiting for the mutex)
    mutex_clear_owner(lock); // clear the owner pointer
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    osq_lock_init(&lock->osq);
#endif

    debug_mutex_init(lock, name, key);
}
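
In driver code a mutex is normally not initialized by calling __mutex_init() directly: a statically allocated lock uses DEFINE_MUTEX(), and one embedded in another structure uses the mutex_init() macro, which passes the variable name and a per-callsite lock_class_key to __mutex_init(). A minimal example (my_device and io_lock are made-up names):

#include <linux/mutex.h>

static DEFINE_MUTEX(my_static_lock);   /* statically initialized: count = 1, empty wait_list */

struct my_device {                      /* hypothetical driver structure */
    struct mutex io_lock;
};

static void my_device_setup(struct my_device *dev)
{
    mutex_init(&dev->io_lock);          /* wraps __mutex_init() with a per-callsite lock_class_key */
}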

3. Acquiring the mutex (mutex_lock)

void __sched mutex_lock(struct mutex *lock)
{
    might_sleep();
    /*
     * The locking fastpath is the 1->0 transition from
     * 'unlocked' into 'locked' state.
     */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); // try to take the mutex; the slowpath can recover the mutex address from the address of lock->count
    mutex_set_owner(lock); // record the current task as owner; on ARM the current thread_info is found by clearing the low 13 bits of sp
}
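
On ARM the fastpath helpers come from the generic xchg-based implementation. A simplified sketch of what __mutex_fastpath_lock does (approximating asm-generic/mutex-xchg.h of this kernel generation, not an exact copy): it attempts the 1 -> 0 transition with a single atomic exchange and only falls back to the slowpath on contention.

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    if (unlikely(atomic_xchg_acquire(count, 0) != 1))
        /*
         * We did not get the lock. Mark it contended (-1) so that the
         * unlock path knows waiters may exist, then take the slowpath.
         */
        if (likely(atomic_xchg(count, -1) != 1))
            fail_fn(count);
}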
__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count); // the struct address is the member address minus the member's offset: addr(lock) = addr(count) - offsetof(struct mutex, count)

    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
                NULL, _RET_IP_, NULL, 0);
}
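
container_of() is the standard kernel macro that performs exactly this arithmetic; its classic definition (include/linux/kernel.h) is shown here for reference:

#define container_of(ptr, type, member) ({                      \
    const typeof(((type *)0)->member) *__mptr = (ptr);          \
    (type *)((char *)__mptr - offsetof(type, member)); })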
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
            struct lockdep_map *nest_lock, unsigned long ip,
            struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
    struct task_struct *task = current; // thread_info is found by clearing the low 13 bits of sp, and the task pointer is then read from thread_info
    struct mutex_waiter waiter;
    unsigned long flags;
    int ret;

    preempt_disable();
    mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

    if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) { // optimistic spinning: briefly spin while the current owner is still running on another CPU, hoping it releases the mutex soon
        /* got the lock, yay! */
        preempt_enable();
        return 0;
    }

    spin_lock_mutex(&lock->wait_lock, flags); // take lock->wait_lock (spins until it succeeds): the code below reads and writes the mutex's fields and needs exclusive access

    /*
     * Once more, try to acquire the lock. Only try-lock the mutex if
     * it is unlocked to reduce unnecessary xchg() operations.
     */
    if (!mutex_is_locked(lock) &&
        (atomic_xchg_acquire(&lock->count, 0) == 1))  // with wait_lock held, re-check the counter: the xchg reads the old value and writes 0 in one atomic step. Two cases: (1) count was 0, the mutex is held by someone else and writing 0 again changes nothing; (2) count was 1, the mutex was free and writing 0 marks it as taken by us, so other tasks now read 0 and cannot acquire it
        goto skip_wait; // got the mutex, skip the wait path entirely

    debug_mutex_lock_common(lock, &waiter);
    debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

    /* add waiting tasks to the end of the waitqueue (FIFO): */
    list_add_tail(&waiter.list, &lock->wait_list); // append waiter.list to the tail of the mutex wait list (FIFO)
    waiter.task = task; // point waiter.task at the current task

    lock_contended(&lock->dep_map, ip);

    for (;;) {
        /*
         * Lets try to take the lock again - this is needed even if
         * we get here for the first time (shortly after failing to
         * acquire the lock), to make sure that we get a wakeup once
         * it's unlocked. Later on, if we sleep, this is the
         * operation that gives us the lock. We xchg it to -1, so
         * that when we release the lock, we properly wake up the
         * other waiters. We only attempt the xchg if the count is
         * non-negative in order to avoid unnecessary xchg operations:
         */
        if (atomic_read(&lock->count) >= 0 &&
            (atomic_xchg_acquire(&lock->count, -1) == 1)) // on each pass re-read count; once it is 1 (unlocked) the xchg takes the mutex and simultaneously writes -1, so the owner's unlock will go through the slowpath and wake the remaining waiters
            break; // got the mutex, leave the loop

        /*
         * got a signal? (This code gets eliminated in the
         * TASK_UNINTERRUPTIBLE case.)
         */
        if (unlikely(signal_pending_state(state, task))) {
            ret = -EINTR;
            goto err;
        }

        if (use_ww_ctx && ww_ctx->acquired > 0) {
            ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
            if (ret)
                goto err;
        }

        __set_task_state(task, state); // set the task state to the requested sleep state (TASK_UNINTERRUPTIBLE here)

        /* didn't get the lock, go to sleep: */
        spin_unlock_mutex(&lock->wait_lock, flags); // release the spinlock: we are done touching the mutex's fields, other tasks may now access them
        schedule_preempt_disabled(); // schedule away and let other tasks run; when the owner releases the mutex it wakes us and execution resumes on the next line
        spin_lock_mutex(&lock->wait_lock, flags); // woken up, re-take the spinlock
    }
    __set_task_state(task, TASK_RUNNING); // back to TASK_RUNNING

    mutex_remove_waiter(lock, &waiter, current_thread_info()); // the break above kept wait_lock held, so removing ourselves from the wait list here cannot race with other tasks
    /* set it to 0 if there are no waiters left: */
    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);
    debug_mutex_free_waiter(&waiter);

skip_wait:
    /* got the lock - cleanup and rejoice! */
    lock_acquired(&lock->dep_map, ip);
    mutex_set_owner(lock); // record the current task as the mutex owner

    if (use_ww_ctx) {
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        ww_mutex_set_context_slowpath(ww, ww_ctx);
    }

    spin_unlock_mutex(&lock->wait_lock, flags); // release the spinlock
    preempt_enable();
    return 0;

err:
    mutex_remove_waiter(lock, &waiter, task_thread_info(task));
    spin_unlock_mutex(&lock->wait_lock, flags);
    debug_mutex_free_waiter(&waiter);
    mutex_release(&lock->dep_map, 1, ip);
    preempt_enable();
    return ret;
}
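
The "clear the low 13 bits of sp" trick in the comments above relies on ARM's 8 KB kernel stacks: struct thread_info sits at the bottom of the stack, so masking the stack pointer with ~(THREAD_SIZE - 1) recovers it, and current is the task pointer stored inside. A sketch of the pre-THREAD_INFO_IN_TASK ARM code (simplified from arch/arm/include/asm/thread_info.h; the real current goes through get_current()):

/* THREAD_SIZE is 8192 (2^13) on ARM: kernel stack and thread_info share one block */
static inline struct thread_info *current_thread_info(void)
{
    register unsigned long sp asm ("sp");
    return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

#define current (current_thread_info()->task)   /* simplified view of 'current' */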

4. Releasing the mutex (mutex_unlock)

void __sched mutex_unlock(struct mutex *lock)
{
    /*
     * The unlocking fastpath is the 0->1 transition from 'locked'
     * into 'unlocked' state:
     */
#ifndef CONFIG_DEBUG_MUTEXES
    /*
     * When debugging is enabled we must not clear the owner before time,
     * the slow path will always be taken, and that clears the owner field
     * after verifying that it was indeed current.
     */
    mutex_clear_owner(lock); // clear the mutex owner. No spinlock needed: only the task holding the mutex writes owner, and a task that never acquired the mutex neither touches owner nor calls unlock
#endif
    __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
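
The unlock fastpath is the mirror image. A simplified sketch of the xchg-based __mutex_fastpath_unlock (again approximating asm-generic/mutex-xchg.h, not an exact copy): it writes 1 (unlocked) back into count; if the previous value was not 0, the mutex was contended (negative), so the slowpath runs to wake a waiter.

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    /* old value 0: locked with no waiters, nothing more to do;
     * old value negative: there may be sleeping waiters, take the slowpath */
    if (unlikely(atomic_xchg(count, 1) != 0))
        fail_fn(count);
}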
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);

    __mutex_unlock_common_slowpath(lock, 1);
}
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
    unsigned long flags;

    /*
     * As a performance measurement, release the lock before doing other
     * wakeup related duties to follow. This allows other tasks to acquire
     * the lock sooner, while still handling cleanups in past unlock calls.
     * This can be done as we do not enforce strict equivalence between the
     * mutex counter and wait_list.
     *
     *
     * Some architectures leave the lock unlocked in the fastpath failure
     * case, others need to leave it locked. In the later case we have to
     * unlock it here - as the lock counter is currently 0 or negative.
     */
    if (__mutex_slowpath_needs_to_unlock())
        atomic_set(&lock->count, 1); // release the mutex by setting count back to 1. The lock path touches count only with single atomic operations, which the hardware serializes, so no spinlock is needed here; note that between this statement and the spin_lock_mutex() below another task may already grab the mutex

    spin_lock_mutex(&lock->wait_lock, flags); // the code below reads and modifies the mutex's fields, take wait_lock for exclusive access
    mutex_release(&lock->dep_map, nested, _RET_IP_);
    debug_mutex_unlock(lock);

    if (!list_empty(&lock->wait_list)) { // the wait list is not empty: some task is sleeping, waiting for this mutex
        /* get the first entry from the wait-list: */
        struct mutex_waiter *waiter =
                list_entry(lock->wait_list.next,
                       struct mutex_waiter, list); // take the first entry of the wait list

        debug_mutex_wake_waiter(lock, waiter);

        wake_up_process(waiter->task); // wake the task at the head of the wait list (FIFO): it is put back on the runqueue, but it is NOT removed from the wait list here. As noted above, the mutex may already have been grabbed by another task right after it was released, and being woken does not mean running immediately (the task can still be preempted before it runs). The waiter's for(;;) loop never re-adds itself to the wait list, so this is only a wakeup; the task removes itself from the wait list once it finally acquires the mutex
    }

    spin_unlock_mutex(&lock->wait_lock, flags); // release the spinlock. The spinlock only protects the mutex's own fields and never sleeps (a contending CPU just busy-waits), whereas the mutex protects the code between mutex_lock and mutex_unlock and does sleep: a task that cannot get the mutex is suspended and the CPU is handed to other tasks
}
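
Putting the two paths together, a hypothetical two-task timeline shows how count moves between 1, 0 and -1 (values as they would evolve with the xchg-based ARM fastpath):

/*
 * Task A: mutex_lock()    fastpath xchg(count, 0)  -> old 1, count = 0, A owns the mutex
 * Task B: mutex_lock()    fastpath xchg(count, 0)  -> old 0, contended
 *                         fastpath xchg(count, -1) -> old 0, still contended, enter slowpath
 *                         B is appended to wait_list and sleeps in TASK_UNINTERRUPTIBLE
 * Task A: mutex_unlock()  fastpath xchg(count, 1)  -> old -1, waiters possible, enter slowpath
 *                         slowpath makes sure count = 1 and wakes B (head of wait_list)
 * Task B: wakes up, loop: xchg(count, -1) -> old 1, B now owns the mutex
 *                         B removes itself from wait_list; the list is empty, so count = 0
 */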
