An Analysis of the Linux Kernel Mutex Implementation

Spinlocks work well, but they have a drawback: the CPU busy-waits until the lock is acquired. Sometimes that is not what we want. Instead, while the lock is held we can switch the process out, and only bring it back to take the lock once the lock is released. That behavior fits real workloads much better, and it is exactly what the mutex provides. The core API is:

mutex_init(struct mutex *lock)        //initialize a mutex
void mutex_lock(struct mutex *lock)   //acquire a mutex
void mutex_unlock(struct mutex *lock) //release a mutex
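
Before diving into the implementation, here is a minimal usage sketch of this API in kernel-module code. The struct and function names (my_dev and friends) are made up for illustration; only the mutex calls are the real kernel API:

#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	int counter;
};

static void my_dev_setup(struct my_dev *dev)
{
	mutex_init(&dev->lock);		/* must run before first use */
	dev->counter = 0;
}

static void my_dev_inc(struct my_dev *dev)
{
	mutex_lock(&dev->lock);		/* may sleep: process context only */
	dev->counter++;			/* critical section */
	mutex_unlock(&dev->lock);	/* only the owner may unlock */
}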

II. Mutexes

1. The lock structure

struct mutex {
	atomic_long_t		owner;
	spinlock_t		wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

As you can see, the structure has the following members: (1) owner, a 64-bit integer updated with atomic operations; it holds the task_struct pointer of the owning task (0 means unlocked), and because task_struct is word-aligned, the low bits of the pointer are reused as flag bits; (2) wait_lock, a spinlock whose job is to protect the wait list that follows; (3) wait_list, the queue of tasks waiting for the lock.
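
For reference, the flag bits packed into the low bits of owner are defined in kernel/locking/mutex.c along these lines (quoted from memory of the same kernel sources this article follows, so treat the exact spelling as indicative):

#define MUTEX_FLAG_WAITERS	0x01	/* wait_list is non-empty */
#define MUTEX_FLAG_HANDOFF	0x02	/* unlock must hand the lock to the top waiter */
#define MUTEX_FLAG_PICKUP	0x04	/* a handed-off lock is waiting to be picked up */
#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);	/* strip flag bits */
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;	/* keep only the flag bits */
}

We will meet __owner_flags and the HANDOFF flag again in the unlock slow path.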

2. Lock initialization

We initialize a mutex with the mutex_init macro:

#define mutex_init(mutex)						\
do {									\
	static struct lock_class_key __key;				\
									\
	__mutex_init((mutex), #mutex, &__key);				\
} while (0)

void __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);//initialize the owner (0 = unlocked)
	spin_lock_init(&lock->wait_lock);//initialize the spinlock protecting the wait list
	INIT_LIST_HEAD(&lock->wait_list);//initialize the wait list
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}

Mutex initialization is straightforward: it just sets up the owner, wait_lock, and wait_list members.
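
As an aside, for a statically allocated lock the kernel also offers the DEFINE_MUTEX macro, which produces the same initialized state at compile time (my_lock is a made-up name):

#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);	/* declares and initializes in one step */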

3. Locking

We take the lock with the mutex_lock function:

void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);

static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;//pointer to the current task, as an integer
	unsigned long zero = 0UL;

	//if owner is 0 the lock is free: atomically write current into owner
	//and return true to report a successful acquisition
	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;
	//owner was non-zero: the lock is held by someone, return false
	return false;
}

__mutex_trylock_fast attempts the acquisition with a single atomic operation: it compares owner against 0 and, if they match, writes curr into owner and returns true; otherwise it returns false, leaving owner untouched.
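
The same fast path can be modelled in user space with C11 atomics. This is only a toy sketch of the technique, not the kernel code, and all names are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* toy model: owner == 0 means "unlocked" */
typedef struct { _Atomic uintptr_t owner; } toy_mutex;

static bool toy_trylock_fast(toy_mutex *m, uintptr_t self)
{
	uintptr_t zero = 0;
	/* acquire ordering on success, mirroring atomic_long_try_cmpxchg_acquire() */
	return atomic_compare_exchange_strong_explicit(
		&m->owner, &zero, self,
		memory_order_acquire, memory_order_relaxed);
}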
A quick note on what current is:

#define current get_current()

static __always_inline struct task_struct *get_current(void)
{
	unsigned long sp_el0;

	asm ("mrs %0, sp_el0" : "=r" (sp_el0));

	return (struct task_struct *)sp_el0;
}

So current reads the CPU's sp_el0 register. Architecturally this register holds the EL0 (user-mode) stack pointer, but while executing in the kernel, arm64 repurposes it to store the address of the current process's task_struct, giving the CPU a fast way to find the current task. In other words, on arm64 every CPU keeps the task_struct pointer of whatever task it is running in sp_el0, and we can simply read current as "the current process".
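
In practice current is used like any other task_struct pointer; a trivial example (the function name is made up, but comm and pid are real task_struct fields):

#include <linux/sched.h>
#include <linux/printk.h>

static void who_am_i(void)
{
	/* current expands to get_current() */
	pr_info("running in task %s (pid %d)\n", current->comm, current->pid);
}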

Next, let's look at __mutex_lock_slowpath, the slow path taken when the fast trylock fails:

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();//no-op unless CONFIG_DEBUG_ATOMIC_SLEEP is enabled
	//find the enclosing ww_mutex; lock is its base member
	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {//on this path use_ww_ctx is false and ww_ctx is NULL, so this block is skipped
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();//disable preemption
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);//no-op without lockdep

	if (__mutex_trylock(lock) ||  //try once more, same idea as the fast path
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();//re-enable preemption
		return 0;
	}

	spin_lock(&lock->wait_lock);//take the spinlock protecting the wait list; we are about to modify it
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {	//one more attempt
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);//no-op without lockdep

	if (!use_ww_ctx) {//use_ww_ctx is false, so this branch runs
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		//append waiter to the lock's wait list
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {//this branch is not taken here
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;//the waiter is queued but has no task yet; point it at current

	set_current_state(state);//set the task state (TASK_UNINTERRUPTIBLE here)
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))//try to lock; on success leave the loop
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		//bail out if a signal is pending for this task state
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);//drop the wait-list spinlock before sleeping
		schedule_preempt_disabled();//re-enable preemption, call schedule(), then disable preemption again on wakeup

		/*
		 * ww_mutex needs to always recheck its position since its waiter
		 * list is not FIFO ordered.
		 */
		//with ww_ctx == NULL this reduces to !first: the first waiter sets the HANDOFF flag
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);//we woke up as TASK_RUNNING, so set the state again before the next check
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		//try to lock once more; on success leave the loop
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;
		//otherwise retake the wait-list spinlock and go around again
		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);//retake the spinlock; we left the loop via break
acquired:
	//at this point we own the mutex
	__set_current_state(TASK_RUNNING);//set the task state back to running

	if (use_ww_ctx && ww_ctx) {//ww_ctx is NULL, so this block is skipped
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}
	//we hold the mutex: remove ourselves from the lock's wait list
	mutex_remove_waiter(lock, &waiter, current);
	//if the wait list is now empty, clear the flag bits in owner
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)//ww_ctx is NULL
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);//done touching the wait list; drop the spinlock
	preempt_enable();//re-enable preemption
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
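
Stripped of the ww_mutex and debugging details, the slow path is the classic sleep-lock pattern: try, queue yourself, sleep, retry when woken. The same pattern can be sketched in user space on top of the Linux futex syscall. This is a toy analogue for intuition (essentially the well-known three-state futex mutex), not the kernel implementation, and all names are invented:

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

/* 0 = unlocked, 1 = locked, 2 = locked with (possible) waiters */
typedef struct { _Atomic int state; } toy_sleep_mutex;

static void toy_futex(_Atomic int *addr, int op, int val)
{
	syscall(SYS_futex, addr, op, val, NULL, NULL, 0);
}

static void toy_lock(toy_sleep_mutex *m)
{
	int expected = 0;
	/* fast path: uncontended CAS 0 -> 1, like __mutex_trylock_fast() */
	if (atomic_compare_exchange_strong(&m->state, &expected, 1))
		return;
	/* slow path: mark the lock contended, then sleep until woken and retry */
	while (atomic_exchange(&m->state, 2) != 0)
		toy_futex(&m->state, FUTEX_WAIT, 2);
}

static void toy_unlock(toy_sleep_mutex *m)
{
	/* if someone may be sleeping, wake exactly one waiter */
	if (atomic_exchange(&m->state, 0) == 2)
		toy_futex(&m->state, FUTEX_WAKE, 1);
}

The kernel version is more elaborate mainly because of optimistic spinning, the FIFO wait list protected by wait_lock, and the HANDOFF anti-starvation mechanism.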

4. Unlocking

We release the lock with the mutex_unlock function:

void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

Mutex unlock is not as simple as the unlocks we saw before: the spinlock and the read-write spinlock could release with a single atomic update of val, while here there is a fast path and a slow path. First the fast path, __mutex_unlock_fast:

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}

atomic_long_cmpxchg_release should be familiar by now: it compares lock->owner with curr and, if they are equal, writes 0 into lock->owner, with release semantics so that stores from the critical section cannot be reordered past the unlock.
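
In the toy C11 model from the locking section, the matching unlock fast path would look like this (again an invented sketch, not kernel code):

/* companion to toy_trylock_fast(): store 0 only if we are the owner */
static bool toy_unlock_fast(toy_mutex *m, uintptr_t self)
{
	uintptr_t expected = self;
	/* release ordering, mirroring atomic_long_cmpxchg_release() */
	return atomic_compare_exchange_strong_explicit(
		&m->owner, &expected, 0,
		memory_order_release, memory_order_relaxed);
}

With that model in mind, let's look at the slow path, __mutex_unlock_slowpath: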

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);//define a wake queue on the stack
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);//atomically read lock->owner
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif
		//MUTEX_FLAG_HANDOFF means the unlock must pass the lock to the next waiter
		//if the bit is set, skip clearing owner and go wake that waiter
		if (owner & MUTEX_FLAG_HANDOFF)
			break;
		//clear the task pointer in owner, keeping only the MUTEX_FLAGS bits
		//that record the state of the wait queue
		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));

		if (old == owner) {
			//MUTEX_FLAG_WAITERS means the wait list is not empty
			//in that case break so we can go wake a waiter
			if (owner & MUTEX_FLAG_WAITERS)
				break;
			//the wait list is empty; nothing more to do
			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);//take the spinlock protecting the wait list
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {//if the wait list is not empty
		/* get the first entry from the wait-list: */
		//take the first waiter off the head of the list
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;//the task that should run next

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);//queue that task for wakeup
	}
	//if handoff was requested, write the next task straight into lock->owner
	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);//done with the wait list; drop the spinlock

	wake_up_q(&wake_q);//wake the queued task(s)
}
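
To recap the whole flow: mutex_lock first attempts a single acquire-cmpxchg on owner; failing that, it may spin optimistically on the current owner, and finally queues itself on wait_list and sleeps. mutex_unlock clears owner with a release-cmpxchg and, if MUTEX_FLAG_WAITERS is set, wakes the first waiter, handing the lock over directly when MUTEX_FLAG_HANDOFF was requested.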