OK6410A 开发板 (八) 73 linux-5.11 OK6410A linux 内核同步机制 互斥锁的实现

  • 解决的问题是什么
所有异常原因
  • 限制是什么
加锁函数会引起睡眠,所以不能在中断上下文等不允许睡眠的场景中使用。
实现
/
https://elixir.bootlin.com/linux/v4.0/source/kernel/locking/mutex.c#L95
	
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
/*
 * mutex_lock - acquire the mutex exclusively for this task.
 * @lock: the mutex to be acquired
 *
 * If the mutex is not available right now, sleep until it can be
 * acquired. The mutex must later be released by the same task that
 * acquired it. Recursive locking is not allowed. The task may not
 * exit without first unlocking the mutex, and the kernel memory the
 * mutex resides in must not be freed while it is still locked. The
 * mutex must be initialized (or statically defined) before it can be
 * locked; memset()-ing the mutex to 0 is not allowed. (Turning the
 * CONFIG_DEBUG_MUTEXES option on enforces these restrictions and
 * performs deadlock-debugging checks.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep(); /* debug aid: warn if called from a context that must not sleep */
	/*
	 * The locking fastpath is the 1->0 transition from
	 * "unlocked" into "locked" state; it does not sleep.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

///
///
///https://elixir.bootlin.com/linux/v4.0/source/include/asm-generic/mutex-xchg.h#L26
/*
 * include/asm-generic/mutex-xchg.h
 *
 * Generic implementation of the mutex fastpath, based on xchg().
 *
 * NOTE: an xchg-based implementation may be less optimal than an
 * atomic decrement/increment based one. If your architecture has a
 * reasonable atomic dec/inc then you should probably use
 * asm-generic/mutex-dec.h instead, or you can provide an optimized
 * version in asm/mutex.h.
 */


/*
 * __mutex_fastpath_lock - try to take the lock by moving the count
 * from 1 to a 0 value.
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn>
 * if it wasn't 1 originally. This function MUST leave the value lower
 * than 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_xchg(count, 0) != 1))
		/*
		 * We failed to take the lock: mark it contended (-1) to
		 * make sure any waiting task is woken up by the unlock
		 * slowpath.
		 */
		if (likely(atomic_xchg(count, -1) != 1))
			fail_fn(count);
}
/
///https://elixir.bootlin.com/linux/v4.0/source/arch/arm/include/asm/atomic.h#L201
/* atomic_xchg - atomically store 'new' into v->counter, returning the old value */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
///https://elixir.bootlin.com/linux/v4.0/source/arch/arm/include/asm/cmpxchg.h#L105
/* xchg - type-preserving wrapper: routes the exchange through __xchg()
 * as an unsigned long and casts the old value back to the pointee type. */
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
///https://elixir.bootlin.com/linux/v4.0/source/arch/arm/include/asm/cmpxchg.h#L27
/*
 * __xchg - atomically exchange the value at @ptr with @x.
 * @x:    new value to store
 * @ptr:  location to update
 * @size: width of the access in bytes (1 and 4 supported here)
 *
 * Returns the previous value stored at @ptr. Bracketed by smp_mb()
 * on both sides of the exchange.
 *
 * Three implementations are selected at compile time:
 *  - ARMv6+: LDREX/STREX exclusive-access retry loop
 *  - pre-v6 CPUs with a broken SWP (swp_is_buggy defined): emulate
 *    the exchange with interrupts disabled (uniprocessor only — SMP
 *    triggers a #error below)
 *  - other pre-v6 CPUs: the SWP/SWPB instruction
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();	/* full barrier before the exchange */
	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		/* byte exchange: retry until the exclusive store succeeds */
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		/* word exchange: retry until the exclusive store succeeds */
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		/* emulate the atomic exchange by masking interrupts (UP only) */
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* unsupported size: resolves to a link-time error via the
		 * deliberately-undefined __bad_xchg() */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();	/* full barrier after the exchange */

	return ret;
}
/
///https://elixir.bootlin.com/linux/v4.0/source/kernel/locking/mutex.h#L20
/*
 * mutex_set_owner - record the current task as the lock owner.
 *
 * Empty stub in this configuration; NOTE(review): presumably owner
 * tracking is only compiled in for debug/optimistic-spinning kernels
 * (see kernel/locking/mutex.h) — confirm against the config.
 */
static inline void mutex_set_owner(struct mutex *lock)
{
}


//
//
/
/
/
/



如果加锁失败,就会调用 __mutex_lock_slowpath 函数,任务在该函数中进入睡眠,等待锁可用。
//
///https://elixir.bootlin.com/linux/v4.0/source/kernel/locking/mutex.c#L824

/*
 * Slowpath entry for mutex_lock(): reached when the fastpath xchg saw
 * the mutex already taken. Recovers the struct mutex from its embedded
 * count field, then sleeps in TASK_UNINTERRUPTIBLE state until the
 * lock is acquired.
 */
__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}
///https://elixir.bootlin.com/linux/v4.0/source/kernel/locking/mutex.c#L517
/*
 * __mutex_lock_common - lock a mutex (possibly interruptible), slowpath.
 * @lock:       the mutex to acquire
 * @state:      task state to sleep in (e.g. TASK_UNINTERRUPTIBLE)
 * @subclass:   lockdep subclass for the acquire annotation
 * @nest_lock:  lockdep nesting annotation (may be NULL)
 * @ip:         caller's instruction pointer, used for lock tracing
 * @ww_ctx:     wound/wait acquire context (only used when @use_ww_ctx)
 * @use_ww_ctx: enables the wound/wait mutex handling paths
 *
 * Returns 0 once the lock is held, or -EINTR if a pending signal
 * interrupted the wait (only possible for interruptible @state), or a
 * ww-mutex stamp-check error when @use_ww_ctx.
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	/* Optimistic spinning: may acquire the lock without ever sleeping. */
	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	/* failed while waiting: unlink ourselves and report the error */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值