/*
 * Study notes on the Linux v4.0 mutex lock path.
 * https://elixir.bootlin.com/linux/v4.0/source/kernel/locking/mutex.c#L95
 *
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t*lock_count);/*
为此任务专门锁定互斥锁。
如果互斥锁现在不可用,它将一直睡眠,直到它可以得到它。
互斥锁稍后必须由获取它的同一任务释放。
不允许递归锁定。
如果不首先解锁互斥,任务可能无法退出。
另外,在互斥锁仍然锁定的情况下,不能释放互斥锁所在的内核内存。
互斥锁必须先初始化(或静态定义),然后才能被锁定。
memset()-不允许将互斥量设置为0。
(CONFIG_DEBUG_mutexs.CONFIG选项打开将强制执行限制并执行死锁调试的调试检查。)
此函数类似于(但不等同于)down()。
*/void __sched mutex_lock(structmutex*lock){might_sleep();// https://www.cnblogs.com/sky-heaven/p/7150622.html // 警告系统编程人员 mutex_lock 可能睡眠 /*
* 锁定快速路径是从
* “解锁”进入“锁定”状态。不睡眠.
*/__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);// https://elixir.bootlin.com/linux/v4.0/source/include/asm-generic/mutex-xchg.h#L26mutex_set_owner(lock);}/////////https://elixir.bootlin.com/linux/v4.0/source/include/asm-generic/mutex-xchg.h#L26/*
包含/asm generic/mutex xchg.h
基于xchg()的互斥快速路径的通用实现。
注意:基于xchg的实现可能不如基于原子减量/增量的实现更理想。
如果您的体系结构有一个合理的原子dec/inc,那么您可能应该使用asm generic/mutex-dec.h
或者您可以在asm/mutex.h中打开优化版本的代码。
*//*
__mutex_fastpath_lock
-尝试通过将计数从1移动到0来获取锁
@count:类型为atomic的指针
@fail_fn:如果原始值不是1,则调用函数将计数
从1更改为小于1的值,如果原始值不是1,则调用<fail_fn>。
即使“1”断言不是真的,此函数也必须使值小于1。
*/staticinlinevoid__mutex_fastpath_lock(atomic_t*count,void(*fail_fn)(atomic_t*)){if(unlikely(atomic_xchg(count,0)!=1))/*
我们未能获取锁,因此标记它以确保任何等待的任务都被解锁的慢速路径唤醒。
*/if(likely(atomic_xchg(count,-1)!=1))fail_fn(count);}////https://elixir.bootlin.com/linux/v4.0/source/arch/arm/include/asm/atomic.h#L201#defineatomic_xchg(v, new)(xchg(&((v)->counter), new))///https://elixir.bootlin.com/linux/v4.0/source/arch/arm/include/asm/cmpxchg.h#L105#definexchg(ptr,x)\((__typeof__(*(ptr)))__xchg((unsignedlong)(x),(ptr),sizeof(*(ptr))))///https://elixir.bootlin.com/linux/v4.0/source/arch/arm/include/asm/cmpxchg.h#L27staticinlineunsignedlong__xchg(unsignedlong x,volatilevoid*ptr,int size){externvoid__bad_xchg(volatilevoid*,int);unsignedlong ret;#ifdefswp_is_buggyunsignedlong flags;#endif#if__LINUX_ARM_ARCH__ >=6unsignedint tmp;#endifsmp_mb();prefetchw((constvoid*)ptr);switch(size){#if__LINUX_ARM_ARCH__ >=6case1:asmvolatile("@ __xchg1\n""1: ldrexb %0, [%3]\n"" strexb %1, %2, [%3]\n"" teq %1, #0\n"" bne 1b":"=&r"(ret),"=&r"(tmp):"r"(x),"r"(ptr):"memory","cc");break;case4:asmvolatile("@ __xchg4\n""1: ldrex %0, [%3]\n"" strex %1, %2, [%3]\n"" teq %1, #0\n"" bne 1b":"=&r"(ret),"=&r"(tmp):"r"(x),"r"(ptr):"memory","cc");break;#elifdefined(swp_is_buggy)#ifdefCONFIG_SMP#errorSMP is not supported on this platform#endifcase1:raw_local_irq_save(flags);
ret =*(volatileunsignedchar*)ptr;*(volatileunsignedchar*)ptr = x;raw_local_irq_restore(flags);break;case4:raw_local_irq_save(flags);
ret =*(volatileunsignedlong*)ptr;*(volatileunsignedlong*)ptr = x;raw_local_irq_restore(flags);break;#elsecase1:asmvolatile("@ __xchg1\n"" swpb %0, %1, [%2]":"=&r"(ret):"r"(x),"r"(ptr):"memory","cc");break;case4:asmvolatile("@ __xchg4\n"" swp %0, %1, [%2]":"=&r"(ret):"r"(x),"r"(ptr):"memory","cc");break;#endifdefault:__bad_xchg(ptr, size), ret =0;break;}smp_mb();return ret;}////https://elixir.bootlin.com/linux/v4.0/source/kernel/locking/mutex.h#L20staticinlinevoidmutex_set_owner(structmutex*lock){}////////
/*
 * If the fastpath fails to take the lock, __mutex_lock_slowpath() is
 * invoked; that function is where the task goes to sleep.
 * https://elixir.bootlin.com/linux/v4.0/source/kernel/locking/mutex.c#L824
 */
__visible void __sched
__mutex_lock_slowpath(atomic_t*lock_count){structmutex*lock =container_of(lock_count,structmutex, count);__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,0,NULL, _RET_IP_,NULL,0);}///https://elixir.bootlin.com/linux/v4.0/source/kernel/locking/mutex.c#L517/*
* Lock a mutex (possibly interruptible), slowpath:
*/static __always_inline int __sched
__mutex_lock_common(structmutex*lock,long state,unsignedint subclass,structlockdep_map*nest_lock,unsignedlong ip,structww_acquire_ctx*ww_ctx,const bool use_ww_ctx){structtask_struct*task = current;structmutex_waiter waiter;unsignedlong flags;int ret;preempt_disable();mutex_acquire_nest(&lock->dep_map, subclass,0, nest_lock, ip);if(mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)){/* got the lock, yay! */preempt_enable();return0;}spin_lock_mutex(&lock->wait_lock, flags);/*
* Once more, try to acquire the lock. Only try-lock the mutex if
* it is unlocked to reduce unnecessary xchg() operations.
*/if(!mutex_is_locked(lock)&&(atomic_xchg(&lock->count,0)==1))goto skip_wait;debug_mutex_lock_common(lock,&waiter);debug_mutex_add_waiter(lock,&waiter,task_thread_info(task));/* add waiting tasks to the end of the waitqueue (FIFO): */list_add_tail(&waiter.list,&lock->wait_list);
waiter.task = task;lock_contended(&lock->dep_map, ip);for(;;){/*
* Lets try to take the lock again - this is needed even if
* we get here for the first time (shortly after failing to
* acquire the lock), to make sure that we get a wakeup once
* it's unlocked. Later on, if we sleep, this is the
* operation that gives us the lock. We xchg it to -1, so
* that when we release the lock, we properly wake up the
* other waiters. We only attempt the xchg if the count is
* non-negative in order to avoid unnecessary xchg operations:
*/if(atomic_read(&lock->count)>=0&&(atomic_xchg(&lock->count,-1)==1))break;/*
* got a signal? (This code gets eliminated in the
* TASK_UNINTERRUPTIBLE case.)
*/if(unlikely(signal_pending_state(state, task))){
ret =-EINTR;goto err;}if(use_ww_ctx && ww_ctx->acquired >0){
ret =__ww_mutex_lock_check_stamp(lock, ww_ctx);if(ret)goto err;}__set_task_state(task, state);/* didn't get the lock, go to sleep: */spin_unlock_mutex(&lock->wait_lock, flags);schedule_preempt_disabled();spin_lock_mutex(&lock->wait_lock, flags);}__set_task_state(task, TASK_RUNNING);mutex_remove_waiter(lock,&waiter,current_thread_info());/* set it to 0 if there are no waiters left: */if(likely(list_empty(&lock->wait_list)))atomic_set(&lock->count,0);debug_mutex_free_waiter(&waiter);
skip_wait:/* got the lock - cleanup and rejoice! */lock_acquired(&lock->dep_map, ip);mutex_set_owner(lock);if(use_ww_ctx){structww_mutex*ww =container_of(lock,structww_mutex, base);ww_mutex_set_context_slowpath(ww, ww_ctx);}spin_unlock_mutex(&lock->wait_lock, flags);preempt_enable();return0;
err:mutex_remove_waiter(lock,&waiter,task_thread_info(task));spin_unlock_mutex(&lock->wait_lock, flags);debug_mutex_free_waiter(&waiter);mutex_release(&lock->dep_map,1, ip);preempt_enable();return ret;}