A Tour of the Kernel: lock_kernel

lock_kernel() is the first C function called after start_kernel() begins running.
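To see where that call sits, here is the top of start_kernel() in init/main.c, trimmed and quoted from memory for a 2.6-era kernel (the exact lines vary between versions):

asmlinkage void __init start_kernel(void)
{
    char *command_line;

    /* interrupts are still disabled at this point */
    lock_kernel();                /* the call this post is about */
    page_address_init();
    printk(linux_banner);
    setup_arch(&command_line);
    /* ... */
}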

include/linux/smp_lock.h contains these macro definitions:

 

#ifdef CONFIG_LOCK_KERNEL

#define kernel_locked()        (current->lock_depth >= 0)

 

extern void __lockfunc lock_kernel(void)    __acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void)    __releases(kernel_lock);

#else

#define lock_kernel()                do { } while(0)
#define unlock_kernel()                do { } while(0)
#define release_kernel_lock(task)        do { } while(0)
#define reacquire_kernel_lock(task)        0
#define kernel_locked()                1

#endif /* CONFIG_LOCK_KERNEL */
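A quick note on kernel_locked(): it works because each task's lock_depth starts at -1 and only becomes >= 0 while the task holds the big kernel lock. If memory serves, the field is set up during fork, roughly like this (quoted from memory; the exact location and comment differ between versions):

/* kernel/fork.c, copy_process() */
p->lock_depth = -1;        /* -1 = this task does not hold the BKL */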

 

What lock_kernel() really is therefore depends on whether CONFIG_LOCK_KERNEL is set.

The kernel here uses arch/arm/configs/s3c2410_defconfig as its default configuration, and CONFIG_LOCK_KERNEL is not set under the Code maturity level options menu. So in this build, lock_kernel() is just the empty do { } while(0) statement.
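For a feel of what that means in practice, here is the kind of legacy code the BKL exists for. The driver and helper names below are invented for illustration (this is not code from the kernel tree), and with this board's configuration both calls compile away to nothing:

/* hypothetical legacy driver, 2.6-era ioctl signature - illustrative only */
static int olddrv_ioctl(struct inode *inode, struct file *file,
                        unsigned int cmd, unsigned long arg)
{
    int ret;

    lock_kernel();        /* serialize against every other BKL user          */
    ret = olddrv_handle_cmd(cmd, arg);
    unlock_kernel();      /* a no-op in this build: CONFIG_LOCK_KERNEL unset */

    return ret;
}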


That settles the no-op case, but it is still worth seeing what lock_kernel() looks like when CONFIG_LOCK_KERNEL is defined. The real implementations live in lib/kernel_lock.c, and which one gets built again depends on further configuration macros:

CONFIG_PREEMPT_BKL decides between the 'big kernel semaphore' and the 'big kernel lock';

even when the 'big kernel lock' (spinlock) variant is used, the implementation still differs depending on whether CONFIG_PREEMPT (kernel preemption) is enabled.

 

A uniprocessor board like the s3c2410 never has multiple CPUs competing for the same resource, so strictly speaking the big kernel lock/semaphore is not needed here to arbitrate contention... The code from lib/kernel_lock.c:


#ifdef CONFIG_PREEMPT_BKL
/*
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
* and unlock_kernel().  It is transparently dropped and reacquired
* over schedule().  It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
* Note: code locked by this semaphore will only be serialized against
* other code using the same locking facility. The code guarantees that
* the task remains on the same CPU.
*
* Don't use in new code.
*/
static DECLARE_MUTEX(kernel_sem);

/*
* Getting the big kernel semaphore.
*
* (The __reacquire_kernel_lock()/__release_kernel_lock() helpers, which let
* schedule() drop and re-take the semaphore across a context switch, are
* omitted from this listing.)
*/
void __lockfunc lock_kernel(void)
{
    struct task_struct *task = current;
    int depth = task->lock_depth + 1;

    if (likely(!depth))
        /*
         * No recursion worries - we set up lock_depth _after_
         */
        down(&kernel_sem);

    task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
    struct task_struct *task = current;

    BUG_ON(task->lock_depth < 0);

    if (likely(--task->lock_depth < 0))
        up(&kernel_sem);
}

#else

/*
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
* and unlock_kernel().  It is transparently dropped and reacquired
* over schedule().  It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
* Don't use in new code.
*/
static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);

/*
* (The __reacquire_kernel_lock()/__release_kernel_lock() helpers, which the
* scheduler uses to drop and re-take the BKL across schedule(), are omitted
* from this listing.)
*/

/*
* These are the BKL spinlocks - we try to be polite about preemption.
* If SMP is not on (ie UP preemption), this all goes away because the
* _raw_spin_trylock() will always succeed.
*/
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
    preempt_disable();
    if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
        /*
         * If preemption was disabled even before this
         * was called, there's nothing we can be polite
         * about - just spin.
         */
        if (preempt_count() > 1) {
            _raw_spin_lock(&kernel_flag);
            return;
        }

        /*
         * Otherwise, let's wait for the kernel lock
         * with preemption enabled..
         */
        do {
            preempt_enable();
            while (spin_is_locked(&kernel_flag))
                cpu_relax();
            preempt_disable();
        } while (!_raw_spin_trylock(&kernel_flag));
    }
}

#else

/*
* Non-preemption case - just get the spinlock
*/
static inline void __lock_kernel(void)
{
    _raw_spin_lock(&kernel_flag);
}
#endif

 

/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously, so we only need to
* worry about other CPU's.
*/
void __lockfunc lock_kernel(void)
{
    int depth = current->lock_depth+1;
    if (likely(!depth))
        __lock_kernel();
    current->lock_depth = depth;
}
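/*
* The matching unlock path for the spinlock variant is not shown above.
* Paraphrased from memory (the exact code differs between kernel versions),
* it mirrors the semaphore case:
*/
static inline void __unlock_kernel(void)
{
    spin_unlock(&kernel_flag);
}

void __lockfunc unlock_kernel(void)
{
    BUG_ON(current->lock_depth < 0);
    if (likely(--current->lock_depth < 0))
        __unlock_kernel();
}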

#endif
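The one subtle point shared by both variants is the lock_depth counter: the underlying semaphore or spinlock is taken only by the outermost lock_kernel() call, so the same task can nest BKL acquisitions without deadlocking against itself. A minimal user-space sketch of the same counting idiom, using pthreads (all names are invented; this is an analogy, not kernel code):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;    /* -1 means "not held", like task->lock_depth */

static void my_lock_kernel(void)
{
    int depth = lock_depth + 1;

    if (depth == 0)                     /* outermost call: really take the lock */
        pthread_mutex_lock(&big_lock);
    lock_depth = depth;                 /* nested calls only bump the counter   */
}

static void my_unlock_kernel(void)
{
    assert(lock_depth >= 0);            /* mirrors the kernel's BUG_ON()        */
    if (--lock_depth < 0)               /* last unlock: really release the lock */
        pthread_mutex_unlock(&big_lock);
}

int main(void)
{
    my_lock_kernel();
    my_lock_kernel();                   /* nested acquire, no deadlock */
    printf("depth while nested: %d\n", lock_depth);   /* prints 1  */
    my_unlock_kernel();
    my_unlock_kernel();
    printf("depth after release: %d\n", lock_depth);  /* prints -1 */
    return 0;
}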
