struct thread_info->flags值:
* thread information flags:
* TIF_SIGPENDING - signal pending //有信号没有处理
* TIF_NEED_RESCHED- rescheduling necessary //当一个睡眠的进程被唤醒,当其要加入运行队列时,如果其动态优先级比当前正在运行进程current的优先级高,那么会在current线程上设置TIF_NEED_RESCHED,以告诉内核有新的高优先级的线程在等待内核调度。通常,一个睡眠的进程会在中断处理函数中被唤醒。
struct thread_info->preempt_count 值(线程的抢占计数器):
= 0:线程可以被抢占
> 0: 线程已经被禁止抢占,禁止抢占的来源按 preempt_count 中的位域分为:
1)preempt_count的低8位表示:当前线程显式禁止抢占的嵌套次数,比如调用了_raw_spin_trylock()时(其内部通过preempt_disable()),该值会+1
2)preempt_count的中间8位表示:当前线程禁止soft irq和tasklet下半段(或者说推迟函数被禁止)的次数,见__local_bh_disable(SOFTIRQ_OFFSET)和local_bh_disable()
3)preempt_count的bit[16-25]表示:当前线程被硬中断打断的次数,见irq_enter()
4)preempt_count的bit 26(NMI_MASK)是一个标志位,表示当前线程正处于非屏蔽中断(NMI)上下文中
5)preempt_count的bit 30(PREEMPT_ACTIVE)是一个标志位,表示当前线程上正在进行内核抢占,也就是说线程在被内核其他线程抢占的过程中不能再被抢占(抢占不嵌套)
另外,通过把抢占计数器设置为正而显式地禁止内核抢占,由preempt_disable完成。
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count can in theory reach the same as NR_IRQS.
* In reality, the number of nested IRQS is limited to the stack
* size as well. For archs with over 1000 IRQS it is not practical
* to expect that they will all nest. We give a max of 10 bits for
* hardirq nesting. An arch may choose to give less than 10 bits.
* m68k expects it to be 8.
*
* - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
* - bit 26 is the NMI_MASK
* - bit 28 is the PREEMPT_ACTIVE flag //注:本文后面的代码中 PREEMPT_ACTIVE 实际定义为 0x40000000,即 bit 30(该位的具体位置随架构/内核版本而不同)
*
* PREEMPT_MASK: 0x000000FF
* SOFTIRQ_MASK: 0x0000FF00
* HARDIRQ_MASK: 0x03FF0000
* NMI_MASK: 0x04000000
*/
/* Bit-field widths used to pack the counters into preempt_count
 * (see the mask layout in the comment above). */
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define NMI_BITS 1
/* Upper bound on hardirq-nesting bits; an arch may pre-define a
 * smaller HARDIRQ_BITS (e.g. m68k uses 8), hence the #ifndef guard. */
#define MAX_HARDIRQ_BITS 10
#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS MAX_HARDIRQ_BITS
#endif
抢占计数的增减本质上都是通过以下宏来实现的,有些函数只是它们的封装:
/* Primitive add/sub on the current thread's preemption counter; the
 * higher-level helpers (preempt_disable()/preempt_enable(), etc. — see
 * surrounding text) are wrappers around these two macros.
 * do { ... } while (0) makes each expansion a single statement. */
# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
注意锁相关的封装宏如下:
#define preempt_disable()
->#define inc_preempt_count() add_preempt_count(1)
#define preempt_enable()
->... sub_preempt_count(1)
/* If the current task has been flagged for rescheduling
 * (TIF_NEED_RESCHED), enter the kernel-preemption schedule path. */
#define preempt_check_resched() \
do { \
if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
} while (0)
宏CONFIG_PREEMPT:表示是否开启内核态抢占(用户态抢占不依赖该配置、始终存在;该配置决定内核态执行路径能否被更高优先级线程抢占)。
比如在IRQ中断处理过程中,当irq_handler处理完后准备返回内核空间或者用户空间时,是否能够抢占当前执行的线程,见entry-armv.S:
__irq_svc: //SVC-mode IRQ exception entry point
svc_entry
irq_handler //generic hardware-interrupt dispatch (runs the ISR)
...
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count //load the interrupted thread's preemption counter
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0 //is preemption currently disabled for this thread?
movne r0, #0 @ force flags to 0 //counter non-zero: clear the flags copy so the test below cannot trigger preemption
tst r0, #_TIF_NEED_RESCHED //counter was 0 (preemptible): does a higher-priority task want the CPU (_TIF_NEED_RESCHED set)?
blne svc_preempt //yes — take the kernel-preemption path
#endif
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
.ltorg
#ifdef CONFIG_PREEMPT
svc_preempt:
mov r8, lr @ save the return address; lr is clobbered by the bl below
1: bl preempt_schedule_irq @ irq en/disable is done inside //perform the preemptive reschedule
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED @ is another reschedule already pending?
moveq pc, r8 @ no further resched needed — return to __irq_svc
b 1b @ yes — schedule again
#endif
/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage void __sched preempt_schedule_irq(void) /* called on the IRQ-return path, with irqs disabled, to preempt the interrupted task */
{
struct thread_info *ti = current_thread_info();
/* Catch callers which need to be fixed */
BUG_ON(ti->preempt_count || !irqs_disabled());
do {
add_preempt_count(PREEMPT_ACTIVE); /* set bit 30: "kernel preemption in progress" — prevents this preemption from nesting */
local_irq_enable(); /* re-enable irqs; the task being scheduled in needs (at least timer) interrupts */
__schedule(); /* switch to the higher-priority runnable task */
local_irq_disable(); /* restore the caller's invariant: return with irqs disabled */
sub_preempt_count(PREEMPT_ACTIVE); /* preemption of this task is complete — clear the flag */
/*
 * Check again in case we missed a preemption opportunity
 * between schedule and now.
 */
barrier();
} while (need_resched()); /* loop if yet another higher-priority task became runnable meanwhile */
}
/*
 * We use bit 30 of the preempt_count to indicate that kernel
 * preemption is occurring. See <asm/hardirq.h>.
 */
#define PREEMPT_ACTIVE 0x40000000 /* 1 << 30: set while a kernel preemption of the current task is in progress */
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
asmlinkage void __sched notrace preempt_schedule(void) /* preemption entry point used when preemption is being re-enabled */
{
struct thread_info *ti = current_thread_info();
/*
 * If there is a non-zero preempt_count or interrupts are disabled,
 * we do not want to preempt the current task. Just return..
 */
if (likely(ti->preempt_count || irqs_disabled())) /* bail out when preemption is disabled or irqs are off — preemption never nests */
return;
do {
add_preempt_count_notrace(PREEMPT_ACTIVE); /* mark "kernel preemption in progress" (bit 30) */
__schedule(); /* hand the CPU to the higher-priority task */
sub_preempt_count_notrace(PREEMPT_ACTIVE); /* this preemption is finished — clear the flag */
/*
 * Check again in case we missed a preemption opportunity
 * between schedule and now.
 */
barrier();
} while (need_resched()); /* repeat while another reschedule is still pending */
}
用户抢占发生的时机:
1)从系统调用返回用户空间的时候
2)中断处理程序返回用户空间的时候。
内核抢占发生的时机:
1)从中断(异常)返回时,preempt_count为0且need_resched置位(见从中断返回);
2)在异常处理程序中(特别是系统调用)调用preempt_enable()来允许内核抢占发生;
【Linux Kernel】抢占
最新推荐文章于 2024-07-19 14:40:46 发布