linux操作系统 抢占式,Linux操作系统内核抢占补丁的基本原理(2)

Linux操作系统内核抢占补丁的基本原理(2)

2008-02-23 07:26:45来源:互联网 阅读 ()

c4468b3f4df77e96b0a416fa2a870fba.png

int this_cpu, c;

#ifdef CONFIG_PREEMPT

ctx_sw_off();

#endif

if (!current->active_mm) BUG();

need_resched_back:

prev = current;

this_cpu = prev->processor;

if (in_interrupt())

goto scheduling_in_interrupt;

release_kernel_lock(prev, this_cpu);

/* Do "administrative" work here while we don't hold any locks */

if (softirq_active(this_cpu) & softirq_mask(this_cpu))

goto handle_softirq;

handle_softirq_back:

/*

* 'sched_data' is protected by the fact that we can run

* only one process per CPU.

*/

sched_data = & aligned_data[this_cpu].schedule_data;

spin_lock_irq(&runqueue_lock);

/* move an exhausted RR process to be last.. */

if (prev->policy == SCHED_RR)

goto move_rr_last;

move_rr_back:

switch (prev->state) {

case TASK_INTERRUPTIBLE:

if (signal_pending(prev)) {

prev->state = TASK_RUNNING;

break;

}

default:

#ifdef CONFIG_PREEMPT

if (prev->state & TASK_PREEMPTED)

break; /* preemptive reschedule: keep the task on the run queue */

#endif

del_from_runqueue(prev);

#ifdef CONFIG_PREEMPT

case TASK_PREEMPTED:

#endif

case TASK_RUNNING:

}

prev->need_resched = 0;

/*

* this is the scheduler proper:

*/

repeat_schedule:

/*

* Default process to select..

*/

next = idle_task(this_cpu);

c = -1000;

if (task_on_runqueue(prev))

goto still_running;

still_running_back:

list_for_each(tmp, &runqueue_head) {

p = list_entry(tmp, struct task_struct, run_list);

if (can_schedule(p, this_cpu)) {

int weight = goodness(p, this_cpu, prev->active_mm);

if (weight > c)

c = weight, next = p;

}

}

/* Do we need to re-calculate counters? */

if (!c)

goto recalculate;

/*

* from this point on nothing can prevent us from

* switching to the next task, save this fact in

* sched_data.

*/

sched_data->curr = next;

#ifdef CONFIG_SMP

next->has_cpu = 1;

next->processor = this_cpu;

#endif

spin_unlock_irq(&runqueue_lock);

if (prev == next)

goto same_process;

#ifdef CONFIG_SMP

/*

* maintain the per-process 'last schedule' value.

* (this has to be recalculated even if we reschedule to

* the same process) Currently this is only used on SMP,

* and it's approximate, so we do not have to maintain

* it while holding the runqueue spinlock.

*/

sched_data->last_schedule = get_cycles();

/*

* We drop the scheduler lock early (it's a global spinlock),

* thus we have to lock the previous process from getting

* rescheduled during switch_to().

*/

#endif /* CONFIG_SMP */

kstat.context_swtch++;

/*

* there are 3 processes which are affected by a context switch:

*

* prev == .... ==> (last => next)

*

* It's the 'much more previous' 'prev' that is on next's stack,

* but prev is set to (the just run) 'last' process by switch_to().

* This might sound slightly confusing but makes tons of sense.

*/

prepare_to_switch();

{

struct mm_struct *mm = next->mm;

struct mm_struct *oldmm = prev->active_mm;

if (!mm) {

if (next->active_mm) BUG();

next->active_mm = oldmm;

atomic_inc(&oldmm->mm_count);

enter_lazy_tlb(oldmm, next, this_cpu);

} else {

if (next->active_mm != mm) BUG();

switch_mm(oldmm, mm, next, this_cpu);

}

if (!prev->mm) {

prev->active_mm = NULL;

mmdrop(oldmm);

}

}

/*

* This just switches the register state and the

* stack.

*/

switch_to(prev, next, prev);

__schedule_tail(prev);

same_process:

reacquire_kernel_lock(current);

if (current->need_resched)

goto need_resched_back;

#ifdef CONFIG_PREEMPT

ctx_sw_on_no_preempt();

#endif

return;

recalculate:

{

struct task_struct *p;

spin_unlock_irq(&runqueue_lock);

read_lock(&tasklist_lock);

for_each_task(p)

p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);

read_unlock(&tasklist_lock);

spin_lock_irq(&runqueue_lock);

}

goto repeat_schedule;

still_running:

c = goodness(prev, this_cpu, prev->active_mm);

next = prev;

goto still_running_back;

handle_softirq:

do_softirq();

goto handle_softirq_back;

move_rr_last:

if (!prev->counter) {

prev->counter = NICE_TO_TICKS(prev->nice);

move_last_runqueue(prev);

}

goto move_rr_back;

scheduling_in_interrupt:

printk("Scheduling in interrupt\n");

BUG();

return;

}

/*
 * schedule_tail - post-context-switch cleanup for a freshly started task.
 *
 * @prev: the task that was running before the switch to us.
 *
 * Runs the common tail work via __schedule_tail() (the same helper that
 * schedule() itself calls right after switch_to()).  Under the preemption
 * patch it then calls ctx_sw_on(); schedule() disables context switching
 * with ctx_sw_off() on entry, so this presumably re-enables preemption
 * for the new task -- NOTE(review): exact ctx_sw_on() semantics come from
 * the preempt patch, confirm against the patch source.
 */
void schedule_tail(struct task_struct *prev)

{

__schedule_tail(prev);

#ifdef CONFIG_PREEMPT

ctx_sw_on(); /* re-enable preemption (counterpart of ctx_sw_off() in schedule()) */

#endif

}

标签:

版权申明:本站文章部分自网络,如有侵权,请联系:west999com@outlook.com

特别注意:本站所有转载文章言论不代表本站观点,本站所提供的摄影照片,插画,设计作品,如需使用,请与原作者联系,版权归原作者所有

相关文章

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值