start ← clock();
while (true)
{
    execute();
    start ← start + period;
    delay until start;
}
...
# Kconfig entry: gates all EDF additions below behind CONFIG_SCHED_EDF_POLICY.
menu "CASIO scheduler"
config SCHED_EDF_POLICY
bool "EDF scheduling policy"
# Enabled by default so the EDF class is compiled in.
default y
endmenu
...
/* Existing kernel scheduling policy identifiers (UAPI values). */
#define SCHED_NORMAL 0
#define SCHED_FIFO 1
#define SCHED_RR 2
...
#ifdef CONFIG_SCHED_EDF_POLICY
/* New policy id for EDF; 6 chosen to avoid the existing policy values
 * above — NOTE(review): confirm 6 is unused in the target kernel version. */
#define SCHED_EDF 6
#endif
...
/* Per-task EDF bookkeeping added to the kernel's task descriptor. */
struct task_struct{
...
#ifdef CONFIG_SCHED_EDF_POLICY
/* Identifier used to look the task up in the EDF list/tree. */
unsigned int edf_id;
/* Absolute deadline (sched_clock() time base) of the current job. */
unsigned long long deadline;
#endif // CONFIG_SCHED_EDF_POLICY
};
#ifdef CONFIG_SCHED_EDF_POLICY
/* Per-task EDF node: lives both in the deadline-ordered rb-tree and in
 * the edf_rq list (see find_edf_task_list / *_rb_tree helpers below). */
struct edf_task{
/* Link into edf_rq.edf_root, keyed by absolute deadline. */
struct rb_node edf_node;
/* cl/ch: presumably lower/upper execution-cost bounds — TODO confirm
 * against the helpers that set them (not visible in this excerpt). */
unsigned long long cl;
unsigned long long ch;
/* Task period; added to sched_clock() to form the next deadline. */
unsigned long long period;
int level;
/* Back-pointer to the owning kernel task. */
struct task_struct *task;
};
/* Per-CPU EDF runqueue embedded in struct rq. */
struct edf_rq{
/* Root of the deadline-ordered red-black tree of runnable EDF tasks. */
struct rb_root edf_root;
/* List of all registered EDF tasks (runnable or not). */
struct list_head edf_list;
/* Count of runnable EDF tasks on this CPU. */
atomic_t nr_running;
};
#endif // CONFIG_SCHED_EDF_POLICY
/* The per-CPU runqueue gains an embedded EDF sub-runqueue. */
struct rq{
...
#ifdef CONFIG_SCHED_EDF_POLICY
struct edf_rq edf_rq;
#endif // CONFIG_SCHED_EDF_POLICY
...
};
/*
 * sched_init - boot-time scheduler setup.
 * Initializes the per-CPU EDF runqueue alongside the stock runqueues.
 * Fix: the original iterated for_each_possible_cpu(1) / cpu_rq(1) with a
 * literal 1 instead of a loop variable, initializing only CPU 1.
 */
void __init sched_init(void)
{
	int i;
	...
	for_each_possible_cpu(i){
		...
		struct rq *rq;
		...
		rq = cpu_rq(i);
		...
#ifdef CONFIG_SCHED_EDF_POLICY
		init_edf_rq(&rq->edf_rq);
#endif // CONFIG_SCHED_EDF_POLICY
		...
	}
}
const struct shced_class edf_sched_class={
.next =&rt_sched_class,
.enqueue_task =enqueue_task_edf,
.dequeue_task =dequeue_task_edf,
.check_preempt_curr=check_preempt_curr_edf,
.pick_next_task=pick_next_task_edf,
...
};
static void enqueue_task_edf(struct rq*rq ,struct task-struct *p,int wakeup)
{
struct edf_task *t=NULL;
if(p)
{
t=find_edf_task_list(&rq->edf_rq,p);
if(t)
{
t->deadline=sched_clock()+p->period;
Insert_edf_task_rb_tree(&rq->edf_rq,t);
atomic_inc(&rq->edf_rq,nr_running);
}
}
}
static void dequeue_task_edf(struct rq *rq,struct task_struct *p,int sleep)
{
struct edf_task *t=NULL;
if(p){
t=find_edf_task_list(rq->edf_rq,p);
if(t){
remove_edf_task_rb_tree(&rq->edf_rq,t);
atomic_dec(&rq->edf_rq,nr_running);
if(t->task->state==Task_DEAD||t->task->state==EXIT_DEAD||
t->task->state==EXIT_ZOMBIE)
{
rem_edf_task_list(&rq->edf_rq,t->task);
}
}
}
}
/*
 * check_preempt_curr_edf - should the running task yield the CPU?
 * A non-EDF current task is always rescheduled (EDF sits above every
 * other class). Otherwise the running task is preempted only when some
 * queued EDF task holds an earlier absolute deadline.
 */
static void check_preempt_curr_edf(struct rq *rq, struct task_struct *p)
{
	struct edf_task *earliest = NULL, *running = NULL;

	if (rq->curr->policy != SCHED_EDF) {
		resched_task(rq->curr);
		return;
	}

	earliest = earliest_deadline_edf_task_rb_tree(&rq->edf_rq);
	if (!earliest)
		return;

	running = find_edf_task_list(&rq->edf_rq, rq->curr);
	if (!running) {
		/* Current claims SCHED_EDF but is not tracked — report it. */
		printk(KERN_ALERT "check_preempt_curr_edf\n");
		return;
	}

	if (earliest->deadline < running->deadline)
		resched_task(rq->curr);
}
/*
 * pick_next_task_edf - choose the next EDF task to run on @rq.
 * Returns the task with the earliest deadline, or NULL when the EDF
 * rb-tree is empty (the core scheduler then falls through to ->next).
 */
static struct task_struct *pick_next_task_edf(struct rq *rq)
{
	struct edf_task *earliest;

	earliest = earliest_deadline_edf_task_rb_tree(&rq->edf_rq);
	return earliest ? earliest->task : NULL;
}
...
#ifdef CONFIG_SCHED_EDF_POLICY
#include "sched_edf.c"
#endif // CONFIG_SCHED_EDF_POLICY
/*
 * Highest-priority scheduling class: EDF when configured, else RT.
 * Fix: the original had no space before '(' —
 * "#define sched_class_highest(&edf_sched_class)" is an ill-formed
 * function-like macro; a space makes it an object-like macro.
 */
#ifdef CONFIG_SCHED_EDF_POLICY
#define sched_class_highest (&edf_sched_class)
#else
#define sched_class_highest (&rt_sched_class)
#endif // CONFIG_SCHED_EDF_POLICY
/*
 * Userspace-visible scheduling parameters (sched_setscheduler UAPI),
 * extended with the EDF fields the setscheduler path copies in.
 * Fix: "sched_priorty" misspelled the kernel's UAPI field name
 * sched_priority, which would break every existing user of the struct.
 */
struct sched_param{
	int sched_priority;
#ifdef CONFIG_SCHED_EDF_POLICY
	/* EDF identity and timing parameters supplied by userspace. */
	unsigned int edf_id;
	unsigned long long deadline;
	unsigned int level;
#endif // CONFIG_SCHED_EDF_POLICY
};
static inline int rt_policy(int policy)
{
if(unlikely(policy==SCHED_FIFO)
||unlikely(policy==SCHED_FIFO)
#ifdef CONFIG_SCHED_EDF_POLICY
||unlikely(policy==SCHED_EDF)
#endif // CONFIG_SCHED_EDF_POLICY
)
return 1;
return 0;
}
/*
 * __setscheduler - switch task @p to @policy at priority @prio.
 * Only the EDF-relevant addition is shown; elided cases handle the
 * stock policies. For SCHED_EDF the task's class pointer is redirected
 * to edf_sched_class so the core scheduler dispatches to the EDF hooks.
 */
static void __setscheduler(struct rq *rq,struct task_struct *p,int policy,int prio)
{
...
p->policy=policy;
switch(p->policy){
...
#ifdef CONFIG_SCHED_EDF_POLICY
case SCHED_EDF:
p->sched_class=&edf_sched_class;
break;
#endif // CONFIG_SCHED_EDF_POLICY
}
...
}
linux edf
(Latest recommended follow-up article published 2022-08-11 17:36:30.)