Process Organization

Wait queues:

A wait queue is implemented as a doubly linked list whose elements contain pointers to process descriptors. Each wait queue has a wait queue head, a data structure of type wait_queue_head_t:

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= SPIN_LOCK_UNLOCKED,				\
	.task_list	= { &(name).task_list, &(name).task_list } }

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	struct task_struct * task;
	wait_queue_func_t func;
	struct list_head task_list;
};

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.task		= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DEFINE_WAIT(name)						\
	wait_queue_t name = {						\
		.task		= current,				\
		.func		= autoremove_wake_function,		\
		.task_list	= {	.next = &(name).task_list,	\
					.prev = &(name).task_list,	\
				},					\
	}
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}

int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
{
	task_t *p = curr->task;
	return try_to_wake_up(p, mode, sync);
}

Each element of a wait queue represents a sleeping process waiting for a certain event to occur; the address of its process descriptor is stored in the task field.

Waking up all the sleeping processes in a wait queue is not always desirable. If several processes are waiting for exclusive access to a resource that is about to be released, it makes sense to wake only one of them and let the rest keep sleeping; this avoids the well-known "thundering herd" problem, in which many processes are woken up only to race for a single resource.

There are therefore two kinds of sleeping processes: exclusive processes (flags field set to 1, i.e. WQ_FLAG_EXCLUSIVE) and nonexclusive processes (flags set to 0).


The DECLARE_WAIT_QUEUE_HEAD(name) macro defines a new wait queue head and initializes its lock and task_list fields.
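For example, a minimal usage sketch (my_queue is a hypothetical identifier, not from the original text):

/* statically declare and initialize a wait queue head named my_queue */
DECLARE_WAIT_QUEUE_HEAD(my_queue);

/* an embedded or dynamically allocated head would instead be set up with: */
/*     init_waitqueue_head(&head);                                         */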

The init_waitqueue_entry() function initializes a wait queue element:

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->task = p;
	q->func = default_wake_function;
}

A nonexclusive process p will then be woken by default_wake_function(), which is a simple wrapper around try_to_wake_up().

DEFINE_WAIT(name) declares a new variable of type wait_queue_t and initializes it with the descriptor of the process currently running on the CPU and with the address of the autoremove_wake_function() wake-up function. That function invokes default_wake_function() to wake the sleeping process and then removes the corresponding element from the wait queue. Finally, kernel developers can install a custom wake-up function through init_waitqueue_func_entry(), which initializes the wait queue element with the function supplied by the caller.
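For reference, init_waitqueue_func_entry() in 2.6 is roughly the following inline helper (a sketch from memory; check include/linux/wait.h in your kernel tree):

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->flags = 0;
	q->task = NULL;	/* no task pointer; the callback decides whom to wake */
	q->func = func;	/* custom wake-up callback */
}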


A process that wants to wait for a specific condition can invoke any of the functions described below.

1. sleep_on()

#define	SLEEP_ON_VAR					\
	unsigned long flags;				\
	wait_queue_t wait;				\
	init_waitqueue_entry(&wait, current);

#define SLEEP_ON_HEAD					\
	spin_lock_irqsave(&q->lock,flags);		\
	__add_wait_queue(q, &wait);			\
	spin_unlock(&q->lock);

#define	SLEEP_ON_TAIL					\
	spin_lock_irq(&q->lock);			\
	__remove_wait_queue(q, &wait);			\
	spin_unlock_irqrestore(&q->lock, flags);

/* puts the current process's descriptor into the wait queue list and sleeps */
void fastcall __sched sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

sleep_on() sets the current process to TASK_UNINTERRUPTIBLE and inserts it into the specified wait queue, then invokes the scheduler. When the sleeping process is woken, the scheduler resumes execution of sleep_on(), which removes the process from the wait queue. A usage sketch follows.
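A hedged usage sketch (my_queue and data_ready are hypothetical names; a real driver would pair the sleeper with a wake_up() on the same queue):

static DECLARE_WAIT_QUEUE_HEAD(my_queue);
static int data_ready;

static void consumer(void)
{
	/* racy if data_ready becomes true between the test and the sleep;
	 * this is exactly why sleep_on() is discouraged (see below) */
	if (!data_ready)
		sleep_on(&my_queue);
	/* ... consume the data ... */
}

static void producer(void)
{
	data_ready = 1;
	wake_up(&my_queue);	/* wakes the process sleeping on my_queue */
}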

2. interruptible_sleep_on() is identical to sleep_on() except that it sets the state of the process to TASK_INTERRUPTIBLE instead of TASK_UNINTERRUPTIBLE, so the sleeping process can also be woken by a signal.

3. sleep_on_timeout() and interruptible_sleep_on_timeout() are similar to the previous functions, but they also allow the caller to define a time interval after which the kernel wakes the process. To do this, they invoke schedule_timeout() instead of schedule(); see the sketch below.
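A minimal sketch of the timeout variant (my_queue is the same hypothetical head; the timeout is expressed in jiffies):

/* sleep on my_queue for at most one second (HZ jiffies); the return
 * value is the number of jiffies left, so 0 means the timeout expired */
long remaining = interruptible_sleep_on_timeout(&my_queue, HZ);
if (remaining == 0)
	printk(KERN_DEBUG "timed out waiting on my_queue\n");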

4. Linux 2.6 introduces the prepare_to_wait(), prepare_to_wait_exclusive(), and finish_wait() functions, which offer yet another way to put the current process to sleep in a wait queue:

void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	/* add wait to the queue unless it is already linked in */
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	/* #define is_sync_wait(wait)	(!(wait) || ((wait)->task)) */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
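Its counterpart finish_wait(), which resets the process state and unlinks the element, looks roughly like this in 2.6 (a sketch from memory; consult your kernel tree for the exact code):

void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/* check without the lock first: autoremove_wake_function()
	 * may already have unlinked the element */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}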

#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	/* the woken-up process resumes execution here */		\
	finish_wait(&wq, &__wait);					\
} while (0)

#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
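A hedged usage sketch of the race-free idiom (my_queue and data_ready are the same hypothetical names as above):

/* waiter: sleeps until data_ready becomes nonzero; the condition is
 * re-tested after every wakeup, so spurious wakeups are harmless */
wait_event(my_queue, data_ready != 0);

/* waker: make the condition true before waking the queue */
data_ready = 1;
wake_up(&my_queue);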


The sleep_on()-like functions cannot be used in the common situation where one must test a condition and put the process to sleep only when the condition has not yet been verified: because the test and the sleep are not a single atomic operation, they are a well-known source of race conditions, and their use is therefore discouraged; see the sketch below.
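A sketch of the lost-wakeup race (hypothetical names; the numbered comments mark one possible interleaving with a producer running on another CPU):

/* BROKEN: classic lost-wakeup race with sleep_on() */
if (!data_ready)		/* 1. consumer sees the condition false     */
				/* 2. producer: data_ready = 1;             */
				/* 3. producer: wake_up(&my_queue);         */
				/*    the queue is still empty: wakeup lost */
	sleep_on(&my_queue);	/* 4. consumer sleeps, possibly forever     */

wait_event() avoids this window by linking the element into the queue and setting the task state before re-testing the condition.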

The kernel wakes sleeping processes, setting them to the TASK_RUNNING state, through the following family of functions:

wake_up() and its variants.
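In 2.6 these are thin macros over __wake_up() (reproduced from memory, so treat them as a sketch and check include/linux/wait.h):

#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)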

/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
 * @state: the mask of task states that can be woken
 * @sync: do a synchronous wakeup?
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * returns failure only if the task is already active.
 */
static int try_to_wake_up(task_t * p, unsigned int state, int sync)
{
	int cpu, this_cpu, success = 0;
	unsigned long flags;
	long old_state;
	runqueue_t *rq;
#ifdef CONFIG_SMP
	unsigned long load, this_load;
	struct sched_domain *sd;
	int new_cpu;
#endif

	rq = task_rq_lock(p, &flags);
	schedstat_inc(rq, ttwu_cnt);
	old_state = p->state;
	if (!(old_state & state))
		goto out;

	if (p->array)
		goto out_running;

	cpu = task_cpu(p);
	this_cpu = smp_processor_id();

#ifdef CONFIG_SMP
	if (unlikely(task_running(rq, p)))
		goto out_activate;

	new_cpu = cpu;

	if (cpu == this_cpu || unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
		goto out_set_cpu;

	load = source_load(cpu);
	this_load = target_load(this_cpu);

	/*
	 * If sync wakeup then subtract the (maximum possible) effect of
	 * the currently running task from the load of the current CPU:
	 */
	if (sync)
		this_load -= SCHED_LOAD_SCALE;

	/* Don't pull the task off an idle CPU to a busy one */
	if (load < SCHED_LOAD_SCALE/2 && this_load > SCHED_LOAD_SCALE/2)
		goto out_set_cpu;

	new_cpu = this_cpu; /* Wake to this CPU if we can */

	/*
	 * Scan domains for affine wakeup and passive balancing
	 * possibilities.
	 */
	for_each_domain(this_cpu, sd) {
		unsigned int imbalance;
		/*
		 * Start passive balancing when half the imbalance_pct
		 * limit is reached.
		 */
		imbalance = sd->imbalance_pct + (sd->imbalance_pct - 100) / 2;

		if ((sd->flags & SD_WAKE_AFFINE) &&
				!task_hot(p, rq->timestamp_last_tick, sd)) {
			/*
			 * This domain has SD_WAKE_AFFINE and p is cache cold
			 * in this domain.
			 */
			if (cpu_isset(cpu, sd->span)) {
				schedstat_inc(sd, ttwu_wake_affine);
				goto out_set_cpu;
			}
		} else if ((sd->flags & SD_WAKE_BALANCE) &&
				imbalance*this_load <= 100*load) {
			/*
			 * This domain has SD_WAKE_BALANCE and there is
			 * an imbalance.
			 */
			if (cpu_isset(cpu, sd->span)) {
				schedstat_inc(sd, ttwu_wake_balance);
				goto out_set_cpu;
			}
		}
	}

	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
out_set_cpu:
	schedstat_inc(rq, ttwu_attempts);
	new_cpu = wake_idle(new_cpu, p);
	if (new_cpu != cpu) {
		schedstat_inc(rq, ttwu_moved);
		set_task_cpu(p, new_cpu);
		task_rq_unlock(rq, &flags);
		/* might preempt at this point */
		rq = task_rq_lock(p, &flags);
		old_state = p->state;
		if (!(old_state & state))
			goto out;
		if (p->array)
			goto out_running;

		this_cpu = smp_processor_id();
		cpu = task_cpu(p);
	}

out_activate:
#endif /* CONFIG_SMP */
	if (old_state == TASK_UNINTERRUPTIBLE) {
		rq->nr_uninterruptible--;
		/*
		 * Tasks on involuntary sleep don't earn
		 * sleep_avg beyond just interactive state.
		 */
		p->activated = -1;
	}

	/*
	 * Sync wakeups (i.e. those types of wakeups where the waker
	 * has indicated that it will leave the CPU in short order)
	 * don't trigger a preemption, if the woken up task will run on
	 * this cpu. (in this case the 'I will reschedule' promise of
	 * the waker guarantees that the freshly woken up task is going
	 * to be considered on this CPU.)
	 */
	activate_task(p, rq, cpu == this_cpu);
	if (!sync || cpu != this_cpu) {
		if (TASK_PREEMPTS_CURR(p, rq))
			resched_task(rq->curr);
	}
	success = 1;

out_running:
	p->state = TASK_RUNNING;
out:
	task_rq_unlock(rq, &flags);

	return success;
}


/*
 * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			     int nr_exclusive, int sync, void *key)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, &q->task_list) {
		wait_queue_t *curr;
		unsigned flags;
		curr = list_entry(tmp, wait_queue_t, task_list);
		flags = curr->flags;
		/* curr->func() ends up invoking try_to_wake_up(); if the
		 * process is exclusive and was actually woken, decrement
		 * nr_exclusive and stop scanning when it reaches zero */
		if (curr->func(curr, mode, sync, key) &&
		    (flags & WQ_FLAG_EXCLUSIVE) &&
		    !--nr_exclusive)
			break;
	}
}


/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 */
void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
				int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
Nonexclusive processes are always placed at the beginning of the doubly linked list, while exclusive processes are placed at the end, so __wake_up_common() wakes all nonexclusive processes before reaching the first exclusive one; this ordering comes from the insertion helpers sketched below.
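In 2.6 the insertion helpers look roughly like this (a sketch from memory; see your kernel tree for the exact code):

void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;	/* nonexclusive */
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);		/* link at the head of the list */
	spin_unlock_irqrestore(&q->lock, flags);
}

void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;	/* exclusive */
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);		/* link at the tail of the list */
	spin_unlock_irqrestore(&q->lock, flags);
}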



