ksoftirqd kernel thread: handling softirqs

softirq is one of the bottom-half mechanisms; the name "soft interrupt" is defined in contrast to hard interrupts (hardirqs).

Among the bottom-half mechanisms, softirq is the fastest to execute; at the same time it is, relatively speaking, the most troublesome one for developers. The trouble comes from two factors:

  • The same softirq can run concurrently on different CPUs, so softirq handler code must cope with SMP concurrency, which makes it more complex to implement.
  • Softirqs cannot be defined dynamically: they must be compiled statically into the kernel image and cannot be added from a loadable module (see the registration sketch after this list).
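As a reference for the static-registration point, here is an abridged sketch of how the kernel's own subsystems hook their softirq slots (based on kernel/softirq.c; details vary by kernel version). Each slot number is a compile-time enum entry (listed at the end of this article), and there is no interface for allocating a new one at runtime:

/* Abridged sketch; exact contents vary by kernel version. */
void __init softirq_init(void)
{
	...
	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

/* Later, typically from hardirq context, a subsystem marks its
 * softirq pending on the local CPU: */
	raise_softirq(NET_RX_SOFTIRQ);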

Its execution efficiency shows in two ways:

  • Because it can run concurrently across CPUs, a softirq usually does not have to wait (a tasklet, by contrast, cannot run concurrently and has other restrictions); a hardirq can still preempt it, though.
  • A softirq usually runs in interrupt context: as soon as hardirq handling finishes, pending softirqs are processed. Why "usually" rather than "always"? Because in special cases softirq work is instead handled by a kernel thread, ksoftirqd, as the sketch after this list shows.
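The handoff point is visible in __do_softirq() itself. The following is an abridged sketch (kernel/softirq.c; constants and details vary by version): softirqs are normally drained in interrupt context, but when too many keep arriving, the remainder is deferred to ksoftirqd:

asmlinkage __visible void __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME; /* ~2ms budget */
	int max_restart = MAX_SOFTIRQ_RESTART;          /* at most 10 passes */
	__u32 pending;

restart:
	pending = local_softirq_pending();
	set_softirq_pending(0);
	/* ... run the action of every set bit, lowest bit first ... */

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;
		/* Still work left after the budget is used up: defer the
		 * rest to the per-CPU ksoftirqd thread instead of
		 * monopolizing the CPU in interrupt context. */
		wakeup_softirqd();
	}
}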

This article focuses on how the ksoftirqd kernel thread is created and how it runs.

The softirq initialization flow is as follows.

spawn_ksoftirqd runs before SMP initialization (it is registered as an early_initcall); using smpboot_register_percpu_thread, it creates one per-CPU kernel thread, ksoftirqd/xx, for each CPU.

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd, // struct task_struct __percpu **store;
	.thread_should_run	= ksoftirqd_should_run, // any softirq pending? if so, thread_fn below runs
	.thread_fn		= run_ksoftirqd, // called from smpboot_thread_fn
	.thread_comm		= "ksoftirqd/%u",
};
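The .store field points at the per-CPU ksoftirqd task pointer. This is the same variable that wakeup_softirqd() reads when __do_softirq() decides to defer work; abridged from kernel/softirq.c:

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

static void wakeup_softirqd(void)
{
	/* Interrupts are disabled here, so the per-CPU read is safe. */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}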

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); // create one thread per CPU: softirq ---> smpboot
	// Register a per_cpu thread related to hotplug

	return 0;
}
early_initcall(spawn_ksoftirqd);
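cpu_nfb, registered above, is softirq's CPU-hotplug notifier. An abridged sketch (kernel/softirq.c, in kernels that still use the notifier API): its only job is to migrate the tasklet lists of a dead CPU over to the current one:

static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		takeover_tasklets((unsigned long)hcpu);
#endif
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};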
/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	unsigned int cpu;
	int ret = 0;

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu); // create the thread for this cpu
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			goto out;
		}
		smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads); // add to the global hotplug_threads list
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
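smpboot_unpark_thread(), called in the loop above, is what actually lets a newly created (and parked) thread start running on its CPU. A minimal abridged sketch from kernel/smpboot.c (some versions run extra callbacks first):

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht,
				  unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	/* Wake the thread out of TASK_PARKED; from here on it loops
	 * inside smpboot_thread_fn() (shown below). */
	kthread_unpark(tsk);
}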
static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td; // per-thread bookkeeping data

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht; // back-pointer from td (smpboot_thread_data) to ht

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm); // create the CPU-bound thread running smpboot_thread_fn
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}
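kthread_create_on_cpu(), used above, creates the thread for the given CPU and leaves it parked; this is also where the cpu number is substituted into the "ksoftirqd/%u" name. An abridged sketch from kernel/kthread.c (details vary by version):

struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	/* namefmt is "ksoftirqd/%u" here; cpu fills in the %u */
	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu),
				   namefmt, cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread: it stays off the runqueue until
	 * smpboot_unpark_thread() releases it (binding to the target
	 * CPU happens as part of unparking). */
	kthread_park(p);
	return p;
}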
/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 1 when the thread should exit, 0 otherwise.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->cleanup)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) { // ksoftirqd_should_run: 0 means no softirq pending
			preempt_enable_no_resched();
			schedule();
		} else { // non-zero: at least one softirq is pending
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu); // run_ksoftirqd
		}
		}
	}
}
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable(); // disable interrupts on the local CPU
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		__do_softirq();
		local_irq_enable(); // re-enable local CPU interrupts
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}
typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned   irq_cpustat_t;



#ifndef __ARCH_IRQ_STAT
extern irq_cpustat_t irq_stat[];		/* defined in asm/hardirq.h */
#define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
#endif

  /* arch independent irq_stat fields */  // check whether the local CPU has any softirq pending
#define local_softirq_pending() \
	__IRQ_STAT(smp_processor_id(), __softirq_pending) /* the softirq "pending register" (a per-CPU flag word in software, not a hardware register) */
	
	
	
irq_stat[0].__softirq_pending holds the softirq pending bits for cpu0
irq_stat[1].__softirq_pending holds the softirq pending bits for cpu1
...
irq_stat[n].__softirq_pending holds the softirq pending bits for cpun

The bits of __softirq_pending are defined below: a bit set to 1 means the corresponding softirq is pending, 0 means it is not, and lower bits have higher priority.
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ, //6
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS
};
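The writer side of this pending word is raise_softirq(): it simply ORs the requested bit into the current CPU's __softirq_pending with interrupts disabled. An abridged sketch (kernel/softirq.c and include/linux/interrupt.h; details vary by version):

#define or_softirq_pending(x)	(local_softirq_pending() |= (x))

void __raise_softirq_irqoff(unsigned int nr)
{
	/* Set bit nr in this CPU's irq_stat[cpu].__softirq_pending */
	or_softirq_pending(1UL << nr);
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);	/* also wakes ksoftirqd if needed */
	local_irq_restore(flags);
}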
