The Linux kthread_worker mechanism

Kernel drivers often use kthread_worker and kthread_work; for example, the SPI subsystem uses this mechanism when initializing a master's message queue in spi_master_initialize_queue. This article takes a brief look at how it works.

Essentially, they are a way of using and managing kernel threads, analogous to the work_struct / workqueue_struct mechanism. We will look at them from three angles: data structures, usage, and implementation.

1. Data structures

The kthread_worker and kthread_work structures are defined in include/linux/kthread.h:

struct kthread_worker {
	unsigned int		flags;
	spinlock_t		lock;						// spinlock protecting the work lists
	struct list_head	work_list;				// list of pending kthread_work items (the "assembly line")
	struct list_head	delayed_work_list;		// list of delayed work items
	struct task_struct	*task;					// task_struct of the thread that processes this worker's work
	struct kthread_work	*current_work;			// the kthread_work currently being processed
};

struct kthread_work {
	struct list_head	node;					// list node linking this work into a worker's list
	kthread_work_func_t	func;					// the function to execute for this work
	struct kthread_worker	*worker;			// the kthread_worker processing this work
	/* Number of canceling calls that are running at the moment. */
	int			canceling;						// non-zero while the work is being canceled
};

Given these structure definitions, kthread_worker and kthread_work are related as shown below:

(figure: one kthread_worker whose work_list links multiple kthread_work items)

2. Usage

As the figure shows, a single kthread_worker can serve multiple kthread_work items. Using the mechanism takes the following steps.

2.1 Prepare the kthread_worker

Define a kthread_worker and initialize it:

struct kthread_worker hi_worker; 	// declare a kthread_worker
kthread_init_worker(&hi_worker); 	// initialize the kthread_worker

After initialization, create and start a kernel thread for the kthread_worker to process its work; on success kthread_run returns a task_struct pointer:

struct task_struct *kworker_task = kthread_run(kthread_worker_fn, &hi_worker, "nvme%d", dev->instance);

The thread function passed here, kthread_worker_fn, is always the same; it is provided by the kernel and analyzed in section 3.2.
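Newer kernels also provide kthread_create_worker*() helpers (referenced by the FIXME comment in kthread_worker_fn, section 3.2) that fold allocation, initialization, and thread creation into one call. A minimal sketch, assuming the kthread_create_worker() / kthread_destroy_worker() API:

struct kthread_worker *worker;

worker = kthread_create_worker(0, "hi_worker");	// flags = 0
if (IS_ERR(worker))
	return PTR_ERR(worker);
/* ... queue work on the worker ... */
kthread_destroy_worker(worker);	// flushes remaining work and stops the thread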

2.2 Prepare a kthread_work

Define a kthread_work and initialize it, specifying xxx_work_fn as its work function:

struct kthread_work hi_work;				// declare a kthread_work
kthread_init_work(&hi_work, xxx_work_fn); 	// initialize the kthread_work and set its work function

2.3 Queue the work

The two steps above prepared the worker and the work; when a work needs to be processed, hang it on the worker:

kthread_queue_work(&hi_worker, &hi_work);
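A work can also be scheduled to run after a delay, which is what the worker's delayed_work_list is for; a minimal sketch, assuming the kthread_init_delayed_work() / kthread_queue_delayed_work() API:

struct kthread_delayed_work hi_dwork;			// declare a delayed work

kthread_init_delayed_work(&hi_dwork, xxx_work_fn);	// initialize it with a work function
// run xxx_work_fn on the worker roughly one second from now
kthread_queue_delayed_work(&hi_worker, &hi_dwork, msecs_to_jiffies(1000));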

2.4 Stop the worker thread

Wait for all work on the worker to complete, then stop the worker thread:

kthread_flush_worker(&hi_worker);		// wait for all running and pending work on the worker to finish
kthread_stop(kworker_task);				// stop the worker thread
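Putting the four steps together, here is a minimal end-to-end sketch as a loadable module (hypothetical names hi_worker / hi_work / xxx_work_fn, error handling trimmed):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct kthread_worker hi_worker;
static struct kthread_work hi_work;
static struct task_struct *kworker_task;

// hypothetical work function: runs in the worker thread's context
static void xxx_work_fn(struct kthread_work *work)
{
	pr_info("hi_work executed\n");
}

static int __init demo_init(void)
{
	kthread_init_worker(&hi_worker);
	kworker_task = kthread_run(kthread_worker_fn, &hi_worker, "hi_worker");
	if (IS_ERR(kworker_task))
		return PTR_ERR(kworker_task);

	kthread_init_work(&hi_work, xxx_work_fn);
	kthread_queue_work(&hi_worker, &hi_work);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_flush_worker(&hi_worker);	// drain pending work first
	kthread_stop(kworker_task);		// then stop the worker thread
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");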

3. Source code implementation

The implementation lives in kernel/kthread.c and include/linux/kthread.h.

3.1 kthread_init_worker

This initializes a kthread_worker:

#define kthread_init_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
	} while (0)

which in turn calls __kthread_init_worker:

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);							// initialize the lock
	lockdep_set_class_and_name(&worker->lock, key, name);	// lockdep bookkeeping
	INIT_LIST_HEAD(&worker->work_list);						// initialize work_list
	INIT_LIST_HEAD(&worker->delayed_work_list);				// initialize delayed_work_list
}
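Besides this runtime initializer, include/linux/kthread.h also offers compile-time initializers for workers and works defined at file scope; a small sketch, assuming the DEFINE_KTHREAD_WORKER / DEFINE_KTHREAD_WORK macros from that header (xxx_work_fn is the hypothetical work function from section 2):

static DEFINE_KTHREAD_WORKER(static_worker);			// worker initialized at compile time
static DEFINE_KTHREAD_WORK(static_work, xxx_work_fn);	// work bound to xxx_work_fn at compile time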

3.2 The worker thread function kthread_worker_fn

After the kthread_worker is defined and initialized, kthread_run creates and runs a kernel thread whose thread function is kthread_worker_fn, with a pointer to the kthread_worker as its argument:

int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	// current is a kernel global that refers to the currently running task
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	// mark the thread interruptible *before* checking for work, so a
	// concurrent wake_up_process() cannot be lost
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
	// has kthread_stop() been called on this thread? (see below)
	if (kthread_should_stop()) {
		// put the thread back into the running state
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		// the worker no longer has a thread; clear the task pointer
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}
	// the thread is not stopping, so look for work to do
	work = NULL;
	spin_lock_irq(&worker->lock);
	// check the worker's work_list
	if (!list_empty(&worker->work_list)) {
		// take the first kthread_work off the list
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		// and unlink it from the worker's list
		list_del_init(&work->node);
	}
	// record it as the work currently being processed
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		// there is work to do: go back to the running state
		__set_current_state(TASK_RUNNING);
		// and execute the work's function
		work->func(work);
	} else if (!freezing(current))
		// no work: sleep until woken when new work is queued
		schedule();

	try_to_freeze();
	cond_resched();
	// loop back and repeat
	goto repeat;
}

kthread_should_stop

bool kthread_should_stop(void)
{
	// test the KTHREAD_SHOULD_STOP flag of the current thread
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

Calling kthread_stop() sets KTHREAD_SHOULD_STOP in the thread's flags and wakes the thread so it can exit.
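kthread_should_stop() is the same test that ordinary kernel threads use; a generic loop built on it (a sketch, unrelated to the kthread_worker code) looks like this:

static int my_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		// do one unit of work, then yield the CPU for a while
		schedule_timeout_interruptible(HZ);
	}
	return 0;	// this value becomes the return value of kthread_stop()
}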

3.3 kthread_init_work

kthread_init_work initializes an already-defined kthread_work:

#define kthread_init_work(work, fn)					\
	do {								\
		memset((work), 0, sizeof(struct kthread_work));		\
		INIT_LIST_HEAD(&(work)->node);	/* init the list node */\
		(work)->func = (fn);		/* set the work function */\
	} while (0)

3.4 kthread_queue_work

Handing a work to a worker simply inserts the work's list node into the worker's list:

bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	// take the lock, disabling local interrupts first
	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	// drop the lock and restore the interrupt state
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}

queuing_blocked

static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	// a non-empty node means the work is already queued on a worker,
	// and a non-zero canceling count means it is being canceled;
	// in either case it must not be queued again
	return !list_empty(&work->node) || work->canceling;
}
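Because queuing can be refused for these reasons, kthread_queue_work() returns a bool; a small usage sketch (with the hypothetical names from section 2):

if (!kthread_queue_work(&hi_worker, &hi_work))
	pr_warn("hi_work is already queued or being canceled\n");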

kthread_insert_work

static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);
	// link the work's node onto the worker's list
	list_add_tail(&work->node, pos);
	work->worker = worker;
	// current_work is the work being executed right now; NULL means the
	// worker thread is idle (asleep in schedule())
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task); // wake the worker thread (via worker->task, hence the NULL check above)
}
// the queued work now waits to be processed by the worker thread

3.5 kthread_flush_worker

void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		// initialize a kthread_work whose function is kthread_flush_work_fn
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		// initialize the completion
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	// append this work at the tail of the worker's list, so it runs after
	// everything that is currently queued
	kthread_queue_work(worker, &fwork.work);
	// sleep on the completion; kthread_flush_work_fn completes it, and since
	// that work is last in the list, all earlier work has finished by then
	wait_for_completion(&fwork.done);
}
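For reference, struct kthread_flush_work simply pairs a work with a completion (defined in kernel/kthread.c):

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};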

kthread_flush_work_fn

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	// wake whoever is blocked in kthread_flush_worker()
	complete(&fwork->done);
}

4. Usage in the kernel

The SPI core uses this mechanism for the SPI master (host controller) message queue:

spi_register_master
    spi_master_initialize_queue
    	spi_init_queue
    	spi_start_queue

4.1 spi_init_queue

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	// initialize the kworker embedded in the spi_master
	kthread_init_worker(&master->kworker);
	// create and run a kernel thread named after master->dev
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	// initialize the pump_messages work; its work function is spi_pump_messages
	kthread_init_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

4.2 spi_start_queue

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);
	// queue the pump_messages work and wake the master->kworker thread; the work function spi_pump_messages will run
	kthread_queue_work(&master->kworker, &master->pump_messages);

	return 0;
}

4.3 spi_sync

Operations on an SPI device ultimately reach spi_sync:

spi_sync
    __spi_sync
    	if (master->transfer == spi_queued_transfer) 
            __spi_queued_transfer
            	list_add_tail(&msg->queue, &master->queue);
				if (!master->busy && need_pump)
                    // if the master is idle, queue pump_messages on the kworker
					kthread_queue_work(&master->kworker, &master->pump_messages);
				
				__spi_pump_messages(master, false);
				wait_for_completion(&done);

4.4 spi_pump_messages

The work function of master->pump_messages is spi_pump_messages:

static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	// __spi_pump_messages may itself requeue the work, e.g. while idling:
	if (master->idling) {
		kthread_queue_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	// ... eventually it performs the SPI transfer:
	ret = spi_map_msg(master, master->cur_msg);
	ret = master->transfer_one_message(master, master->cur_msg);
}