Original article: http://blog.csdn.net/angle_birds/article/details/8448070
For a project, I implemented a state machine in a driver module using the kernel timer timer_list.
Annoyingly, it kept reporting "Scheduling while atomic" errors at runtime, because the timer callback runs in atomic context where sleeping is not allowed, so I switched to a workqueue instead and looked into how it works.
1. Introduction to workqueue
A workqueue is similar to a tasklet: both allow kernel code to request that a function be called at some later time (paraphrasing LDD3).
Each workqueue is backed by its own kernel thread.
The differences between a workqueue and a tasklet:
1. A tasklet is implemented on top of softirqs and runs in softirq context, so tasklet code must be atomic.
2. A workqueue, by contrast, runs in the process context of its kernel thread, so work functions are allowed to sleep (see the sketch below).
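To make the contrast concrete, here is a minimal sketch; my_tasklet_func, my_work_func and the variable names are made up for illustration, not taken from the original article:
- #include <linux/interrupt.h>
- #include <linux/workqueue.h>
- #include <linux/delay.h>
-
- static void my_tasklet_func(unsigned long data)
- {
-     /* softirq context: must stay atomic, must not call msleep()/mutex_lock() */
- }
- DECLARE_TASKLET(my_tasklet, my_tasklet_func, 0);
-
- static void my_work_func(struct work_struct *work)
- {
-     msleep(10);   /* process context: sleeping is allowed */
- }
- DECLARE_WORK(my_work, my_work_func);
-
- /* from an interrupt handler, for example:
-  *     tasklet_schedule(&my_tasklet);
-  *     schedule_work(&my_work);
-  */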
2. The workqueue API
The workqueue API changed as of 2.6.20.
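The visible change, as far as I can tell, is that work_struct no longer carries a private data pointer: since 2.6.20 the handler receives the work_struct itself and typically uses container_of to reach the object that embeds it. A small sketch with made-up names (my_dev, my_handler):
- #include <linux/kernel.h>
- #include <linux/workqueue.h>
-
- struct my_dev {
-     struct work_struct my_work;
- };
-
- /* before 2.6.20 the handler looked like  void my_handler(void *data);
-  * and was registered with                INIT_WORK(&dev->my_work, my_handler, dev); */
-
- /* since 2.6.20 the handler receives the work_struct itself: */
- static void my_handler(struct work_struct *work)
- {
-     struct my_dev *dev = container_of(work, struct my_dev, my_work);
-     printk(KERN_INFO "work for dev %p\n", dev);
- }
-
- static void my_dev_setup(struct my_dev *dev)
- {
-     INIT_WORK(&dev->my_work, my_handler);   /* no data argument any more */
- }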
- #include <linux/workqueue.h>
- struct workqueue_struct *test_workqueue;
- /* in the work handler, container_of(p_work, struct ..., work) recovers
-    the object that embeds the work_struct (see the fuller sketch below) */
- test_workqueue = create_workqueue("test_workqueue");
- if (!test_workqueue)
- panic("Failed to create test_workqueue");
First, creating a workqueue actually amounts to creating a kernel thread:
- create_workqueue("tap_workqueue")
- --> __create_workqueue("tap_workqueue", 0, 0)
- --> __create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL){
- wq = kzalloc(sizeof(*wq), GFP_KERNEL);
- wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
- wq->name = name;
- wq->singlethread = singlethread;
- wq->freezeable = freezeable;
- INIT_LIST_HEAD(&wq->list);
- for_each_possible_cpu(cpu) {
- cwq = init_cpu_workqueue(wq, cpu);
- err = create_workqueue_thread(cwq, cpu);
- start_workqueue_thread(cwq, cpu);
- }
- }
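For reference, the macro layer that leads from create_workqueue down to __create_workqueue_key looks roughly like this in 2.6.24's include/linux/workqueue.h (quoted from memory, so treat it as approximate; the CONFIG_LOCKDEP variant additionally passes a lock class key and name):
- #define create_workqueue(name)              __create_workqueue((name), 0, 0)
- #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
- #define create_freezeable_workqueue(name)   __create_workqueue((name), 1, 1)
-
- /* without CONFIG_LOCKDEP, __create_workqueue simply forwards: */
- #define __create_workqueue(name, singlethread, freezeable) \
-     __create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL)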
create_workqueue_thread creates the kernel thread that will run worker_thread:
- static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
- {
- struct workqueue_struct *wq = cwq->wq;
- const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
- struct task_struct *p;
- p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
- if (IS_ERR(p))
- return PTR_ERR(p);
- cwq->thread = p;
- return 0;
- }
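start_workqueue_thread then wakes the thread that was just created (and, for the per-CPU case, binds it to its CPU first). Roughly, from memory of the 2.6.24 source:
- static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
- {
-     struct task_struct *p = cwq->thread;
-
-     if (p != NULL) {
-         if (cpu >= 0)
-             kthread_bind(p, cpu);      /* per-CPU worker: pin it to its CPU */
-         wake_up_process(p);
-     }
- }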
The kernel thread worker_thread is very simple: it is just an endless loop that keeps running the work items on the workqueue's work list
(linux_2_6_24/kernel/workqueue.c)
- static int worker_thread(void *__cwq)
- {
- struct cpu_workqueue_struct *cwq = __cwq;
- /* define a wait-queue entry for this thread */
- DEFINE_WAIT(wait);
- /* freezeable is normally 0 here */
- if (cwq->wq->freezeable)
- set_freezable();
- /* raise the thread's priority */
- set_user_nice(current, -5);
- for (;;) {
- /* wait on cwq->more_work; whoever calls queue_work will call wake_up(&cwq->more_work) to wake this thread */
- prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
- /* if the work list is empty, give up the CPU */
- if (!freezing(current) && !kthread_should_stop() && list_empty(&cwq->worklist))
- schedule();
- /* once scheduled back, finish the wait: someone woke up cwq->more_work, so there is work to handle */
- finish_wait(&cwq->more_work, &wait);
- /* a no-op here, since power management (the freezer) is not configured */
- try_to_freeze();
- if (kthread_should_stop())
- break;
- /* run_workqueue processes all the work items on the queue in turn */
- run_workqueue(cwq);
- }
- return 0;
- }
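The prepare_to_wait/finish_wait idiom used above works just as well in a driver's own kernel thread. A minimal sketch under made-up names (my_waitq, my_pending, my_thread_fn):
- #include <linux/kthread.h>
- #include <linux/wait.h>
- #include <linux/sched.h>
-
- static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
- static int my_pending;
-
- static int my_thread_fn(void *unused)
- {
-     DEFINE_WAIT(wait);
-
-     for (;;) {
-         prepare_to_wait(&my_waitq, &wait, TASK_INTERRUPTIBLE);
-         if (!kthread_should_stop() && !my_pending)
-             schedule();                 /* sleep until someone wakes us */
-         finish_wait(&my_waitq, &wait);
-
-         if (kthread_should_stop())
-             break;
-
-         my_pending = 0;                 /* handle the pending event here */
-     }
-     return 0;
- }
-
- /* producer side:  my_pending = 1; wake_up(&my_waitq);
-  * start/stop:     task = kthread_run(my_thread_fn, NULL, "my_thread");
-  *                 kthread_stop(task); */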
- /* run_workqueue processes all the work items on the queue in turn */
- static void run_workqueue(struct cpu_workqueue_struct *cwq)
- {
- spin_lock_irq(&cwq->lock);
- cwq->run_depth++;
- while (!list_empty(&cwq->worklist)) {
- struct work_struct *work = list_entry(cwq->worklist.next,
- struct work_struct, entry);
- work_func_t f = work->func;
- #ifdef CONFIG_LOCKDEP
- /*
- * It is permissible to free the struct work_struct from inside the
- * function called from it; to avoid problems with work->lockdep_map,
- * make a copy and use that here.
- */
- struct lockdep_map lockdep_map = work->lockdep_map;
- #endif
- cwq->current_work = work;
- list_del_init(cwq->worklist.next);
- spin_unlock_irq(&cwq->lock);
- BUG_ON(get_wq_data(work) != cwq);
- work_clear_pending(work);
- lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
- f(work); /* run the work item's func */
-
- lock_release(&lockdep_map, 1, _THIS_IP_);
- lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
- if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
- printk(KERN_ERR "BUG:> NULL;
- }
- cwq->run_depth--;
- spin_unlock_irq(&cwq->lock);
- }
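Note that work_clear_pending(work) runs before f(work) is called, so a work handler may queue its own work_struct again from inside itself. A small hypothetical sketch (poll_wq is assumed to have been created elsewhere with create_workqueue):
- static struct workqueue_struct *poll_wq;   /* created elsewhere */
-
- static void poll_work_func(struct work_struct *work)
- {
-     /* ... do one round of processing ... */
-
-     /* the PENDING bit was already cleared before this handler ran,
-      * so queueing the same work_struct again succeeds */
-     queue_work(poll_wq, work);
- }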
3. Adding work to a workqueue
queue_work adds a work item to the work_list of the given workqueue (linux_2_6_24/kernel/workqueue.c):
- int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
- {
- int ret = 0;
- if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
- BUG_ON(!list_empty(&work->entry));
- __queue_work(wq_per_cpu(wq, get_cpu()), work);
- put_cpu();
- ret = 1;
- }
- return ret;
- }
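queue_delayed_work takes the same path, except that it first arms a timer whose handler feeds the work into __queue_work once the delay expires. Roughly, from memory of the 2.6.24 source (check kernel/workqueue.c for the exact code):
- int fastcall queue_delayed_work(struct workqueue_struct *wq,
-     struct delayed_work *dwork, unsigned long delay)
- {
-     if (delay == 0)
-         return queue_work(wq, &dwork->work);
-     return queue_delayed_work_on(-1, wq, dwork, delay);
- }
-
- /* the timer armed by queue_delayed_work_on() expires into: */
- static void delayed_work_timer_fn(unsigned long __data)
- {
-     struct delayed_work *dwork = (struct delayed_work *)__data;
-     struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
-
-     __queue_work(wq_per_cpu(cwq->wq, smp_processor_id()), &dwork->work);
- }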
- /* Preempt must be disabled. */
- static void __queue_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
- {
- unsigned long flags;
- spin_lock_irqsave(&cwq->lock, flags);
- insert_work(cwq, work, 1);
- spin_unlock_irqrestore(&cwq->lock, flags);
- }
- static void insert_work(struct cpu_workqueue_struct *cwq,
- struct work_struct *work, int tail)
- {
- set_wq_data(work, cwq);
- /*
- * Ensure that we get the right work->data if we see the
- * result of list_add() below, see try_to_grab_pending().
- */
- smp_wmb();
- if (tail)
- list_add_tail(&work->entry, &cwq->worklist);
- else
- list_add(&work->entry, &cwq->worklist);
- wake_up(&cwq->more_work);
- }
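On the driver side, the usual pattern is to call queue_work from atomic context (an interrupt or timer handler) and to do the part that may sleep in the work function. A hypothetical sketch (my_dev, my_irq_handler and my_wq are made-up names; INIT_WORK(&dev->work, my_work_func) is assumed to have been done at init time):
- #include <linux/kernel.h>
- #include <linux/interrupt.h>
- #include <linux/workqueue.h>
-
- struct my_dev {
-     struct work_struct work;
- };
-
- static struct workqueue_struct *my_wq;     /* created with create_workqueue() */
-
- static irqreturn_t my_irq_handler(int irq, void *dev_id)
- {
-     struct my_dev *dev = dev_id;
-
-     /* atomic context: just hand the heavy lifting to the workqueue */
-     queue_work(my_wq, &dev->work);
-     return IRQ_HANDLED;
- }
-
- static void my_work_func(struct work_struct *work)
- {
-     struct my_dev *dev = container_of(work, struct my_dev, work);
-
-     /* process context: may sleep, take mutexes, do I/O, ... */
-     printk(KERN_INFO "handling deferred work for dev %p\n", dev);
- }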
4. The shared queue
The kernel actually has a workqueue of its own, called keventd_wq; this workqueue is also known as the "shared queue".
do_basic_setup --> init_workqueues --> create_workqueue("events");
If a driver module only needs very simple workqueue functionality, it can use the shared queue instead of creating its own.
The shared queue comes with its own set of APIs:
- int schedule_work(struct work_struct *work)
- {
- return queue_work(keventd_wq, work);
- }
- int schedule_delayed_work(struct delayed_work *dwork,unsigned long delay)
- {
- timer_stats_timer_set_start_info(&dwork->timer);
- return queue_delayed_work(keventd_wq, dwork, delay);
- }
- void flush_scheduled_work(void)
- {
- flush_workqueue(keventd_wq);
- }
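A minimal sketch of using the shared queue from a module; my_work_func and the other names are made up, and HZ is used for a delay of roughly one second:
- #include <linux/module.h>
- #include <linux/workqueue.h>
- #include <linux/jiffies.h>
-
- static void my_work_func(struct work_struct *work)
- {
-     printk(KERN_INFO "running on the shared events/X worker thread\n");
- }
- static DECLARE_WORK(my_work, my_work_func);
- static DECLARE_DELAYED_WORK(my_dwork, my_work_func);
-
- static int __init my_init(void)
- {
-     schedule_work(&my_work);                /* run as soon as possible */
-     schedule_delayed_work(&my_dwork, HZ);   /* run about one second later */
-     return 0;
- }
-
- static void __exit my_exit(void)
- {
-     cancel_delayed_work(&my_dwork);   /* stop the pending timer, if any */
-     flush_scheduled_work();           /* wait for anything already queued */
- }
-
- module_init(my_init);
- module_exit(my_exit);
- MODULE_LICENSE("GPL");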