两个结构体
/*
 * One deferrable work item.  Embedded in the owning object and queued
 * onto a workqueue; func is called later in process context.
 */
struct work_struct {
atomic_long_t data; /* handler-private data plus pending/flag bits packed in one word */
struct list_head entry; /* link on the per-cpu list of pending work items */
work_func_t func; /* callback invoked when this work item is executed */
};
/*
 * The externally visible workqueue abstraction is an array of per-CPU
 * workqueues (cpu_workqueue_struct), plus flushing and rescuer state.
 * NOTE(review): __alloc_workqueue_key below also touches wq->lockdep_map,
 * which is not part of this excerpt — presumably compiled in under
 * CONFIG_LOCKDEP in the full definition; confirm against the kernel tree.
 */
struct workqueue_struct {
unsigned int flags; /* WQ_* behaviour flags (WQ_UNBOUND, WQ_RESCUER, ...) */
union {
struct cpu_workqueue_struct __percpu *pcpu; /* per-cpu cwqs (bound wq) */
struct cpu_workqueue_struct *single; /* single cwq (unbound wq) */
unsigned long v; /* raw value for pointer tagging */
} cpu_wq; /* per-cpu cwq storage, layout depends on wq type */
struct list_head list; /* node on the global 'workqueues' list */
struct mutex flush_mutex; /* serializes wq flushing */
int work_color; /* current color for newly queued work */
int flush_color; /* color currently being flushed */
atomic_t nr_cwqs_to_flush; /* cwqs still busy for flush_color */
struct wq_flusher *first_flusher; /* the flusher at the head of the queue */
struct list_head flusher_queue; /* flushers waiting their turn */
struct list_head flusher_overflow; /* flushers that overflowed the colors */
mayday_mask_t mayday_mask; /* cpus requesting rescue */
struct worker *rescuer; /* rescuer thread, only if WQ_RESCUER */
int saved_max_active; /* max_active as given by the creator */
const char *name; /* wq name, also used for the rescuer task */
};
工作队列的使用步骤如下:
//0.首先定义两个结构体:
struct work_struct my_work;
struct workqueue_struct *my_work_queue;
//1.填充一个work_struct
INIT_WORK(&my_work, my_work_process);
//2.显式的创建一个工作队列
my_work_queue = create_workqueue("my_work");
//3.将工作提交到工作队列
queue_work(my_work_queue, &my_work);
//4. 调用工作队列处理函数
void my_work_process(struct work_struct *work)
- 填充一个work_struct
/*
 * Initialize a work item at run time with the given handler.
 * _onstack = 0: the work_struct is assumed NOT to live on the caller's
 * stack (use INIT_WORK_ONSTACK for that case).
 * NOTE(review): the excerpt dropped the leading '#' of the preprocessor
 * directive; restored so the definition is valid C.
 */
#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)
即:
/*
 * Common work-item initializer: debug-object setup, reset the packed
 * data word, empty the list link, then install the handler.
 * NOTE(review): the excerpt dropped the leading '#' of the preprocessor
 * directive; restored so the definition is valid C.
 */
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
展开后:
__init_work(&my_work, 0);
my_work.data = (atomic_long_t) WORK_DATA_INIT();
INIT_LIST_HEAD(&my_work.entry);
PREPARE_WORK(&my_work, my_work_process);
- 显式的创建一个工作队列
/*
 * Legacy workqueue constructors, all thin wrappers around
 * alloc_workqueue() with max_active = 1:
 *  - create_workqueue:              bound, usable during memory reclaim
 *  - create_freezable_workqueue:    also frozen across suspend, unbound
 *  - create_singlethread_workqueue: unbound (one worker context)
 * NOTE(review): the excerpt dropped the leading '#' of each preprocessor
 * directive; restored so the definitions are valid C.
 */
#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
create_workqueue 与 create_singlethread_workqueue都是调用了alloc_workqueue
/*
 * Public entry point: forwards to __alloc_workqueue_key with no lockdep
 * class (key/lock_name both NULL in this !LOCKDEP build).
 * NOTE(review): the excerpt dropped the leading '#' of the preprocessor
 * directive; restored so the definition is valid C.
 */
#define alloc_workqueue(name, flags, max_active)		\
	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
struct workqueue_struct *__alloc_workqueue_key(const char *name,unsigned int flags, int max_active, struct lock_class_key *key, const char *lock_name)
{
struct workqueue_struct *wq;
unsigned int cpu;
if (flags & WQ_MEM_RECLAIM)
flags |= WQ_RESCUER;
if (flags & WQ_UNBOUND)
flags |= WQ_HIGHPRI;
max_active = max_active ?: WQ_DFL_ACTIVE;
max_active = wq_clamp_max_active(max_active, flags, name);
wq = kzalloc(sizeof(*wq), GFP_KERNEL);
wq->flags = flags;
wq->saved_max_active = max_active;
mutex_init(&wq->flush_mutex);
atomic_set(&wq->nr_cwqs_to_flush, 0);
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
wq->name = name;
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
INIT_LIST_HEAD(&wq->list);
alloc_cwqs(wq);
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
struct global_cwq *gcwq = get_gcwq(cpu);
cwq->gcwq = gcwq;
cwq->wq = wq;
cwq->flush_color = -1;
cwq->max_active = max_active;
INIT_LIST_HEAD(&cwq->delayed_works);
}
if (flags & WQ_RESCUER) {
struct worker *rescuer;
alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL);
wq->rescuer = rescuer = alloc_worker();
rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
rescuer->task->flags |= PF_THREAD_BOUND;
wake_up_process(rescuer->task);
}
spin_lock(&workqueue_lock);
if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
for_each_cwq_cpu(cpu, wq)
get_cwq(cpu, wq)->max_active = 0;
list_add(&wq->list, &workqueues);
spin_unlock(&workqueue_lock);
return wq;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);