5.3 workqueue (Work Queues)

The main advantage of the work queue is that it executes interrupt bottom-half work in process context.

Because of that, work items may be rescheduled and may sleep, and they execute asynchronously. Work queues also avoid the loss of system responsiveness caused by softirqs and tasklets that run for too long.
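
To make this concrete: a work handler may block, which would be illegal in softirq or tasklet context. A minimal sketch (the names sleepy_work and sleepy_fn are illustrative, not from the kernel source):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/sched.h>

/* Runs in a kworker thread's process context, so blocking is allowed. */
static void sleepy_fn(struct work_struct *work)
{
    msleep(100);        /* would be forbidden in a tasklet or softirq */
    pr_info("work ran in process context, pid %d\n", current->pid);
}

/* DECLARE_WORK() statically initializes a work_struct with its handler. */
static DECLARE_WORK(sleepy_work, sleepy_fn);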

The main data structures that make up the workqueue mechanism:

struct work_struct {
    atomic_long_t data;
    struct list_head entry;
    work_func_t func;
#ifdef CONFIG_LOCKDEP
    struct lockdep_map lockdep_map;
#endif
};

struct worker {
    /* on idle list while idle, on busy hash table while busy */
    union {
        struct list_head    entry;      /* L: while idle */
        struct hlist_node   hentry;     /* L: while busy */
    };

    struct work_struct      *current_work;  /* L: work being processed */
    work_func_t             current_func;   /* L: current_work's fn */
    struct pool_workqueue   *current_pwq;   /* L: current_work's pwq */
    bool                    desc_valid;     /* ->desc is valid */
    struct list_head        scheduled;      /* L: scheduled works */

    /* 64 bytes boundary on 64bit, 32 on 32bit */

    struct task_struct      *task;          /* I: worker task */
    struct worker_pool      *pool;          /* I: the associated pool */
                                            /* L: for rescuers */
    struct list_head        node;           /* A: anchored at pool->workers */
                                            /* A: runs through worker->node */

    unsigned long           last_active;    /* L: last active timestamp */
    unsigned int            flags;          /* X: flags */
    int                     id;             /* I: worker id */

    /*
     * Opaque string set with work_set_desc().  Printed out with task
     * dump for debugging - WARN, BUG, panic or sysrq.
     */
    char                    desc[WORKER_DESC_LEN];

    /* used only by rescuers to point to the target workqueue */
    struct workqueue_struct *rescue_wq;     /* I: the workqueue to rescue */
};

struct worker_pool {
    spinlock_t              lock;           /* the pool lock */
    int                     cpu;            /* I: the associated cpu */
    int                     node;           /* I: the associated node ID */
    int                     id;             /* I: pool ID */
    unsigned int            flags;          /* X: flags */

    struct list_head        worklist;       /* L: list of pending works */
    int                     nr_workers;     /* L: total number of workers */

    /* nr_idle includes the ones off idle_list for rebinding */
    int                     nr_idle;        /* L: currently idle ones */

    struct list_head        idle_list;      /* X: list of idle workers */
    struct timer_list       idle_timer;     /* L: worker idle timeout */
    struct timer_list       mayday_timer;   /* L: SOS timer for workers */

    /* a workers is either on busy_hash or idle_list, or the manager */
    DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
                                            /* L: hash of busy workers */

    /* see manage_workers() for details on the two manager mutexes */
    struct mutex            manager_arb;    /* manager arbitration */
    struct mutex            attach_mutex;   /* attach/detach exclusion */
    struct list_head        workers;        /* A: attached workers */
    struct completion       *detach_completion; /* all workers detached */

    struct ida              worker_ida;     /* worker IDs for task name */

    struct workqueue_attrs  *attrs;         /* I: worker attributes */
    struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
    int                     refcnt;         /* PL: refcnt for unbound pools */

    /*
     * The current concurrency level.  As it's likely to be accessed
     * from other CPUs during try_to_wake_up(), put it in a separate
     * cacheline.
     */
    atomic_t                nr_running ____cacheline_aligned_in_smp;

    /*
     * Destruction of pool is sched-RCU protected to allow dereferences
     * from get_work_pool().
     */
    struct rcu_head         rcu;
} ____cacheline_aligned_in_smp;
struct pool_workqueue {
    struct worker_pool      *pool;          /* I: the associated pool */
    struct workqueue_struct *wq;            /* I: the owning workqueue */
    int                     work_color;     /* L: current color */
    int                     flush_color;    /* L: flushing color */
    int                     refcnt;         /* L: reference count */
    int                     nr_in_flight[WORK_NR_COLORS];
                                            /* L: nr of in_flight works */
    int                     nr_active;      /* L: nr of active works */
    int                     max_active;     /* L: max active works */
    struct list_head        delayed_works;  /* L: delayed works */
    struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
    struct list_head        mayday_node;    /* MD: node on wq->maydays */

    /*
     * Release of unbound pwq is punted to system_wq.  See put_pwq()
     * and pwq_unbound_release_workfn() for details.  pool_workqueue
     * itself is also sched-RCU protected so that the first pwq can be
     * determined without grabbing wq->mutex.
     */
    struct work_struct      unbound_release_work;
    struct rcu_head         rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

The relationship between workqueue, worker_pool, and pool_workqueue is set up at boot by init_workqueues(), which initializes the per-CPU worker pools, creates their initial workers, and allocates the system default workqueues:

static int __init init_workqueues(void)
{
...
    /* initialize CPU pools */
    for_each_possible_cpu(cpu) {
        struct worker_pool *pool;

        i = 0;
        for_each_cpu_worker_pool(pool, cpu) {
            BUG_ON(init_worker_pool(pool));
            pool->cpu = cpu;
            cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
            pool->attrs->nice = std_nice[i++];
            pool->node = cpu_to_node(cpu);

            /* alloc pool ID */
            mutex_lock(&wq_pool_mutex);
            BUG_ON(worker_pool_assign_id(pool));
            mutex_unlock(&wq_pool_mutex);
        }
    }

    /* create the initial worker */
    for_each_online_cpu(cpu) {
        struct worker_pool *pool;

        for_each_cpu_worker_pool(pool, cpu) {
            pool->flags &= ~POOL_DISASSOCIATED;
            BUG_ON(!create_worker(pool));
        }
    }

    /* create default unbound and ordered wq attrs */
    for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
        struct workqueue_attrs *attrs;

        BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
        attrs->nice = std_nice[i];
        unbound_std_wq_attrs[i] = attrs;

        /*
         * An ordered wq should have only one pwq as ordering is
         * guaranteed by max_active which is enforced by pwqs.
         * Turn off NUMA so that dfl_pwq is used for all nodes.
         */
        BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
        attrs->nice = std_nice[i];
        attrs->no_numa = true;
        ordered_wq_attrs[i] = attrs;
    }

    system_wq = alloc_workqueue("events", 0, 0);    /* create a new workqueue */
    system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
    system_long_wq = alloc_workqueue("events_long", 0, 0);
    system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
                                        WQ_UNBOUND_MAX_ACTIVE);
    system_freezable_wq = alloc_workqueue("events_freezable",
                                          WQ_FREEZABLE, 0);
    system_power_efficient_wq = alloc_workqueue("events_power_efficient",
                                          WQ_POWER_EFFICIENT, 0);
    system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
                                          WQ_FREEZABLE | WQ_POWER_EFFICIENT,
                                          0);
    BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
           !system_unbound_wq || !system_freezable_wq ||
           !system_power_efficient_wq ||
           !system_freezable_power_efficient_wq);
    return 0;
}
alloc_workqueue() itself is a macro wrapping __alloc_workqueue_key():

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)        \
({                                                              \
    static struct lock_class_key __key;                         \
    const char *__lock_name;                                    \
                                                                \
    __lock_name = #fmt#args;                                    \
                                                                \
    __alloc_workqueue_key((fmt), (flags), (max_active),         \
                          &__key, __lock_name, ##args);         \
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)        \
    __alloc_workqueue_key((fmt), (flags), (max_active),         \
                          NULL, NULL, ##args)
#endif
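
For drivers that need their own queue rather than system_wq, the usual pattern is alloc_workqueue() plus queue_work(), with destroy_workqueue() on teardown. A minimal sketch; my_wq, my_work, my_handler, my_setup and my_teardown are illustrative names, not from the text:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_handler(struct work_struct *work)
{
    pr_info("handled on a private workqueue\n");
}

static int my_setup(void)
{
    /* create a private unbound workqueue with the default max_active */
    my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
    if (!my_wq)
        return -ENOMEM;

    INIT_WORK(&my_work, my_handler);
    queue_work(my_wq, &my_work);    /* queue onto my_wq instead of system_wq */
    return 0;
}

static void my_teardown(void)
{
    cancel_work_sync(&my_work);
    destroy_workqueue(my_wq);
}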

Using the work queue (with the system default workqueue):

1. Initialize a work item with the kernel-provided INIT_WORK() macro, filling in its func callback.

#define INIT_WORK(_work, _func)                                 \
    __INIT_WORK((_work), (_func), 0)

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)                     \
    do {                                                        \
        static struct lock_class_key __key;                     \
                                                                \
        __init_work((_work), _onstack);                         \
        (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
        lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
        INIT_LIST_HEAD(&(_work)->entry);                        \
        (_work)->func = (_func);                                \
    } while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)                     \
    do {                                                        \
        __init_work((_work), _onstack);                         \
        (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
        INIT_LIST_HEAD(&(_work)->entry);                        \
        (_work)->func = (_func);                                \
    } while (0)
#endif

struct work_struct {
    atomic_long_t data;
    struct list_head entry;
    work_func_t func;
#ifdef CONFIG_LOCKDEP
    struct lockdep_map lockdep_map;
#endif
};

The data field is split into two parts: the low bits hold work-related flags, while the high bits hold either the ID of the worker_pool that last executed this work or a pointer to the last pool_workqueue used.

When data has the WORK_STRUCT_PWQ_BIT flag set, the high bits hold a pointer to the last pool_workqueue and the low 8 bits hold the flags.

When WORK_STRUCT_PWQ_BIT is not set, the high bits hold the ID of the worker_pool that last executed the work and the low 5 bits hold the flags.
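
To make the encoding concrete, here is a sketch of how the kernel decodes work->data, modeled on get_work_pwq() and get_work_pool_id() in kernel/workqueue.c (simplified paraphrase; struct pool_workqueue is private to that file, so code like this only compiles there):

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
    unsigned long data = atomic_long_read(&work->data);

    if (data & WORK_STRUCT_PWQ)     /* high bits hold a pool_workqueue pointer */
        return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
    else
        return NULL;
}

static int get_work_pool_id(struct work_struct *work)
{
    unsigned long data = atomic_long_read(&work->data);

    if (data & WORK_STRUCT_PWQ)
        return ((struct pool_workqueue *)
                (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;

    return data >> WORK_OFFQ_POOL_SHIFT;    /* high bits hold a worker_pool ID */
}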

2. Once the work is initialized, call schedule_work() to queue it onto the system default workqueue. (To queue onto a workqueue you created yourself, call queue_work() instead.)

static inline bool schedule_work(struct work_struct *work)
{
    return queue_work(system_wq, work);
}

schedule_work() queues the work onto system_wq, the system default BOUND workqueue.

3. Cancel a work item with cancel_work_sync().

cancel_work_sync() also waits for the work to finish if it is already executing.
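
Putting the three steps together, a minimal sketch of a module that uses the default workqueue (module and symbol names such as demo_work, demo_func and wq_demo are illustrative):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static struct work_struct demo_work;

static void demo_func(struct work_struct *work)
{
    msleep(10);                     /* process context: sleeping is fine */
    pr_info("demo_work executed\n");
}

static int __init wq_demo_init(void)
{
    INIT_WORK(&demo_work, demo_func);   /* step 1: initialize the work */
    schedule_work(&demo_work);          /* step 2: queue it onto system_wq */
    return 0;
}

static void __exit wq_demo_exit(void)
{
    cancel_work_sync(&demo_work);       /* step 3: cancel, waiting if running */
}

module_init(wq_demo_init);
module_exit(wq_demo_exit);
MODULE_LICENSE("GPL");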

 
