1. CMWQ 的几个基本概念
workqueue 中几个与 work 相关的概念/数据结构非常容易混淆,大概可以这样来理解:
- work :工作。
- workqueue :工作的集合。workqueue 和 work 是一对多的关系。
- worker :工人。在代码中 worker 对应一个 worker_thread 内核线程。
- worker_pool:工人的集合。worker_pool 和 worker 是一对多的关系。
- pwq(pool_workqueue):中间人 / 中介,负责建立起 workqueue 和 worker_pool 之间的关系。workqueue 和 pwq 是一对多的关系(对于 BOUND 类型,每个 CPU 对应一个 pwq),pwq 和 worker_pool 是一对一的关系。
1.2 内核公共部分用到几个关键变量
cpu_worker_pools
cpu_worker_pools是系统静态定义的Per-CPU类型的worker_pool数据结构
/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
cpu_worker_pools);
/*
 * The macro above roughly expands to a per-CPU copy of:
 *     struct worker_pool cpu_worker_pools[NR_STD_WORKER_POOLS];
 */
crash_arm> p cpu_worker_pools -x
PER-CPU DATA TYPE:
struct worker_pool cpu_worker_pools[2];
PER-CPU ADDRESSES:
[0]: ef77ff80
[1]: ef78df80
[2]: ef79bf80
[3]: ef7a9f80
4 个 CPU、每个 CPU 两个 worker_pool(一个高优先级,一个低优先级),相当于共 8 个 worker_pool 结构体;下面的代码相当于对 cpu_worker_pools 做初始化。
//Workqueue flags and constants,工作队列的flag很多,但创建时,基本上分成三种情况来的。
1)非WQ_UNBOUND情况,简称BOUND类型
2)WQ_UNBOUND|__WQ_ORDERED时,当做__WQ_ORDERED类型处理
3)WQ_UNBOUND
/* Workqueue flags and constants (abridged from kernel workqueue.h). */
enum {
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
WQ_POWER_EFFICIENT = 1 << 7, /* per-cpu wq is promoted to WQ_UNBOUND when wq_power_efficient is set (see __alloc_workqueue_key) */
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, /* default max_active when caller passes 0 */
};
/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
/*
 * The macro above roughly expands to the array below.  unbound_pool_hash
 * manages all UNBOUND-type worker_pools; wqattrs_equal() is used to check
 * whether a matching worker_pool already exists in the system (it compares
 * the nice value first, then whether the cpumask bitmaps are identical).
 */
struct hlist_head unbound_pool_hash[1 << (UNBOUND_POOL_HASH_ORDER)] =
{ [0 ... ((1 << (UNBOUND_POOL_HASH_ORDER)) - 1)] = HLIST_HEAD_INIT };

/* I: attributes used when instantiating standard unbound pools on demand */
/*
 * Attributes of UNBOUND workqueues.  The high-priority variant is used when
 * creating a non-BOUND workqueue_struct: its contents are copied into the
 * new worker_pool->attrs, and the attrs are then used to look up an existing
 * worker_pool in unbound_pool_hash.
 */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
/* __WQ_ORDERED type; appears to take effect only combined with WQ_UNBOUND */
/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
/*
 * Boot-time workqueue initialization (abridged excerpt: declarations of
 * cpu, i and the std_nice[] table are elided from this quote).
 * Sets up the static per-cpu worker pools, spawns the initial worker for
 * each online CPU's pools, and creates the system-wide default workqueues.
 */
static int __init init_workqueues(void)
{
/* initialize CPU pools */
for_each_possible_cpu(cpu) {
struct worker_pool *pool;
i = 0;
/* two pools per CPU: std_nice[] gives one normal and one high priority */
for_each_cpu_worker_pool(pool, cpu) {
BUG_ON(init_worker_pool(pool));
pool->cpu = cpu;
cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
pool->attrs->nice = std_nice[i++];
pool->node = cpu_to_node(cpu);
/* alloc pool ID */
mutex_lock(&wq_pool_mutex);
BUG_ON(worker_pool_assign_id(pool));
mutex_unlock(&wq_pool_mutex);
}
}
/* create the initial worker */
for_each_online_cpu(cpu) {
struct worker_pool *pool;
for_each_cpu_worker_pool(pool, cpu) {
pool->flags &= ~POOL_DISASSOCIATED;
BUG_ON(!create_worker(pool));
}
}
/* create the series of default system workqueues available after boot */
system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
system_long_wq = alloc_workqueue("events_long", 0, 0);
system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,WQ_UNBOUND_MAX_ACTIVE);
system_freezable_wq = alloc_workqueue("events_freezable",WQ_FREEZABLE, 0);
system_power_efficient_wq = alloc_workqueue("events_power_efficient", WQ_POWER_EFFICIENT, 0);
system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
WQ_FREEZABLE | WQ_POWER_EFFICIENT,0);
BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||!system_unbound_wq || !system_freezable_wq ||
!system_power_efficient_wq ||!system_freezable_power_efficient_wq);
return 0;
}
/*
 * Allocate and initialize a workqueue_struct (abridged excerpt: the
 * declarations of tbl_size/wq, name formatting and the err_free_wq error
 * path are elided from this quote).
 */
struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
unsigned int flags,
int max_active,
struct lock_class_key *key,
const char *lock_name, ...) {
/* see the comment above the definition of WQ_POWER_EFFICIENT */
if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
flags |= WQ_UNBOUND;
/* allocate wq and format name */
if (flags & WQ_UNBOUND)
tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
/* allocate the new workqueue_struct (plus the per-node pwq table for unbound) */
wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
if (flags & WQ_UNBOUND) {
/*
 * An unbound workqueue gets its own attrs here; the unbound
 * worker_pool created later also gets its own attrs copy.
 */
wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
}
max_active = max_active ?: WQ_DFL_ACTIVE; /* 0 means "use the default" */
max_active = wq_clamp_max_active(max_active, flags, wq->name);
/* init wq */
wq->flags = flags;
wq->saved_max_active = max_active;
mutex_init(&wq->mutex);
atomic_set(&wq->nr_pwqs_to_flush, 0);
INIT_LIST_HEAD(&wq->pwqs);
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
INIT_LIST_HEAD(&wq->maydays);
INIT_LIST_HEAD(&wq->list);
/* create the pwqs and hook the wq up to worker_pools */
if (alloc_and_link_pwqs(wq) < 0)
goto err_free_wq;
}
/*
 * Create the pool_workqueues for @wq and link them to worker_pools.
 * Effective precedence of the three cases below:
 * non-WQ_UNBOUND > __WQ_ORDERED > WQ_UNBOUND.
 * WQ_UNBOUND|__WQ_ORDERED takes the __WQ_ORDERED branch; a "pure"
 * __WQ_ORDERED without WQ_UNBOUND falls into the first (bound) branch.
 */
static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{
bool highpri = wq->flags & WQ_HIGHPRI;
int cpu, ret;
if (!(wq->flags & WQ_UNBOUND)) {
/*
 * Non-WQ_UNBOUND (BOUND) workqueue: allocate one pool_workqueue
 * per CPU and associate it with that CPU's cpu_worker_pools[highpri].
 */
wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
for_each_possible_cpu(cpu) {
struct pool_workqueue *pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
struct worker_pool *cpu_pools = per_cpu(cpu_worker_pools, cpu);
/*
 * Link the freshly allocated pool_workqueue with the statically
 * defined per-cpu worker_pool of the requested priority.
 */
init_pwq(pwq, wq, &cpu_pools[highpri]);
mutex_lock(&wq->mutex);
link_pwq(pwq); /* add pwq to wq->pwqs under wq->mutex */
mutex_unlock(&wq->mutex);
}
return 0;
} else if (wq->flags & __WQ_ORDERED) {
/*
 * Only WQ_UNBOUND|__WQ_ORDERED reaches here; apply_workqueue_attrs()
 * finds/creates the worker_pool and pool_workqueue and links them.
 */
ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
return ret;
} else {
/*
 * WQ_UNBOUND without __WQ_ORDERED; same mechanism, but with the
 * standard unbound attributes.
 */
return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
}
}
/* initialize newly alloced @pwq which is associated with @wq and @pool */
static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
struct worker_pool *pool)
{
pwq->pool = pool; //pool_workqueue->pool 指向worker_pool
pwq->wq = wq; //pool_workqueue->wq 指向workqueue_struct,于是worker_pool和工作队列workqueue_struct就通过pool_workqueue关联上了
pwq->flush_color = -1;
pwq->refcnt = 1;
INIT_LIST_HEAD(&pwq->delayed_works);
INIT_LIST_HEAD(&pwq->pwqs_node);
INIT_LIST_HEAD(&pwq->mayday_node);
INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
}
/* sync @pwq with the current state of its associated wq and link it */
static void link_pwq(struct pool_workqueue *pwq)
{
/*
 * Add pool_workqueue->pwqs_node onto workqueue_struct->pwqs, i.e.
 * register this pwq on its owning workqueue's pwq list (RCU-safe).
 */
struct workqueue_struct *wq = pwq->wq;
list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
}
/* Only WQ_UNBOUND workqueues come through here. */
/*
 * Apply @attrs to @wq (abridged excerpt: the NUMA-affinity per-node pwq
 * branch and error handling are elided from this quote — the unmatched
 * closing brace below belongs to that elided branch).
 */
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
struct workqueue_attrs *new_attrs, *tmp_attrs;
struct pool_workqueue **pwq_tbl, *dfl_pwq;
int node, ret;
pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
/* make a copy of @attrs and sanitize it */
/*
 * Copy the high/low-priority attrs chosen from the initially allocated
 * unbound/ordered attribute sets into new_attrs.
 */
copy_workqueue_attrs(new_attrs, attrs);
cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
copy_workqueue_attrs(tmp_attrs, new_attrs);
get_online_cpus();
mutex_lock(&wq_pool_mutex);
/*
 * alloc_unbound_pwq() creates a new pwq, finds or creates the matching
 * worker_pool for these attrs, and associates the pwq with that pool.
 */
dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
for_each_node(node) {
dfl_pwq->refcnt++;
pwq_tbl[node] = dfl_pwq; /* pwq_tbl only holds pointers to the default pwq */
}
}
mutex_unlock(&wq_pool_mutex);
/* all pwqs have been created successfully, let's install'em */
mutex_lock(&wq->mutex);
copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
/* save the previous pwq and install the new one */
for_each_node(node)
pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
/* add the freshly built dfl_pwq onto the wq->pwqs list */
link_pwq(dfl_pwq);
swap(wq->dfl_pwq, dfl_pwq);
mutex_unlock(&wq->mutex);
/* put the old pwqs */
for_each_node(node)
put_pwq_unlocked(pwq_tbl[node]);
put_pwq_unlocked(dfl_pwq);
put_online_cpus();
ret = 0;
}
/*
 * Allocate a pool_workqueue for an unbound @wq with @attrs and tie it to
 * the matching (possibly newly created) worker_pool.
 */
static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
struct worker_pool *pool;
struct pool_workqueue *pwq;
/* find a worker_pool matching @attrs, creating one if none exists */
pool = get_unbound_pool(attrs);
/* allocate a pool_workqueue on the pool's NUMA node */
pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
/* connect worker_pool and workqueue_struct through this pool_workqueue */
init_pwq(pwq, wq, pool);
return pwq;
}
/*
 * Look up (or create) the unbound worker_pool matching @attrs.
 * Pools are keyed by wqattrs_hash(attrs) in unbound_pool_hash and
 * compared with wqattrs_equal() (nice value, then cpumask).
 */
static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
{
u32 hash = wqattrs_hash(attrs);
struct worker_pool *pool;
int node;
/* do we already have a matching pool? */
hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
if (wqattrs_equal(pool->attrs, attrs)) {
pool->refcnt++; /* reuse the existing pool */
return pool;
}
}
/* nope, create a new one */
/* this is where unbound-type worker_pools get created */
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool || init_worker_pool(pool) < 0)
goto fail;
if (worker_pool_assign_id(pool) < 0)
goto fail;
/* create and start the initial worker */
if (!create_worker(pool))
goto fail;
copy_workqueue_attrs(pool->attrs, attrs);
/* install: add the new pool to the hash so later lookups can reuse it */
hash_add(unbound_pool_hash, &pool->hash_node, hash);
return pool;
}
至此这些工作队列就 通过 pool_workqueue 和worker_pool 关联起来了。那么
(参考:魅族内核团队关于 workqueue 的分析文章。)