Common workqueue operation functions in drivers

The two functions below, destroy_workqueue() and flush_workqueue() from kernel/workqueue.c, are the queue-level operations that drivers most often rely on when shutting down or synchronizing with a workqueue.

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	int node;

	/* drain it before proceeding with destruction */
	drain_workqueue(wq);

	/* sanity checks */
	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq) {
		int i;

		for (i = 0; i < WORK_NR_COLORS; i++) {
			if (WARN_ON(pwq->nr_in_flight[i])) {
				mutex_unlock(&wq->mutex);
				return;
			}
		}

		if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
		    WARN_ON(pwq->nr_active) ||
		    WARN_ON(!list_empty(&pwq->delayed_works))) {
			mutex_unlock(&wq->mutex);
			return;
		}
	}
	mutex_unlock(&wq->mutex);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	mutex_lock(&wq_pool_mutex);
	list_del_init(&wq->list);
	mutex_unlock(&wq_pool_mutex);

	workqueue_sysfs_unregister(wq);

	if (wq->rescuer) {
		kthread_stop(wq->rescuer->task);
		kfree(wq->rescuer);
		wq->rescuer = NULL;
	}

	if (!(wq->flags & WQ_UNBOUND)) {
		/*
		 * The base ref is never dropped on per-cpu pwqs.  Directly
		 * free the pwqs and wq.
		 */
		free_percpu(wq->cpu_pwqs);
		kfree(wq);
	} else {
		/*
		 * We're the sole accessor of @wq at this point.  Directly
		 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
		 * @wq will be freed when the last pwq is released.
		 */
		for_each_node(node) {
			pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
			RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
			put_pwq_unlocked(pwq);
		}

		/*
		 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
		 * put.  Don't access it afterwards.
		 */
		pwq = wq->dfl_pwq;
		wq->dfl_pwq = NULL;
		put_pwq_unlocked(pwq);
	}
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
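
From a driver's perspective, destroy_workqueue() is simply the teardown end of the workqueue lifecycle: allocate a queue, queue work onto it, destroy it on unload. The following is a minimal sketch of that lifecycle as a kernel module; the names my_wq, my_work and my_work_fn are placeholders chosen for illustration, and the WQ_UNBOUND/max_active arguments are just one example configuration.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* placeholder name */
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	pr_info("my_work_fn: running in process context\n");
}

static int __init my_init(void)
{
	/* example flags: an unbound queue with at most one in-flight item */
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 1);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	/* drains pending work, then frees the queue (see above) */
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Note that destroy_workqueue() drains the queue itself, but the driver must still guarantee that nothing (for example an interrupt handler) can queue new work once teardown has started.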



/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * This function sleeps until all work items which were queued on entry
 * have finished execution, but it is not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	wq->first_flusher = NULL;

	WARN_ON_ONCE(!list_empty(&this_flusher.list));
	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
			     wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			WARN_ON_ONCE(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm pwqs.
		 */
		WARN_ON_ONCE(wq->flush_color == wq->work_color);
		WARN_ON_ONCE(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
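
In driver code the usual reason to call flush_workqueue() is to make sure all previously queued work has finished before freeing the state it touches. A minimal sketch, assuming the my_wq queue from the earlier example plus a hypothetical my_buf buffer used by the work items:

#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* as allocated in the earlier sketch */
static void *my_buf;			/* hypothetical data used by the work items */

static void my_shutdown(void)
{
	/*
	 * Wait for every work item that was queued on my_wq before this
	 * call; items queued afterwards are not waited for, matching the
	 * "not livelocked by new incoming ones" comment above.
	 */
	flush_workqueue(my_wq);

	/* only now is it safe to free data the work items were using */
	kfree(my_buf);
	my_buf = NULL;
}

When only a single work item needs synchronizing, flush_work() or cancel_work_sync() on that item is usually a lighter-weight alternative to flushing the whole queue.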

