Task Execution in Linux Kernel Workqueues

The previous articles covered workqueues, the work items placed on them, and the kernel threads that workqueues create. This article looks at how each of those threads processes its work items, i.e. at the function worker_thread(). A typical kworker call stack looks like this:
[] process_one_work
[] worker_thread
[] kthread
The first function the task runs is kthread() because kthread_create_on_node(), which creates the worker task, installs kthread() as the task's entry point; the thread function passed to it, worker_thread(), is then called from inside kthread().
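To see that mechanism in isolation, here is a minimal, self-contained sketch of a kthread created from a module (demo_task, demo_fn and the module boilerplate are invented for illustration, not taken from workqueue.c). The task starts executing in kthread(), which then calls the thread function it was handed, just as it calls worker_thread() for a kworker:

/*
 * Hypothetical example module: a plain kthread.  Its call stack, like a
 * kworker's, begins at kthread(), which then calls demo_fn().
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

/* Thread function: invoked from kthread() once the task runs, just as
 * kthread() invokes worker_thread() for a workqueue worker. */
static int demo_fn(void *data)
{
        while (!kthread_should_stop()) {
                pr_info("demo thread: one iteration\n");
                msleep(1000);
        }
        return 0;
}

static int __init demo_init(void)
{
        /* kthread_run() = kthread_create() + wake_up_process() */
        demo_task = kthread_run(demo_fn, NULL, "demo-thread");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);
        return 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(demo_task);        /* waits for demo_fn() to return */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

create_worker() in kernel/workqueue.c does essentially the same thing, passing worker_thread and the struct worker pointer to kthread_create_on_node() and waking the task up later. The kworker entry point itself looks like this: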

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The worker thread function.  All workers belong to a worker_pool -
 * either a per-cpu one or dynamic unbound one.  These workers process all
 * work items regardless of their specific target workqueue.  The only
 * exception is work items which belong to workqueues with a rescuer which
 * will be explained in rescuer_thread().
 */
static int worker_thread(void *__worker)
{
        struct worker *worker = __worker;
        struct worker_pool *pool = worker->pool;

        /* tell the scheduler that this is a workqueue worker */
        worker->task->flags |= PF_WQ_WORKER;
woke_up:
        spin_lock_irq(&pool->lock);

        /* am I supposed to die? */
        if (unlikely(worker->flags & WORKER_DIE)) {
                spin_unlock_irq(&pool->lock);
                WARN_ON_ONCE(!list_empty(&worker->entry));
                worker->task->flags &= ~PF_WQ_WORKER;
                return 0;
        }

        worker_leave_idle(worker);
recheck:
        /* no more worker necessary? */
        if (!need_more_worker(pool))
                goto sleep;

        /* do we need to manage? */
        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
                goto recheck;

        /*
         * ->scheduled list can only be filled while a worker is
         * preparing to process a work or actually processing it.
         * Make sure nobody diddled with it while I was sleeping.
         */
        WARN_ON_ONCE(!list_empty(&worker->scheduled));

        /*
         * Finish PREP stage.  We're guaranteed to have at least one idle
         * worker or that someone else has already assumed the manager
         * role.  This is where @worker starts participating in concurrency
         * management if applicable and concurrency management is restored
         * after being rebound.  See rebind_workers() for details.
         */
        worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);

        do {
                struct work_struct *work =
                        list_first_entry(&pool->worklist,
                                         struct work_struct, entry);

                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
                        /* optimization path, not strictly necessary */
                        process_one_work(worker, work);
                        if (unlikely(!list_empty(&worker->scheduled)))
                                process_scheduled_works(worker);
                } else {
                        move_linked_works(work, &worker->scheduled, NULL);
                        process_scheduled_works(worker);
                }
        } while (keep_working(pool));

        worker_set_flags(worker, WORKER_PREP, false);
sleep:
        if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
                goto recheck;

        /*
         * pool->lock is held and there's no work to process and no need to
         * manage, sleep.  Workers are woken up only while holding
         * pool->lock or from local cpu, so setting the current state
         * before releasing pool->lock is enough to prevent losing any
         * event.
         */
        worker_enter_idle(worker);
        __set_current_state(TASK_INTERRUPTIBLE);
        spin_unlock_irq(&pool->lock);
        schedule();
        goto woke_up;
}
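For context on how entries end up on pool->worklist in the first place, here is a minimal sketch of a driver queueing a work item (demo_work and demo_work_fn are hypothetical names). schedule_work() queues the item on the system workqueue; a kworker running the loop above pulls it off the worklist and process_one_work() invokes the handler through worker->current_func(work):

/*
 * Hypothetical example module: queueing one work item onto the system
 * workqueue so that a kworker running worker_thread() picks it up.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/sched.h>

static struct work_struct demo_work;

/* Becomes work->func; process_one_work() calls it via
 * worker->current_func(work). */
static void demo_work_fn(struct work_struct *work)
{
        pr_info("demo work executed by %s\n", current->comm);
}

static int __init demo_init(void)
{
        INIT_WORK(&demo_work, demo_work_fn);
        schedule_work(&demo_work);      /* item ends up on a pool's worklist */
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_work_sync(&demo_work);   /* make sure the handler is not running */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

process_one_work(), called from the do/while loop in worker_thread(), is shown next: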

 

/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logics necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
        struct pool_workqueue *pwq = get_work_pwq(work);
        struct worker_pool *pool = worker->pool;
        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
        int work_color;
        struct worker *collision;
#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the struct work_struct from
         * inside the function that is called from it, this we need to
         * take into account for lockdep too.  To avoid bogus "held
         * lock freed" warnings as well as problems when looking into
         * work->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map;

        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
        /*
         * Ensure we're on the correct CPU.  DISASSOCIATED test is
         * necessary to avoid spurious warnings from rescuers servicing the
         * unbound or a disassociated pool.
         */
        WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
                     !(pool->flags & POOL_DISASSOCIATED) &&
                     raw_smp_processor_id() != pool->cpu);

        /*
         * A single work shouldn't be executed concurrently by
         * multiple workers on a single cpu.  Check whether anyone is
         * already processing the work.  If so, defer the work to the
         * currently executing one.
         */
        collision = find_worker_executing_work(pool, work);
        if (unlikely(collision)) {
                move_linked_works(work, &collision->scheduled, NULL);
                return;
        }

        /* claim and dequeue */
        debug_work_deactivate(work);
        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
        worker->current_work = work;
        worker->current_func = work->func;
        worker->current_pwq = pwq;
        work_color = get_work_color(work);

        list_del_init(&work->entry);

        /*
         * CPU intensive works don't participate in concurrency
         * management.  They're the scheduler's responsibility.
         */
        if (unlikely(cpu_intensive))
                worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

        /*
         * Unbound pool isn't concurrency managed and work items should be
         * executed ASAP.  Wake up another worker if necessary.
         */
        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
                wake_up_worker(pool);

        /*
         * Record the last pool and clear PENDING which should be the last
         * update to @work.  Also, do this inside @pool->lock so that
         * PENDING and queued state changes happen together while IRQ is
         * disabled.
         */
        set_work_pool_and_clear_pending(work, pool->id);

        spin_unlock_irq(&pool->lock);

        lock_map_acquire_read(&pwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
        worker->current_func(work);
        /*
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
         */
        trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
        lock_map_release(&pwq->wq->lockdep_map);

        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
                       "     last function: %pf\n",
                       current->comm, preempt_count(), task_pid_nr(current),
                       worker->current_func);
                debug_show_held_locks(current);
                dump_stack();
        }

        /*
         * The following prevents a kworker from hogging CPU on !PREEMPT
         * kernels, where a requeueing work item waiting for something to
         * happen could deadlock with stop_machine as such work item could
         * indefinitely requeue itself while all other CPUs are trapped in
         * stop_machine.
         */
        cond_resched();

        spin_lock_irq(&pool->lock);

        /* clear cpu intensive status */
        if (unlikely(cpu_intensive))
                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

        /* we're done with it, release */
        hash_del(&worker->hentry);
        worker->current_work = NULL;
        worker->current_func = NULL;
        worker->current_pwq = NULL;
        worker->desc_valid = false;
        pwq_dec_nr_in_flight(pwq, work_color);
}
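The cpu_intensive handling above is driven entirely by the WQ_CPU_INTENSIVE flag of the owning workqueue. Below is a minimal sketch of how a workqueue user would opt in (crunch_wq, crunch_fn and the module wrapper are hypothetical):

/*
 * Hypothetical example module: a dedicated WQ_CPU_INTENSIVE workqueue for
 * long-running, CPU-bound work.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *crunch_wq;
static struct work_struct crunch_work;

static void crunch_fn(struct work_struct *work)
{
        /* long-running CPU-bound processing would go here */
}

static int __init crunch_init(void)
{
        /*
         * Because of WQ_CPU_INTENSIVE, process_one_work() sets
         * WORKER_CPU_INTENSIVE on the worker running crunch_fn(), taking
         * it out of the pool's concurrency accounting.
         */
        crunch_wq = alloc_workqueue("crunch_wq", WQ_CPU_INTENSIVE, 0);
        if (!crunch_wq)
                return -ENOMEM;

        INIT_WORK(&crunch_work, crunch_fn);
        queue_work(crunch_wq, &crunch_work);
        return 0;
}

static void __exit crunch_exit(void)
{
        destroy_workqueue(crunch_wq);   /* drains pending work first */
}

module_init(crunch_init);
module_exit(crunch_exit);
MODULE_LICENSE("GPL");

With WQ_CPU_INTENSIVE set, the executing worker is excluded from the pool's concurrency accounting for the duration of the handler, so a long-running CPU-bound work item does not prevent other per-cpu work items from being started.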

 

 

 