__elv_next_request and blk_peek_request

Both functions below are from the legacy (single-queue) block layer: __elv_next_request() fetches the next request from the dispatch list (q->queue_head), refilling it from the I/O scheduler when the list is empty, and blk_peek_request() returns the request a driver should handle next, after marking it started for the I/O scheduler and running the optional prep callback.

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	while (1) {
		/* Fast path: the dispatch list already holds a request. */
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is in flight and the drive cannot queue
		 * further flush requests, so hold the queue until the flush
		 * finishes. Even if we didn't hold it, the driver couldn't
		 * dispatch the next requests and would have to requeue them.
		 * Holding can also improve throughput. For example, given
		 * requests flush1, write1, flush2: flush1 is dispatched and
		 * the queue is held, so write1 is not issued. After flush1
		 * finishes, flush2 is dispatched; since the disk cache is
		 * already clean, flush2 completes very quickly, so flush2
		 * effectively gets folded into flush1.
		 * Because the queue is held, a flag is set to indicate that
		 * the queue should be restarted later. See flush_end_io()
		 * for details.
		 */
		if (fq->flush_pending_idx != fq->flush_running_idx &&
		    !queue_flush_queueable(q)) {
			fq->flush_queue_delayed = 1;
			return NULL;
		}

		/* Ask the I/O scheduler to move requests onto queue_head. */
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
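
When the dispatch list is empty, the scheduler's elevator_dispatch_fn is asked to refill it. For comparison, the noop scheduler's hook from block/noop-iosched.c of the same kernel generation looks roughly like this: it pops the head of its internal FIFO and places the request on q->queue_head via elv_dispatch_sort().

static int noop_dispatch(struct request_queue *q, int force)
{
	struct noop_data *nd = q->elevator->elevator_data;
	struct request *rq;

	/* Pop the oldest request from noop's FIFO, if any. */
	rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		/* Insert rq into q->queue_head, sorted by sector. */
		elv_dispatch_sort(q, rq);
		return 1;	/* dispatched one request */
	}
	return 0;		/* nothing to dispatch; __elv_next_request() returns NULL */
}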
static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/* Notify the scheduler that rq has been handed to the driver. */
	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/* Notify the scheduler that the driver has requeued rq. */
	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}
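
Both wrappers are no-ops unless the active scheduler registered the optional hooks in its elevator_type. A minimal sketch of the wiring (the demo_* names are hypothetical; the pattern follows the in-tree schedulers such as noop and CFQ, and demo_dispatch would be a dispatch hook like noop_dispatch() above):

static void demo_activate_req(struct request_queue *q, struct request *rq)
{
	/* rq was just handed to the driver: e.g. start in-flight accounting */
}

static void demo_deactivate_req(struct request_queue *q, struct request *rq)
{
	/* the driver requeued rq: undo whatever the activate hook did */
}

static struct elevator_type elevator_demo = {
	.ops = {
		.elevator_dispatch_fn		= demo_dispatch,
		.elevator_activate_req_fn	= demo_activate_req,
		.elevator_deactivate_req_fn	= demo_deactivate_req,
		/* merge/add_req/init hooks omitted for brevity */
	},
	.elevator_name	= "demo",
	.elevator_owner	= THIS_MODULE,
};

A real scheduler would also fill in .elevator_init_fn, .elevator_add_req_fn, and so on, and register itself with elv_register(&elevator_demo).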


#####################################################################

struct request *blk_peek_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		rq = blk_pm_peek_request(q, rq);
		if (!rq)
			break;

		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (rq->cmd_flags & REQ_SORTED)
				elv_activate_rq(q, rq);

			/*
			 * Just mark it as started even if we don't start
			 * it: a request that has been delayed should
			 * not be passed by new incoming requests.
			 */
			rq->cmd_flags |= REQ_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && blk_rq_bytes(rq)) {
			/*
			 * Make sure space for the drain appears.  We
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle.
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * The request may have been (partially) prepped.
			 * We need to keep this request at the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && blk_rq_bytes(rq) &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * Remove the space for the drain we added
				 * so that we don't add it again.
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;

			rq->cmd_flags |= REQ_QUIET;
			/*
			 * Mark this request as started so we don't trigger
			 * any debug logic in the end I/O path.
			 */
			blk_start_request(rq);
			__blk_end_request_all(rq, err);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
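
In the legacy model, blk_peek_request() is consumed from a driver's request_fn, most often through blk_fetch_request(), which is simply blk_peek_request() followed by blk_start_request(). A hedged sketch of the call pattern (all demo_* names and struct demo_dev are hypothetical):

/* Hypothetical prep hook, installed with blk_queue_prep_rq(). */
static int demo_prep_fn(struct request_queue *q, struct request *rq)
{
	struct demo_dev *dev = q->queuedata;

	if (!demo_resources_available(dev))
		return BLKPREP_DEFER;		/* retry later; rq stays at the head */

	rq->special = demo_build_cmd(dev, rq);	/* per-request driver data */
	if (!rq->special)
		return BLKPREP_KILL;		/* fail the request with -EIO */

	rq->cmd_flags |= REQ_DONTPREP;		/* skip prep after a requeue */
	return BLKPREP_OK;
}

/* Called with the queue lock held. */
static void demo_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* blk_fetch_request() = blk_peek_request() + blk_start_request() */
	while ((rq = blk_fetch_request(q)) != NULL) {
		int err = demo_transfer(rq);	/* hypothetical synchronous I/O */
		__blk_end_request_all(rq, err);
	}
}

The prep hook is installed once at init time with blk_queue_prep_rq(q, demo_prep_fn). A BLKPREP_DEFER return leaves the request at the head of the queue, and as the comments in blk_peek_request() note, REQ_STARTED keeps later requests from overtaking it. The dma_drain_size branch, for completeness, applies to queues configured with blk_queue_dma_drain(), e.g. by libata's ATAPI handling.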