static void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
        const bool sync = !!(bio->bi_rw & REQ_SYNC);
        struct blk_plug *plug;
        int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
        struct request *req;
        unsigned int request_count = 0;

        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
         * ISA dma in theory)
         */
        /* Set up a bounce buffer in case the bio's pages are unsuitable for this I/O. */
        blk_queue_bounce(q, &bio);

        /* Data integrity check. */
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_endio(bio, -EIO);
                return;
        }

        if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
                spin_lock_irq(q->queue_lock);
                where = ELEVATOR_INSERT_FLUSH;
                goto get_rq;
        }

        /*
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
        if (!blk_queue_nomerges(q) &&                           /* merging is not disabled on this queue */
            blk_attempt_plug_merge(q, bio, &request_count))     /* merge the bio into the current task's plug list */
                return;

        spin_lock_irq(q->queue_lock);

        /* elv_merge() is the core step: find a request this bio can be back- or front-merged into. */
        el_ret = elv_merge(q, &req, bio);
        if (el_ret == ELEVATOR_BACK_MERGE) {                    /* back merge */
                if (bio_attempt_back_merge(q, req, bio)) {
                        elv_bio_merged(q, req, bio);
                        if (!attempt_back_merge(q, req))
                                elv_merged_request(q, req, el_ret);
                        goto out_unlock;
                }
        } else if (el_ret == ELEVATOR_FRONT_MERGE) {            /* front merge */
                if (bio_attempt_front_merge(q, req, bio)) {
                        elv_bio_merged(q, req, bio);
                        if (!attempt_front_merge(q, req))
                                elv_merged_request(q, req, el_ret);
                        goto out_unlock;
                }
        }
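        /*
         * Note added for this walkthrough (not in the kernel source): a back
         * merge appends the bio at the tail of an existing request, a front
         * merge prepends it at the head. In terms of sectors, roughly:
         *
         *   back merge:  blk_rq_pos(req) + blk_rq_sectors(req) == bio->bi_sector
         *   front merge: bio->bi_sector + bio_sectors(bio)     == blk_rq_pos(req)
         *
         * elv_merge() asks the I/O scheduler which case (if either) applies and
         * returns ELEVATOR_BACK_MERGE, ELEVATOR_FRONT_MERGE or ELEVATOR_NO_MERGE.
         */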
        /* No existing request could be merged with; allocate a new one. */
get_rq:
        /*
         * This sync check and mask will be re-done in init_request_from_bio(),
         * but we need to set it earlier to expose the sync flag to the
         * rq allocator and io schedulers.
         */
        rw_flags = bio_data_dir(bio);
        if (sync)
                rw_flags |= REQ_SYNC;

        /*
         * Grab a free request. This might sleep but cannot fail.
         * Returns with the queue unlocked.
         */
        req = get_request(q, rw_flags, bio, GFP_NOIO);  /* allocate an empty request */
        if (IS_ERR(req)) {
                bio_endio(bio, PTR_ERR(req));           /* @q is dead */
                goto out_unlock;
        }

        /*
         * After dropping the lock and possibly sleeping here, our request
         * may now be mergeable after it had proven unmergeable (above).
         * We don't worry about that case for efficiency. It won't happen
         * often, and the elevators are able to handle it.
         */
        init_request_from_bio(req, bio);                /* initialize the request from the bio */

        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
                req->cpu = raw_smp_processor_id();

        plug = current->plug;
        if (plug) {
                /*
                 * If this is the first request added after a plug, fire
                 * off a plug trace.
                 */
                if (!request_count)
                        trace_block_plug(q);
                else {
                        if (request_count >= BLK_MAX_REQUEST_COUNT) {
                                /* The plug list has hit its limit; flush (unplug) it. */
                                blk_flush_plug_list(plug, false);
                                trace_block_plug(q);
                        }
                }
                /* Queue the request on the current task's plug list. */
                list_add_tail(&req->queuelist, &plug->list);
                blk_account_io_start(req, true);
        } else {
                spin_lock_irq(q->queue_lock);
                add_acct_request(q, req, where);
                __blk_run_queue(q);
out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
}
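The plug branch above only matters when the submitter has opened a plug, which is what sets current->plug. As a rough illustration (a sketch written for this walkthrough, not code from the kernel tree; the helper submit_batch() and its parameters are hypothetical, and the older submit_bio(rw, bio) interface matching the listing above is assumed), a caller batches bios like this, which is why consecutive requests can pile up on plug->list:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical helper, for illustration only. */
static void submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);                  /* sets current->plug for this task */
        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);      /* each bio reaches blk_queue_bio() and lands on the plug list */
        blk_finish_plug(&plug);                 /* unplug: flush the accumulated requests to the driver */
}

blk_finish_plug() ends up in blk_flush_plug_list(), the same helper blk_queue_bio() calls itself once the plug list grows past BLK_MAX_REQUEST_COUNT.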