int mpage_readpage(struct page *page, get_block_t get_block)
mpage_bio_submit-> submit_bio->generic_make_request->make_request_fn==blk_queue_bio
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
blk_queue_bio 首先查看 bio 是否能与最近一次处理的 request 合并,条件是
if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
return ELEVATOR_FRONT_MERGE;
return ELEVATOR_NO_MERGE;
如果不能则寻找其他的request是否能合并
/*
* See if our hash lookup can find a potential backmerge.
*/
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
}
如果都不能合并,则
req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
init_request_from_bio(req, bio);//申请新的请求
add_acct_request(q, req, where);//把新的请求添加进队列
__blk_run_queue(q);//run 队列
__blk_run_queue_uncond(q);
q->request_fn(q);
q 的 request_fn 由具体设备驱动提供:在初始化 generic disk、创建 queue 时,通过 blk_init_queue 传入。
q = blk_init_queue(request_fn, NULL);
/*
 * blk_init_queue - allocate and initialize a request queue whose
 * request_fn is @rfn. Thin wrapper around blk_init_queue_node()
 * with no NUMA node preference.
 */
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	struct request_queue *q;

	q = blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
	return q;
}
EXPORT_SYMBOL(blk_init_queue);
blk_init_queue_node-> q->request_fn = rfn;
对于scsi来说
q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
/*
 * Set up the request_queue for a SCSI device:
 *  - request_fn       = scsi_request_fn   (dispatches queued requests)
 *  - prep_rq_fn       = scsi_prep_fn      (converts a request into a scsi_cmnd)
 *  - softirq_done_fn  = scsi_softirq_done (completion handling)
 */
1590 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1591 {
1592 struct request_queue *q;
1593 
1594 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1595 if (!q)
1596 return NULL;
1597 
1598 blk_queue_prep_rq(q, scsi_prep_fn); /* request -> scsi_cmnd conversion hook */
1600 blk_queue_softirq_done(q, scsi_softirq_done); /* completion callback */
1601 return q;
1602 }
/*
 * Install the prep callback on @q: invoked when a request is fetched
 * via blk_peek_request(), before it is handed to request_fn.
 */
143 void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
144 {
145 q->prep_rq_fn = pfn;// e.g. scsi_prep_fn: this function converts the request into a scsi_cmnd
146 }
/*
 * Install the softirq completion callback on @q: invoked when a
 * request completes.
 */
173 void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
174 {
175 q->softirq_done_fn = fn; /* e.g. scsi_softirq_done: presumably called on request completion -- confirm */
176 }
从request到scsi_cmd,什么时候呢,就是在request_fn的时候调用struct request *blk_peek_request去获得一个request的时候,会把这个request做转换
/*
 * Get (or lazily allocate) the scsi_cmnd backing @req.
 *
 * The command is cached in req->special: on first call a device
 * reference is taken and a command is allocated; later calls reuse
 * the cached command. Returns NULL if the device reference or the
 * command allocation fails.
 */
static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
struct request *req)
{
struct scsi_cmnd *cmd;
if (!req->special) {
/* Bail if we can't get a reference to the device */
if (!get_device(&sdev->sdev_gendev))
return NULL;
cmd = scsi_get_command(sdev, GFP_ATOMIC);
if (unlikely(!cmd)) {
/* allocation failed: drop the reference taken above */
put_device(&sdev->sdev_gendev);
return NULL;
}
req->special = cmd; /* cache so a later call reuses this command */
} else {
/* request was prepared before; reuse the cached command */
cmd = req->special;
}
/* pull a tag out of the request if we have one */
cmd->tag = req->tag;
cmd->request = req;
cmd->cmnd = req->cmd;
cmd->prot_op = SCSI_PROT_NORMAL;
return cmd;
}
所以来到 scsi_prep_fn 函数,执行其中最重要的过程:drv->init_command。这个 drv 是什么?来自 gendisk 的 private_data 字段。还记得 sd_probe 吗?我们在其中把它赋值给了对应 scsi_disk 结构的 driver 字段,就是前面那个 sd_template 常量。如果忘了,可以从 scsi 磁盘驱动的初始化函数 init_sd 开始从头梳理。
整个块设备驱动层的处理就结束了,我还是在网上找到一个图,正好可以总结上面的过程:
从前面分析可以看出,请求队列queue是top level与middle level之间的纽带。上层请求会在请求队列中维护,处理函数的方法由上下各层提供。在请求队列的处理过程中,将普通的块设备请求转换成标准的scsi命令,然后再通过middle level与low level之间的接口将请求递交给scsi host。
sd_init_command 完成后,request_fn 就会调用 rtn = scsi_dispatch_cmd(cmd);,这个 scsi_dispatch_cmd 最终会调用 host 的 queuecommand 接口(host->hostt->queuecommand),把 scsi_cmnd 递交给底层 HBA 驱动。
对于mmc的block来说,
mq->queue = blk_init_queue(mmc_request_fn, lock);
/*
 * request_fn for the MMC block queue (installed via blk_init_queue()).
 * Unlike SCSI, it does not dispatch requests itself: it either notifies
 * the waiter recorded in context_info or wakes mmc_queue_thread, which
 * does the actual fetching and issuing.
 */
static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
unsigned long flags;
struct mmc_context_info *cntx;
/* queuedata is gone (queue presumably being torn down): fail all pending requests */
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
req->cmd_flags |= REQ_QUIET;
__blk_end_request_all(req, -EIO);
}
return;
}
cntx = &mq->card->host->context_info;
if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
/*
 * New MMC request arrived when MMC thread may be
 * blocked on the previous request to be complete
 * with no current request fetched
 */
spin_lock_irqsave(&cntx->lock, flags);
if (cntx->is_waiting_last_req) {
cntx->is_new_req = true;
wake_up_interruptible(&cntx->wait);
}
spin_unlock_irqrestore(&cntx->lock, flags);
} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
/* no request in flight at all: wake the queue thread to fetch it */
wake_up_process(mq->thread);
}
static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
参考:http://blog.csdn.net/hs794502825/article/details/8719034
http://blog.csdn.net/yunsongice/article/details/6171308
http://blog.csdn.net/yunsongice/article/details/6171299
http://blog.csdn.net/yangjianghua/article/details/12346309
current->flags |= PF_MEMALLOC;
down(&mq->thread_sem);
do {
struct request *req = NULL;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
req = blk_fetch_request(q);
mq->mqrq_cur->req = req;
spin_unlock_irq(q->queue_lock);
if (req || mq->mqrq_prev->req) {
set_current_state(TASK_RUNNING);
mq->issue_fn(mq, req);
cond_resched();
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
conti