Linux 文件系统学习:IO 的 plug(蓄洪)过程与 blk_init_queue — 神农笔记

/*
 * blk_init_queue - set up a request-based queue with @rfn as its strategy
 * routine.  Thin wrapper that delegates to the NUMA-aware variant with no
 * node preference.
 */
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	struct request_queue *q;

	q = blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
	return q;
}

/*
 * blk_init_queue_node - allocate and initialize a request queue on a
 * specific NUMA node.  Returns the ready queue, or NULL on failure.
 */
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *raw_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	struct request_queue *ready_q;

	if (!raw_q)
		return NULL;

	ready_q = blk_init_allocated_queue(raw_q, rfn, lock);
	if (!ready_q) {
		/* Initialization failed: tear down the half-built queue. */
		blk_cleanup_queue(raw_q);
		return NULL;
	}

	return ready_q;
}

/*
 * blk_init_allocated_queue - finish setting up an already-allocated queue
 * for the request-based (single-queue) I/O path.
 *
 * @q:    queue previously obtained from blk_alloc_queue_node()
 * @rfn:  driver strategy routine invoked to process queued requests
 * @lock: optional spinlock to use as the queue lock
 *
 * Returns @q on success, NULL on failure (the caller cleans up @q).
 */
struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
			 spinlock_t *lock)
{
	if (!q)
		return NULL;

	/* Flush machinery must exist before any request can be handled. */
	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
	if (!q->fq)
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto free_flush;

	/* Install the strategy callbacks for the request-based path. */
	q->prep_rq_fn = NULL;
	q->unprep_rq_fn = NULL;
	q->request_fn = rfn;
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	if (lock)
		q->queue_lock = lock;

	/* Route submitted bios through blk_queue_bio (plugging + elevator). */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* The elevator switch is protected by sysfs_lock. */
	mutex_lock(&q->sysfs_lock);
	if (elevator_init(q, NULL) != 0) {
		mutex_unlock(&q->sysfs_lock);
		goto free_flush;
	}
	mutex_unlock(&q->sysfs_lock);

	return q;

free_flush:
	blk_free_flush_queue(q->fq);
	return NULL;
}

这里就可以很明显地看出两种模式的区分了。至于 blk_queue_make_request 内部,当然是一个类似队列 loop 的实现,这里就不深究了。

如果驱动自己调用 blk_queue_make_request 注册自定义的 make_request 函数,就没有任何调度和蓄洪(plug)可言了,bio 会直接下发到驱动。

如果经由 blk_queue_bio 中转一下,就会有蓄洪(plug)和电梯调度。下面我们看一下 blk_queue_bio。

/*
 * blk_queue_bio - single-queue make_request handler.
 *
 * Entry point for bios submitted to a queue set up via blk_init_queue().
 * It first tries to merge the bio into an existing request (plug list,
 * then the elevator); otherwise it allocates a fresh request.  When the
 * current task has an active plug, new requests are parked on the plug
 * list ("plugging") and drained once BLK_MAX_REQUEST_COUNT is reached.
 */
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	struct request *req;
	unsigned int request_count = 0;

	/* Bounce-buffer pages the device cannot address directly. */
	blk_queue_bounce(q, &bio);

	/* Split the bio if it exceeds the queue's limits. */
	blk_queue_split(q, &bio, q->bio_split);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	/* FLUSH/FUA bios skip all merging and go straight to a new request. */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/* If merging is allowed, first try to merge into a plugged request. */
	if (!blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q); /* count requests already plugged for this queue */

	spin_lock_irq(q->queue_lock);

	/* Ask the elevator whether the bio can merge with a queued request,
	 * either at the back or the front of an existing request. */
	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {
			elv_bio_merged(q, req, bio);
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	}

get_rq:
	/* No merge possible: allocate a new request for this bio. */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;

	/* May sleep (GFP_NOIO) waiting for a free request.
	 * NOTE(review): get_request() presumably drops queue_lock on success,
	 * which is why the plug path below runs unlocked and the no-plug path
	 * re-takes the lock — confirm against the kernel source. */
	req = get_request(q, rw_flags, bio, GFP_NOIO);
	if (IS_ERR(req)) {
		bio->bi_error = PTR_ERR(req);
		bio_endio(bio);
		goto out_unlock;
	}

	init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		if (!request_count)
			trace_block_plug(q);
		else {
			/* Too many requests plugged: drain ("unplug") them to
			 * the elevator via blk_flush_plug_list() first. */
			if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		/* Park the new request at the tail of the plug list. */
		list_add_tail(&req->queuelist, &plug->list);
		blk_account_io_start(req, true);
	} else {
		/* No plug active: insert into the elevator and kick the queue
		 * immediately.  out_unlock lives inside this branch but is
		 * also reached by goto from the merge/error paths above. */
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}

	return BLK_QC_T_NONE;
}

这里其实还不是很清楚 蓄洪和泄洪,只是看到了一些框架。

1、这个request_count 个数的统计

/*
 * blk_plug_queued_count - count the requests parked on the current task's
 * plug list that belong to queue @q.
 *
 * NOTE(review): this is an abridged excerpt — the "..." lines mark elided
 * code (declarations and early-exit checks), so it will not compile as
 * shown; see the full function in the kernel's block/blk-core.c.
 */
unsigned int blk_plug_queued_count(struct request_queue *q)
{
	...
	plug = current->plug;
	...
	/* mq and legacy queues keep their plugged requests on separate lists. */
	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	/* Walk the plug list, counting only entries destined for @q. */
	list_for_each_entry(rq, plug_list, queuelist) {
		if (rq->q == q)
			ret++;
	}
out:
	return ret;
}

这个函数就是在统计 plug list 中属于指定队列的 request 个数。既然遍历链表只是为了统计个数,为什么不直接维护一个 int 计数器呢?

2、blk_flush_plug_list进行泄洪我们还要继续看

/*
 * blk_flush_plug_list - drain ("unplug") the task's plug list.
 * @plug:          per-task plug holding the parked requests
 * @from_schedule: true when invoked from the schedule() path; passed down
 *                 to queue_unplugged() to control how the queue is kicked
 *
 * Moves every plugged legacy request into the elevator of its owning
 * queue.  The requests are first sorted so that those belonging to the
 * same queue are adjacent, letting the loop take each queue_lock only
 * once per queue rather than once per request.
 */
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request_queue *q;
	unsigned long flags;
	struct request *rq;
	LIST_HEAD(list);
	unsigned int depth;

	/* Run any callbacks registered against this plug first. */
	flush_plug_callbacks(plug, from_schedule);

	/* Multiqueue requests are drained by their own flush path. */
	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);

	if (list_empty(&plug->list))
		return;

	/* Detach all plugged requests onto a private local list... */
	list_splice_init(&plug->list, &list);

	/* ...and sort so same-queue requests end up contiguous. */
	list_sort(NULL, &list, plug_rq_cmp);

	q = NULL;
	depth = 0;

	local_irq_save(flags);
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->q != q) {
			/* Queue boundary: kick the previous queue, then lock
			 * the next one.
			 * NOTE(review): queue_unplugged() presumably releases
			 * the previous queue's lock taken below — confirm
			 * against the kernel source. */
			if (q)
				queue_unplugged(q, depth, from_schedule);
			q = rq->q;
			depth = 0;
			spin_lock(q->queue_lock);
		}

		/* Short-circuit requests for a dying queue with an error. */
		if (unlikely(blk_queue_dying(q))) {
			__blk_end_request_all(rq, -ENODEV);
			continue;
		}

		/* Hand the request to the elevator; FLUSH/FUA requests use
		 * the dedicated flush insertion point. */
		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

		depth++;
	}

	/* Flush whatever remained for the last queue visited. */
	if (q)
		queue_unplugged(q, depth, from_schedule);

	local_irq_restore(flags);
}

就是将list的各个元素拿出来加入到电梯队列里面去,电梯怎么取呢?

832a80a8c0eb1550855a46d4a43693f1.png

这个是电梯的接口了,我们下次再关注这些电梯的算法接口。

现在问题是这些__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); rq是从list里面取出来的,是哪里加进去的呢?

我们在上面好像没有看到。

--- Linux文件系统学习系列笔记 ---

(原创笔记,转载请联系博主授权)

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值