I was recently debugging IO performance in the kernel block layer (nvme disk, bfq IO scheduler, CentOS 8.3, kernel 4.18.0-240). After a fio stress test had been running for a while, fio would inexplicably hang. The kernel log showed the following errors:
- nvme nvme1: I/O 61 QID 5 timeout, aborting
- nvme nvme1: Abort status:0x0
- nvme nvme1: I/O 0 QID 5 timeout, reset controller
These messages come from the nvme disk controller driver and clearly indicate a timeout: the block layer timed out an IO, the driver tried to abort it (the abort completed with status 0), then the IO timed out again and the driver reset the controller. Had the fio stress test killed the nvme disk? I had made some optimizations to the blk_mq_do_dispatch_sched function in the IO dispatch path __blk_mq_sched_dispatch_requests->blk_mq_do_dispatch_sched->blk_mq_dispatch_rq_list, so my first suspicion was that the nvme timeout errors were related to my code. Sure enough, whenever I reverted the blk_mq_do_dispatch_sched changes, fio ran fine again; I verified this back and forth several times.
Bizarre. Had my code triggered an nvme controller bug? It seemed hard to believe, but the more improbable a problem looks, the more important it is to analyze it from first principles. First question: where is the "nvme nvme1: I/O 61 QID 5 timeout, aborting" message printed? That is easy to find: the nvme_timeout function!
- static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
- {
-     //excerpted: the three warnings come from different branches of nvme_timeout()
-     //1st expiry of an IO: send an abort command to the controller and
-     //re-arm the request timer (BLK_EH_RESET_TIMER)
-     dev_warn(nvmeq->dev->ctrl.device,
-         "I/O %d QID %d timeout, aborting\n",
-         req->tag, nvmeq->qid);
-     //the same IO expired again after the abort: escalate to a controller reset
-     dev_warn(dev->ctrl.device,
-         "I/O %d QID %d timeout, reset controller\n",
-         req->tag, nvmeq->qid);
-     //the completion was found by polling the CQ rather than by interrupt
-     dev_warn(dev->ctrl.device,
-         "I/O %d QID %d timeout, completion polled\n",
-         req->tag, nvmeq->qid);
- }
(The "Abort status:0x0" line in the log, by the way, is printed by abort_endio() when that abort command completes.) So what path triggers nvme_timeout? Going through the block layer IO dispatch code, the flow is as follows.
First, when an IO request (rq or req for short) is dispatched to the disk controller, blk_mq_start_request is executed, which arms a dispatch-timeout timer for that request:
- void blk_mq_start_request(struct request *rq)
- {
-     //arm the dispatch-timeout timer for this rq
-     blk_add_timer(rq);
-     //set rq->state to MQ_RQ_IN_FLIGHT
-     WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
- }
- void blk_add_timer(struct request *req)
- {
-     struct request_queue *q = req->q;
-     unsigned long expiry;
-     if (!req->timeout)
-         req->timeout = q->rq_timeout;
-     req->rq_flags &= ~RQF_TIMED_OUT;
-     //expiry is the deadline for this rq: if the system jiffies clock reaches
-     //expiry and the rq still has not completed, the rq has timed out
-     expiry = jiffies + req->timeout;
-     WRITE_ONCE(req->deadline, expiry);
-     expiry = blk_rq_timeout(round_jiffies_up(expiry));
-     if (!timer_pending(&q->timeout) ||
-         time_before(expiry, q->timeout.expires)) {
-         unsigned long diff = q->timeout.expires - expiry;
-         //(re)arm the per-queue timeout timer, but only if the new expiry is
-         //at least HZ/2 earlier than the pending one, to avoid timer churn
-         if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
-             mod_timer(&q->timeout, expiry);
-     }
- }
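For reference, the blk_rq_timeout() helper used above just clamps how far ahead the per-queue timer may be pushed; from block/blk-timeout.c of this era (abridged; BLK_MAX_TIMEOUT is 5 * HZ):
- static unsigned long blk_rq_timeout(unsigned long timeout)
- {
-     unsigned long maxt;
-     //never arm the queue timer more than BLK_MAX_TIMEOUT (5s) ahead
-     maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
-     if (time_after(timeout, maxt))
-         timeout = maxt;
-     return timeout;
- }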
The source of the timer callback:
- static void blk_rq_timed_out_timer(struct timer_list *t)
- {
-     struct request_queue *q = from_timer(q, t, timeout);
-     //kick the worker; its handler is blk_mq_timeout_work()
-     kblockd_schedule_work(&q->timeout_work);
- }
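Where are q->timeout and q->timeout_work wired up in the first place? From my reading of this kernel's sources (abridged; double-check against your own tree):
- //in blk_alloc_queue_node(), block/blk-core.c: the per-queue timeout timer
- timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
- //in blk_mq_init_allocated_queue(), block/blk-mq.c: the work the timer kicks
- INIT_WORK(&q->timeout_work, blk_mq_timeout_work);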
So the timer callback does nothing but schedule q->timeout_work, whose handler is blk_mq_timeout_work. Source:
- static void blk_mq_timeout_work(struct work_struct *work)
- {
-     //walk the in-flight rqs and check which ones have timed out
-     blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
- }
This is where the kernel checks which IO requests have failed to complete within the allotted time. Source:
- static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
-         struct request *rq, void *priv, bool reserved)
- {
-     unsigned long *next = priv;
-     //if the rq has not completed by its deadline, this branch is taken and
-     //the timeout handling for the rq runs
-     if (blk_mq_req_expired(rq, next))
-         blk_mq_rq_timed_out(rq, reserved);
-     return true;
- }
- static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
- {
-     unsigned long deadline;
-     //if the rq has already completed, its rq->state was switched from
-     //MQ_RQ_IN_FLIGHT back to MQ_RQ_IDLE, so return false right away
-     if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
-         return false;
-     if (rq->rq_flags & RQF_TIMED_OUT)
-         return false;
-     //rq->deadline is the rq's completion deadline; if the rq is still not
-     //done by rq->deadline, this function returns true
-     deadline = READ_ONCE(rq->deadline);
-     if (time_after_eq(jiffies, deadline))
-         return true;
-     //otherwise remember the soonest deadline seen so far in *next
-     if (*next == 0)
-         *next = deadline;
-     else if (time_after(*next, deadline))
-         *next = deadline;
-     return false;
- }
- static void blk_mq_rq_timed_out(struct request *req, bool reserved)
- {
-     req->rq_flags |= RQF_TIMED_OUT;
-     if (req->q->mq_ops->timeout) {
-         enum blk_eh_timer_return ret;
-         //for nvme this is nvme_timeout()
-         ret = req->q->mq_ops->timeout(req, reserved);
-         if (ret == BLK_EH_DONE)
-             return;
-         WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
-     }
-     //BLK_EH_RESET_TIMER: give the rq a fresh deadline
-     blk_add_timer(req);
- }
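One detail worth noting: the next pointer threaded through blk_mq_check_expired() and blk_mq_req_expired() accumulates the soonest deadline among the rqs still in flight, and the end of blk_mq_timeout_work() re-arms the per-queue timer with it so the check runs again in time for the next candidate (abridged):
- //at the end of blk_mq_timeout_work()
- if (next != 0)
-     mod_timer(&q->timeout, next);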
In short: if an IO request has not completed within the allotted time (30s), the kernel runs blk_mq_timeout_work -> blk_mq_check_expired -> blk_mq_rq_timed_out -> nvme_timeout and prints nvme controller timeout errors such as "nvme nvme1: I/O 61 QID 5 timeout, aborting".
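Where does the 30s figure come from? It is not hard-coded in this path: the nvme driver sets the timeout on its tagset and the block layer copies it into q->rq_timeout, which blk_add_timer() reads above. From my reading of the sources (abridged; the io_timeout module parameter defaults to 30):
- //drivers/nvme/host/pci.c, nvme_dev_add(): timeout for the IO queues
- dev->tagset.timeout = NVME_IO_TIMEOUT;  //nvme_io_timeout * HZ, 30s by default
- //block/blk-mq.c, blk_mq_init_allocated_queue(): copied into q->rq_timeout
- blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);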
So at this point the picture was: my changes to blk_mq_do_dispatch_sched() were causing some IO request to miss its dispatch deadline, which ultimately produced the "nvme nvme1: I/O 61 QID 5 timeout, aborting" errors. The problem most likely had nothing to do with the nvme hardware: it was a software problem! Here is the code I changed in blk_mq_do_dispatch_sched:
- static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
- {
-     struct request_queue *q = hctx->queue;
-     struct elevator_queue *e = q->elevator;
-     LIST_HEAD(rq_list);
-     int ret = 0;
-     int rq_count = 0;  //added
-     do {
-         struct request *rq;
-         if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
-             break;
-         if (!list_empty_careful(&hctx->dispatch)) {
-             ret = -EAGAIN;
-             break;
-         }
-         //if (!blk_mq_get_dispatch_budget(hctx))  //removed
-         //    break;
-         rq = e->type->ops.dispatch_request(hctx);
-         if (!rq) {
-             //blk_mq_put_dispatch_budget(hctx);  //removed
-             blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
-             break;
-         }
-         list_add(&rq->queuelist, &rq_list);
-         rq_count++;  //added
-         if (rq_count >= 5) {  //added: dispatch the staged rqs in batches of 5
-             rq_count = 0;
-             //blk_mq_dispatch_rq_list() returns false when a dispatched rq hit
-             //a busy condition; exit the loop in that case
-             if (!blk_mq_dispatch_rq_list(q, &rq_list, false))
-                 break;
-         }
-     //} while (blk_mq_dispatch_rq_list(q, &rq_list, true));  //removed
-     } while (1);  //added
-     if (!list_empty(&rq_list))  //added: flush whatever is left in rq_list
-         blk_mq_dispatch_rq_list(q, &rq_list, false);
-     return ret;
- }
In the listing above, the lines marked "added" are my new code and the commented-out lines marked "removed" are the original code I deleted (the original post used red and green highlighting, which does not survive here). How could these changes make an IO request miss its dispatch deadline and end up as an nvme controller timeout? A genuinely baffling problem! The debugging was tedious, and for a long time I could not understand why the native blk_mq_do_dispatch_sched code dispatches only one IO request at a time: rq = e->type->ops.dispatch_request(hctx) takes a single request off the IO scheduler queue, and while (blk_mq_dispatch_rq_list(q, &rq_list, true)) dispatches that one request right away.
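For contrast, here is the unmodified loop, reconstructed from the commented-out lines above plus the 4.18-era sources (abridged):
- do {
-     struct request *rq;
-     if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
-         break;
-     if (!blk_mq_get_dispatch_budget(hctx))
-         break;
-     rq = e->type->ops.dispatch_request(hctx);
-     if (!rq) {
-         blk_mq_put_dispatch_budget(hctx);
-         break;
-     }
-     //exactly one rq per iteration: the list handed to
-     //blk_mq_dispatch_rq_list() can never mix hardware queues
-     list_add(&rq->queuelist, &rq_list);
- } while (blk_mq_dispatch_rq_list(q, &rq_list, true));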
Why not pull several IO requests off the scheduler queue and then dispatch them all in one blk_mq_dispatch_rq_list(q, &rq_list, true) call (which is exactly what the added code above does)? Then a flash of insight: could it be that blk_mq_dispatch_rq_list() is only able to dispatch IO requests belonging to the same hardware queue? After all, there is no telling which hardware queue the rq returned by rq = e->type->ops.dispatch_request(hctx) belongs to! So the native code plays it safe: take one rq, then immediately dispatch it via while (blk_mq_dispatch_rq_list(q, &rq_list, true)). My code pulls several rqs in a row with rq = e->type->ops.dispatch_request(hctx) and only then calls blk_mq_dispatch_rq_list() to dispatch the whole batch; if those rqs belong to different hardware queues, the dispatch misbehaves and we get the nvme controller timeout. (Newer upstream kernels make this assumption explicit, if I read them correctly: there blk_mq_dispatch_rq_list() takes the hctx as a parameter, so one list can only ever hold requests of a single hardware queue.)
With this theory in hand, I added some debug output to blk_mq_dispatch_rq_list() to verify it; the key part is printing the hardware queue of every dispatched rq:
- bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
-         bool got_budget)
- {
-     struct blk_mq_hw_ctx *hctx;
-     struct request *rq, *nxt;
-     blk_status_t ret;
-     do {
-         struct blk_mq_queue_data bd;
-         rq = list_first_entry(list, struct request, queuelist);
-         hctx = rq->mq_hctx;
-         list_del_init(&rq->queuelist);
-         bd.rq = rq;
-         if (list_empty(list)) {
-             bd.last = true;
-             printk("blk_mq_dispatch_rq_list:1 %s %d bd.last:%d hctx->queue_num:%d rq:0x%llx",current->comm,current->pid,bd.last,hctx->queue_num,(u64)rq);
-         } else {
-             nxt = list_first_entry(list, struct request, queuelist);
-             bd.last = !blk_mq_get_driver_tag(nxt);
-             printk("blk_mq_dispatch_rq_list:2 %s %d bd.last:%d hctx->queue_num:%d rq:0x%llx",current->comm,current->pid,bd.last,hctx->queue_num,(u64)rq);
-         }
-         //dispatch the rq to the driver
-         ret = q->mq_ops->queue_rq(hctx, &bd);
-         .............
-     } while (!list_empty(list));
-     ...................
- }
The next time fio hung, the log showed:
- //two rqs dispatched back to back in the blk_mq_dispatch_rq_list() loop,
- //but belonging to two different hardware queues
- blk_mq_dispatch_rq_list:2 fio 35111 bd.last:0 hctx->queue_num:36 rq:0xffff9f44291f1320
- blk_mq_dispatch_rq_list:1 fio 35111 bd.last:1 hctx->queue_num:12 rq:0xffff9f44291f30e0
- ............
- //30s later, the nvme controller timeout errors fire
- nvme nvme1: I/O 61 QID 5 timeout, aborting
- nvme nvme1: Abort status:0x0
- nvme nvme1: I/O 0 QID 5 timeout, reset controller
Sure enough: when one blk_mq_dispatch_rq_list() call dispatches IO requests belonging to different hardware queues, some request never completes, even after 30s, and the "nvme nvme1: I/O 61 QID 5 timeout, aborting" errors follow. A plausible mechanism is visible in the bd.last values above: bd.last tells the driver whether more requests are coming for the same hardware queue, and the nvme driver may defer ringing a submission queue's doorbell until it sees the last request. Here the rq on hardware queue 36 was queued with bd.last:0, while the rq that carried bd.last:1 went to hardware queue 12, so queue 36's command could simply sit in its submission queue with the doorbell never rung, invisible to the controller until the timeout fired.
To verify the theory further, I adjusted blk_mq_do_dispatch_sched() once more: whenever the rq returned by rq = e->type->ops.dispatch_request(hctx) belongs to a different hardware queue than the previous one, immediately call blk_mq_dispatch_rq_list() to dispatch the rqs staged so far in rq_list. That guarantees every rq_list handed to blk_mq_dispatch_rq_list() holds requests of a single hardware queue. And indeed, with this change everything ran cleanly. Problem solved!
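Here is a minimal sketch of that final version (simplified from my patch: the last_hctx bookkeeping is the essence, error handling is elided, and as in my modified version above the budget calls stay removed):
- static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
- {
-     struct request_queue *q = hctx->queue;
-     struct elevator_queue *e = q->elevator;
-     struct blk_mq_hw_ctx *last_hctx = NULL; //hw queue of the rqs staged in rq_list
-     LIST_HEAD(rq_list);
-     int rq_count = 0;
-     int ret = 0;
-     do {
-         struct request *rq;
-         if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
-             break;
-         if (!list_empty_careful(&hctx->dispatch)) {
-             ret = -EAGAIN;
-             break;
-         }
-         rq = e->type->ops.dispatch_request(hctx);
-         if (!rq) {
-             blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
-             break;
-         }
-         //the scheduler handed back an rq from a different hw queue than the
-         //rqs staged in rq_list: flush the staged batch before going on
-         if (last_hctx && rq->mq_hctx != last_hctx && !list_empty(&rq_list)) {
-             bool busy = !blk_mq_dispatch_rq_list(q, &rq_list, false);
-             rq_count = 0;
-             if (busy) {
-                 //the busy leftovers went to hctx->dispatch and rq_list is
-                 //empty again; park rq there, the final flush below sends it
-                 list_add_tail(&rq->queuelist, &rq_list);
-                 last_hctx = rq->mq_hctx;
-                 break;
-             }
-         }
-         list_add_tail(&rq->queuelist, &rq_list); //_tail keeps dispatch order
-         last_hctx = rq->mq_hctx;
-         if (++rq_count >= 5) { //still dispatch in batches of up to 5
-             rq_count = 0;
-             if (!blk_mq_dispatch_rq_list(q, &rq_list, false))
-                 break;
-         }
-     } while (1);
-     //flush the remainder: everything still in rq_list shares one hw queue
-     if (!list_empty(&rq_list))
-         blk_mq_dispatch_rq_list(q, &rq_list, false);
-     return ret;
- }
Note the use of list_add_tail() here rather than list_add(): tail insertion keeps each batch in the order the scheduler handed the rqs out, instead of dispatching it in reverse.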