1.6.3 scsi块设备驱动层处理
好了,了解完必要的scsi设备驱动知识以后,我们就可以安心分析scsi_request_fn函数了。大家回忆一下,这个函数指针通过几次传递并最终在blk_init_queue_node()中被赋予了q->request_fn。所以这一层的重点就是这个scsi_request_fn函数。
在看scsi_request_fn之前,注意回忆一下scsi_alloc_queue函数的1598行至1600行还赋了三个函数指针:
1590 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 1591 { 1592 struct request_queue *q; 1593 1594 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 1595 if (!q) 1596 return NULL; 1597 1598 blk_queue_prep_rq(q, scsi_prep_fn); 1599 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn); 1600 blk_queue_softirq_done(q, scsi_softirq_done); 1601 return q; 1602 }
143 void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn) 144 { 145 q->prep_rq_fn = pfn; 146 }
313 void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff) 314 { 315 q->issue_flush_fn = iff; 316 } 173 void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn) 174 { 175 q->softirq_done_fn = fn; 176 } |
分别是把scsi_prep_fn赋给了q->prep_rq_fn,把scsi_issue_flush_fn赋给了q->issue_flush_fn,把scsi_softirq_done赋给了q->softirq_done_fn。尤其是scsi_prep_fn我们马上就会用到。
好,让我们继续前面的话题,重点关注scsi_request_fn():
1422 static void scsi_request_fn(struct request_queue *q) 1423 { 1424 struct scsi_device *sdev = q->queuedata; 1425 struct Scsi_Host *shost; 1426 struct scsi_cmnd *cmd; 1427 struct request *req; 1428 1429 if (!sdev) { 1430 printk("scsi: killing requests for dead queue\n"); 1431 while ((req = elv_next_request(q)) != NULL) 1432 scsi_kill_request(req, q); 1433 return; 1434 } 1435 1436 if(!get_device(&sdev->sdev_gendev)) 1437 /* We must be tearing the block queue down already */ 1438 return; 1439 1440 /* 1441 * To start with, we keep looping until the queue is empty, or until 1442 * the host is no longer able to accept any more requests. 1443 */ 1444 shost = sdev->host; 1445 while (!blk_queue_plugged(q)) { 1446 int rtn; 1447 /* 1448 * get next queueable request. We do this early to make sure 1449 * that the request is fully prepared even if we cannot 1450 * accept it. 1451 */ 1452 req = elv_next_request(q); 1453 if (!req || !scsi_dev_queue_ready(q, sdev)) 1454 break; 1455 1456 if (unlikely(!scsi_device_online(sdev))) { 1457 sdev_printk(KERN_ERR, sdev, 1458 "rejecting I/O to offline device\n"); 1459 scsi_kill_request(req, q); 1460 continue; 1461 } 1462 1463 1464 /* 1465 * Remove the request from the request list. 
1466 */ 1467 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1468 blkdev_dequeue_request(req); 1469 sdev->device_busy++; /* 说明命令正在执行中 */ 1470 1471 spin_unlock(q->queue_lock); 1472 cmd = req->special; 1473 if (unlikely(cmd == NULL)) { 1474 printk(KERN_CRIT "impossible request in %s.\n" 1475 "please mail a stack trace to " 1476 "linux-scsi@vger.kernel.org\n", 1477 __FUNCTION__); 1478 blk_dump_rq_flags(req, "foo"); 1479 BUG(); 1480 } 1481 spin_lock(shost->host_lock); 1482 1483 if (!scsi_host_queue_ready(q, shost, sdev)) 1484 goto not_ready; 1485 if (sdev->single_lun) { 1486 if (scsi_target(sdev)->starget_sdev_user && 1487 scsi_target(sdev)->starget_sdev_user != sdev) 1488 goto not_ready; 1489 scsi_target(sdev)->starget_sdev_user = sdev; 1490 } 1491 shost->host_busy++; 1492 1493 /* 1494 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will 1495 * take the lock again. 1496 */ 1497 spin_unlock_irq(shost->host_lock); 1498 1499 /* 1500 * Finally, initialize any error handling parameters, and set up 1501 * the timers for timeouts. 1502 */ 1503 scsi_init_cmd_errh(cmd); 1504 1505 /* 1506 * Dispatch the command to the low-level driver. 1507 */ 1508 rtn = scsi_dispatch_cmd(cmd); 1509 spin_lock_irq(q->queue_lock); 1510 if(rtn) { 1511 /* we're refusing the command; because of 1512 * the way locks get dropped, we need to 1513 * check here if plugging is required */ 1514 if(sdev->device_busy == 0) 1515 blk_plug_device(q); 1516 1517 break; 1518 } 1519 } 1520 1521 goto out; 1522 1523 not_ready: 1524 spin_unlock_irq(shost->host_lock); 1525 1526 /* 1527 * lock q, handle tag, requeue req, and decrement device_busy. We 1528 * must return with queue_lock held. 1529 * 1530 * Decrementing device_busy without checking it is OK, as all such 1531 * cases (host limits or settings) should run the queue at some 1532 * later time. 
1533 */ 1534 spin_lock_irq(q->queue_lock); 1535 blk_requeue_request(q, req); 1536 sdev->device_busy--; 1537 if(sdev->device_busy == 0) 1538 blk_plug_device(q); 1539 out: 1540 /* must be careful here...if we trigger the ->remove() function 1541 * we cannot be holding the q lock */ 1542 spin_unlock_irq(q->queue_lock); 1543 put_device(&sdev->sdev_gendev); 1544 spin_lock_irq(q->queue_lock); 1545 } |
scsi_request_fn函数为scsi设备请求队列处理函数,前面看到该函数被注册到了request_queue->request_fn上。块设备请求的bio最终会merge到request queue中,然后通过unplug_fn函数调用request_queue->request_fn,实现scsi_request_fn函数的调用。
scsi_request_fn函数实现了请求队列的处理,首先1452-1468行按照电梯算法从请求队列中摘取一个request,所以我们首先关注1452行的elv_next_request(),来自block/elevator.c:
712 struct request *elv_next_request(request_queue_t *q) 713 { 714 struct request *rq; 715 int ret; 716 717 while ((rq = __elv_next_request(q)) != NULL) { |