Linux 2.6.28 block layer: registering the MMC/SD card disk driver (drivers/mmc/card/block.c)

///
Path 1: MMC subsystem core initialization
drivers/mmc/core/core.c
// subsystem init: subsys_initcall(mmc_init);

Path 2: MMC host controller platform driver registration

   module_init(sdhci_drv_init);

Path 3: disk device driver registration
drivers/mmc/card/block.c
module_init(mmc_blk_init);
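Why this order works: built-in initcalls run level by level at boot, and subsys_initcall sits at a lower level than module_init, so the MMC core is guaranteed to be ready before the host controller driver and the block driver register. A simplified sketch of the macros involved (trimmed from the 2.6.x include/linux/init.h; the __define_initcall() internals are omitted):

#define subsys_initcall(fn)    __define_initcall("4", fn, 4)    /* level 4: mmc_init */
#define device_initcall(fn)    __define_initcall("6", fn, 6)    /* level 6 */
#define __initcall(fn)         device_initcall(fn)
#define module_init(x)         __initcall(x)    /* built-in sdhci_drv_init, mmc_blk_init */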

//

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");

static void __exit mmc_blk_exit(void)
{
    mmc_unregister_driver(&mmc_driver);
    unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

//

static int __init mmc_blk_init(void)
{
    int res;

    // register_blkdev(): defined in block/genhd.c, declared in linux/fs.h
    // arg 1: major number
    // arg 2: device name (the name shown in /proc/devices)
    // registers the block device name and major number with the block layer
    res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
    if (res)
        goto out;
    // register mmc_driver with the driver model
    res = mmc_register_driver(&mmc_driver);
    if (res)
        goto out2;

    return 0;

out2:
    unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
out:
    return res;
}
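For reference, the major number is fixed; the definition below is from include/linux/major.h. After mmc_blk_init() succeeds, "179 mmc" appears in the block device section of /proc/devices.

#define MMC_BLOCK_MAJOR        179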
//

/*
 * MMC device driver (e.g., Flash card, I/O card...)
 */
struct mmc_driver {
    struct device_driver drv;               // the embedded driver-model driver
    int (*probe)(struct mmc_card *);        // called after this driver is matched to a card
    void (*remove)(struct mmc_card *);
    int (*suspend)(struct mmc_card *, pm_message_t);
    int (*resume)(struct mmc_card *);
};
///

static struct mmc_driver mmc_driver = {
    .drv = {
        .name = "mmcblk",
    },
    .probe = mmc_blk_probe,
    .remove = mmc_blk_remove,
    .suspend = mmc_blk_suspend,
    .resume = mmc_blk_resume,
};

//

static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
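The dev_use bitmap tracks which device indices are allocated. The sizing constants, defined near the top of the same file, give every card 1 << MMC_SHIFT = 8 minors (the whole disk plus up to 7 partitions), so one major number accommodates up to 32 cards:

#define MMC_SHIFT    3
#define MMC_NUM_MINORS    (256 >> MMC_SHIFT)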

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
    spinlock_t lock;
    struct gendisk *disk;
    struct mmc_queue queue;

    unsigned int usage;
    unsigned int read_only;
};

static DEFINE_MUTEX(open_lock);
//

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;

mutex_lock(&open_lock);
md = disk->private_data;
if (md && md->usage == 0)
    md = NULL;
if (md)
    md->usage++;
mutex_unlock(&open_lock);

return md;

}
//

static void mmc_blk_put(struct mmc_blk_data *md)
{
    mutex_lock(&open_lock);
    md->usage--;
    if (md->usage == 0) {
        int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
        __clear_bit(devidx, dev_use);

        put_disk(md->disk);
        kfree(md);
    }
    mutex_unlock(&open_lock);
}
//

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
int ret = -ENXIO;

if (md) {
    if (md->usage == 2)
        check_disk_change(bdev);
    ret = 0;

    if ((mode & FMODE_WRITE) && md->read_only) {
        mmc_blk_put(md);
        ret = -EROFS;
    }
}

return ret;

}
//

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
struct mmc_blk_data *md = disk->private_data;

mmc_blk_put(md);
return 0;

}
//

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
geo->heads = 4;
geo->sectors = 16;
return 0;
}
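The reported geometry is synthetic (a card has no physical heads or cylinders); it exists only so that legacy callers of the HDIO_GETGEO ioctl see self-consistent numbers. A minimal user-space sketch, assuming a /dev/mmcblk0 node exists:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hd.h>

int main(void)
{
    struct hd_geometry geo;
    int fd = open("/dev/mmcblk0", O_RDONLY);    /* assumed device node */

    if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0) {
        perror("HDIO_GETGEO");
        return 1;
    }
    /* e.g. a 1 GiB card: 2097152 sectors / (4 * 16) = 32768 cylinders */
    printf("heads=%u sectors=%u cylinders=%u\n",
           geo.heads, geo.sectors, geo.cylinders);
    close(fd);
    return 0;
}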
//

static struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
.getgeo = mmc_blk_getgeo,
.owner = THIS_MODULE,
};
//

struct mmc_blk_request {
struct mmc_request mrq;
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
};
//

static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
int err;
u32 blocks;

struct mmc_request mrq;
struct mmc_command cmd;
struct mmc_data data;
unsigned int timeout_us;

struct scatterlist sg;

memset(&cmd, 0, sizeof(struct mmc_command));

cmd.opcode = MMC_APP_CMD;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err)
    return (u32)-1;
if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
    return (u32)-1;

memset(&cmd, 0, sizeof(struct mmc_command));

cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

memset(&data, 0, sizeof(struct mmc_data));

data.timeout_ns = card->csd.tacc_ns * 100;
data.timeout_clks = card->csd.tacc_clks * 100;

timeout_us = data.timeout_ns / 1000;
timeout_us += data.timeout_clks * 1000 /
    (card->host->ios.clock / 1000);

if (timeout_us > 100000) {
    data.timeout_ns = 100000000;
    data.timeout_clks = 0;
}

data.blksz = 4;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;

memset(&mrq, 0, sizeof(struct mmc_request));

mrq.cmd = &cmd;
mrq.data = &data;

sg_init_one(&sg, &blocks, 4);

mmc_wait_for_req(card->host, &mrq);

if (cmd.error || data.error)
    return (u32)-1;

blocks = ntohl(blocks);

return blocks;

}
//

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request brq;
int ret = 1;

mmc_claim_host(card->host);

do {
    struct mmc_command cmd;
    u32 readcmd, writecmd;

    memset(&brq, 0, sizeof(struct mmc_blk_request));
    brq.mrq.cmd = &brq.cmd;
    brq.mrq.data = &brq.data;

    brq.cmd.arg = req->sector;
    if (!mmc_card_blockaddr(card))
        brq.cmd.arg <<= 9;
    brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
    brq.data.blksz = 512;
    brq.stop.opcode = MMC_STOP_TRANSMISSION;
    brq.stop.arg = 0;
    brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
    brq.data.blocks = req->nr_sectors;

    if (brq.data.blocks > 1) {
        /* SPI multiblock writes terminate using a special
         * token, not a STOP_TRANSMISSION request.
         */
        if (!mmc_host_is_spi(card->host)
                || rq_data_dir(req) == READ)
            brq.mrq.stop = &brq.stop;
        readcmd = MMC_READ_MULTIPLE_BLOCK;
        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
    } else {
        brq.mrq.stop = NULL;
        readcmd = MMC_READ_SINGLE_BLOCK;
        writecmd = MMC_WRITE_BLOCK;
    }

    if (rq_data_dir(req) == READ) {
        brq.cmd.opcode = readcmd;
        brq.data.flags |= MMC_DATA_READ;
    } else {
        brq.cmd.opcode = writecmd;
        brq.data.flags |= MMC_DATA_WRITE;
    }

    mmc_set_data_timeout(&brq.data, card);

    brq.data.sg = mq->sg;
    brq.data.sg_len = mmc_queue_map_sg(mq);

    mmc_queue_bounce_pre(mq);

    mmc_wait_for_req(card->host, &brq.mrq);

    mmc_queue_bounce_post(mq);

    /*
     * Check for errors here, but don't jump to cmd_err
     * until later as we need to wait for the card to leave
     * programming mode even when things go wrong.
     */
    if (brq.cmd.error) {
        printk(KERN_ERR "%s: error %d sending read/write command\n",
               req->rq_disk->disk_name, brq.cmd.error);
    }

    if (brq.data.error) {
        printk(KERN_ERR "%s: error %d transferring data\n",
               req->rq_disk->disk_name, brq.data.error);
    }

    if (brq.stop.error) {
        printk(KERN_ERR "%s: error %d sending stop command\n",
               req->rq_disk->disk_name, brq.stop.error);
    }

    if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
        do {
            int err;

            cmd.opcode = MMC_SEND_STATUS;
            cmd.arg = card->rca << 16;
            cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
            err = mmc_wait_for_cmd(card->host, &cmd, 5);
            if (err) {
                printk(KERN_ERR "%s: error %d requesting status\n",
                       req->rq_disk->disk_name, err);
                goto cmd_err;
            }
            /*
             * Some cards mishandle the status bits,
             * so make sure to check both the busy
             * indication and the card state.
             */
        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
            (R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
        if (cmd.resp[0] & ~0x00000900)
            printk(KERN_ERR "%s: status = %08x\n",
                   req->rq_disk->disk_name, cmd.resp[0]);
        if (mmc_decode_status(cmd.resp))
            goto cmd_err;
#endif
}

    if (brq.cmd.error || brq.data.error || brq.stop.error)
        goto cmd_err;

    /*
     * A block was successfully transferred.
     */
    spin_lock_irq(&md->lock);
    ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
    spin_unlock_irq(&md->lock);
} while (ret);

mmc_release_host(card->host);

return 1;

cmd_err:
    /*
     * If this is an SD card and we're writing, we can first
     * mark the known good sectors as ok.
     *
     * If the card is not SD, we can still ok written sectors
     * as reported by the controller (which might be less than
     * the real number of written sectors, but never more).
     *
     * For reads we just fail the entire chunk as that should
     * be safe in all cases.
     */
if (rq_data_dir(req) != READ) {
if (mmc_card_sd(card)) {
u32 blocks;

        blocks = mmc_sd_num_wr_blocks(card);
        if (blocks != (u32)-1) {
            spin_lock_irq(&md->lock);
            ret = __blk_end_request(req, 0, blocks << 9);
            spin_unlock_irq(&md->lock);
        }
    } else {
        spin_lock_irq(&md->lock);
        ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
        spin_unlock_irq(&md->lock);
    }
}

mmc_release_host(card->host);

spin_lock_irq(&md->lock);
while (ret)
    ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
spin_unlock_irq(&md->lock);

return 0;

}

//

static inline int mmc_blk_readonly(struct mmc_card *card)
{
return mmc_card_readonly(card) ||
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
//

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
struct mmc_blk_data *md;
int devidx, ret;

devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
if (devidx >= MMC_NUM_MINORS)
    return ERR_PTR(-ENOSPC);
__set_bit(devidx, dev_use);

md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
    ret = -ENOMEM;
    goto out;
}


/*
 * Set the read-only status based on the supported commands
 * and the write protect switch.
 */
md->read_only = mmc_blk_readonly(card);

// allocate the generic gendisk structure
md->disk = alloc_disk(1 << MMC_SHIFT);
if (md->disk == NULL) {
    ret = -ENOMEM;
    goto err_kfree;
}

spin_lock_init(&md->lock);
md->usage = 1;

// initialise the request queue structure
ret = mmc_init_queue(&md->queue, card, &md->lock);
if (ret)
    goto err_putdisk;

md->queue.issue_fn = mmc_blk_issue_rq;
md->queue.data = md;

md->disk->major    = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx << MMC_SHIFT;
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = &card->dev;

/*
 * As discussed on lkml, GENHD_FL_REMOVABLE should:
 *
 * - be set for removable media with permanent block devices
 * - be unset for removable block devices with permanent media
 *
 * Since MMC block devices clearly fall under the second
 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
 * should use the block device creation/destruction hotplug
 * messages to tell when the card is present.
 */

sprintf(md->disk->disk_name, "mmcblk%d", devidx);

blk_queue_hardsect_size(md->queue.queue, 512);

if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
    /*
     * The EXT_CSD sector count is in number of 512 byte
     * sectors.
     */
    // set the disk capacity, in units of 512-byte sectors
    set_capacity(md->disk, card->ext_csd.sectors);
} else {
    /*
     * The CSD capacity field is in units of read_blkbits.
     * set_capacity takes units of 512 bytes.
     */
    set_capacity(md->disk,
        card->csd.capacity << (card->csd.read_blkbits - 9));
}
return md;
return md;

err_putdisk:
put_disk(md->disk);
err_kfree:
kfree(md);
out:
return ERR_PTR(ret);
}
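To make the capacity conversion concrete, here is a small stand-alone sketch with assumed CSD values (not from a real card): a standard-capacity card with 1024-byte native blocks ends up advertising twice as many 512-byte sectors.

#include <stdio.h>

int main(void)
{
    /* assumed values: csd.capacity is in units of 2^read_blkbits bytes */
    unsigned long long capacity = 1984000;
    unsigned int read_blkbits = 10;    /* 1024-byte native blocks */

    unsigned long long sectors = capacity << (read_blkbits - 9);
    printf("%llu sectors (%llu MiB)\n",
           sectors, sectors * 512 / (1024 * 1024));
    return 0;    /* prints: 3968000 sectors (1937 MiB) */
}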
//

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
struct mmc_command cmd;
int err;

/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
if (mmc_card_blockaddr(card))
    return 0;

mmc_claim_host(card->host);
cmd.opcode = MMC_SET_BLOCKLEN;
cmd.arg = 512;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 5);
mmc_release_host(card->host);

if (err) {
    printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
        md->disk->disk_name, cmd.arg, err);
    return -EINVAL;
}

return 0;

}
//

//
// Block driver probe: initialization and registration of the disk (gendisk)
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md;
int err;

char cap_str[10];

/*
 * Check that the card supports the command class(es) we need.
 */
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
    return -ENODEV;

// allocate the gendisk and a device index, initialize the request
// queue, and fill in the gendisk fields
md = mmc_blk_alloc(card);
if (IS_ERR(md))
    return PTR_ERR(md);
// set the block length
err = mmc_blk_set_blksize(md, card);
if (err)
    goto out;

string_get_size(get_capacity(md->disk) << 9, STRING_UNITS_2,
        cap_str, sizeof(cap_str));
printk(KERN_INFO "%s: %s %s %s %s\n",
    md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
    cap_str, md->read_only ? "(ro)" : "");

// associate mmc_card{} <-> mmc_blk_data{}
mmc_set_drvdata(card, md);
// activate the disk; once add_disk() returns, the device is live and
// can be accessed immediately
add_disk(md->disk);
return 0;

out:
mmc_blk_put(md);

return err;

}
//

static struct bus_type mmc_bus_type = {
    .name = "mmc",
    .dev_attrs = mmc_dev_attrs,
    .match = mmc_bus_match,
    .uevent = mmc_bus_uevent,
    .probe = mmc_bus_probe,
    .remove = mmc_bus_remove,
    .suspend = mmc_bus_suspend,
    .resume = mmc_bus_resume,
};
//

static int mmc_bus_probe(struct device *dev)
{
    struct mmc_driver *drv = to_mmc_driver(dev->driver);
    struct mmc_card *card = dev_to_mmc_card(dev);

    // For the block driver, mmc_driver.probe = mmc_blk_probe,
    // so this call ends up in mmc_blk_probe().
    return drv->probe(card);
}
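Both helpers are plain container_of() wrappers; as defined in the 2.6.x MMC headers, they recover the enclosing object from its embedded driver-model member:

#define dev_to_mmc_card(d)    container_of(d, struct mmc_card, dev)
#define to_mmc_driver(d)      container_of(d, struct mmc_driver, drv)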
//

static void mmc_blk_remove(struct mmc_card *card)
{
struct mmc_blk_data *md = mmc_get_drvdata(card);

if (md) {
    /* Stop new requests from getting into the queue */
    del_gendisk(md->disk);

    /* Then flush out any already in there */
    mmc_cleanup_queue(&md->queue);

    mmc_blk_put(md);
}
mmc_set_drvdata(card, NULL);

}
//

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
struct mmc_blk_data *md = mmc_get_drvdata(card);

if (md) {
    mmc_queue_suspend(&md->queue);
}
return 0;

}
//

static int mmc_blk_resume(struct mmc_card *card)
{
struct mmc_blk_data *md = mmc_get_drvdata(card);

if (md) {
    mmc_blk_set_blksize(md, card);
    mmc_queue_resume(&md->queue);
}
return 0;

}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume NULL
#endif

//

/**
 *	mmc_register_driver - register a media driver
 *	@drv: MMC media driver
 */
int mmc_register_driver(struct mmc_driver *drv)
{
    drv->drv.bus = &mmc_bus_type;
    // register the driver with the driver model, attached to mmc_bus_type
    return driver_register(&drv->drv);
}
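driver_register() hangs the driver off mmc_bus_type; binding decisions then go through the bus's match method. On this bus the match is unconditional (from drivers/mmc/core/bus.c), so every detected card is offered to mmcblk and the real decision happens in the probe method:

/*
 * This currently matches any MMC driver to any MMC card - drivers
 * themselves make the decision whether to drive this card in their
 * probe method.
 */
static int mmc_bus_match(struct device *dev, struct device_driver *drv)
{
    return 1;
}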

//

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    // create a request queue; the rfn argument is our own request function.
    // The queue is later stored in the gendisk, the structure that
    // represents an individual disk device.
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

    mq->queue->queuedata = mq;
    mq->req = NULL;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_hw_segs == 1) {
unsigned int bouncesz;

    bouncesz = MMC_QUEUE_BOUNCESZ;

    if (bouncesz > host->max_req_size)
        bouncesz = host->max_req_size;
    if (bouncesz > host->max_seg_size)
        bouncesz = host->max_seg_size;
    if (bouncesz > (host->max_blk_count * 512))
        bouncesz = host->max_blk_count * 512;

    if (bouncesz > 512) {
        mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
        if (!mq->bounce_buf) {
            printk(KERN_WARNING "%s: unable to "
                "allocate bounce buffer\n",
                mmc_card_name(card));
        }
    }

    if (mq->bounce_buf) {
        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
        blk_queue_max_sectors(mq->queue, bouncesz / 512);
        blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
        blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
        blk_queue_max_segment_size(mq->queue, bouncesz);

        mq->sg = kmalloc(sizeof(struct scatterlist),
            GFP_KERNEL);
        if (!mq->sg) {
            ret = -ENOMEM;
            goto cleanup_queue;
        }
        sg_init_table(mq->sg, 1);

        mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
            bouncesz / 512, GFP_KERNEL);
        if (!mq->bounce_sg) {
            ret = -ENOMEM;
            goto cleanup_queue;
        }
        sg_init_table(mq->bounce_sg, bouncesz / 512);
    }
}

#endif

if (!mq->bounce_buf) {
    blk_queue_bounce_limit(mq->queue, limit);
    blk_queue_max_sectors(mq->queue,
        min(host->max_blk_count, host->max_req_size / 512));
    blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
    blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
    blk_queue_max_segment_size(mq->queue, host->max_seg_size);

    mq->sg = kmalloc(sizeof(struct scatterlist) *
        host->max_phys_segs, GFP_KERNEL);
    if (!mq->sg) {
        ret = -ENOMEM;
        goto cleanup_queue;
    }
    sg_init_table(mq->sg, host->max_phys_segs);
}

init_MUTEX(&mq->thread_sem);

// kthread_run() is a macro: it calls kthread_create() and, if that
// succeeds, wake_up_process() to start the new thread.
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
if (IS_ERR(mq->thread)) {
    ret = PTR_ERR(mq->thread);
    goto free_bounce_sg;
}

return 0;

free_bounce_sg:
if (mq->bounce_sg)
kfree(mq->bounce_sg);
mq->bounce_sg = NULL;
cleanup_queue:
if (mq->sg)
kfree(mq->sg);
mq->sg = NULL;
if (mq->bounce_buf)
kfree(mq->bounce_buf);
mq->bounce_buf = NULL;
blk_cleanup_queue(mq->queue);
return ret;
}
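For reference, the default bounce buffer size in drivers/mmc/card/queue.c is 64 KiB, which the code above clamps to the host's request, segment, and block-count limits. With the full 65536 bytes, max_sectors becomes 65536 / 512 = 128, so a host that can only handle one hardware segment still sees up to 64 KiB per request.

#define MMC_QUEUE_BOUNCESZ    65536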
///

static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;

current->flags |= PF_MEMALLOC;

down(&mq->thread_sem);
do {
    struct request *req = NULL;

    spin_lock_irq(q->queue_lock);
    set_current_state(TASK_INTERRUPTIBLE);
    if (!blk_queue_plugged(q))
        req = elv_next_request(q);
    mq->req = req;
    spin_unlock_irq(q->queue_lock);

    if (!req) {
        if (kthread_should_stop()) {
            set_current_state(TASK_RUNNING);
            break;
        }
        up(&mq->thread_sem);
        schedule();
        down(&mq->thread_sem);
        continue;
    }
    set_current_state(TASK_RUNNING);

    // mmc_init_queue() bound the request function with
    // blk_init_queue(mmc_request, lock).  mmc_request() only wakes this
    // kernel thread; the thread then issues the request via
    // mq->issue_fn(mq, req).  mmc_blk_probe() -> mmc_blk_alloc() set
    // md->queue.issue_fn = mmc_blk_issue_rq, so for a storage card
    // issue_fn points at mmc_blk_issue_rq().
    mq->issue_fn(mq, req);

} while (1);
up(&mq->thread_sem);

return 0;

}
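The ordering inside the loop matters: the thread marks itself TASK_INTERRUPTIBLE before checking for a request, so a wake_up_process() from mmc_request() that races with the check is not lost; the later schedule() simply returns at once. The same idiom in miniature (a generic sketch; have_work() and do_work() are hypothetical stand-ins):

for (;;) {
    set_current_state(TASK_INTERRUPTIBLE);    /* declare intent to sleep first */
    if (have_work()) {                        /* then test the condition */
        __set_current_state(TASK_RUNNING);
        do_work();                            /* may sleep */
        continue;
    }
    if (kthread_should_stop())
        break;
    schedule();                               /* really sleep until woken */
}
__set_current_state(TASK_RUNNING);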
///

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
    struct mmc_queue *mq = q->queuedata;
    struct request *req;
    int ret;

    if (!mq) {
        printk(KERN_ERR "MMC: killing requests for dead queue\n");
        while ((req = elv_next_request(q)) != NULL) {
            do {
                ret = __blk_end_request(req, -EIO,
                                        blk_rq_cur_bytes(req));
            } while (ret);
        }
        return;
    }

    // wake the kernel thread that services this queue; its handler,
    // mmc_queue_thread(), then calls mq->issue_fn(mq, req)
    if (!mq->req)
        wake_up_process(mq->thread);
}
//

struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
struct request *req;
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
struct scatterlist *sg;
char *bounce_buf;
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
};
//

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 *    Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
    return blk_init_queue_node(rfn, lock, -1);
}
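A minimal 2.6-era caller of blk_init_queue() looks like the sketch below (hypothetical driver names; elv_next_request() and __blk_end_request() are the request APIs of this kernel generation):

static DEFINE_SPINLOCK(my_lock);

static void my_request_fn(struct request_queue *q)    /* runs with my_lock held */
{
    struct request *req;

    while ((req = elv_next_request(q)) != NULL) {
        /* ... move blk_rq_cur_bytes(req) bytes to/from req->buffer ... */
        __blk_end_request(req, 0, blk_rq_cur_bytes(req));
    }
}

static int __init my_init(void)
{
    struct request_queue *q = blk_init_queue(my_request_fn, &my_lock);
    if (!q)
        return -ENOMEM;
    /* attach to a gendisk (disk->queue = q) before add_disk() */
    return 0;
}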
//
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

if (!q)
    return NULL;

q->node = node_id;
if (blk_init_free_list(q)) {
    kmem_cache_free(blk_requestq_cachep, q);
    return NULL;
}

/*
 * if caller didn't supply a lock, they get per-queue locking with
 * our embedded lock
 */
if (!lock)
    lock = &q->__queue_lock;

q->request_fn = rfn;    // here: mmc_request
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
                  1 << QUEUE_FLAG_STACKABLE);
q->queue_lock = lock;

blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);

blk_queue_make_request(q, __make_request);
blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

q->sg_reserved_size = INT_MAX;

blk_set_cmd_filter_defaults(&q->cmd_filter);

/*
 * all done
 */
if (!elevator_init(q, NULL)) {
    blk_queue_congestion_threshold(q);
    return q;
}

blk_put_queue(q);
return NULL;

}

//
/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:   the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices.  However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory".  This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
    /*
     * set defaults
     */
    q->nr_requests = BLKDEV_MAX_RQ;
    blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
    blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
    blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
    blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

    q->make_request_fn = mfn;    // here: __make_request
    q->backing_dev_info.ra_pages =
            (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
    q->backing_dev_info.state = 0;
    q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
    blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
    blk_queue_hardsect_size(q, 512);
    blk_queue_dma_alignment(q, 511);
    blk_queue_congestion_threshold(q);
    q->nr_batching = BLK_BATCH_REQ;

    q->unplug_thresh = 4;        /* hmm */
    q->unplug_delay = (3 * HZ) / 1000;    /* 3 milliseconds */
    if (q->unplug_delay == 0)
        q->unplug_delay = 1;

    q->unplug_timer.function = blk_unplug_timeout;
    q->unplug_timer.data = (unsigned long)q;

    /*
     * by default assume old behaviour and bounce for any highmem page
     */
    blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
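For contrast, a bio-based driver of this era skips the request queue entirely, as the Description above suggests for md and lvm. A hypothetical minimal sketch:

static int my_make_request(struct request_queue *q, struct bio *bio)
{
    /* ... remap bio->bi_sector or service the bio directly ... */
    bio_endio(bio, 0);    /* 0 = success */
    return 0;             /* bio consumed; nothing left for the block layer */
}

static int __init my_init(void)
{
    struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
    if (!q)
        return -ENOMEM;
    blk_queue_make_request(q, my_make_request);
    return 0;
}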