Linux SPI Driver Analysis (2): Framework Layer Source Code Analysis
The previous article (Linux SPI Driver Analysis (1): Structure and Framework) introduced the SPI framework, its data structures, and the public APIs. This article analyzes how those core APIs are implemented and how they relate to each other.
The analysis follows the order of typical usage: allocation, initialization, registration, and the data transfer path, which together cover the essentials of the SPI core. The order matches Section 4, "Public APIs", of (Linux SPI Driver Analysis (1): Structure and Framework).
1. spi_alloc_master
This allocates a spi_controller (spi_master). Since this is a source-code walkthrough, let's start with the code and then analyze the flow:
static inline struct spi_controller *spi_alloc_master(struct device *host,
unsigned int size)
{
return __spi_alloc_controller(host, size, false);
}
struct spi_controller *__spi_alloc_controller(struct device *dev,
unsigned int size, bool slave)
{
struct spi_controller *ctlr;
if (!dev)
return NULL;
ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
if (!ctlr)
return NULL;
device_initialize(&ctlr->dev);
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
ctlr->slave = slave;
if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
ctlr->dev.class = &spi_slave_class;
else
ctlr->dev.class = &spi_master_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
spi_controller_set_devdata(ctlr, &ctlr[1]);
return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
Note that this interface does more than allocate a spi_controller: the kzalloc actually allocates one spi_controller plus size extra bytes, with the spi_controller at the start of the block. After performing basic initialization, it finishes with:
spi_controller_set_devdata(ctlr, &ctlr[1]);
That is:
static inline void spi_controller_set_devdata(struct spi_controller *ctlr,
void *data)
{
dev_set_drvdata(&ctlr->dev, data);
}
static inline void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}
The argument &ctlr[1] points just past the first spi_controller, so the remaining size bytes of the allocation get attached to device->driver_data via set_devdata.
The function returns the pointer to the spi_controller! The extra size bytes are where chip vendors put their own driver-private structure. For example:
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
.......
struct s3c64xx_spi_driver_data *sdd;
struct spi_master *master;
master = spi_alloc_master(&pdev->dev,
sizeof(struct s3c64xx_spi_driver_data));
platform_set_drvdata(pdev, master);
sdd = spi_master_get_devdata(master);
.......
}
Look: all the extra space allocated by spi_alloc_master goes to the vendor's own s3c64xx_spi_driver_data structure, which is later retrieved through spi_master_get_devdata -> spi_controller_get_devdata -> dev_get_drvdata.
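In memory, the single kzalloc therefore produces this layout:
+------------------------+  <- ctlr (pointer returned by spi_alloc_master)
| struct spi_controller  |
+------------------------+  <- &ctlr[1], stored in ctlr->dev.driver_data
| size extra bytes for   |
| the vendor's private   |
| data, e.g.             |
| s3c64xx_spi_driver_data|
+------------------------+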
2. spi_register_controller
Once the SoC's SPI controller abstraction has been allocated, the next step is to register it, and there is quite a bit to it:
int spi_register_controller(struct spi_controller *ctlr)
{
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
int status = -ENODEV;
int id, first_dynamic;
if (!dev)
return -ENODEV;
/*
* Make sure all necessary hooks are implemented before registering
* the SPI controller.
*/
status = spi_controller_check_ops(ctlr); ----------(A)
if (status)
return status;
if (!spi_controller_is_slave(ctlr)) {
status = of_spi_register_master(ctlr);
if (status)
return status;
}
/* even if it's just one always-selected device, there must
* be at least one chipselect
*/
if (ctlr->num_chipselect == 0)
return -EINVAL;
if (ctlr->bus_num >= 0) {
/* devices with a fixed bus num must check-in with the num */
mutex_lock(&board_lock);
id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
ctlr->bus_num + 1, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id == -ENOSPC ? -EBUSY : id;
ctlr->bus_num = id;
} else if (ctlr->dev.of_node) {
/* allocate dynamic bus number using Linux idr */
id = of_alias_get_id(ctlr->dev.of_node, "spi");
if (id >= 0) {
ctlr->bus_num = id;
mutex_lock(&board_lock);
id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
ctlr->bus_num + 1, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id == -ENOSPC ? -EBUSY : id;
}
}
if (ctlr->bus_num < 0) {
first_dynamic = of_alias_get_highest_id("spi");
if (first_dynamic < 0)
first_dynamic = 0;
else
first_dynamic++;
mutex_lock(&board_lock);
id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
0, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id;
ctlr->bus_num = id;
}
INIT_LIST_HEAD(&ctlr->queue); ----------(B)
spin_lock_init(&ctlr->queue_lock);
spin_lock_init(&ctlr->bus_lock_spinlock);
mutex_init(&ctlr->bus_lock_mutex);
mutex_init(&ctlr->io_mutex);
ctlr->bus_lock_flag = 0;
init_completion(&ctlr->xfer_completion);
if (!ctlr->max_dma_len)
ctlr->max_dma_len = INT_MAX;
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
*/
dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); ----------(C)
status = device_add(&ctlr->dev);
if (status < 0) {
/* free bus id */
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
goto done;
}
dev_dbg(dev, "registered %s %s\n",
spi_controller_is_slave(ctlr) ? "slave" : "master",
dev_name(&ctlr->dev));
/*
* If we're using a queued driver, start the queue. Note that we don't
* need the queueing logic if the driver is only supporting high-level
* memory operations.
*/
if (ctlr->transfer) {
dev_info(dev, "controller is unqueued, this is deprecated\n");
} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
status = spi_controller_initialize_queue(ctlr); ----------(D)
if (status) {
device_del(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
goto done;
}
}
/* add statistics */
spin_lock_init(&ctlr->statistics.lock);
mutex_lock(&board_lock);
list_add_tail(&ctlr->list, &spi_controller_list); ----------(E)
list_for_each_entry(bi, &board_list, list) ----------(F)
spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
mutex_unlock(&board_lock);
/* Register devices from the device tree and ACPI */
of_register_spi_devices(ctlr);
acpi_register_spi_devices(ctlr);
done:
return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
That is a lot of code, so I have marked the key points (A) to (F) and will walk through them in order:
**(A)** Check that the necessary hooks in the spi_controller are present, i.e. that the ops the core layer depends on have been set. At least one of the transfer hooks (transfer, transfer_one, transfer_one_message) must be hooked up: the chip vendor has to implement at least one way of driving the actual SoC SPI hardware, or the core layer cannot function.
**(B)** Initialize the lists, spinlocks, and mutexes.
**(C)** Add the device to the underlying device model (dev_set_name + device_add).
**(D)** If the vendor did not implement the legacy transfer hook but did hook up transfer_one or transfer_one_message, the core layer's newer queued transfer machinery will be used; the queue is initialized here and described in detail shortly.
**(E)** Add the spi_controller to the global spi_controller_list.
**(F)** Match against board_info, i.e. the spi_device structures describing SPI slaves.
The part that deserves the most attention is the transfer hooks. Long ago, back when spi_controller was still called spi_master, there was only the transfer hook: the vendor implemented transfer at the bottom (take the data and drive the actual SoC hardware), and the SPI core layer used it. Now there are three, transfer, transfer_one, and transfer_one_message, and the spi.h comments describe the difference:
@transfer: adds a message to the controller’s transfer queue.
@transfer_one_message: the subsystem calls the driver to transfer a single message while queuing transfers that arrive in the meantime. When the driver is finished with this message, it must call *spi_finalize_current_message()* so the subsystem can issue the next message
@transfer_one: transfer a single spi_transfer.
- return 0 if the transfer is finished,
- return 1 if the transfer is still in progress. When
the driver is finished with this transfer it must call *spi_finalize_current_transfer()* so the subsystem can issue the next transfer.
Note: transfer_one and transfer_one_message are mutually exclusive; when both are set, the generic subsystem does not call your transfer_one callback
From the comments, a message is built from multiple transfers at the software level:
transfer adds a message to the controller's transfer queue;
transfer_one drives the hardware to perform one single spi_transfer;
transfer_one_message drives the hardware to perform one single spi_message.
Note: per the comments, transfer_one and transfer_one_message are mutually exclusive.
The vendor drivers I have looked at (two different chips) both hook up transfer_one, i.e. they implement one hardware transaction per single spi_transfer; a minimal sketch of such a hook follows.
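To make the transfer_one contract concrete, here is a minimal sketch of a polling-style hook. Everything foo-related (the foo_spi state, base address, and FOO_* registers) is invented for illustration; a real driver would follow its own hardware manual:
/* Hypothetical driver-private state, living in the extra bytes
 * allocated by spi_alloc_master (see section 1) */
struct foo_spi {
	void __iomem *base;
};
#define FOO_TXDATA	0x00	/* invented register offsets */
#define FOO_RXDATA	0x04
#define FOO_STATUS	0x08
#define FOO_DONE	BIT(0)
static int foo_spi_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct foo_spi *hw = spi_controller_get_devdata(ctlr);
	const u8 *tx = xfer->tx_buf;
	u8 *rx = xfer->rx_buf;
	unsigned int i;
	for (i = 0; i < xfer->len; i++) {
		/* push one byte out (dummy 0xff for RX-only transfers) */
		writel(tx ? tx[i] : 0xff, hw->base + FOO_TXDATA);
		/* poll until the shift register has finished */
		while (!(readl(hw->base + FOO_STATUS) & FOO_DONE))
			cpu_relax();
		if (rx)
			rx[i] = readl(hw->base + FOO_RXDATA);
	}
	/* 0 = finished synchronously; an IRQ/DMA driver would return 1
	 * and call spi_finalize_current_transfer() when done */
	return 0;
}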
Since transfer_one (or transfer_one_message) is what everything uses now, let's take a close look at the queue initialization, spi_controller_initialize_queue:
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
int ret;
ctlr->transfer = spi_queued_transfer; ----(A)
if (!ctlr->transfer_one_message) ----(B)
ctlr->transfer_one_message = spi_transfer_one_message;
/* Initialize and start queue */
ret = spi_init_queue(ctlr);
if (ret) {
dev_err(&ctlr->dev, "problem initializing queue\n");
goto err_init_queue;
}
ctlr->queued = true;
ret = spi_start_queue(ctlr);
if (ret) {
dev_err(&ctlr->dev, "problem starting queue\n");
goto err_start_queue;
}
return 0;
err_start_queue:
spi_destroy_queue(ctlr);
err_init_queue:
return ret;
}
(A): transfer is pointed at spi_queued_transfer (we only reach this function when the vendor left transfer unset).
(B): if no transfer_one_message was provided, hook up the core layer's spi_transfer_one_message.
Both calls are analyzed in detail below, when they are actually used. For now, remember: spi_controller->transfer and spi_controller->transfer_one_message now hold core-layer hooks.
Beyond that, the function mainly calls spi_init_queue and spi_start_queue, and sets ctlr->queued = true:
2.1 spi_init_queue
static int spi_init_queue(struct spi_controller *ctlr)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
ctlr->running = false;
ctlr->busy = false;
kthread_init_worker(&ctlr->kworker);
ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
"%s", dev_name(&ctlr->dev));
if (IS_ERR(ctlr->kworker_task)) {
dev_err(&ctlr->dev, "failed to create message pump task\n");
return PTR_ERR(ctlr->kworker_task);
}
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
/*
* Controller config will indicate if this controller should run the
* message pump with high (realtime) priority to reduce the transfer
* latency on the bus by minimising the delay between a transfer
* request and the scheduling of the message pump thread. Without this
* setting the message pump thread will remain at default priority.
*/
if (ctlr->rt) {
dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");
sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
}
return 0;
}
*spi_init_queue* initializes the spi_controller's kthread_worker and kthread_work structures (the kernel's kthread_worker/kthread_work mechanism, effectively a dedicated kernel thread) and sets the work function to *spi_pump_messages*. Finally, if the vendor's spi_controller sets spi_controller->rt, requesting real-time message pumping, the worker thread is moved to the SCHED_FIFO real-time scheduling class, i.e. a higher (real-time) priority.
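For reference, here is the kthread_worker pattern in isolation, a generic sketch that is not taken from the SPI code (the my_* names are made up):
static struct kthread_worker my_worker;
static struct task_struct *my_thread;
static struct kthread_work my_work;
static void my_work_fn(struct kthread_work *work)
{
	/* runs in the dedicated kernel thread, like spi_pump_messages */
}
static int my_setup(void)
{
	kthread_init_worker(&my_worker);
	my_thread = kthread_run(kthread_worker_fn, &my_worker, "my-worker");
	if (IS_ERR(my_thread))
		return PTR_ERR(my_thread);
	kthread_init_work(&my_work, my_work_fn);
	/* later, from any context that wants work done: */
	kthread_queue_work(&my_worker, &my_work);
	return 0;
}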
2.2 spi_start_queue
static int spi_start_queue(struct spi_controller *ctlr)
{
unsigned long flags;
spin_lock_irqsave(&ctlr->queue_lock, flags);
if (ctlr->running || ctlr->busy) {
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return -EBUSY;
}
ctlr->running = true;
ctlr->cur_msg = NULL;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
return 0;
}
This one is simple: it sets running to true, sets cur_msg to NULL, and queues the pump_messages work onto the kthread_worker from section 2.1; that work's function is the *spi_pump_messages* assigned in 2.1.
This completes the registration analysis. Keep *spi_pump_messages* in mind; it returns later in the data transfer path. Putting sections 1 and 2 together, a vendor probe roughly looks like the sketch below.
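A simplified, hypothetical probe, reusing the foo_spi state and foo_spi_transfer_one hook sketched earlier (not any real driver):
static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct foo_spi *hw;
	struct resource *res;
	int ret;
	/* one allocation: spi_controller + sizeof(struct foo_spi) */
	ctlr = spi_alloc_master(&pdev->dev, sizeof(*hw));
	if (!ctlr)
		return -ENOMEM;
	hw = spi_controller_get_devdata(ctlr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hw->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hw->base)) {
		ret = PTR_ERR(hw->base);
		goto err_put;
	}
	ctlr->bus_num = pdev->id;	/* negative id -> dynamic bus number */
	ctlr->num_chipselect = 1;
	ctlr->dev.of_node = pdev->dev.of_node;
	/* only transfer_one is set, so registration takes the
	 * spi_controller_initialize_queue path analyzed above */
	ctlr->transfer_one = foo_spi_transfer_one;
	ret = spi_register_controller(ctlr);
	if (ret)
		goto err_put;
	platform_set_drvdata(pdev, ctlr);
	return 0;
err_put:
	spi_controller_put(ctlr);
	return ret;
}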
3. spi_alloc_device
This allocates a spi_device, i.e. the structure describing an SPI slave. Its implementation is simple:
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
struct spi_device *spi;
if (!spi_controller_get(ctlr))
return NULL;
spi = kzalloc(sizeof(*spi), GFP_KERNEL);
if (!spi) {
spi_controller_put(ctlr);
return NULL;
}
spi->master = spi->controller = ctlr;
spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->cs_gpio = -ENOENT;
spin_lock_init(&spi->statistics.lock);
device_initialize(&spi->dev);
return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
It allocates the structure and performs basic initialization. In practice this function is rarely called directly to describe an SPI slave; the spi_board_info approach is far more common!
4. spi_add_device
This is called right after spi_alloc_device:
int spi_add_device(struct spi_device *spi)
{
static DEFINE_MUTEX(spi_add_lock);
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
int status;
/* Chipselects are numbered 0..max; validate. */
if (spi->chip_select >= ctlr->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
ctlr->num_chipselect);
return -EINVAL;
}
/* Set the bus ID string */
spi_dev_set_name(spi);
/* We need to make sure there's no other device with this
* chipselect **BEFORE** we call setup(), else we'll trash
* its configuration. Lock against concurrent add() calls.
*/
mutex_lock(&spi_add_lock);
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
goto done;
}
if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/* Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
*/
status = spi_setup(spi);
if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status);
goto done;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
if (status < 0)
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
else
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
done:
mutex_unlock(&spi_add_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
After validating the chip select and checking that no other device already uses it, this calls spi_setup and finally device_add to add the device to the device model.
5. spi_new_device
This interface combines interfaces 3 and 4: it first calls *spi_alloc_device* and then *spi_add_device*:
struct spi_device *spi_new_device(struct spi_controller *ctlr,
struct spi_board_info *chip)
{
struct spi_device *proxy;
int status;
/* NOTE: caller did any chip->bus_num checks necessary.
*
* Also, unless we change the return value convention to use
* error-or-pointer (not NULL-or-pointer), troubleshootability
* suggests syslogged diagnostics are best here (ugh).
*/
proxy = spi_alloc_device(ctlr);
if (!proxy)
return NULL;
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
proxy->chip_select = chip->chip_select;
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
proxy->dev.platform_data = (void *) chip->platform_data;
proxy->controller_data = chip->controller_data;
proxy->controller_state = NULL;
if (chip->properties) {
status = device_add_properties(&proxy->dev, chip->properties);
if (status) {
dev_err(&ctlr->dev,
"failed to add properties to '%s': %d\n",
chip->modalias, status);
goto err_dev_put;
}
}
status = spi_add_device(proxy);
if (status < 0)
goto err_remove_props;
return proxy;
err_remove_props:
if (chip->properties)
device_remove_properties(&proxy->dev);
err_dev_put:
spi_dev_put(proxy);
return NULL;
}
Practically speaking, it is better not to call interfaces 3 and 4 separately, but to call spi_new_device instead.
6. spi_register_board_info
Compared with interfaces 3, 4, and 5, this one is by far the most commonly used; you essentially never call 3, 4, or 5 directly. Instead you register a spi_board_info, which describes an SPI slave (i.e. a spi_device), via spi_register_board_info:
struct boardinfo {
struct list_head list;
struct spi_board_info board_info;
};
struct spi_board_info {
char modalias[SPI_NAME_SIZE];
const void *platform_data;
const struct property_entry *properties;
void *controller_data;
int irq;
/* slower signaling on noisy or low voltage boards */
u32 max_speed_hz;
u16 bus_num;
u16 chip_select;
u16 mode;
};
The registration call is:
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
struct boardinfo *bi;
int i;
if (!n)
return 0;
bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
if (!bi)
return -ENOMEM;
for (i = 0; i < n; i++, bi++, info++) {
struct spi_controller *ctlr;
memcpy(&bi->board_info, info, sizeof(*info));
if (info->properties) {
bi->board_info.properties =
property_entries_dup(info->properties);
if (IS_ERR(bi->board_info.properties))
return PTR_ERR(bi->board_info.properties);
}
mutex_lock(&board_lock);
list_add_tail(&bi->list, &board_list);
list_for_each_entry(ctlr, &spi_controller_list, list)
spi_match_controller_to_boardinfo(ctlr,
&bi->board_info);
mutex_unlock(&board_lock);
}
return 0;
}
The core flow: for the n *spi_board_info* entries passed in, allocate n of their wrapper structures, *boardinfo*, perform the necessary initialization, and add them to the global *board_list*.
It then walks spi_controller_list (the abstractions of every SoC SPI controller registered so far) and calls *spi_match_controller_to_boardinfo* on each:
static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
struct spi_board_info *bi)
{
struct spi_device *dev;
if (ctlr->bus_num != bi->bus_num)
return;
dev = spi_new_device(ctlr, bi);
if (!dev)
dev_err(ctlr->dev.parent, "can't create new device for %s\n",
bi->modalias);
}
When the bus numbers match, it calls spi_new_device; combined with sections 3, 4, and 5 above, this clearly means adding a spi_device (an SPI slave).
So, to summarize the usual way of adding an SPI device (a board-file sketch follows the call tree below):
1. Describe the board's n SPI slaves with spi_board_info (name, chip select, mode, speed).
2. Call spi_register_board_info to hand these n device descriptions to the SPI core layer.
3. spi_register_board_info walks all spi_controllers and calls spi_match_controller_to_boardinfo.
4. spi_match_controller_to_boardinfo calls spi_new_device when the bus numbers match.
5. spi_new_device in turn calls spi_alloc_device and spi_add_device to allocate, initialize, and add the spi_device.
spi_register_board_info
  -> spi_match_controller_to_boardinfo
       -> spi_new_device
            -> spi_alloc_device
            -> spi_add_device
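A typical board-file usage would look like this sketch (the modalias values, bus number, and speeds are made up for illustration):
/* Hypothetical board file: describe two SPI slaves on bus 0 */
static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias     = "spidev",	/* matched against a spi_driver name */
		.bus_num      = 0,		/* must equal ctlr->bus_num */
		.chip_select  = 0,
		.max_speed_hz = 10 * 1000 * 1000,
		.mode         = SPI_MODE_0,
	},
	{
		.modalias     = "m25p80",
		.bus_num      = 0,
		.chip_select  = 1,
		.max_speed_hz = 25 * 1000 * 1000,
		.mode         = SPI_MODE_3,
	},
};
static int __init board_spi_init(void)
{
	/* copies the entries into boardinfo wrappers; for controllers
	 * already registered, spi_new_device is called right away */
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}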
7. spi_message_init
The message initialization function is simple enough that the code speaks for itself:
static inline void spi_message_init_no_memset(struct spi_message *m)
{
INIT_LIST_HEAD(&m->transfers);
INIT_LIST_HEAD(&m->resources);
}
static inline void spi_message_init(struct spi_message *m)
{
memset(m, 0, sizeof *m);
spi_message_init_no_memset(m);
}
8. spi_message_add_tail
The message-related structure operations are all straightforward:
static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
list_add_tail(&t->transfer_list, &m->transfers);
}
The remaining message/transfer helpers are not analyzed one by one; they are simple and worth a few minutes of the reader's time. A minimal usage sketch of the two helpers above follows.
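As a quick usage sketch, this is how a slave driver might string two spi_transfers into one spi_message. The register layout and the "read" command bit are invented; also note that in real code the buffers must be DMA-safe, so stack buffers are only acceptable for PIO-only controllers:
static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	struct spi_transfer xfers[2] = { };
	struct spi_message msg;
	u8 cmd = reg | 0x80;		/* hypothetical "read" bit */
	spi_message_init(&msg);
	xfers[0].tx_buf = &cmd;		/* transfer 1: send the command */
	xfers[0].len = 1;
	spi_message_add_tail(&xfers[0], &msg);
	xfers[1].rx_buf = val;		/* transfer 2: clock in the reply */
	xfers[1].len = 1;
	spi_message_add_tail(&xfers[1], &msg);
	return spi_sync(spi, &msg);	/* or spi_async(), see section 9 */
}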
9. spi_async
This interface performs asynchronous transfers:
int spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
int ret;
unsigned long flags;
ret = __spi_validate(spi, message);
if (ret != 0)
return ret;
spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
if (ctlr->bus_lock_flag)
ret = -EBUSY;
else
ret = __spi_async(spi, message);
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
This calls into __spi_async:
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
/*
* Some controllers do not support doing regular SPI transfers. Return
* ENOTSUPP when this is the case.
*/
if (!ctlr->transfer)
return -ENOTSUPP;
message->spi = spi;
SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
trace_spi_message_submit(message);
return ctlr->transfer(spi, message);
}
This lands in spi_controller->transfer. Remember section 2? When the spi_controller was registered, *spi_controller_initialize_queue* was called:
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
int ret;
ctlr->transfer = spi_queued_transfer;
if (!ctlr->transfer_one_message)
ctlr->transfer_one_message = spi_transfer_one_message;
....
}
So when an async transfer is initiated, ctlr->transfer is invoked, which is really *spi_queued_transfer*:
9.1 spi_queued_transfer
This is the step right before the transfer is actually carried out:
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
return __spi_queued_transfer(spi, msg, true);
}
static int __spi_queued_transfer(struct spi_device *spi,
struct spi_message *msg,
bool need_pump)
{
struct spi_controller *ctlr = spi->controller;
unsigned long flags;
spin_lock_irqsave(&ctlr->queue_lock, flags);
if (!ctlr->running) {
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return -ESHUTDOWN;
}
msg->actual_length = 0;
msg->status = -EINPROGRESS;
list_add_tail(&msg->queue, &ctlr->queue);
if (!ctlr->busy && need_pump)
kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return 0;
}
As shown, the incoming spi_message is linked via its queue field onto spi_controller->queue, and the kthread_work (pump_messages) is kicked off. Next we need to look at what runs when that kthread_work wakes up: the spi_pump_messages function hooked up during queue initialization. But first, a quick look at how a caller typically drives spi_async (see the sketch below).
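A minimal caller sketch, assuming a hypothetical foo slave driver; the completion wait at the end is essentially what spi_sync does internally:
static void foo_complete(void *context)
{
	/* runs when spi_finalize_current_message fires; may be atomic context */
	complete(context);
}
static int foo_async_write(struct spi_device *spi, const void *buf, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len    = len,
	};
	struct spi_message msg;
	int ret;
	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	msg.complete = foo_complete;
	msg.context  = &done;
	ret = spi_async(spi, &msg);	/* queues msg, kicks pump_messages */
	if (ret)
		return ret;
	wait_for_completion(&done);
	return msg.status;
}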
9.2 spi_pump_messages
static void spi_pump_messages(struct kthread_work *work)
{
struct spi_controller *ctlr =
container_of(work, struct spi_controller, pump_messages);
__spi_pump_messages(ctlr, true);
}
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
unsigned long flags;
bool was_busy = false;
int ret;
/* Lock queue */
spin_lock_irqsave(&ctlr->queue_lock, flags);
/* Make sure we are not already running a message */
if (ctlr->cur_msg) {
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
/* If another context is idling the device then defer */
if (ctlr->idling) {
kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
/* Check if the queue is idle */
if (list_empty(&ctlr->queue) || !ctlr->running) {
if (!ctlr->busy) {
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
/* Only do teardown in the thread */
if (!in_kthread) {
kthread_queue_work(&ctlr->kworker,
&ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
ctlr->busy = false;
ctlr->idling = true;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
kfree(ctlr->dummy_rx);
ctlr->dummy_rx = NULL;
kfree(ctlr->dummy_tx);
ctlr->dummy_tx = NULL;
if (ctlr->unprepare_transfer_hardware &&
ctlr->unprepare_transfer_hardware(ctlr))
dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
if (ctlr->auto_runtime_pm) {
pm_runtime_mark_last_busy(ctlr->dev.parent);
pm_runtime_put_autosuspend(ctlr->dev.parent);
}
trace_spi_controller_idle(ctlr);
spin_lock_irqsave(&ctlr->queue_lock, flags);
ctlr->idling = false;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
/* Extract head of queue */
ctlr->cur_msg =
list_first_entry(&ctlr->queue, struct spi_message, queue);
list_del_init(&ctlr->cur_msg->queue);
if (ctlr->busy)
was_busy = true;
else
ctlr->busy = true;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
mutex_lock(&ctlr->io_mutex);
if (!was_busy && ctlr->auto_runtime_pm) {
ret = pm_runtime_get_sync(ctlr->dev.parent);
if (ret < 0) {
pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
mutex_unlock(&ctlr->io_mutex);
return;
}
}
if (!was_busy)
trace_spi_controller_busy(ctlr);
if (!was_busy && ctlr->prepare_transfer_hardware) {
ret = ctlr->prepare_transfer_hardware(ctlr);
if (ret) {
dev_err(&ctlr->dev,
"failed to prepare transfer hardware\n");
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
mutex_unlock(&ctlr->io_mutex);
return;
}
}
trace_spi_message_start(ctlr->cur_msg);
if (ctlr->prepare_message) {
ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
if (ret) {
dev_err(&ctlr->dev, "failed to prepare message: %d\n",
ret);
ctlr->cur_msg->status = ret;
spi_finalize_current_message(ctlr);
goto out;
}
ctlr->cur_msg_prepared = true;
}
ret = spi_map_msg(ctlr, ctlr->cur_msg);
if (ret) {
ctlr->cur_msg->status = ret;
spi_finalize_current_message(ctlr);
goto out;
}
ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
if (ret) {
dev_err(&ctlr->dev,
"failed to transfer one message from queue\n");
goto out;
}
out:
mutex_unlock(&ctlr->io_mutex);
/* Prod the scheduler in case transfer_one() was busy waiting */
if (!ret)
cond_resched();
}
There is a fair amount here; step by step:
1. If a message is already being processed, i.e. cur_msg is not NULL, do not start another transfer; return immediately.
2. Take the first spi_message off ctlr->queue into ctlr->cur_msg and mark ctlr->busy. was_busy records whether the controller was busy before this call: on the first entry it is false; on re-entry while a transfer sequence is still in flight, ctlr->busy is already set, so was_busy becomes true.
3. If was_busy is false, resume the hardware via runtime PM (when auto_runtime_pm is set) and call the spi_controller's prepare_transfer_hardware hook.
4. If prepare_message is set, call ctlr->prepare_message, the SoC vendor hook for pre-transfer setup. If this step fails, the error code is stored in ctlr->cur_msg->status; otherwise ctlr->cur_msg_prepared = true marks the preparation as successful.
5. Call *spi_map_msg*, which mainly handles DMA-related mapping.
6. Call ctlr->*transfer_one_message*.
If the low-level driver did not provide its own *transfer_one_message*, step 6 lands in the core layer's spi_transfer_one_message, hooked up during initialization:
9.2.1 spi_transfer_one_message
static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
unsigned long long ms = 1;
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
spi_set_cs(msg->spi, true);
SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
spi_statistics_add_transfer_stats(statm, xfer, ctlr);
spi_statistics_add_transfer_stats(stats, xfer, ctlr);
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&ctlr->xfer_completion);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
SPI_STATISTICS_INCREMENT_FIELD(statm,
errors);
SPI_STATISTICS_INCREMENT_FIELD(stats,
errors);
dev_err(&msg->spi->dev,
"SPI transfer failed: %d\n", ret);
goto out;
}
if (ret > 0) {
ret = 0;
ms = 8LL * 1000LL * xfer->len;
do_div(ms, xfer->speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
ms = UINT_MAX;
ms = wait_for_completion_timeout(&ctlr->xfer_completion,
msecs_to_jiffies(ms));
}
if (ms == 0) {
SPI_STATISTICS_INCREMENT_FIELD(statm,
timedout);
SPI_STATISTICS_INCREMENT_FIELD(stats,
timedout);
dev_err(&msg->spi->dev,
"SPI transfer timed out\n");
msg->status = -ETIMEDOUT;
}
} else {
if (xfer->len)
dev_err(&msg->spi->dev,
"Bufferless transfer has length %u\n",
xfer->len);
}
trace_spi_transfer_stop(msg, xfer);
if (msg->status != -EINPROGRESS)
goto out;
if (xfer->delay_usecs) {
u16 us = xfer->delay_usecs;
if (us <= 10)
udelay(us);
else
usleep_range(us, us + DIV_ROUND_UP(us, 10));
}
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
keep_cs = true;
} else {
spi_set_cs(msg->spi, false);
udelay(10);
spi_set_cs(msg->spi, true);
}
}
msg->actual_length += xfer->len;
}
out:
if (ret != 0 || !keep_cs)
spi_set_cs(msg->spi, false);
if (msg->status == -EINPROGRESS)
msg->status = ret;
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
spi_res_release(ctlr, msg);
spi_finalize_current_message(ctlr);
return ret;
}
This is where the transfer is actually issued:
1. First assert the chip select (cs); this ends up in the vendor's set_cs hook.
2. Take each transfer off the spi_message's transfer_list in turn and call the vendor's transfer_one. If transfer_one returns a positive value, the transfer is still in flight, and the core waits on xfer_completion with a timeout of twice the theoretical transfer time (8 * 1000 * len / speed_hz, in ms) plus 200 ms of tolerance; for example, 1024 bytes at 1 MHz gives 8 + 8 + 200 = 216 ms.
3. If delay_usecs is configured, perform the required udelay/usleep_range before the next transfer.
4. If cs_change is configured, toggle the chip select between transfers.
5. Accumulate the actually transferred bytes into actual_length.
6. Call spi_finalize_current_message to announce that the message is complete.
10. Summary
The overall relationships, roughly:
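In text form, the queued transfer path analyzed in sections 9 through 9.2.1 is:
spi_async
  -> spi_queued_transfer                (ctlr->transfer, set by spi_controller_initialize_queue)
       -> kthread_queue_work(pump_messages)
            -> spi_pump_messages        (runs in the kworker thread)
                 -> ctlr->transfer_one_message  (core default: spi_transfer_one_message)
                      -> spi_set_cs
                      -> ctlr->transfer_one     (vendor hook, one spi_transfer at a time)
                      -> spi_finalize_current_message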