linux spi 传输代码分析
- 数据结构分析
- 传输流程分析
- SPI传输过程流程图
- 小结
1.数据结构分析
/*
 * One SPI "message": an atomic sequence of spi_transfer segments that is
 * submitted to a controller queue as a unit and completed via callback.
 */
struct spi_message {
struct list_head transfers; /* list of spi_transfer segments, linked via their transfer_list */
struct spi_device *spi; /* device this message is addressed to */
unsigned is_dma_mapped:1; /* set when tx_dma/rx_dma of each transfer are pre-mapped */
/* REVISIT: we might want a flag affecting the behavior of the
 * last transfer ... allowing things like "read 16 bit length L"
 * immediately followed by "read L bytes". Basically imposing
 * a specific message scheduling algorithm.
 *
 * Some controller drivers (message-at-a-time queue processing)
 * could provide that as their default scheduling algorithm. But
 * others (with multi-message pipelines) could need a flag to
 * tell them about such special cases.
 */
/* completion is reported through a callback */
void (*complete)(void *context);
void *context; /* argument handed to complete() */
unsigned frame_length; /* presumably total bytes in the message — see kernel doc in spi.h */
unsigned actual_length; /* presumably bytes transferred so far — see kernel doc in spi.h */
int status; /* 0 on success, else negative errno */
/* for optional use by whatever driver currently owns the
 * spi_message ... between calls to spi_async and then later
 * complete(), that's the spi_controller controller driver.
 */
struct list_head queue; /* link on the controller's message queue */
void *state;
/* list of spi_res resources when the spi message is processed */
struct list_head resources;
};
/*
 * One SPI "transfer" segment: a single read/write exchange that is chained
 * into an spi_message via transfer_list.
 */
struct spi_transfer {
/* it's ok if tx_buf == rx_buf (right?)
 * for MicroWire, one buffer must be null
 * buffers must work with dma_*map_single() calls, unless
 * spi_message.is_dma_mapped reports a pre-existing mapping
 */
const void *tx_buf; /* data to write, or NULL for a read-only segment */
void *rx_buf; /* buffer for data read, or NULL for a write-only segment */
unsigned len; /* size of rx/tx buffers in bytes */
dma_addr_t tx_dma; /* DMA address of tx_buf when is_dma_mapped is set */
dma_addr_t rx_dma; /* DMA address of rx_buf when is_dma_mapped is set */
struct sg_table tx_sg;
struct sg_table rx_sg;
unsigned cs_change:1; /* toggle chip select after this transfer */
unsigned tx_nbits:3; /* bus width for writes; one of SPI_NBITS_* below */
unsigned rx_nbits:3; /* bus width for reads; one of SPI_NBITS_* below */
#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
u8 bits_per_word; /* word size override for this transfer; 0 = device default */
u16 delay_usecs; /* NOTE(review): superseded by the spi_delay fields below — confirm against kernel version */
struct spi_delay delay;
struct spi_delay cs_change_delay;
struct spi_delay word_delay;
u32 speed_hz; /* clock speed override for this transfer; 0 = device default */
u32 effective_speed_hz;
unsigned int ptp_sts_word_pre;
unsigned int ptp_sts_word_post;
struct ptp_system_timestamp *ptp_sts;
bool timestamped_pre;
bool timestamped_post;
struct list_head transfer_list; /* link on spi_message.transfers */
};
2.spi 传输流程分析
(1)spi 初始化过程
一般spi platform 设备驱动注册过程中执行probe 函数完成对spi controller 的一系列初始化。
对于本文分析我们先关注spi_register_controller 函数,函数内部会执行有关spi controller 传输函数的初始化,这里先做一个铺垫,可以回头再看。
/*
 * Typical probe of an SPI platform controller driver: install the
 * vendor/IP-specific transfer hooks, then register the controller
 * with the SPI core.
 */
static int xxx_spi_probe(struct platform_device *pdev)
{
...
/* vendor/IP-supplied whole-message transfer hook */
ctlr->transfer_one_message = xxx_transfer_one_message;
/* per the kernel doc, transfer_one is consumed by the SPI core's generic transfer_one_message() */
ctlr->transfer_one = xxx_spi_transfer_one;
...
/* was "ctrl" — undefined; the controller variable used above is ctlr */
err = spi_register_controller(ctlr);
...
}
/*
 * Core registration: decides, based on which hooks the driver installed,
 * whether to set up the queued message-pump machinery.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
...
/*
 * If we're using a queued driver, start the queue. Note that we don't
 * need the queueing logic if the driver is only supporting high-level
 * memory operations.
 */
if (ctlr->transfer) {
dev_info(dev, "controller is unqueued, this is deprecated\n");
} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
/* xxx_spi_probe() already registered the transfer_one/transfer_one_message hooks */
status = spi_controller_initialize_queue(ctlr);
if (status) {
device_del(&ctlr->dev);
goto free_bus_id;
}
}
...
}
/*
 * Set up the queued transfer path: install the queueing hooks, create the
 * message-pump worker, and start the queue.
 */
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
...
/* Install transfer: its main job is appending an spi_message to the queue (analyzed below). */
ctlr->transfer = spi_queued_transfer;
/* If the driver did not implement transfer_one_message, default to the SPI core's generic one. */
if (!ctlr->transfer_one_message)
ctlr->transfer_one_message = spi_transfer_one_message;
/* Initialize and start queue */
ret = spi_init_queue(ctlr); /* create the kworker and init the pump work item */
...
ctlr->queued = true;
/* start the queue; the pump work can now be scheduled on the worker */
ret = spi_start_queue(ctlr);
...
}
/*
 * Create the controller's kthread worker and initialize the message-pump
 * work item that will drain the message queue.
 */
static int spi_init_queue(struct spi_controller *ctlr)
{
ctlr->running = false;
ctlr->busy = false;
kthread_init_worker(&ctlr->kworker); /* initialize the worker */
/* create and wake a worker thread */
ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
"%s", dev_name(&ctlr->dev));
...
/* Init the work item (queued later); its handler is spi_pump_messages. */
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
...
return 0;
}
(2)spi read 为例分析
以下以spi read 函数分析spi 传输数据过程。
函数调用主过程:
spi_read
  -->spi_sync_transfer
    -->spi_message_init_with_transfers //初始化spi message 并且添加 transfers到message
    注意:这里的message 以及transfer空间位于栈中,因为使用同步阻塞接口,在传输完成之前不会退出当前上下文。
    -->spi_sync
      -->__spi_sync
/**
 * spi_read - SPI synchronous read
 * @spi: device from which data will be read
 * @buf: data buffer
 * @len: data buffer size
 * Context: can sleep
 *
 * This function reads the buffer @buf.
 * Callable only from contexts that can sleep.
 *
 * Return: zero on success, else a negative error code.
 */
static inline int
spi_read(struct spi_device *spi, void *buf, size_t len)
{
/* one read-only segment: tx_buf stays NULL, all other fields zeroed */
struct spi_transfer t = {
.rx_buf = buf,
.len = len,
};
/* the transfer lives on the stack; safe because spi_sync_transfer() blocks until done */
return spi_sync_transfer(spi, &t, 1);
}
/**
 * spi_sync_transfer - synchronous SPI data transfer
 * @spi: device with which data will be exchanged
 * @xfers: An array of spi_transfers
 * @num_xfers: Number of items in the xfer array
 * Context: can sleep
 *
 * Does a synchronous SPI data transfer of the given spi_transfer array.
 *
 * For more specific semantics see spi_sync().
 *
 * Return: zero on success, else a negative error code.
 */
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
unsigned int num_xfers)
{
/* on-stack message; valid because spi_sync() does not return until completion */
struct spi_message msg;
spi_message_init_with_transfers(&msg, xfers, num_xfers);
return spi_sync(spi, &msg);
}
接下来重点分析spi_sync 函数的实现过程:
/*
 * Synchronous submission path: queue the message, optionally pump the queue
 * from the caller's context, then block on an on-stack completion until the
 * message's complete() callback fires.
 */
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done); /* on-stack completion we block on below */
...
/* hook up the message's completion callback and its context */
message->complete = spi_complete;
message->context = &done;
message->spi = spi;
...
/* focus of this article: the queued-transfer path —
 * when the queued method is in use, __spi_queued_transfer() is called */
status = __spi_queued_transfer(spi, message, false);
/* opportunistically pump the queue from this context (not from the kworker) */
__spi_pump_messages(ctlr, false);
/* block until complete() signals the completion */
wait_for_completion(&done);
}
/*
 * Queue one message on the controller and, when requested, schedule the
 * message-pump work so the kworker will process the queue.
 */
static int __spi_queued_transfer(struct spi_device *spi,
struct spi_message *msg,
bool need_pump)
{
struct spi_controller *ctlr = spi->controller;
...
list_add_tail(&msg->queue, &ctlr->queue); /* append to the tail of the controller's queue */
if (!ctlr->busy && need_pump) /* if idle and pumping requested, queue the work item */
/* ctlr->pump_messages was initialized in spi_init_queue();
 * here it is placed on the controller's kworker to await scheduling.
 * Its handler was set to spi_pump_messages, so when the worker runs
 * this work item, spi_pump_messages() executes — analyzed next. */
kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
...
return 0;
}
/*
 * kthread_work handler: recover the owning controller from the embedded
 * work item and pump its message queue from kthread context.
 */
static void spi_pump_messages(struct kthread_work *work)
{
struct spi_controller *ctlr =
container_of(work, struct spi_controller, pump_messages);
/* in_kthread = true: we are running on the controller's kworker */
__spi_pump_messages(ctlr, true);
}
/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
...
/* Extract head of queue */
msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
ctlr->cur_msg = msg;
list_del_init(&msg->queue);
...
/* prepare hardware for transfer */
ret = ctlr->prepare_transfer_hardware(ctlr);
/* run the vendor-registered per-message preparation hook */
ret = ctlr->prepare_message(ctlr, msg);
/* prepare (map) the tx/rx buffers */
ret = spi_map_msg(ctlr, msg);
/* Invoke the registered transfer_one_message hook. NOTE: this article focuses
 * on the case where the vendor did not implement it — per the init analysis
 * above, the SPI core's generic spi_transfer_one_message() is then used and
 * only transfer_one is vendor-supplied. */
ret = ctlr->transfer_one_message(ctlr, msg);
....
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
...
spi_set_cs(msg->spi, true);/* assert (enable) chip select */
...
/* walk every transfer segment queued on this message */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
...
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&ctlr->xfer_completion); /* re-arm the per-transfer completion */
/* call transfer_one (normally registered by the vendor at init time)
 * to perform one transfer segment */
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
...
/* a positive return means the transfer completes asynchronously: wait for it */
if (ret > 0) {
/* block on the per-xfer completion until the hardware/IRQ path signals it */
ret = spi_transfer_wait(ctlr, msg, xfer);
if (ret < 0)
msg->status = ret;
}
...
}
out:
/* all transfers done, or one failed (e.g. timeout) */
if (ret != 0 || !keep_cs)
spi_set_cs(msg->spi, false);
if (msg->status == -EINPROGRESS)
msg->status = ret;
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
/* release the resources held by this message */
spi_res_release(ctlr, msg);
/* prepare for the next message: reschedule the pump work and signal the
 * message completion, which lets __spi_sync() return */
spi_finalize_current_message(ctlr);
}
3.spi 传输过程流程图
根据前文的分析,spi 传输过程流程图如图所示。
4.小结
本文简单分析了spi传输的大致过程,其中涉及到spi 传输将会使用work queue 机制完成spi message 的传输任务。其处理函数将会遍历处理spi message queue,因此假如当前message 传输未完成,将会排队等待执行。
另外,spi 同步接口使用完成信号量阻塞等待传输任务完成。传输任务完成会释放完成信号量,其中每次最小的spi xfer 单位传输也是通过完成信号量实现同步/阻塞的,保证xfer 按照顺序传输。
最后,SPI core已经实现了标准的传输功能/逻辑,厂商一般可以复用此逻辑和实现自己平台相关的最小传输单位控制hook函数,以及对应的传输event/IRQ 处理,这部分代码都跟具体的平台相关。
有关work queue的机制实现比较简单,后续单独分析。