TCC803x SPI Driver Analysis

What is SPI bit-banging?

http://blog.sina.com.cn/s/blog_6524fd1f01010wsv.html

SPI only distinguishes master mode and slave mode; there is no separate notion of a "read" or a "write". A peripheral's write and read happen at the same time: if only a write is needed, the master simply ignores the bytes it receives; conversely, if the master wants to read a byte from the slave, it must send a dummy byte to clock the slave's data out. In other words, sending a byte always yields a received byte, and receiving a byte always requires sending one.
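
This shows up directly in the spidev user-space interface: to read n bytes the caller must also clock out n bytes (real data or dummy bytes). A minimal sketch, assuming fd is an already-opened spidev file descriptor and 0xFF is used as the dummy byte:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

/* read 4 bytes from the slave by shifting out 4 dummy 0xFF bytes */
static int spi_read4(int fd, uint8_t rx[4])
{
	uint8_t tx[4] = { 0xFF, 0xFF, 0xFF, 0xFF };	/* dummy bytes, only there to drive SCLK */
	struct spi_ioc_transfer xfer;

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;	/* what the master shifts out */
	xfer.rx_buf = (unsigned long)rx;	/* what it shifts in at the same time */
	xfer.len    = sizeof(tx);

	/* one transfer -> SPI_IOC_MESSAGE(1); lands in spidev_ioctl()'s default branch */
	return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
}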

Further reading: "SPI原理超详细讲解" (a detailed SPI primer by Z小旋 on CSDN).

Test program:

Reference: "Linux下SPI驱动的移植和应用程序的测试" (porting the SPI driver on Linux and testing it from an application, by ccccccsdn on CSDN).
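
Before any transfer, a user-space test usually configures the device node first. A minimal sketch of that setup (the node name /dev/spidev0.0 is only an example; the real name depends on the board and device tree):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int main(void)
{
	uint8_t  mode  = SPI_MODE_0;	/* CPOL = 0, CPHA = 0 */
	uint8_t  bits  = 8;		/* bits per word */
	uint32_t speed = 1000000;	/* 1 MHz SCLK */
	int fd = open("/dev/spidev0.0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* handled by the non-default cases of spidev_ioctl() */
	ioctl(fd, SPI_IOC_WR_MODE, &mode);
	ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits);
	ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);

	/* ... perform transfers with SPI_IOC_MESSAGE(n) here ... */

	close(fd);
	return 0;
}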

SPI device-side driver (spidev):

The spidev_open function:

static int spidev_open(struct inode *inode, struct file *filp)
{
	struct spidev_data	*spidev;
	int			status = -ENXIO;

	mutex_lock(&device_list_lock);

    // look up the spidev device matching the device number
	list_for_each_entry(spidev, &device_list, device_entry) {
		if (spidev->devt == inode->i_rdev) {
			status = 0;
			break;
		}
	}
    // allocate memory for tx_buffer and rx_buffer
	if (!spidev->tx_buffer) {
		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
	}

	if (!spidev->rx_buffer) {
		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
	}

    // stash the spidev pointer so read/write/ioctl can retrieve it later
	filp->private_data = spidev;

	mutex_unlock(&device_list_lock);
	return 0;

}

The spidev_data structure referenced in the open function:

struct spidev_data {
	dev_t			devt;
	struct spi_device	*spi;
	struct list_head	device_entry;
    ......
	u8			*tx_buffer;
	u8			*rx_buffer;
	u32			speed_hz;
};

Data transfer starts from the spidev_ioctl function; look directly at the default: branch:

static long spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int			retval = 0;
	struct spidev_data	*spidev;
	struct spi_device	*spi;
	unsigned		n_ioc;
	struct spi_ioc_transfer	*ioc;

	spidev = filp->private_data;
	spi = spi_dev_get(spidev->spi);

	switch (cmd) {
	/* ...read/write mode, speed and bits_per_word cases omitted... */
	default:
		ioc = spidev_get_ioc_message(cmd, (struct spi_ioc_transfer __user *)arg, &n_ioc);
		if (!ioc)
			break;	/* n_ioc is also 0 */

		/* translate to spi_message, execute */
		retval = spidev_message(spidev, ioc, n_ioc);
		kfree(ioc);
		break;
	}

	return retval;
}
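
The default branch catches SPI_IOC_MESSAGE(n) for any n: the transfer count is not passed as a separate argument but recovered from the size field encoded in the ioctl command word. A simplified sketch of how spidev_get_ioc_message does this (error checks trimmed):

static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
		       unsigned *n_ioc)
{
	u32 tmp;

	/* SPI_IOC_MESSAGE(n) encodes n * sizeof(struct spi_ioc_transfer)
	 * in the ioctl size field, so the count falls out of _IOC_SIZE() */
	tmp = _IOC_SIZE(cmd);
	*n_ioc = tmp / sizeof(struct spi_ioc_transfer);
	if (*n_ioc == 0)
		return NULL;

	/* copy the whole array of spi_ioc_transfer structures from user space */
	return memdup_user(u_ioc, tmp);
}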

The spidev_message function:

static int spidev_message(struct spidev_data *spidev,
		struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message	msg;
	struct spi_transfer	*k_xfers;
	struct spi_transfer	*k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned		n, total, tx_total, rx_total;
	u8			*tx_buf, *rx_buf;
	int			status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);

	
	tx_buf = spidev->tx_buffer;
	rx_buf = spidev->rx_buffer;
	total = 0;
	tx_total = 0;
	rx_total = 0;

	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n ;
			n--, k_tmp++, u_tmp++) {
		k_tmp->len = u_tmp->len;

		total += k_tmp->len;

		if (u_tmp->tx_buf) {
		
			tx_total += k_tmp->len;

			k_tmp->tx_buf = tx_buf;
            // copy each spi_ioc_transfer's tx_buf from user space into spidev->tx_buffer;
            // the corresponding spi_transfer's tx_buf then points into that same buffer
			if (copy_from_user(tx_buf, (const u8 __user *)(uintptr_t) u_tmp->tx_buf,
					u_tmp->len)) {
				/* error handling omitted in this excerpt */
			}
			tx_buf += k_tmp->len;
		}

        // copy the remaining spi_ioc_transfer fields into the corresponding spi_transfer
		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay_usecs = u_tmp->delay_usecs;
		k_tmp->speed_hz = u_tmp->speed_hz;
		if (!k_tmp->speed_hz)
			k_tmp->speed_hz = spidev->speed_hz;

        // append the spi_transfer to the spi_message's transfer list
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spidev_sync(spidev, &msg);

	rx_buf = spidev->rx_buffer;
    // copy the data received into spidev->rx_buffer back out to each spi_ioc_transfer's rx_buf
	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (copy_to_user((u8 __user *)(uintptr_t) u_tmp->rx_buf, rx_buf,
					u_tmp->len)) {
				/* error handling omitted in this excerpt */
			}
			rx_buf += u_tmp->len;
		}
	}

	return status;
}
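
To see what spidev_message() iterates over from the user side, here is a sketch that chains two transfers in a single SPI_IOC_MESSAGE(2) call: a one-byte command write followed by a response read. The command value 0x9F is only an example (the JEDEC-ID opcode on many SPI NOR flashes); a real peripheral may use a different protocol:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

/* send a 1-byte command, then clock in a 3-byte response,
 * as two spi_ioc_transfer entries handled in one spidev_message() pass */
static int spi_cmd_read(int fd, uint8_t cmd, uint8_t resp[3])
{
	struct spi_ioc_transfer xfer[2];

	memset(xfer, 0, sizeof(xfer));

	xfer[0].tx_buf = (unsigned long)&cmd;	/* transfer 0: command out */
	xfer[0].len    = 1;

	xfer[1].rx_buf = (unsigned long)resp;	/* transfer 1: response in (no tx buffer supplied) */
	xfer[1].len    = 3;

	/* both transfers run under one chip-select assertion unless cs_change is set */
	return ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
}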

The spi_message_add_tail function:

static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
	list_add_tail(&t->transfer_list, &m->transfers);
}
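
For comparison, an in-kernel SPI client driver builds the same objects by hand: initialize a spi_message, append spi_transfer entries with spi_message_add_tail(), and hand the message to spi_sync(). A minimal sketch (buffer handling simplified; real drivers typically use DMA-safe, kmalloc'ed buffers):

/* sketch of the usual kernel-side client pattern */
static int example_cmd_read(struct spi_device *spi, u8 *cmd, u8 *resp, size_t len)
{
	struct spi_transfer t[2] = {
		{ .tx_buf = cmd,  .len = 1   },		/* shift the command out */
		{ .rx_buf = resp, .len = len },		/* then clock the reply in */
	};
	struct spi_message m;

	spi_message_init(&m);			/* initializes m.transfers (the list head) */
	spi_message_add_tail(&t[0], &m);	/* list_add_tail() onto m.transfers */
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);		/* blocks until the whole message completes */
}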

Analysis of the spidev_sync function:

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
	int status;
	struct spi_device *spi;

	spi = spidev->spi;

	status = spi_sync(spi, message);

	return status;
}
spi_sync
    __spi_sync

The __spi_sync function:

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
    // note the completion 'done' declared on the stack here
    DECLARE_COMPLETION_ONSTACK(done);   
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

    // callback used to signal the completion when the message finishes
    message->complete = spi_complete;
    // message->context points at the on-stack completion 'done'
	message->context = &done;

	if (ctlr->transfer == spi_queued_transfer) {
		status = __spi_queued_transfer(spi, message, false);
	} 

	if (status == 0) {
	
		if (ctlr->transfer == spi_queued_transfer) {
            // path 1
			__spi_pump_messages(ctlr, false);
		}
        // sleep here until the completion is signalled
        wait_for_completion(&done);
		status = message->status;
	}

    // clear the completion context
    message->context = NULL;

	return status;
}
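
The sleep/wake pair here is a plain completion: message->complete was set to spi_complete, which simply completes whatever was stored in message->context, i.e. the on-stack done declared above. In the kernel source it is just:

static void spi_complete(void *arg)
{
	complete(arg);	/* arg == message->context == &done from __spi_sync() */
}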

The __spi_queued_transfer function:

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);

	return 0;
}
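
The need_pump argument goes unused in this abridged excerpt. In the full source it matters for the asynchronous path: spi_async() ends up here with need_pump = true, and in that case queuing the message also kicks the kworker immediately instead of relying on the caller to pump, roughly:

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		/* async path: schedule spi_pump_messages() on the kworker */
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);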

The __spi_pump_messages function: when it is called directly, in_kthread is false; when it is invoked through the kworker work item, in_kthread is true.

static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;

    // a message is already being transferred
    if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

    // the controller is idling: do nothing here except requeue the work so this check runs again later (polling)
    if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}
    // with a single message the queue is empty after finalization, so this final cleanup runs;
    // if messages keep arriving the queue may stay non-empty, and they are transferred one after
    // another, with the final cleanup deferred until everything has been sent.
	// ctlr->running: whether the controller's worker thread (kworker_task) is running
	if (list_empty(&ctlr->queue) || !ctlr->running) {

        // ctlr->busy == true means a transfer has taken place previously
        if (!ctlr->busy) {        //---------- path 3

			return;
		}
        // a spi_flush_queue() call would take this branch, but spi_flush_queue() is not used here
        if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,&ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;        //---------- path 2
        // ctlr->idling: the controller is idling and transfers nothing; this window is used
        // for the cleanup that follows a completed transfer, e.g. re-initializing state
		ctlr->idling = true;

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
        // leave the idling state: the post-transfer cleanup is done and new transfers may start
		ctlr->idling = false;
		return;
	}

    //---------- path 1

	ctlr->cur_msg =list_first_entry(&ctlr->queue, struct spi_message, queue);
	list_del_init(&ctlr->cur_msg->queue);

    // before transferring data, ctlr->busy must be set to true
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;

	ctlr->transfer_one_message(ctlr, ctlr->cur_msg);

}

The transfer_one_message function pointer points to spi_transfer_one_message:

static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		if (xfer->tx_buf || xfer->rx_buf) {
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
		} 

		if (msg->status != -EINPROGRESS) {
			/* error handling omitted in this excerpt */
		}
		
		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs){
		spi_set_cs(msg->spi, false);
	}

	if (msg->status == -EINPROGRESS){
		msg->status = ret;    //msg->status=0
	}
	
	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
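
One detail the excerpt drops explains the ms variable: when transfer_one() returns a positive value the transfer is still in flight, and the core waits on ctlr->xfer_completion with a timeout estimated from the transfer length and clock speed; the controller driver signals it via spi_finalize_current_transfer(). Roughly (simplified, version-dependent), the loop body also contains:

		if (ret > 0) {
			/* estimate the transfer duration: 8 bits per byte at speed_hz,
			 * plus generous slack, then sleep on the controller's completion */
			ms = 8LL * 1000LL * xfer->len;
			do_div(ms, xfer->speed_hz);
			ms += ms + 200;

			ms = wait_for_completion_timeout(&ctlr->xfer_completion,
							 msecs_to_jiffies(ms));
			if (ms == 0)
				dev_err(&msg->spi->dev, "SPI transfer timed out\n");
		}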

The spi_finalize_current_message function:

void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	mesg = ctlr->cur_msg;

	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;

    // path 2
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

    // wake up the waiting thread; mesg->context points at the struct completion variable 'done'
    mesg->complete(mesg->context);

}

The pump work item was bound to spi_pump_messages when the queue was initialized, so queuing it runs:

kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);
	__spi_pump_messages(ctlr, true);
}

SPI bus (controller) driver:

static int tcc_spi_probe(struct platform_device *pdev)
{

    struct spi_master *master;
    struct device	*dev = &pdev->dev;
    
    master = spi_alloc_master(dev, sizeof(struct tcc_spi));
    master->transfer_one = tcc_spi_transfer_one;

    devm_spi_register_master(dev, master);

}
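
With transfer_one wired up in probe, the controller driver only has to move data for a single spi_transfer; the queueing, chip-select handling and completion logic above are supplied by the SPI core. A purely hypothetical skeleton (this is not the real tcc_spi_transfer_one; the example_spi_* helpers and the private struct are made up for illustration):

static int example_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct example_spi *hw = spi_master_get_devdata(master);

	/* program clock and word size for this transfer (placeholder helpers) */
	example_spi_set_speed(hw, xfer->speed_hz);
	example_spi_set_bpw(hw, xfer->bits_per_word);

	/* start shifting; xfer->tx_buf and/or xfer->rx_buf may be NULL */
	example_spi_start(hw, xfer);

	/* returning a positive value tells the core the transfer is still in
	 * progress: it waits on ctlr->xfer_completion until the driver's IRQ
	 * handler calls spi_finalize_current_transfer(master) */
	return 1;
}
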
#define devm_spi_register_master(_dev, _ctlr) \
	devm_spi_register_controller(_dev, _ctlr)

devm_spi_register_controller
    spi_register_controller

int spi_register_controller(struct spi_controller *ctlr)
{
    struct device		*dev = ctlr->dev.parent;
    int			status = 0;

    of_spi_register_master(ctlr);

    INIT_LIST_HEAD(&ctlr->queue);

    device_add(&ctlr->dev);

    if (ctlr->transfer){
	
    }else {
		status = spi_controller_initialize_queue(ctlr);
	}

    of_register_spi_devices(ctlr);

    return status;
}
static int of_spi_register_master(struct spi_controller *ctlr)
{
	int nb, i, *cs;
	struct device_node *np = ctlr->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,GFP_KERNEL);
	ctlr->cs_gpios = cs;

	if (!ctlr->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < ctlr->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);

	ctlr->queued = true;

	ret = spi_start_queue(ctlr);

	return 0;

}
static int spi_init_queue(struct spi_controller *ctlr)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);

	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	return 0;
}
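
spi_init_queue is a straightforward use of the generic kthread_worker facility, the same mechanism every kthread_queue_work() call above relies on. A minimal self-contained sketch of that facility (names are illustrative):

#include <linux/kthread.h>

static struct kthread_worker my_worker;
static struct kthread_work   my_work;
static struct task_struct   *my_task;

static void my_work_fn(struct kthread_work *work)
{
	/* runs in the dedicated kernel thread, like spi_pump_messages() */
	pr_info("work executed\n");
}

static int my_worker_start(void)
{
	kthread_init_worker(&my_worker);		/* cf. spi_init_queue() */
	my_task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
	if (IS_ERR(my_task))
		return PTR_ERR(my_task);

	kthread_init_work(&my_work, my_work_fn);
	kthread_queue_work(&my_worker, &my_work);	/* cf. queuing pump_messages */
	return 0;
}

static void my_worker_stop(void)
{
	kthread_flush_worker(&my_worker);		/* cf. spi_destroy_queue() */
	kthread_stop(my_task);
}
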
static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

    // the controller's worker is now running
	ctlr->running = true;
	ctlr->cur_msg = NULL;
    // path 3
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

The of_register_spi_devices function:

static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
	}
}

The of_register_spi_device function:

of_register_spi_device
    rc = spi_add_device(spi);
        spi_setup(spi);

The spi_destroy_queue function: tears down the SPI worker queue

spi_unregister_controller
    spi_destroy_queue
        spi_stop_queue
            // the controller's worker stops running
            ctlr->running = false;
        kthread_flush_worker(&ctlr->kworker);
	    kthread_stop(ctlr->kworker_task);

        
