1. 用户空间read的操作实现
/*
 * tty_read - backend for a userspace read() on a tty device file.
 *
 * Resolves the tty behind @file, takes a reference on its current line
 * discipline, and delegates the actual read to the ldisc's read op
 * (n_tty_read for the default N_TTY discipline).  Returns the byte
 * count from the ldisc, or -EIO when the discipline has no read op.
 */
static ssize_t tty_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct tty_struct *tty = file_tty(file);
	struct tty_ldisc *disc;
	int ret = -EIO;

	/* Pin the line discipline, same pattern as tty_write(). */
	disc = tty_ldisc_ref_wait(tty);
	if (disc->ops->read)
		ret = (disc->ops->read)(tty, file, buf, count); /* e.g. n_tty_read */
	tty_ldisc_deref(disc);
	return ret;
}
/*
 * n_tty_read - N_TTY line-discipline read (abridged excerpt).
 *
 * Copies characters that the driver has already pushed into the line
 * discipline's buffer (ldata->read_buf) out to the userspace buffer,
 * sleeping on tty->read_wait while no input is available.
 *
 * NOTE(review): this excerpt is elided ("......") — the declarations of
 * timeout, c, retval and flags, plus the non-canonical path, are omitted,
 * so this snippet is not compilable as shown.
 */
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
unsigned char __user *buf, size_t nr)
{
struct n_tty_data *ldata = tty->disc_data;
unsigned char __user *b = buf;
DECLARE_WAITQUEUE(wait, current);
if (!ldata->icanon) /* icanon defaults to 1; icanon means canonical (line-buffered) mode */
······
add_wait_queue(&tty->read_wait, &wait);
while (nr) {
set_current_state(TASK_INTERRUPTIBLE);
if (!input_available_p(tty, 0)) { /* no data to read: go to sleep */
timeout = schedule_timeout(timeout); /* timed sleep */
continue;
}
__set_current_state(TASK_RUNNING);
if (ldata->icanon && !L_EXTPROC(tty)) {
/* Canonical mode: drain characters one at a time from the ldisc ring buffer. */
while (nr && ldata->read_cnt) {
int eol;
eol = test_and_clear_bit(ldata->read_tail,
ldata->read_flags);
c = ldata->read_buf[ldata->read_tail]; /* fetch one byte from the ldisc buffer ldata->read_buf */
ldata->read_tail = ((ldata->read_tail+1) &
(N_TTY_BUF_SIZE-1)); /* ring-buffer wrap: N_TTY_BUF_SIZE is a power of two */
ldata->read_cnt--;
/* Drop the lock while touching userspace memory (may fault/sleep). */
raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (!eol || (c != __DISABLED_CHAR)) {
if (tty_put_user(tty, c, b++)) { /* copy the byte to the user buffer */
retval = -EFAULT;
b--;
raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
nr--;
}
raw_spin_lock_irqsave(&ldata->read_lock, flags);
}
raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
}
}
remove_wait_queue(&tty->read_wait, &wait);
return retval;
}
write的数据为什么是从tty线路规程的buffer里读取,这一点可以回看《Linux串口驱动(3) - open详解》的分析线路2-3部分。
2. 总结
2.1 DMA
关于DMA搬运地址的配置,在open时会同时配置发送消息时DMA搬运的目标地址和接收消息时DMA搬运的源地址。因为发送数据时要将数据搬运到哪个地址是确定的,但是从哪个地址搬运数据是不确定的,接收数据时则反之。
不确定的那个地址,会在启动DMA搬运的时候进行配置。
2.2 write和read的不同
write是SOC端主动发起的动作,所以DMA搬运的启动操作是在write函数的底层操作里调用的;而read是SOC端的被动操作,所以串口在open的时候就要启动DMA搬运,将数据搬运到tty 线路规程的一个buffer里,用户读的时候不用去底层读取,去线路规程的buffer里读取即可。
3. DMA模式
static int start_rx_dma(struct imx_port *sport)
{
struct dma_chan *chan = sport->dma_chan_rx;
struct dma_async_tx_descriptor *desc;
desc = dmaengine_prep_dma_cyclic(chan, sport->rx_buf.dmaaddr,
sport->rx_buf.buf_len, sport->rx_buf.period_len,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
desc->callback = dma_rx_callback; //DMA完成一次搬运后,会调用这个回调函数
dmaengine_submit(desc); //将该描述符插入dmaengine驱动的传输队列
dma_async_issue_pending(chan); //启动对应DMA通道上的传输
sport->dma_is_rxing = 1;
return 0;
}
/*
 * dma_rx_callback - dmaengine completion callback for RX periods.
 *
 * @data: the struct imx_port passed as callback_param when the
 *        descriptor was prepared.
 */
static void dma_rx_callback(void *data)
{
	/* fix: original used 'sport' without ever declaring it; recover it from @data */
	struct imx_port *sport = data;

	dma_rx_work(sport);
}
/*
 * dma_rx_work - push freshly DMA'd bytes up to the tty layer.
 *
 * Runs after each DMA period completes: everything the UART received
 * ends up in a line-discipline buffer, which is where userspace reads
 * from (see n_tty_read above).
 */
static void dma_rx_work(struct imx_port *sport)
{
	struct tty_struct *tty = sport->port.state->port.tty;
	unsigned int cur_idx = sport->rx_buf.cur_idx;

	/* fix: original comment used broken delimiters "/ * ... * /" (syntax error) */
	dma_rx_push_data(sport, tty, 0, cur_idx); /* hand the received data to the tty ldisc buffer */
}
4. 非DMA模式
//由于在open阶段置位了中断使能位,所以当RX FIFO有数据的时候会触发中断
/*
 * imx_int - top-level UART interrupt handler for the i.MX port.
 *
 * Reads the USR1 status register and dispatches: RX-ready / aging-timer
 * events go to imx_rxint() (only when DMA is not handling RX), and
 * TX-ready events go to imx_txint() when the TX-empty interrupt is
 * enabled in UCR1.
 */
static irqreturn_t imx_int(int irq, void *dev_id)
{
	struct imx_port *sport = dev_id;
	unsigned int sts;
	/* fix: removed 'sts2', which was declared but never used in this excerpt */

	sts = readl(sport->port.membase + USR1);
	if ((sts & USR1_RRDY || sts & USR1_AGTIM) &&
		!sport->dma_is_enabled) {
		if (sts & USR1_AGTIM)
			writel(USR1_AGTIM, sport->port.membase + USR1); /* ack the aging timer (write-1-to-clear) */
		imx_rxint(irq, dev_id);
	}
	if (sts & USR1_TRDY &&
		readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
		imx_txint(irq, dev_id);
	return IRQ_HANDLED;
}
/*
 * imx_rxint - RX interrupt path (non-DMA mode).
 *
 * Drains the RX FIFO one character at a time under the port lock,
 * inserting each byte into the tty_port flip buffer, then pushes the
 * whole buffer up to the line discipline.
 *
 * NOTE(review): reading URXD0 pops a byte from the hardware FIFO, so
 * statement order here is load-bearing.  The "out:" label has no
 * matching goto in this excerpt — presumably the jumps live in elided
 * error-handling code; verify against the full driver source.
 */
static irqreturn_t imx_rxint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
unsigned int rx, flg, ignored = 0;
struct tty_port *port = &sport->port.state->port;
unsigned long flags, temp;
spin_lock_irqsave(&sport->port.lock, flags);
/* Loop while USR2 reports "receive data ready". */
while (readl(sport->port.membase + USR2) & USR2_RDR) {
flg = TTY_NORMAL;
sport->port.icount.rx++;
/* Read one character out of the RX FIFO (destructive read). */
rx = readl(sport->port.membase + URXD0);
temp = readl(sport->port.membase + USR2);
if (temp & USR2_BRCD) {
/* Break condition detected: ack it (write-1-to-clear) and let the core handle it. */
writel(USR2_BRCD, sport->port.membase + USR2);
if (uart_handle_break(&sport->port))
continue;
}
if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
continue;
/* Stash the byte in the tty_port flip buffer. */
tty_insert_flip_char(port, rx, flg);
}
out:
spin_unlock_irqrestore(&sport->port.lock, flags);
/* Push everything collected above from the port buffer to the ldisc buffer. */
tty_flip_buffer_push(port);
return IRQ_HANDLED;
}