UART用户层接口函数调用驱动的流程,本章主要介绍Write调用。
用户层的Read和Write操作会交给line discipline(线路规程)处理:调用Open/Read/Write时,分别进入驱动的tty_open/tty_read/tty_write。先看line discipline结构体:
// line discipline结构体
// include/linux/tty_ldisc.h
/* Line discipline instance: binds one operation table to one tty device. */
struct tty_ldisc {
struct tty_ldisc_ops *ops; /* ldisc operation table (read/write/ioctl/...) */
struct tty_struct *tty; /* the tty this ldisc instance is attached to */
};
// drivers/tty/tty_io.c
static ssize_t tty_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
-> struct tty_struct *tty = file_tty(file);
-> struct tty_ldisc *ld;
-> ld = tty_ldisc_ref_wait(tty); // 等待ld整理出来
-> if (ld->ops->read)
i = iterate_tty_read(ld, tty, file, to);
-> ..... size = ld->ops->read(tty, file, buf, count); //调用到了ldisc层(线路规程)的read函数
static ssize_t tty_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-> struct tty_struct *tty = file_tty(file);
-> struct tty_ldisc *ld;
-> ld = tty_ldisc_ref_wait(tty); // 等待ld整理出来
-> if (!ld->ops->write)
-> .... ret = do_tty_write(ld->ops->write, tty, file, buf, count); //调用到了ldisc层(线路规程)的write函数
line discipline结构体 初始化与操作函数
// drivers/tty/n_tty.c
/*
 * Default N_TTY line discipline operation table.
 * tty_read()/tty_write() reach .read/.write; the low-level serial driver
 * feeds received data in through .receive_buf/.receive_buf2 and signals
 * transmit space with .write_wakeup.
 */
static struct tty_ldisc_ops n_tty_ops = {
	.magic         = TTY_LDISC_MAGIC,
	.name          = "n_tty",
	/* open/close lifecycle */
	.open          = n_tty_open,
	.close         = n_tty_close,
	/* user-side entry points (reached from tty_read/tty_write/ioctl) */
	.read          = n_tty_read,
	.write         = n_tty_write,
	.ioctl         = n_tty_ioctl,
	.poll          = n_tty_poll,
	.set_termios   = n_tty_set_termios,
	.flush_buffer  = n_tty_flush_buffer,
	/* driver-side callbacks: receive path and tx wakeup */
	.receive_buf   = n_tty_receive_buf,
	.receive_buf2  = n_tty_receive_buf2,
	.write_wakeup  = n_tty_write_wakeup,
};
/*
 * n_tty_inherit_ops - copy the default N_TTY operations into a caller table.
 * @ops: destination operation table; overwritten wholesale.
 *
 * After the struct copy, the per-instance bookkeeping fields (owner,
 * refcount, flags) are reset so the inheriting ldisc starts clean instead
 * of carrying N_TTY's module/refcount state.
 */
void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
	*ops = n_tty_ops;
	ops->owner = NULL;
	ops->refcount = 0;
	ops->flags = 0;
}
EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
ldisc层(线路规程)的write函数即n_tty_write,其实现如下:
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr)
const unsigned char *b = buf;
add_wait_queue(&tty->write_wait, &wait); //将当前进程放到等待队列中
while (1) {
if (signal_pending(current)) { // 进入此处继续执行的原因可能是被信号打断,而不是条件得到了满足。只有条件得到了满足,我们才会继续,否则,直接返回!
retval = -ERESTARTSYS;
break;
}
if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
retval = -EIO;
break;
}
if (O_OPOST(tty)) {
while (nr > 0) {
ssize_t num = process_output_block(tty, b, nr);
if (num < 0) {
if (num == -EAGAIN)
break;
retval = num;
goto break_out;
}
b += num;
nr -= num;
if (nr == 0)
break;
c = *b;
if (process_output(c, tty) < 0)
break;
b++; nr--;
}
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
} else {
struct n_tty_data *ldata = tty->disc_data;
while (nr > 0) {
mutex_lock(&ldata->output_lock);
c = tty->ops->write(tty, b, nr); //调用到具体的驱动中的write函数
mutex_unlock(&ldata->output_lock);
if (c < 0) {
retval = c;
goto break_out;
}
if (!c)
break; // c == 0:驱动暂时收不下数据,跳出内层循环,到外层等待write_wakeup(注意:不是"全部写入")
b += c;
nr -= c;
}
}
if (!nr)
break;
if (tty_io_nonblock(tty, file)) {
retval = -EAGAIN;
break;
} /* 假如是以非阻塞的方式打开的,那么也直接返回。否则,让出cpu,等条件满足以后再继续执行。 */
up_read(&tty->termios_rwsem);
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
down_read(&tty->termios_rwsem); //执行到这里,当前进程才会真正让出cpu!!!
}
注:在执行read/write函数的时候都会调用具体驱动的read/write函数;tty->ops指针是在tty_open时被赋值为具体驱动的操作结构体,详见代码架构解析一章。
tty->ops->write(tty, b, nr)
static int uart_write(struct tty_struct *tty, const unsigned char *buf, int count)
-> struct uart_state *state = tty->driver_data;
struct uart_port *port;
struct circ_buf *circ;
-> port = uart_port_lock(state, flags); // 打开锁
-> circ = &state->xmit; // 发送值存储结构体
-> if (!circ->buf) { // 判断结构体是否为空
uart_port_unlock(port, flags); // 发送buf不存在,解锁并返回0
return 0;
}
-> while (port) { // 开始发送值
c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); // 获取buf空间大小
if (count < c) // 数值大小与空间大小比较
c = count;
if (c <= 0)
break;
memcpy(circ->buf + circ->head, buf, c); // 将底层circ的buf值拷贝到串口buf中传出去
circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
}
-> __uart_start(tty); // 开始发送
-> port->ops->start_tx(port);
-> uart_port_unlock(port, flags); // 解锁