Common queue operation functions in drivers

```c
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	int node;

	/* drain it before proceeding with destruction */
	drain_workqueue(wq);

	/* sanity checks */
	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq) {
		int i;

		for (i = 0; i < WORK_NR_COLORS; i++) {
			if (WARN_ON(pwq->nr_in_flight[i])) {
				mutex_unlock(&wq->mutex);
				return;
			}
		}

		if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
		    WARN_ON(pwq->nr_active) ||
		    WARN_ON(!list_empty(&pwq->delayed_works))) {
			mutex_unlock(&wq->mutex);
			return;
		}
	}
	mutex_unlock(&wq->mutex);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	mutex_lock(&wq_pool_mutex);
	list_del_init(&wq->list);
	mutex_unlock(&wq_pool_mutex);

	workqueue_sysfs_unregister(wq);

	if (wq->rescuer) {
		kthread_stop(wq->rescuer->task);
		kfree(wq->rescuer);
		wq->rescuer = NULL;
	}

	if (!(wq->flags & WQ_UNBOUND)) {
		/*
		 * The base ref is never dropped on per-cpu pwqs.  Directly
		 * free the pwqs and wq.
		 */
		free_percpu(wq->cpu_pwqs);
		kfree(wq);
	} else {
		/*
		 * We're the sole accessor of @wq at this point.  Directly
		 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
		 * @wq will be freed when the last pwq is released.
		 */
		for_each_node(node) {
			pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
			RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
			put_pwq_unlocked(pwq);
		}

		/*
		 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
		 * put.  Don't access it afterwards.
		 */
		pwq = wq->dfl_pwq;
		wq->dfl_pwq = NULL;
		put_pwq_unlocked(pwq);
	}
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
```
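
For context, here is a minimal usage sketch (the names `demo_wq`, `demo_work` and `demo_work_fn` are hypothetical): a driver typically allocates the workqueue at init time, queues work from its hot paths, and calls `destroy_workqueue()` at teardown, relying on the drain shown above to finish any pending items first.

```c
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* hypothetical names */
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* deferred processing runs here, in process context */
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* drains all queued work, then releases the workqueue */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```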



```c
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * This function sleeps until all work items which were queued on entry
 * have finished execution, but it is not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	wq->first_flusher = NULL;

	WARN_ON_ONCE(!list_empty(&this_flusher.list));
	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
			     wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			WARN_ON_ONCE(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm pwqs.
		 */
		WARN_ON_ONCE(wq->flush_color == wq->work_color);
		WARN_ON_ONCE(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
```
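
To make the barrier semantics concrete, here is a minimal sketch (the `demo_flush_point()` helper and the two work items are hypothetical): everything queued before the call has finished when `flush_workqueue()` returns, while work queued concurrently afterwards is not waited for, which is what prevents the livelock mentioned in the kernel-doc comment above.

```c
static struct work_struct work_a, work_b;	/* assumed set up with INIT_WORK() */

static void demo_flush_point(struct workqueue_struct *wq)
{
	queue_work(wq, &work_a);
	queue_work(wq, &work_b);

	/*
	 * Sleeps until work_a, work_b and everything queued earlier on
	 * @wq have completed; work queued after this point is not waited on.
	 */
	flush_workqueue(wq);
}
```

Internally, each flush is tagged with one of WORK_NR_COLORS flush colors, and work_next_color() simply advances the color index modulo WORK_NR_COLORS; when the color space wraps around completely, new flushers park on flusher_overflow, as seen in the listing.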


Linux kernel UART drivers often use queues to decouple the original function logic and improve readability and maintainability. The basic idea is to split the different responsibilities into independent modules, each with an input queue and an output queue. The input queue receives data from the UART interface; the data is processed and the result is placed on the output queue. The output queue may carry control commands, status information, or other data destined for the upper-layer application. The advantage of this approach is that the driver becomes simpler to write and easier to maintain and extend.

Below is an example UART driver that uses queues to decouple the original functions. Note that `uart_chars_avail()` and `uart_get_char()` are placeholder helpers standing in for hardware-specific FIFO accessors, and the `uart_driver_ops` callbacks other than `startup`/`shutdown` are likewise assumed to be implemented elsewhere; none of these are part of the generic kernel API.

```c
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/serial_core.h>

#define BUFFER_SIZE 1024

struct uart_device {
	struct cdev cdev;
	struct uart_port port;
	struct mutex mutex;
	struct work_struct work;
	struct tasklet_struct tasklet;
	spinlock_t lock;
	wait_queue_head_t read_queue;
	wait_queue_head_t write_queue;
	char *buf;		/* circular buffer for received data */
	int head;		/* producer index (RX side) */
	int tail;		/* consumer index (read() side) */
};

static int uart_driver_open(struct inode *inode, struct file *file)
{
	struct uart_device *dev;

	dev = container_of(inode->i_cdev, struct uart_device, cdev);
	file->private_data = dev;
	return 0;
}

static int uart_driver_release(struct inode *inode, struct file *file)
{
	return 0;
}

static ssize_t uart_driver_read(struct file *file, char __user *buf,
				size_t count, loff_t *offset)
{
	struct uart_device *dev = file->private_data;
	ssize_t ret;

	if (count == 0)
		return 0;

	/* sleep until the RX path has put something into the buffer */
	if (wait_event_interruptible(dev->read_queue, dev->head != dev->tail))
		return -ERESTARTSYS;

	mutex_lock(&dev->mutex);
	if (dev->head > dev->tail) {
		ret = min_t(ssize_t, count, dev->head - dev->tail);
		if (copy_to_user(buf, dev->buf + dev->tail, ret)) {
			ret = -EFAULT;
			goto out;
		}
		dev->tail += ret;
	} else {
		/* data wraps around; hand back the contiguous chunk first */
		ret = min_t(ssize_t, count, BUFFER_SIZE - dev->tail);
		if (copy_to_user(buf, dev->buf + dev->tail, ret)) {
			ret = -EFAULT;
			goto out;
		}
		dev->tail = (dev->tail + ret) % BUFFER_SIZE;
	}
out:
	mutex_unlock(&dev->mutex);
	return ret;
}

static ssize_t uart_driver_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *offset)
{
	struct uart_device *dev = file->private_data;
	ssize_t ret;

	if (count == 0)
		return 0;

	/* sleep while the buffer is full */
	if (wait_event_interruptible(dev->write_queue,
			dev->head != ((dev->tail - 1 + BUFFER_SIZE) % BUFFER_SIZE)))
		return -ERESTARTSYS;

	/*
	 * NOTE: this simplified example reuses the single RX ring for
	 * writes; a real driver would keep a separate TX buffer and
	 * kick the hardware from here.
	 */
	mutex_lock(&dev->mutex);
	if (dev->tail > dev->head) {
		ret = min_t(ssize_t, count, BUFFER_SIZE - dev->tail);
		if (copy_from_user(dev->buf + dev->tail, buf, ret)) {
			ret = -EFAULT;
			goto out;
		}
		dev->tail += ret;
	} else {
		ret = min_t(ssize_t, count, dev->head - dev->tail);
		if (copy_from_user(dev->buf + dev->tail, buf, ret)) {
			ret = -EFAULT;
			goto out;
		}
		dev->tail = (dev->tail + ret) % BUFFER_SIZE;
	}
out:
	mutex_unlock(&dev->mutex);
	return ret;
}

/*
 * The two RX paths below illustrate both deferral mechanisms (a work
 * item running in process context under the mutex, and a tasklet
 * running in softirq context under the spinlock); a real driver would
 * pick one and protect the buffer with a single lock.
 */
static void uart_driver_work(struct work_struct *work)
{
	struct uart_device *dev = container_of(work, struct uart_device, work);
	struct uart_port *port = &dev->port;
	unsigned char c;

	mutex_lock(&dev->mutex);
	while (uart_chars_avail(port)) {	/* placeholder FIFO helper */
		c = uart_get_char(port);	/* placeholder FIFO helper */
		if (dev->head == ((dev->tail - 1 + BUFFER_SIZE) % BUFFER_SIZE)) {
			/* buffer is full, drop the incoming character */
			continue;
		}
		dev->buf[dev->head] = c;
		dev->head = (dev->head + 1) % BUFFER_SIZE;
	}
	mutex_unlock(&dev->mutex);

	wake_up_interruptible(&dev->read_queue);
}

static void uart_driver_tasklet(unsigned long data)
{
	struct uart_device *dev = (struct uart_device *)data;
	struct uart_port *port = &dev->port;
	unsigned char c;

	spin_lock(&dev->lock);
	while (uart_chars_avail(port)) {	/* placeholder FIFO helper */
		c = uart_get_char(port);	/* placeholder FIFO helper */
		if (dev->head == ((dev->tail - 1 + BUFFER_SIZE) % BUFFER_SIZE)) {
			/* buffer is full, drop the incoming character */
			continue;
		}
		dev->buf[dev->head] = c;
		dev->head = (dev->head + 1) % BUFFER_SIZE;
	}
	spin_unlock(&dev->lock);

	wake_up_interruptible(&dev->read_queue);
}

static int uart_driver_start(struct uart_port *port)
{
	struct uart_device *dev = container_of(port, struct uart_device, port);

	INIT_WORK(&dev->work, uart_driver_work);
	tasklet_init(&dev->tasklet, uart_driver_tasklet, (unsigned long)dev);
	spin_lock_init(&dev->lock);
	init_waitqueue_head(&dev->read_queue);
	init_waitqueue_head(&dev->write_queue);
	mutex_init(&dev->mutex);

	dev->buf = kzalloc(BUFFER_SIZE, GFP_KERNEL);
	if (!dev->buf)
		return -ENOMEM;
	dev->head = 0;
	dev->tail = 0;

	uart_write_wakeup(port);
	return 0;
}

static void uart_driver_stop(struct uart_port *port)
{
	struct uart_device *dev = container_of(port, struct uart_device, port);

	cancel_work_sync(&dev->work);
	tasklet_kill(&dev->tasklet);

	spin_lock_irq(&dev->lock);
	dev->head = dev->tail = 0;
	spin_unlock_irq(&dev->lock);

	kfree(dev->buf);
}

/*
 * Per-port operations.  This table would be assigned to port->ops when
 * the port is registered; all callbacks except startup/shutdown stand
 * in for hardware-specific implementations.
 */
static const struct uart_ops uart_driver_ops = {
	.tx_empty	= uart_tx_empty,
	.set_mctrl	= uart_set_mctrl,
	.get_mctrl	= uart_get_mctrl,
	.stop_tx	= uart_stop_tx,
	.start_tx	= uart_start_tx,
	.send_xchar	= uart_send_xchar,
	.stop_rx	= uart_stop_rx,
	.enable_ms	= uart_enable_ms,
	.break_ctl	= uart_break_ctl,
	.startup	= uart_driver_start,
	.shutdown	= uart_driver_stop,
};

static struct uart_driver uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "uart_driver",
	.dev_name	= "ttyUART",
	.major		= 0,
	.minor		= 0,
	.nr		= 1,
	.cons		= NULL,
};

/* character-device entry points (the original posting defined the
 * handlers above but never wired them into a file_operations table) */
static const struct file_operations uart_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= uart_driver_open,
	.release	= uart_driver_release,
	.read		= uart_driver_read,
	.write		= uart_driver_write,
};

static struct uart_device uart_dev;

static int __init uart_driver_init(void)
{
	dev_t devno;
	int ret;

	ret = alloc_chrdev_region(&devno, 0, 1, "uart_driver");
	if (ret < 0)
		return ret;

	cdev_init(&uart_dev.cdev, &uart_driver_fops);
	uart_dev.cdev.owner = THIS_MODULE;
	ret = cdev_add(&uart_dev.cdev, devno, 1);
	if (ret < 0) {
		unregister_chrdev_region(devno, 1);
		return ret;
	}

	ret = uart_register_driver(&uart_driver);
	if (ret < 0) {
		cdev_del(&uart_dev.cdev);
		unregister_chrdev_region(devno, 1);
		return ret;
	}
	return 0;
}

static void __exit uart_driver_exit(void)
{
	uart_unregister_driver(&uart_driver);
	cdev_del(&uart_dev.cdev);
	unregister_chrdev_region(uart_dev.cdev.dev, 1);
}

module_init(uart_driver_init);
module_exit(uart_driver_exit);

MODULE_AUTHOR("Your Name");
MODULE_LICENSE("GPL");
```

In this driver we use two wait queues and one circular buffer. Readers sleep on `read_queue` until data arrives from the UART, writers sleep on `write_queue` until there is room to send, and `buf` stores the received bytes. `wait_event_interruptible()` blocks until a queue's condition becomes true, `mutex_lock()`/`mutex_unlock()` protect the shared buffer indices, and `wake_up_interruptible()` wakes processes sleeping on a queue.

`uart_driver_work()` and `uart_driver_tasklet()` pull incoming characters from the UART and append them to the input buffer. `uart_driver_start()` and `uart_driver_stop()` set up and tear down the queues and buffer. The `uart_driver_ops` structure holds the per-port UART operations, and the `uart_driver` structure holds the driver's basic registration information.

By using queues to decouple the original functions, the different parts of the driver become independent modules, which keeps the code cleaner and easier to maintain and extend.
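
As a side note, the full/empty test the example relies on can be isolated into small helpers. The sketch below uses the same BUFFER_SIZE and head/tail convention as the driver above; one slot is deliberately left unused so that a full buffer can be distinguished from an empty one.

```c
#define BUFFER_SIZE 1024

/* empty: read and write positions coincide */
static inline bool ring_empty(int head, int tail)
{
	return head == tail;
}

/* full: advancing head once more would collide with tail, so one
 * slot is sacrificed to disambiguate full from empty */
static inline bool ring_full(int head, int tail)
{
	return head == (tail - 1 + BUFFER_SIZE) % BUFFER_SIZE;
}

static inline int ring_advance(int idx)
{
	return (idx + 1) % BUFFER_SIZE;
}
```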
