前面我们介绍了Linux驱动的中断处理程序机制
由于其本身存在一些局限,所以它只能完成整个中断处理流程的上半部分。这里我们则着重介绍下半部。
**下半部的任务就是执行与中断处理密切相关但中断处理程序本身不执行的工作。
为什么要用下半部:一言以蔽之,期望中断处理程序执行的越快越好,即能够尽快的返回。**
一旦响应中断,势必要打断当前的工作:内核默认会在所有处理器上屏蔽该中断线(防止同一中断嵌套),处理程序只会被更高优先级的中断打断;最差情况下(注册时带 IRQF_DISABLED 标志),当前处理器上所有其他中断都将会被屏蔽。
如果被打断的事情属于比较紧急的,那么我们就希望中断处理程序越快返回越好,那么下半部是干嘛的呢?下半部则是实现那些对于时间要求相对宽松的任务。
也就是说,对于整个中断处理流程,上半部负责对中断源做出快速响应(中断的到来被视为比较紧急的事件,同时还要考虑中断优先级),然后尽快返回;下半部则负责其余对时间要求比较宽松的中断服务工作,这些工作可以推迟到处理器不再处理紧急任务的时候再去做。
通常下半部在中断处理程序一返回就会马上执行。下半部执行的关键在于当它们运行的时候,允许响应所有的中断。
下面我们介绍实现下半部的其中一种机制,也是最常用的tasklet:
tasklet
tasklet和进程没有任何关系。先看下tasklet_struct:
/* Tasklets --- multithreaded analogue of BHs.
Main feature differing them of generic softirqs: tasklet
is running only on one CPU simultaneously.
Main feature differing them of BHs: different tasklets
may be run simultaneously on different CPUs.
//仔细阅读下面的英文介绍
Properties:
* If tasklet_schedule() is called, then tasklet is guaranteed
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
* If this tasklet is already running on another CPU (or schedule is called
from tasklet itself), it is rescheduled for later.
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
*/
struct tasklet_struct
{
struct tasklet_struct *next;//next tasklet in the per-CPU pending list
unsigned long state;//state bits: TASKLET_STATE_SCHED / TASKLET_STATE_RUN
atomic_t count;//disable count: the tasklet may run only when this is 0
void (*func)(unsigned long);//bottom-half handler function
unsigned long data;//argument passed to func when it runs
};
ok,我们先贴下实例代码:后面的代码都是在前面的基础上添加功能,所以会有些本篇博文不涉及到的代码
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/semaphore.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <asm/uaccess.h>
MODULE_LICENSE("Dual BSD/GPL");
/* Size of the device's internal data buffer, in bytes. */
#define DEV_SIZE 20
/* Static major number used when registering the char device region. */
#define WQ_MAJOR 230
/* 1 = chatty debug printks at loglevel <1>; 0 = demote them to loglevel <7>. */
#define DEBUG_SWITCH 1
#if DEBUG_SWITCH
#define P_DEBUG(fmt, args...) printk("<1>" "<kernel>[%s]"fmt,__FUNCTION__, ##args)
#else
#define P_DEBUG(fmt, args...) printk("<7>" "<kernel>[%s]"fmt,__FUNCTION__, ##args)
#endif
static int irq;//IRQ line to request, supplied as a module parameter
static char *devname;//device name string, supplied as a module parameter
static struct tasklet_struct wq_tasklet;//bottom-half tasklet shared by the whole driver
module_param(irq, int, S_IRUGO);
module_param(devname, charp, S_IRUGO);
//Per-device state for the wq char device.
struct wq_dev{
char kbuf[DEV_SIZE];//internal data buffer
dev_t devno;//device number (major + minor)
unsigned int major;//major number
struct cdev wq_cdev;//char device object registered with the kernel
unsigned int cur_size;//bytes currently buffered; readable amount (DEV_SIZE - cur_size is writable)
struct semaphore sem;//protects kbuf and cur_size
wait_queue_head_t r_wait;//readers sleep here while the buffer is empty
wait_queue_head_t w_wait;//writers sleep here while the buffer is full
struct fasync_struct *async_queue;//async (SIGIO) notification list
};
//struct wq_dev *wq_devp;
//异步通知机制驱动函数
/* fasync file operation: add or remove this open file from the device's
 * async-notification list so it can receive SIGIO on new data. */
static int wq_fasync(int fd, struct file *filp, int mode)
{
	struct wq_dev *dev;

	dev = filp->private_data;
	/* The kernel helper maintains the fasync entry list for us. */
	return fasync_helper(fd, filp, mode, &dev->async_queue);
}
//tasklet处理程序,下半部处理程序
/* Tasklet (bottom-half) handler.  Runs shortly after the top half returns,
 * with interrupts enabled, so any time-insensitive interrupt work can be
 * done here without delaying other interrupts. */
static void wq_tasklet_handler(unsigned long arg)
{
	printk("tasklet is working...\n");
}
//中断处理程序,越快返回越好
/*
 * Top-half interrupt handler: do the minimum amount of work and defer the
 * time-insensitive part to the wq_tasklet bottom half.
 *
 * @irq: interrupt line that fired
 * @dev: the struct wq_dev * passed to request_irq() (unused here)
 *
 * Returns IRQ_HANDLED to tell the kernel this interrupt was serviced.
 *
 * Fixes vs. original: the original copied the entire struct wq_dev onto the
 * interrupt stack and never used it (removed), and it re-ran tasklet_init()
 * on every interrupt, which would wipe the state of a tasklet that was
 * already scheduled but not yet executed (now a one-shot init).
 */
static irqreturn_t wq_irq_handler(int irq, void *dev)
{
	static int count = 0;		/* interrupts seen so far */
	static int tasklet_ready = 0;	/* one-shot guard for tasklet_init() */

	printk("key:%d\n", count);
	printk("ISR is working...\n");
	if (count < 10) {
		printk("------%d start-------\n", count + 1);
		printk("the interrupt handler is working\n");
		printk("the most of interrupt work will be done by following tasklet...\n");
		if (!tasklet_ready) {
			/* Initialize exactly once; re-init would clear the
			 * SCHED state of a pending tasklet. */
			tasklet_init(&wq_tasklet, wq_tasklet_handler, 0);
			tasklet_ready = 1;
		}
		tasklet_schedule(&wq_tasklet);	/* queue the bottom half for execution */
		printk("the top half has been done and bottom half will be processed...\n");
	}
	count++;
	return IRQ_HANDLED;
}
/*
 * open(): resolve the device structure from the inode, stash it in
 * filp->private_data for the other fops, and register the (shared)
 * interrupt handler on the module-parameter IRQ line.
 *
 * Returns 0 on success or the negative errno from request_irq()
 * (the original returned a bare -1, losing the real error code).
 */
int wq_open(struct inode *inodep, struct file *filp)
{
	struct wq_dev *dev;
	int ret;

	dev = container_of(inodep->i_cdev, struct wq_dev, wq_cdev);
	filp->private_data = dev;
	/* IRQF_SHARED requires a unique dev_id (the per-device struct) so the
	 * line can later be released with the matching free_irq(). */
	ret = request_irq(irq, wq_irq_handler, IRQF_SHARED, devname, dev);
	if (ret != 0) {
		printk("%s request IRQ:%d failed...\n", devname, irq);
		return ret;	/* propagate -EBUSY/-EINVAL/... instead of -1 */
	}
	printk("%s request IRQ:%d sucess...\n", devname, irq);
	printk(KERN_ALERT "open is ok!\n");
	return 0;
}
/*
 * release(): called on the final close of this open file.  Removes the file
 * from the async notification list so no further SIGIO reaches it.
 *
 * NOTE(review): wq_open() calls request_irq() on every open, but the IRQ is
 * only released in wq_exit(); repeated open/close cycles stack up shared
 * registrations.  Consider calling free_irq() here or requesting the IRQ
 * once at module init -- TODO confirm the intended lifecycle.
 */
int wq_release(struct inode *inodep, struct file *filp)
{
printk(KERN_ALERT "release is ok!\n");
wq_fasync(-1, filp, 0);//drop this filp from the async notification queue
return 0;
}
/*
 * read(): blocking read from the device buffer.
 *
 * Sleeps on r_wait while the buffer is empty (unless O_NONBLOCK, which
 * returns -EAGAIN).  Copies up to @count buffered bytes to user space,
 * consumes them from the front of kbuf, and wakes any sleeping writers.
 *
 * Returns the number of bytes read, or a negative errno.
 *
 * Fixes vs. original: the type-checked kernel min() macro was applied to a
 * size_t and an unsigned int (which does not compile); and consumed bytes
 * were never removed from the front of kbuf, so a partial read left stale
 * leading bytes for the next reader.
 */
static ssize_t wq_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	struct wq_dev *dev = filp->private_data;

	P_DEBUG("read data...\n");
	if (down_interruptible(&dev->sem)) {	/* acquire the device lock */
		P_DEBUG("enter read down_interruptible\n");
		return -ERESTARTSYS;
	}
	P_DEBUG("read first down\n");
	while (dev->cur_size == 0) {	/* nothing buffered: go to sleep */
		up(&dev->sem);	/* release the lock or writers can never wake us */
		if (filp->f_flags & O_NONBLOCK)	/* non-blocking I/O: bail out */
			return -EAGAIN;
		P_DEBUG("%s reading:going to sleep\n", current->comm);
		if (wait_event_interruptible(dev->r_wait, dev->cur_size != 0)) {
			P_DEBUG("read wait interruptible\n");
			return -ERESTARTSYS;	/* woken by a signal */
		}
		P_DEBUG("wake up r_wait\n");
		if (down_interruptible(&dev->sem))	/* re-acquire before re-checking */
			return -ERESTARTSYS;
	}
	/* Data available: clamp to what is actually buffered.  Explicit compare
	 * instead of min() -- count (size_t) and cur_size (unsigned int) have
	 * different types, so the type-checked min() macro would not build. */
	P_DEBUG("[2]dev->cur_size is %d\n", dev->cur_size);
	if (count > dev->cur_size)
		count = dev->cur_size;
	if (copy_to_user(buf, dev->kbuf, count)) {
		up(&dev->sem);
		return -EFAULT;
	}
	dev->cur_size -= count;	/* update the readable byte count */
	/* Shift any unread tail to the front so the next read sees it. */
	if (dev->cur_size > 0)
		memmove(dev->kbuf, dev->kbuf + count, dev->cur_size);
	up(&dev->sem);
	wake_up_interruptible(&dev->w_wait);	/* room available: wake writers */
	P_DEBUG("%s did read %d bytes\n", current->comm, (unsigned int)count);
	return count;
}
/*
 * write(): blocking write into the device buffer.
 *
 * Sleeps on w_wait while the buffer is full (unless O_NONBLOCK, which
 * returns -EAGAIN).  Appends up to @count bytes after the data already
 * buffered, wakes sleeping readers, and sends SIGIO to async subscribers.
 *
 * Returns the number of bytes written, or a negative errno.
 *
 * Fixes vs. original: the -EFAULT path after copy_from_user() returned while
 * still holding dev->sem (deadlocking every later caller); data was copied to
 * the start of kbuf, overwriting unread bytes instead of appending; and a
 * size_t was passed to a %d printk specifier.
 */
static ssize_t wq_write(struct file *filp,const char __user *buf,size_t count, loff_t *offset)
{
	struct wq_dev *dev = filp->private_data;

	P_DEBUG("write is doing\n");
	if (down_interruptible(&dev->sem)) {	/* acquire the device lock */
		P_DEBUG("enter write down_interruptible\n");
		return -ERESTARTSYS;
	}
	P_DEBUG("write first down\n");
	while (dev->cur_size == DEV_SIZE) {	/* buffer full: go to sleep */
		up(&dev->sem);	/* release the lock so readers can drain */
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		P_DEBUG("writing going to sleep\n");
		if (wait_event_interruptible(dev->w_wait, dev->cur_size < DEV_SIZE))
			return -ERESTARTSYS;	/* woken by a signal */
		if (down_interruptible(&dev->sem))	/* re-acquire before re-checking */
			return -ERESTARTSYS;
	}
	if (count > DEV_SIZE - dev->cur_size)	/* clamp to the free space */
		count = DEV_SIZE - dev->cur_size;
	/* Append after the bytes already buffered instead of overwriting them. */
	if (copy_from_user(dev->kbuf + dev->cur_size, buf, count)) {
		up(&dev->sem);	/* BUGFIX: do not leak the semaphore on fault */
		return -EFAULT;
	}
	dev->cur_size += count;	/* update the readable byte count */
	P_DEBUG("write %d bytes , cur_size:[%d]\n", (unsigned int)count, dev->cur_size);
	P_DEBUG("kbuf is [%s]\n", dev->kbuf);
	up(&dev->sem);
	wake_up_interruptible(&dev->r_wait);	/* data available: wake readers */
	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);	/* notify async subscribers */
	return count;
}
/*
 * poll()/select() support: report readiness without blocking the caller.
 *
 * A .poll method's contract is to return an event bitmask; the original
 * returned -ERESTARTSYS (a negative errno) from an unsigned-int function,
 * which the poll core would misinterpret as a mask.  The critical section
 * is short and non-blocking, so a plain down() is used instead.
 */
static unsigned int wq_poll(struct file *filp, poll_table *wait)
{
	struct wq_dev *dev = filp->private_data;
	unsigned int mask = 0;

	down(&dev->sem);	/* short critical section; must not return an errno */
	poll_wait(filp, &dev->w_wait, wait);	/* register on the write queue */
	poll_wait(filp, &dev->r_wait, wait);	/* register on the read queue */
	if (dev->cur_size != 0)			/* data buffered: readable */
		mask |= POLLIN | POLLRDNORM;
	if (dev->cur_size != DEV_SIZE)		/* free space left: writable */
		mask |= POLLOUT | POLLWRNORM;
	up(&dev->sem);
	return mask;
}
//file_operations table: wires the VFS system calls to this driver's handlers
struct file_operations wq_fops = {
.open = wq_open,
.release = wq_release,
.write = wq_write,
.read = wq_read,
.poll = wq_poll,//select()/poll() readiness reporting
.fasync = wq_fasync,//async (SIGIO) notification registration
};
struct wq_dev my_dev;	/* the single global device instance */

/*
 * Module init: allocate/register the device number, initialize the cdev,
 * semaphore and wait queues, and make the device live with cdev_add().
 *
 * Returns 0 on success or a negative errno; on cdev_add() failure the
 * device-number region is released again before returning.
 *
 * Fix vs. original: my_dev.major was only filled in on the dynamic
 * allocation path; it is now set consistently in both branches.
 */
static int __init wq_init(void)
{
	int result = 0;

	my_dev.cur_size = 0;	/* buffer starts empty */
	my_dev.devno = MKDEV(WQ_MAJOR, 0);
	/* Device number: static major when WQ_MAJOR != 0, otherwise dynamic. */
	if (WQ_MAJOR) {
		result = register_chrdev_region(my_dev.devno, 1, "wqlkp");
		my_dev.major = WQ_MAJOR;	/* keep 'major' valid on this path too */
	} else {
		result = alloc_chrdev_region(&my_dev.devno, 0, 1, "wqlkp");
		my_dev.major = MAJOR(my_dev.devno);
	}
	if (result < 0)
		return result;
	cdev_init(&my_dev.wq_cdev, &wq_fops);	/* bind the fops table */
	my_dev.wq_cdev.owner = THIS_MODULE;
	sema_init(&my_dev.sem, 1);	/* binary semaphore guarding the device */
	init_waitqueue_head(&my_dev.r_wait);	/* reader sleep queue */
	init_waitqueue_head(&my_dev.w_wait);	/* writer sleep queue */
	result = cdev_add(&my_dev.wq_cdev, my_dev.devno, 1);	/* device goes live here */
	if (result < 0) {
		P_DEBUG("cdev_add error!\n");
		goto err;
	}
	printk(KERN_ALERT "hello kernel\n");
	return 0;
err:
	unregister_chrdev_region(my_dev.devno, 1);	/* undo the region registration */
	return result;
}
/*
 * Module exit: release the interrupt line, remove the cdev, and give back
 * the device-number region, in the reverse order of setup.
 *
 * NOTE(review): free_irq() is called unconditionally, but the IRQ is only
 * requested in wq_open(); unloading the module without ever opening the
 * device frees an IRQ that was never registered -- TODO confirm/guard.
 */
static void __exit wq_exit(void)
{
free_irq(irq, &my_dev);//release the line; dev_id must match the one given to request_irq()
cdev_del(&my_dev.wq_cdev);
unregister_chrdev_region(my_dev.devno, 1);
}
module_init(wq_init);
module_exit(wq_exit);
从上面可以看到,tasklet的创建初始化以及调度都放在中断处理程序当中。(现在我们说的中断处理程序默认说的就是上半部)。
还是跟踪下内核源码(linux/interrupt.h,实现在kernel/softirq.c,可以看出tasklet是基于软中断的),一窥究竟
我们这里用的是动态初始化,静态的自己去看《Linux内核设计与实现》
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
t->next = NULL;
t->state = 0;//no state bits set: neither scheduled nor running
atomic_set(&t->count, 0);//disable count of 0 means the tasklet is enabled
t->func = func;//bottom-half handler
t->data = data;//argument later passed to func
}
再看tasklet_schedule()
static inline void tasklet_schedule(struct tasklet_struct *t)
{
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
//atomically set the SCHED bit; if it was already set, the tasklet is
//pending and must not be queued a second time, so do nothing
__tasklet_schedule(t);
}
/* Some compilers disobey section attribute on statics when not
initialized -- RR */
//per-CPU lists of pending tasklets: normal and high-priority variants
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
unsigned long flags;
local_irq_save(flags);//save the IF flag and disable local interrupts
t->next = __get_cpu_var(tasklet_vec).list;//these two lines push the tasklet onto
__get_cpu_var(tasklet_vec).list = t;//the head of this CPU's pending list
raise_softirq_irqoff(TASKLET_SOFTIRQ);//raise the softirq: tasklets are built on softirqs
local_irq_restore(flags);//restore the saved flags, re-enabling local interrupts
}
中断系列就不介绍软中断了,其编程实现和tasklet差不多,在中断处理程序当中触发软中断(当然前提是你要注册,以及定义下半部处理程序)
参考文献:
《Linux内核设计与实现》