Asynchronous Notification (fasync)
Asynchronous notification (fasync) is built on top of the signal and sigaction system calls; I will use signal below. Simply put, signal binds a signal to a function, so that every time the process receives that signal, the corresponding function is called.
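For readers who have not used signal before, here is a minimal sketch of what that binding means (SIGINT is chosen purely for illustration; it is not part of the fasync example):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_signal(int sig)
{
    /* runs asynchronously whenever the bound signal is delivered */
    printf("got signal %d\n", sig);
}

int main(void)
{
    signal(SIGINT, on_signal);  /* bind SIGINT (Ctrl-C) to on_signal */
    for (;;)
        pause();                /* sleep until some signal arrives */
    return 0;
}

Each Ctrl-C interrupts pause(), runs on_signal, and the loop goes back to sleep.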
1. What Is Asynchronous Notification
In my view, asynchronous notification behaves much like an interrupt. In the example program below, when new data arrives and the device becomes readable, the driver sends a signal to the owning process to announce that there is data to read; until that happens the application is not blocked. This is unlike the blocking I/O and poll mechanisms covered earlier, where the application has to call into the driver to check the condition and may block while the condition is not met.
2. Enabling Asynchronous Notification in the Application
There are really only three steps:
1) signal(SIGIO, sig_handler);
Call signal to bind the SIGIO signal to the handler function sig_handler.
2) fcntl(fd, F_SETOWN, getpid());
Designate the calling process as the "owner" of the file (stored in filp->f_owner), so the kernel knows which process the signal should be sent to.
3) f_flags = fcntl(fd, F_GETFL);
fcntl(fd, F_SETFL, f_flags | FASYNC);
Set the FASYNC flag in the file's status flags; when this flag changes, the kernel invokes the fasync method we will implement in the driver (test_fasync here, scull_p_fasync in the scull example below).
Once these three steps are done, the process receives the signal whenever the driver raises it.
Here is an example application that uses fasync:
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/select.h>
#include <unistd.h>
#include <signal.h>

volatile sig_atomic_t flag;     /* set from the signal handler */

void sig_handler(int sig)
{
    printf("<app>%s\n", __FUNCTION__);
    flag = 1;
}

int main(void)
{
    char buf[20] = {0};     /* zero-filled so the short read below stays NUL-terminated */
    int fd;
    int f_flags;
    flag = 0;

    fd = open("/dev/test", O_RDWR);
    if (fd < 0)
    {
        perror("open");
        return -1;
    }
    /* the three steps */
    signal(SIGIO, sig_handler);
    fcntl(fd, F_SETOWN, getpid());
    f_flags = fcntl(fd, F_GETFL);
    fcntl(fd, F_SETFL, FASYNC | f_flags);

    while (1)
    {
        printf("waiting\n");    /* until the signal arrives, this keeps printing */
        sleep(4);
        if (flag)
            break;
    }
    read(fd, buf, 10);
    printf("finish: read[%s]\n", buf);
    close(fd);
    return 0;
}
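The loop above polls flag every few seconds only to keep the demo easy to follow. A process that really wants to sleep until SIGIO arrives could block the signal and wait in sigsuspend() instead; a minimal sketch (the helper name wait_for_sigio is mine, not part of the example above):

#include <signal.h>

extern volatile sig_atomic_t flag;          /* set by sig_handler(), as above */

/* Block SIGIO, then atomically unblock it and sleep inside sigsuspend()
 * until the handler has run; no busy-waiting involved. */
static void wait_for_sigio(void)
{
    sigset_t block, old;

    sigemptyset(&block);
    sigaddset(&block, SIGIO);
    sigprocmask(SIG_BLOCK, &block, &old);   /* SIGIO is now held pending */

    while (!flag)
        sigsuspend(&old);                   /* sleep with the old mask (SIGIO unblocked) */

    sigprocmask(SIG_SETMASK, &old, NULL);   /* restore the original mask */
}

main() would then call wait_for_sigio() in place of the while/sleep loop before doing the read().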
3. What the Driver Needs to Implement
Of the three steps above, the kernel already does the work behind the first two; the driver only has to cooperate with the third by implementing a simple fasync method that passes a few parameters along.
To deliver an asynchronous notification, the kernel needs to know several things: which file (filp), which signal (SIGIO), which process to send it to (pid), and what to do when the signal arrives (sig_handler). All of this is established by the three steps on the application side.
Recall that with wait queues we had to add a wait_queue_t entry to a given wait_queue_head_t.
Here, in the same spirit, we add a struct fasync_struct to what I will informally call the kernel's asynchronous notification list. This structure records the information about the opened device file (such as fd and filp) and hands it over for the kernel to manage. When the driver raises a notification, the kernel walks this list, finds the corresponding file, looks up the owning process through the file's owner (filp->f_owner), and sends the signal so that the registered sig_handler runs. A minimal driver-side sketch follows the structure definition below.
The structure looks like this:
struct fasync_struct {
    spinlock_t           fa_lock;
    int                  magic;
    int                  fa_fd;
    struct fasync_struct *fa_next;  /* singly linked list */
    struct file          *fa_file;
    struct rcu_head      fa_rcu;
};
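To make the pattern concrete before the full listing, here is a minimal sketch of the driver side. The device structure my_dev and the function names are hypothetical placeholders of mine, not part of the scull code:

#include <linux/fs.h>
#include <linux/poll.h>     /* POLL_IN */

struct my_dev {
    struct fasync_struct *async_queue;  /* head of the notification list */
    /* ... the rest of the device state ... */
};

/* called by the kernel when user space toggles FASYNC with fcntl(F_SETFL) */
static int my_fasync(int fd, struct file *filp, int mode)
{
    struct my_dev *dev = filp->private_data;
    return fasync_helper(fd, filp, mode, &dev->async_queue);
}

/* called from write() (or an interrupt handler) when new data becomes readable */
static void my_notify_readers(struct my_dev *dev)
{
    if (dev->async_queue)
        kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
}

/* called from release(): drop this filp from the notification list */
static int my_release(struct inode *inode, struct file *filp)
{
    my_fasync(-1, filp, 0);
    return 0;
}

The complete scull_pipe driver from LDD3 below implements exactly these pieces: scull_p_fasync, the kill_fasync() call at the end of scull_p_write, and the cleanup call in scull_p_release.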
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>   /* printk(), min() */
#include <linux/slab.h>     /* kmalloc() */
#include <linux/fs.h>       /* everything... */
#include <linux/proc_fs.h>
#include <linux/errno.h>    /* error codes */
#include <linux/types.h>    /* size_t */
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>

#include "scull.h"          /* local definitions */

struct scull_pipe {
    wait_queue_head_t inq, outq;        /* read and write queues */
    char *buffer, *end;                 /* begin of buf, end of buf */
    int buffersize;                     /* used in pointer arithmetic */
    char *rp, *wp;                      /* where to read, where to write */
    int nreaders, nwriters;             /* number of openings for r/w */
    struct fasync_struct *async_queue;  /* asynchronous readers */
    struct semaphore sem;               /* mutual exclusion semaphore */
    struct cdev cdev;                   /* Char device structure */
};

/* parameters */
static int scull_p_nr_devs = SCULL_P_NR_DEVS;   /* number of pipe devices */
int scull_p_buffer = SCULL_P_BUFFER;            /* buffer size */
dev_t scull_p_devno;                            /* Our first device number */

module_param(scull_p_nr_devs, int, 0);  /* FIXME check perms */
module_param(scull_p_buffer, int, 0);

static struct scull_pipe *scull_p_devices;

static int scull_p_fasync(int fd, struct file *filp, int mode);
static int spacefree(struct scull_pipe *dev);

/*
 * Open and close
 */
static int scull_p_open(struct inode *inode, struct file *filp)
{
    struct scull_pipe *dev;

    dev = container_of(inode->i_cdev, struct scull_pipe, cdev);
    filp->private_data = dev;

    if (down_interruptible(&dev->sem))
        return -ERESTARTSYS;
    if (!dev->buffer) {
        /* allocate the buffer */
        dev->buffer = kmalloc(scull_p_buffer, GFP_KERNEL);
        if (!dev->buffer) {
            up(&dev->sem);
            return -ENOMEM;
        }
    }
    dev->buffersize = scull_p_buffer;
    dev->end = dev->buffer + dev->buffersize;
    dev->rp = dev->wp = dev->buffer;    /* rd and wr from the beginning */

    /* use f_mode, not f_flags: it's cleaner (fs/open.c tells why) */
    if (filp->f_mode & FMODE_READ)
        dev->nreaders++;
    if (filp->f_mode & FMODE_WRITE)
        dev->nwriters++;
    up(&dev->sem);

    return nonseekable_open(inode, filp);
}

static int scull_p_release(struct inode *inode, struct file *filp)
{
    struct scull_pipe *dev = filp->private_data;

    /* remove this filp from the asynchronously notified filp's */
    scull_p_fasync(-1, filp, 0);
    down(&dev->sem);
    if (filp->f_mode & FMODE_READ)
        dev->nreaders--;
    if (filp->f_mode & FMODE_WRITE)
        dev->nwriters--;
    if (dev->nreaders + dev->nwriters == 0) {
        kfree(dev->buffer);
        dev->buffer = NULL; /* the other fields are not checked on open */
    }
    up(&dev->sem);
    return 0;
}

/*
 * Data management: read and write
 */
static ssize_t scull_p_read(struct file *filp, char __user *buf, size_t count,
                loff_t *f_pos)
{
    struct scull_pipe *dev = filp->private_data;

    if (down_interruptible(&dev->sem))
        return -ERESTARTSYS;

    while (dev->rp == dev->wp) {    /* nothing to read */
        up(&dev->sem);              /* release the lock */
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        PDEBUG("\"%s\" reading: going to sleep\n", current->comm);
        if (wait_event_interruptible(dev->inq, (dev->rp != dev->wp)))
            return -ERESTARTSYS;    /* signal: tell the fs layer to handle it */
        /* otherwise loop, but first reacquire the lock */
        if (down_interruptible(&dev->sem))
            return -ERESTARTSYS;
    }
    /* ok, data is there, return something */
    if (dev->wp > dev->rp)
        count = min(count, (size_t)(dev->wp - dev->rp));
    else /* the write pointer has wrapped, return data up to dev->end */
        count = min(count, (size_t)(dev->end - dev->rp));
    if (copy_to_user(buf, dev->rp, count)) {
        up(&dev->sem);
        return -EFAULT;
    }
    dev->rp += count;
    if (dev->rp == dev->end)
        dev->rp = dev->buffer;  /* wrapped */
    up(&dev->sem);

    /* finally, awake any writers and return */
    wake_up_interruptible(&dev->outq);
    PDEBUG("\"%s\" did read %li bytes\n", current->comm, (long)count);
    return count;
}

/* Wait for space for writing; caller must hold device semaphore. On
 * error the semaphore will be released before returning. */
static int scull_getwritespace(struct scull_pipe *dev, struct file *filp)
{
    while (spacefree(dev) == 0) {   /* full */
        DEFINE_WAIT(wait);

        up(&dev->sem);
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        PDEBUG("\"%s\" writing: going to sleep\n", current->comm);
        prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE);
        if (spacefree(dev) == 0)
            schedule();
        finish_wait(&dev->outq, &wait);
        if (signal_pending(current))
            return -ERESTARTSYS;    /* signal: tell the fs layer to handle it */
        if (down_interruptible(&dev->sem))
            return -ERESTARTSYS;
    }
    return 0;
}

/* How much space is free? */
static int spacefree(struct scull_pipe *dev)
{
    if (dev->rp == dev->wp)
        return dev->buffersize - 1;
    return ((dev->rp + dev->buffersize - dev->wp) % dev->buffersize) - 1;
}

static ssize_t scull_p_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
    struct scull_pipe *dev = filp->private_data;
    int result;

    if (down_interruptible(&dev->sem))
        return -ERESTARTSYS;

    /* Make sure there's space to write */
    result = scull_getwritespace(dev, filp);
    if (result)
        return result;  /* scull_getwritespace called up(&dev->sem) */

    /* ok, space is there, accept something */
    count = min(count, (size_t)spacefree(dev));
    if (dev->wp >= dev->rp)
        count = min(count, (size_t)(dev->end - dev->wp));   /* to end-of-buf */
    else /* the write pointer has wrapped, fill up to rp-1 */
        count = min(count, (size_t)(dev->rp - dev->wp - 1));
    PDEBUG("Going to accept %li bytes to %p from %p\n", (long)count, dev->wp, buf);
    if (copy_from_user(dev->wp, buf, count)) {
        up(&dev->sem);
        return -EFAULT;
    }
    dev->wp += count;
    if (dev->wp == dev->end)
        dev->wp = dev->buffer;  /* wrapped */
    up(&dev->sem);

    /* finally, awake any reader */
    wake_up_interruptible(&dev->inq);   /* blocked in read() and select() */

    /* and signal asynchronous readers, explained late in chapter 5 */
    if (dev->async_queue)
        kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
    PDEBUG("\"%s\" did write %li bytes\n", current->comm, (long)count);
    return count;
}

static unsigned int scull_p_poll(struct file *filp, poll_table *wait)
{
    struct scull_pipe *dev = filp->private_data;
    unsigned int mask = 0;

    /*
     * The buffer is circular; it is considered full
     * if "wp" is right behind "rp" and empty if the
     * two are equal.
     */
    down(&dev->sem);
    poll_wait(filp, &dev->inq, wait);
    poll_wait(filp, &dev->outq, wait);
    if (dev->rp != dev->wp)
        mask |= POLLIN | POLLRDNORM;    /* readable */
    if (spacefree(dev))
        mask |= POLLOUT | POLLWRNORM;   /* writable */
    up(&dev->sem);
    return mask;
}

static int scull_p_fasync(int fd, struct file *filp, int mode)
{
    struct scull_pipe *dev = filp->private_data;
    return fasync_helper(fd, filp, mode, &dev->async_queue);
}

/* FIXME this should use seq_file */
#ifdef SCULL_DEBUG
static void scullp_proc_offset(char *buf, char **start, off_t *offset, int *len)
{
    if (*offset == 0)
        return;
    if (*offset >= *len) {  /* Not there yet */
        *offset -= *len;
        *len = 0;
    }
    else {                  /* We're into the interesting stuff now */
        *start = buf + *offset;
        *offset = 0;
    }
}

static int scull_read_p_mem(char *buf, char **start, off_t offset, int count,
                int *eof, void *data)
{
    int i, len;
    struct scull_pipe *p;

#define LIMIT (PAGE_SIZE-200)   /* don't print any more after this size */
    *start = buf;
    len = sprintf(buf, "Default buffersize is %i\n", scull_p_buffer);
    for (i = 0; i < scull_p_nr_devs && len <= LIMIT; i++) {
        p = &scull_p_devices[i];
        if (down_interruptible(&p->sem))
            return -ERESTARTSYS;
        len += sprintf(buf+len, "\nDevice %i: %p\n", i, p);
        len += sprintf(buf+len, " Buffer: %p to %p (%i bytes)\n", p->buffer, p->end, p->buffersize);
        len += sprintf(buf+len, " rp %p wp %p\n", p->rp, p->wp);
        len += sprintf(buf+len, " readers %i writers %i\n", p->nreaders, p->nwriters);
        up(&p->sem);
        scullp_proc_offset(buf, start, &offset, &len);
    }
    *eof = (len <= LIMIT);
    return len;
}
#endif

/*
 * The file operations for the pipe device
 * (some are overlayed with bare scull)
 */
struct file_operations scull_pipe_fops = {
    .owner =    THIS_MODULE,
    .llseek =   no_llseek,
    .read =     scull_p_read,
    .write =    scull_p_write,
    .poll =     scull_p_poll,
    .ioctl =    scull_ioctl,
    .open =     scull_p_open,
    .release =  scull_p_release,
    .fasync =   scull_p_fasync,
};

/*
 * Set up a cdev entry.
 */
static void scull_p_setup_cdev(struct scull_pipe *dev, int index)
{
    int err, devno = scull_p_devno + index;

    cdev_init(&dev->cdev, &scull_pipe_fops);
    dev->cdev.owner = THIS_MODULE;
    err = cdev_add(&dev->cdev, devno, 1);
    /* Fail gracefully if need be */
    if (err)
        printk(KERN_NOTICE "Error %d adding scullpipe%d", err, index);
}

/*
 * Initialize the pipe devs; return how many we did.
 */
int scull_p_init(dev_t firstdev)
{
    int i, result;

    result = register_chrdev_region(firstdev, scull_p_nr_devs, "scullp");
    if (result < 0) {
        printk(KERN_NOTICE "Unable to get scullp region, error %d\n", result);
        return 0;
    }
    scull_p_devno = firstdev;
    scull_p_devices = kmalloc(scull_p_nr_devs * sizeof(struct scull_pipe), GFP_KERNEL);
    if (scull_p_devices == NULL) {
        unregister_chrdev_region(firstdev, scull_p_nr_devs);
        return 0;
    }
    memset(scull_p_devices, 0, scull_p_nr_devs * sizeof(struct scull_pipe));
    for (i = 0; i < scull_p_nr_devs; i++) {
        init_waitqueue_head(&(scull_p_devices[i].inq));
        init_waitqueue_head(&(scull_p_devices[i].outq));
        init_MUTEX(&scull_p_devices[i].sem);
        scull_p_setup_cdev(scull_p_devices + i, i);
    }
#ifdef SCULL_DEBUG
    create_proc_read_entry("scullpipe", 0, NULL, scull_read_p_mem, NULL);
#endif
    return scull_p_nr_devs;
}

/*
 * This is called by cleanup_module or on failure.
 * It is required to never fail, even if nothing was initialized first.
 */
void scull_p_cleanup(void)
{
    int i;

#ifdef SCULL_DEBUG
    remove_proc_entry("scullpipe", NULL);
#endif

    if (!scull_p_devices)
        return; /* nothing else to release */

    for (i = 0; i < scull_p_nr_devs; i++) {
        cdev_del(&scull_p_devices[i].cdev);
        kfree(scull_p_devices[i].buffer);
    }
    kfree(scull_p_devices);
    unregister_chrdev_region(scull_p_devno, scull_p_nr_devs);
    scull_p_devices = NULL; /* pedantic */
}
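Before walking through the key calls: if you build this listing against a recent kernel, a few of the APIs it uses have since been removed or renamed. Roughly (my own summary, not part of the original code; the replacement prototypes also differ slightly):

/* init_MUTEX(&dev->sem)         ->  sema_init(&dev->sem, 1)                        */
/* .ioctl = scull_ioctl          ->  .unlocked_ioctl = scull_ioctl (new prototype)  */
/* create_proc_read_entry(...)   ->  proc_create() together with seq_file           */
/* #include <asm/uaccess.h>      ->  #include <linux/uaccess.h>                     */

None of this changes the fasync logic itself.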
(1) A word about fasync_helper:
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
As you can see at a glance, the first three parameters are simply the three parameters of scull_p_fasync; we only have to pass in the address of the fasync_struct pointer we defined (dev->async_queue), and the kernel takes care of everything described above.
(2) When new data arrives and the device becomes readable, the driver calls kill_fasync to send SIGIO to the registered processes.
The function's prototype:
void kill_fasync(struct fasync_struct **fp, int sig, int band)
sig is the signal to send.
band is usually POLL_IN, meaning the device has become readable; if the device has become writable instead, use POLL_OUT (a short sketch follows item (3) below).
(3) When the file is closed, the fasync_struct entry must be removed from the asynchronous notification list:
scull_p_fasync(-1, filp, 0);
Removal goes through the same scull_p_fasync call, just with different arguments (fd = -1 and mode = 0), which tells fasync_helper to drop the entry.
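For completeness, the writable case mentioned in (2) would look like this in a driver. This is a sketch in the style of the scull code, not a line taken from the listing above; a pipe-like device would place it where read() has just freed up buffer space:

/* after read() has consumed data, tell asynchronous writers there is room */
if (dev->async_queue)
    kill_fasync(&dev->async_queue, SIGIO, POLL_OUT);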
4. Differences Between Blocking I/O, poll, and Asynchronous Notification
The most important differences:
1) Asynchronous notification never blocks the application.
2) With blocking I/O, if the condition is not met, the process blocks inside the driver's read or write method (test_read or test_write).
3) With poll/select, if the condition is not met, the process blocks inside the select system call.
"Asynchronous" means the process can go off and do other things before the signal arrives; once the signal does arrive, the kernel notifies the process and the corresponding signal handler runs. The process never knows in advance when the signal will come.
5. Summary
This post covered how asynchronous notification is implemented around the kernel, what the corresponding application code and driver code each have to do, and finally the differences between blocking I/O, poll, and asynchronous notification.