LDD3 pipe example

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>


#include <linux/config.h> /* only exists on 2.6-era kernels; drop this line on newer ones */
#include <linux/slab.h> /* kmalloc() */
#include <linux/fs.h> /* everything... */
#include <linux/errno.h> /* error codes */
#include <linux/types.h> /* size_t */
#include <linux/proc_fs.h>
#include <linux/fcntl.h> /* O_ACCMODE */
#include <linux/seq_file.h>
#include <linux/cdev.h>


#include <asm/system.h> /* cli(), *_flags */
#include <asm/uaccess.h> /* copy_*_user */


dev_t scull_p_devno; /* Our first device number */


struct scull_pipe {
        wait_queue_head_t inq, outq;       /* read and write queues */
        char *buffer, *end;                /* begin of buf, end of buf */
        int buffersize;                    /* used in pointer arithmetic */
        char *rp, *wp;                     /* where to read, where to write */
        int nreaders, nwriters;            /* number of openings for r/w */
        struct fasync_struct *async_queue; /* asynchronous readers */
        struct semaphore sem;              /* mutual exclusion semaphore */
        struct cdev cdev;                  /* Char device structure */
};


static struct scull_pipe *scull_p_devices;
static int scull_p_fasync(int fd, struct file *filp, int mode);
static int spacefree(struct scull_pipe *dev);
int scull_p_buffer = 4000;
static int scull_p_fasync(int fd, struct file *filp, int mode)
{
        struct scull_pipe *dev = filp->private_data;

        return fasync_helper(fd, filp, mode, &dev->async_queue);
}




static int scull_p_open(struct inode *inode, struct file *filp)
{
        struct scull_pipe *dev;

        dev = container_of(inode->i_cdev, struct scull_pipe, cdev);
        filp->private_data = dev;

        if (down_interruptible(&dev->sem))
                return -ERESTARTSYS;
        if (!dev->buffer) {
                /* allocate the buffer */
                dev->buffer = kmalloc(scull_p_buffer, GFP_KERNEL);
                if (!dev->buffer) {
                        up(&dev->sem);
                        return -ENOMEM;
                }
        }
        dev->buffersize = scull_p_buffer;
        dev->end = dev->buffer + dev->buffersize;
        dev->rp = dev->wp = dev->buffer; /* rd and wr from the beginning */

        /* use f_mode, not f_flags: it's cleaner (fs/open.c tells why) */
        if (filp->f_mode & FMODE_READ)
                dev->nreaders++;
        if (filp->f_mode & FMODE_WRITE)
                dev->nwriters++;
        up(&dev->sem);

        return nonseekable_open(inode, filp);
}


static ssize_t scull_p_read(struct file *filp, char __user *buf, size_t count,
                loff_t *f_pos)
{
        struct scull_pipe *dev = filp->private_data;

        if (down_interruptible(&dev->sem))
                return -ERESTARTSYS;

        while (dev->rp == dev->wp) { /* nothing to read */
                up(&dev->sem); /* release the lock */
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
                //PDEBUG("\"%s\" reading: going to sleep\n", current->comm);
                if (wait_event_interruptible(dev->inq, (dev->rp != dev->wp)))
                        return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
                /* otherwise loop, but first reacquire the lock */
                if (down_interruptible(&dev->sem))
                        return -ERESTARTSYS;
        }
        /* ok, data is there, return something */
        if (dev->wp > dev->rp)
                count = min(count, (size_t)(dev->wp - dev->rp));
        else /* the write pointer has wrapped, return data up to dev->end */
                count = min(count, (size_t)(dev->end - dev->rp));
        if (copy_to_user(buf, dev->rp, count)) {
                up(&dev->sem);
                return -EFAULT;
        }
        dev->rp += count;
        if (dev->rp == dev->end)
                dev->rp = dev->buffer; /* wrapped */
        up(&dev->sem);

        /* finally, awake any writers and return */
        wake_up_interruptible(&dev->outq);
        //PDEBUG("\"%s\" did read %li bytes\n", current->comm, (long)count);
        return count;
}

/* Wait for space for writing; the caller must hold the device semaphore.
 * On error the semaphore will be released before returning. */
static int scull_getwritespace(struct scull_pipe *dev, struct file *filp)
{
        while (spacefree(dev) == 0) { /* full */
                DEFINE_WAIT(wait);

                up(&dev->sem);
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
                //PDEBUG("\"%s\" writing: going to sleep\n", current->comm);
                prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE);
                if (spacefree(dev) == 0)
                        schedule();
                finish_wait(&dev->outq, &wait);
                if (signal_pending(current))
                        return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
                if (down_interruptible(&dev->sem))
                        return -ERESTARTSYS;
        }
        return 0;
}


/* How much space is free?  (One byte is always left unused, so that
 * rp == wp unambiguously means "empty" rather than "full".) */
static int spacefree(struct scull_pipe *dev)
{
        if (dev->rp == dev->wp)
                return dev->buffersize - 1;
        return ((dev->rp + dev->buffersize - dev->wp) % dev->buffersize) - 1;
}


static ssize_t scull_p_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
        struct scull_pipe *dev = filp->private_data;
        int result;

        if (down_interruptible(&dev->sem))
                return -ERESTARTSYS;

        /* Make sure there's space to write */
        result = scull_getwritespace(dev, filp);
        if (result)
                return result; /* scull_getwritespace called up(&dev->sem) */

        /* ok, space is there, accept something */
        count = min(count, (size_t)spacefree(dev));
        if (dev->wp >= dev->rp)
                count = min(count, (size_t)(dev->end - dev->wp)); /* to end-of-buf */
        else /* the write pointer has wrapped, fill up to rp-1 */
                count = min(count, (size_t)(dev->rp - dev->wp - 1));
        //PDEBUG("Going to accept %li bytes to %p from %p\n", (long)count, dev->wp, buf);
        if (copy_from_user(dev->wp, buf, count)) {
                up(&dev->sem);
                return -EFAULT;
        }
        dev->wp += count;
        if (dev->wp == dev->end)
                dev->wp = dev->buffer; /* wrapped */
        up(&dev->sem);

        /* finally, awake any reader */
        wake_up_interruptible(&dev->inq);  /* blocked in read() and select() */

        /* and signal asynchronous readers, explained late in chapter 5 */
        if (dev->async_queue)
                kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
        //PDEBUG("\"%s\" did write %li bytes\n", current->comm, (long)count);
        return count;
}


static int scull_p_release(struct inode *inode, struct file *filp)
{
        struct scull_pipe *dev = filp->private_data;

        /* remove this filp from the asynchronously notified filp's */
        scull_p_fasync(-1, filp, 0);
        down(&dev->sem);
        if (filp->f_mode & FMODE_READ)
                dev->nreaders--;
        if (filp->f_mode & FMODE_WRITE)
                dev->nwriters--;
        if (dev->nreaders + dev->nwriters == 0) {
                kfree(dev->buffer);
                dev->buffer = NULL; /* the other fields are not checked on open */
        }
        up(&dev->sem);
        return 0;
}
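
The .poll entry in the file_operations table below is left commented out. For reference, here is a minimal sketch of what it could look like, following the usual LDD3 scull_p_poll pattern; it only touches fields defined in struct scull_pipe above and would additionally need #include <linux/poll.h>:

static unsigned int scull_p_poll(struct file *filp, poll_table *wait)
{
        struct scull_pipe *dev = filp->private_data;
        unsigned int mask = 0;

        /* The buffer is circular: it is empty when rp == wp and full
         * when wp is right behind rp (spacefree() returns 0). */
        down(&dev->sem);
        poll_wait(filp, &dev->inq, wait);
        poll_wait(filp, &dev->outq, wait);
        if (dev->rp != dev->wp)
                mask |= POLLIN | POLLRDNORM;   /* readable */
        if (spacefree(dev))
                mask |= POLLOUT | POLLWRNORM;  /* writable */
        up(&dev->sem);
        return mask;
}

With a poll method in place, select()/poll()/epoll callers can wait for the device to become readable or writable without blocking inside read() or write().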


struct file_operations scull_pipe_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = scull_p_read,
        .write = scull_p_write,
        // .poll = scull_p_poll,
        // .ioctl = scull_ioctl,
        .open = scull_p_open,
        .release = scull_p_release,
        .fasync = scull_p_fasync,
};
dev_t pipe_major;
int pipe_init(void)
{
        int result = 0;
        dev_t dev = 0;
        int i = 0;
        int err = 0;

        result = alloc_chrdev_region(&dev, 0, 4, "scullp");
        if (result < 0) {
                printk(KERN_WARNING "scullp: can't get major number\n");
                return result;
        }
        pipe_major = MAJOR(dev);
        dev = MKDEV(pipe_major, 0);

        scull_p_devices = kmalloc(4 * sizeof(struct scull_pipe), GFP_KERNEL);
        if (scull_p_devices == NULL) {
                unregister_chrdev_region(dev, 4);
                return -ENOMEM;
        }
        memset(scull_p_devices, 0, 4 * sizeof(struct scull_pipe));

        for (i = 0; i < 4; i++) {
                init_waitqueue_head(&scull_p_devices[i].inq);
                init_waitqueue_head(&scull_p_devices[i].outq);
                init_MUTEX(&scull_p_devices[i].sem); /* sema_init(&sem, 1) on kernels without init_MUTEX() */
                cdev_init(&scull_p_devices[i].cdev, &scull_pipe_fops);
                scull_p_devices[i].cdev.owner = THIS_MODULE;
                err = cdev_add(&scull_p_devices[i].cdev, dev + i, 1);
                if (err)
                        printk(KERN_NOTICE "Error %d adding scullp%d\n", err, i);
        }

        printk("pipe.ko is installed\n");
        return 0;
}


void pipe_exit(void)
{
        int i = 0;
        dev_t devno = MKDEV(pipe_major, 0);

        for (i = 0; i < 4; i++) {
                cdev_del(&scull_p_devices[i].cdev);
                if (scull_p_devices[i].buffer)
                        kfree(scull_p_devices[i].buffer);
        }
        kfree(scull_p_devices);
        unregister_chrdev_region(devno, 4);
        scull_p_devices = NULL;
        printk("pipe.ko is uninstalled!\n");
}


module_init(pipe_init);
module_exit(pipe_exit);


MODULE_LICENSE("GPL");

MODULE_AUTHOR("ang");


//Makefile

obj-m:=pipe.o
CURRENT_PATH:=$(shell pwd)
KERNELDIR?=/lib/modules/$(shell uname -r)/build
all:
	make -C $(KERNELDIR) M=$(CURRENT_PATH) modules


Run

insmod pipe.ko

mknod /dev/pipe c 253 0

(253 is just the major number alloc_chrdev_region happened to assign on this machine; after insmod, check the "scullp" entry in /proc/devices and use that value instead.)

cat /dev/pipe

In another terminal, run

ls >/dev/pipe

The cat in the first terminal now wakes up and prints the directory listing, which demonstrates the blocking read.
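
Besides cat and shell redirection, a small user-space program makes the blocking behaviour easier to see. The following is only a test sketch: the file name pipetest.c is made up here, and /dev/pipe is the node created by the mknod command above.

/* pipetest.c - hypothetical user-space tester for the scullp device.
 * Usage: ./pipetest r            (blocks until another process writes)
 *        ./pipetest w <message>  (writes <message> into the device)
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
        char buf[256];
        ssize_t n;
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s r | w <message>\n", argv[0]);
                return 1;
        }

        if (argv[1][0] == 'r') {
                fd = open("/dev/pipe", O_RDONLY);
                if (fd < 0) { perror("open"); return 1; }
                n = read(fd, buf, sizeof(buf) - 1);      /* blocks while the buffer is empty */
                if (n < 0) { perror("read"); close(fd); return 1; }
                buf[n] = '\0';
                printf("read %zd bytes: %s\n", n, buf);
        } else {
                if (argc < 3) {
                        fprintf(stderr, "usage: %s w <message>\n", argv[0]);
                        return 1;
                }
                fd = open("/dev/pipe", O_WRONLY);
                if (fd < 0) { perror("open"); return 1; }
                n = write(fd, argv[2], strlen(argv[2])); /* blocks only if the buffer is full */
                if (n < 0) { perror("write"); close(fd); return 1; }
                printf("wrote %zd bytes\n", n);
        }
        close(fd);
        return 0;
}

Build it with gcc -o pipetest pipetest.c, then run ./pipetest r in one terminal; it blocks until ./pipetest w hello is run in another.

Because the driver implements the fasync method and calls kill_fasync(..., SIGIO, POLL_IN) on every successful write, a reader can also ask for SIGIO notification instead of blocking. Again a sketch only, with a made-up file name; O_ASYNC is the glibc spelling (older examples use FASYNC):

/* sigio_read.c - hypothetical sketch of an asynchronous reader.
 * Build: gcc -o sigio_read sigio_read.c
 */
#include <stdio.h>
#include <signal.h>
#include <fcntl.h>
#include <unistd.h>

static int fd;

static void sigio_handler(int sig)
{
        char buf[256];
        ssize_t n;

        (void)sig;
        n = read(fd, buf, sizeof(buf));        /* data is there when SIGIO arrives */
        if (n > 0)
                write(STDOUT_FILENO, buf, n);  /* async-signal-safe output */
}

int main(void)
{
        fd = open("/dev/pipe", O_RDONLY | O_NONBLOCK);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        signal(SIGIO, sigio_handler);
        fcntl(fd, F_SETOWN, getpid());                     /* deliver SIGIO to this process */
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);  /* this is what invokes scull_p_fasync() */
        for (;;)
                pause();                                   /* sleep; each write to the device raises SIGIO */
        return 0;
}

fcntl(F_SETOWN) names the process that should receive the signal, and turning on O_ASYNC is what makes the kernel call scull_p_fasync() and put this file on dev->async_queue.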
