scullpipe: a blocking-I/O char device (annotated LDD3 scull example)

Source adapted from the LDD3 example code (the original note said lddr3_examples/scullc/, but the code below is the scull driver: pipe.c, main.c, access.c)

pipe.c

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>	/* printk(), min() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/fs.h>		/* everything... */
#include <linux/proc_fs.h>
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>
#include <linux/sched.h>
#include "scull.h"		/* local definitions */


/*
 * Per-device state for a scullpipe (blocking FIFO) device.
 * The circular buffer [buffer, end) is guarded by `sem`; readers sleep
 * on `inq` when the buffer is empty, writers sleep on `outq` when it is
 * full.  rp == wp always means "empty" (one slot is kept free).
 */
struct scull_pipe{
        wait_queue_head_t inq, outq;       /* read and write queues */
        char *buffer, *end;                /* begin of buf, end of buf */
        int buffersize;                    /* used in pointer arithmetic */
        char *rp, *wp;                     /* where to read, where to write */
        int nreaders, nwriters;            /* number of openings for r/w */
        struct fasync_struct *async_queue; /* asynchronous readers */
        struct semaphore sem;              /* mutual exclusion semaphore */
        struct cdev cdev;                  /* Char device structure */
	};


/* Module parameters: number of pipe devices and size of each buffer
 * (scull_p_buffer is also tweakable at runtime via SCULL_P_IOCTSIZE). */
static int scull_p_nr_devs=SCULL_P_NR_DEVS;
int scull_p_buffer=SCULL_P_BUFFER;
dev_t scull_p_devno;			/* first pipe device number */

module_param(scull_p_nr_devs, int, 0);	/* FIXME check perms */
module_param(scull_p_buffer, int, 0);


static struct scull_pipe *scull_p_devices;	/* allocated in scull_p_init */

 static int scull_p_fasync(int fd, struct file *filp,int mode);
static int spacefree(struct scull_pipe *dev);

	
/*
 * Open a scullpipe device: allocate the buffer on first open and
 * account for readers/writers.
 *
 * Bug fix vs. original: buffersize/end/rp/wp were (re)set on EVERY
 * open.  That silently discarded data still queued for readers when a
 * second process opened the device, and if scull_p_buffer had been
 * changed via ioctl it desynchronized `end` from the actual allocation.
 * All four fields are now initialized only when the buffer itself is
 * allocated.
 */
static int scull_p_open(struct inode *inode,struct file *filp)
{
	struct scull_pipe *dev;

	dev=container_of(inode->i_cdev,struct scull_pipe,cdev);
	filp->private_data=dev;

	if(down_interruptible(&dev->sem))
		return -ERESTARTSYS;
	if(!dev->buffer)
		{
			/* first open (or reopened after the last release) */
			dev->buffer=kmalloc(scull_p_buffer,GFP_KERNEL);
			if(!dev->buffer)
				{
					up(&dev->sem);
					return -ENOMEM;
				}
			dev->buffersize=scull_p_buffer;
			dev->end=dev->buffer+dev->buffersize;
			dev->rp=dev->wp=dev->buffer; /* empty buffer */
		}

	if(filp->f_mode & FMODE_READ)
		dev->nreaders++;
	if(filp->f_mode & FMODE_WRITE)
		dev->nwriters++;
	up(&dev->sem);

	return nonseekable_open(inode,filp);
}

/*
 * Release a scullpipe device: drop the fasync registration for this
 * filp, fix up the reader/writer counts and free the buffer when the
 * last opener goes away.
 *
 * Bug fix vs. original: scull_p_fasync() takes an int `mode`, but was
 * being passed `&dev->async_queue` (a pointer).  Passing 0 removes this
 * filp from the async notification list, which is the intended effect.
 */
static int scull_p_release(struct inode *inode, struct file *filp)
{
	struct scull_pipe *dev=filp->private_data;

	/* remove this filp from the asynchronously notified filp's */
	scull_p_fasync(-1,filp,0);
	down(&dev->sem);	/* release must not fail: plain down() */
	if(filp->f_mode &FMODE_READ)
		dev->nreaders--;
	if(filp->f_mode&FMODE_WRITE)
		dev->nwriters--;
	if((dev->nwriters+dev->nreaders)==0)
		{
			kfree(dev->buffer);
			dev->buffer=NULL;	/* guards the next open */
		}
	up(&dev->sem);
	return 0;
}
/*
 * Blocking read.  Sleeps on dev->inq while the buffer is empty (unless
 * O_NONBLOCK was requested); returns data only up to the wrap point of
 * the circular buffer (a short read — callers loop for more) and then
 * wakes any sleeping writer.
 *
 * Bug fix vs. original: the PDEBUG format string had raw embedded
 * quotes and a trailing literal "n" where \" and \n belong.
 */
static ssize_t scull_p_read(struct file *filp,char __user *buf,size_t count,
			loff_t * f_pos)
{
	struct scull_pipe *dev=filp->private_data;

	if(down_interruptible(&dev->sem))
		return -ERESTARTSYS;
	while(dev->rp==dev->wp) /* nothing to read */
		{
			up(&dev->sem); /* release the lock before sleeping */
			if(filp->f_flags &O_NONBLOCK)
				return -EAGAIN;
			if(wait_event_interruptible(dev->inq,(dev->rp!=dev->wp)))
				return -ERESTARTSYS; /* signal: let the fs layer handle it */
			/* otherwise loop, but first reacquire the lock */
			if(down_interruptible(&dev->sem))
				return -ERESTARTSYS;
		}
	/* ok, data is there, return something */
	if(dev->wp>dev->rp)
		count=min(count,(size_t)(dev->wp-dev->rp));
	else /* the write pointer has wrapped: return data up to dev->end */
		count=min(count,(size_t)(dev->end-dev->rp));
	if(copy_to_user(buf,dev->rp,count))
		{
			up(&dev->sem);
			return -EFAULT;
		}
	dev->rp+=count;
	if(dev->rp==dev->end)
		dev->rp=dev->buffer; /* wrapped */
	up(&dev->sem);

	/* finally, awake any writers and return */
	wake_up_interruptible(&dev->outq);
	PDEBUG("\"%s\" did read %li bytes\n",current->comm, (long)count);
	return count;
}

/*
 * How many bytes may be written without overwriting unread data.
 * One slot is always kept free so that rp == wp unambiguously means
 * "buffer empty" (never "buffer full").
 */
static int spacefree(struct scull_pipe *dev)
{
	int gap;

	if(dev->rp==dev->wp)
		return dev->buffersize -1;	/* empty: all but the guard slot */
	gap = (dev->rp + dev->buffersize - dev->wp) % dev->buffersize;
	return gap - 1;
}

/*
 * Wait (with the manual prepare_to_wait/finish_wait protocol) until
 * there is room to write into the circular buffer.
 *
 * Called with dev->sem held.  On every error return the semaphore has
 * already been released; on success (0) it is held again.  The
 * spacefree() recheck between prepare_to_wait and schedule closes the
 * lost-wakeup window.
 *
 * Bug fix vs. original: the PDEBUG format string had raw embedded
 * quotes and a trailing literal "n" where \" and \n belong.
 */
static int scull_getwritespace(struct scull_pipe *dev,struct file*filp)
{
	  while(spacefree(dev)==0) /* buffer is full */
	  	{
	  		DEFINE_WAIT(wait);

			up(&dev->sem);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		PDEBUG("\"%s\" writing: going to sleep\n",current->comm);
			prepare_to_wait(&dev->outq,&wait,TASK_INTERRUPTIBLE);
			if(spacefree(dev)==0) /* recheck to avoid a lost wakeup */
				schedule();
			finish_wait(&dev->outq,&wait);
			if(signal_pending(current))
				return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
			if(down_interruptible(&dev->sem))
				return -ERESTARTSYS;
	  	}
	  return 0;
}



/*
 * Blocking write.  Waits for free space via scull_getwritespace(),
 * copies at most up to the wrap point of the circular buffer (a short
 * write), then wakes readers and signals async listeners.
 *
 * Bug fix vs. original: both PDEBUG format strings had lost their
 * escapes (raw quotes, literal trailing "n" instead of \n).
 */
static ssize_t scull_p_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
	struct scull_pipe *dev = filp->private_data;
	int result;

	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	/* Make sure there's space to write */
	result = scull_getwritespace(dev, filp);
	if (result)
		return result; /* scull_getwritespace called up(&dev->sem) */

	/* ok, space is there, accept something */
	count = min(count, (size_t)spacefree(dev));
	if (dev->wp >= dev->rp)
		count = min(count, (size_t)(dev->end - dev->wp)); /* to end-of-buf */
	else /* the write pointer has wrapped, fill up to rp-1 */
		count = min(count, (size_t)(dev->rp - dev->wp - 1));
	PDEBUG("Going to accept %li bytes to %p from %p\n", (long)count, dev->wp, buf);
	if (copy_from_user(dev->wp, buf, count)) {
		up (&dev->sem);
		return -EFAULT;
	}
	dev->wp += count;
	if (dev->wp == dev->end)
		dev->wp = dev->buffer; /* wrapped */
	up(&dev->sem);

	/* finally, awake any reader */
	wake_up_interruptible(&dev->inq);  /* blocked in read() and select() */

	/* and signal asynchronous readers, explained late in chapter 5 */
	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
	PDEBUG("\"%s\" did write %li bytes\n",current->comm, (long)count);
	return count;
}


/*
 * poll/select support: register both wait queues with the poll table
 * and report whether the pipe is currently readable (rp != wp) and/or
 * writable (at least one free byte).
 */
static unsigned int scull_p_poll(struct file *filp,poll_table*wait)
{
	struct scull_pipe *dev = filp->private_data;
	unsigned int mask = 0;

	down(&dev->sem);
	/* queues that may change the file's status */
	poll_wait(filp, &dev->inq,  wait);
	poll_wait(filp, &dev->outq, wait);
	if (dev->rp != dev->wp)
		mask |= POLLIN | POLLRDNORM;	/* readable */
	if (spacefree(dev) != 0)
		mask |= POLLOUT | POLLWRNORM;	/* writable */
	up(&dev->sem);

	return mask;
}
/* fasync method: delegate list maintenance to fasync_helper(). */
static int scull_p_fasync(int fd,struct file *filp,int mode)
{
	struct scull_pipe *dev=filp->private_data;
	return fasync_helper(fd,filp,mode,&dev->async_queue);
}
#ifdef SCULL_DEBUG
/*
 * Helper for the legacy read_proc method: account for the bytes the
 * reader has already consumed (*offset), adjusting *start and *len in
 * the classic read_proc style.
 */
static void scullp_proc_offset(char *buf,char **start,off_t *offset,int *len)
{
	if (*offset == 0)
		return;			/* nothing consumed yet */
	if (*offset < *len) {
		/* We're into the interesting stuff now */
		*start = buf + *offset;
		*offset = 0;
	} else {
		/* Not there yet: discard this whole chunk */
		*offset -= *len;
		*len = 0;
	}
}
/*
 * Legacy read_proc implementation for /proc/scullpipe: dump the state
 * of every pipe device.
 *
 * Bug fix vs. original: every format string ended in a literal "n"
 * instead of "\n" (lost escapes).
 */
static int scull_read_p_mem(char *buf,char **start,off_t offset,int count,
			int *eof,void *data)
{
	int i, len;
	struct scull_pipe *p;

#define LIMIT (PAGE_SIZE-200)	/* don't print any more after this size */
	*start = buf;
	len = sprintf(buf, "Default buffersize is %i\n", scull_p_buffer);
	for(i = 0; i<scull_p_nr_devs && len <= LIMIT; i++) {
		p = &scull_p_devices[i];
		if (down_interruptible(&p->sem))
			return -ERESTARTSYS;
		len += sprintf(buf+len, "\nDevice %i: %p\n", i, p);
		len += sprintf(buf+len, "   Buffer: %p to %p (%i bytes)\n", p->buffer, p->end, p->buffersize);
		len += sprintf(buf+len, "   rp %p   wp %p\n", p->rp, p->wp);
		len += sprintf(buf+len, "   readers %i   writers %i\n", p->nreaders, p->nwriters);
		up(&p->sem);
		scullp_proc_offset(buf, start, &offset, &len);
	}
	*eof = (len <= LIMIT);
	return len;
}
#endif


/*
 * File operations for the scullpipe devices.
 * NOTE(review): scull_ioctl is wired to .unlocked_ioctl, which the
 * kernel calls as (file, cmd, arg); confirm the prototype in scull.h
 * uses that convention rather than the legacy (inode, file, cmd, arg).
 */
struct file_operations scull_pipe_fops={
		.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.read =		scull_p_read,
	.write =	scull_p_write,
	.poll =		scull_p_poll,
	.unlocked_ioctl =	scull_ioctl,
	.open =		scull_p_open,
	.release =	scull_p_release,
	.fasync =	scull_p_fasync,
};
/*
 * Register one scullpipe cdev with the kernel.  On failure only a
 * notice is printed: the device simply stays unavailable.
 */
static void scull_p_setup_cdev(struct scull_pipe *dev,int index)
{
	int devno = scull_p_devno + index;
	int err;

	cdev_init(&dev->cdev, &scull_pipe_fops);
	dev->cdev.owner = THIS_MODULE;
	err = cdev_add (&dev->cdev, devno, 1);
	/* Fail gracefully if need be */
	if (err)
		printk(KERN_NOTICE "Error %d adding scullpipe%d", err, index);
}

/*
 * Initialize the pipe devices.  Returns how many device numbers were
 * consumed (0 on failure means "carry on without pipes" — the caller
 * only uses the count to lay out subsequent device numbers).
 *
 * Bug fix vs. original: the failure printk ended in a literal "n"
 * instead of "\n".
 */
int scull_p_init(dev_t firstdev)
{
	int i, result;

	result = register_chrdev_region(firstdev, scull_p_nr_devs, "scullp");
	if (result < 0) {
		printk(KERN_NOTICE "Unable to get scullp region, error %d\n", result);
		return 0;
	}
	scull_p_devno = firstdev;
	scull_p_devices = kmalloc(scull_p_nr_devs * sizeof(struct scull_pipe), GFP_KERNEL);
	if (scull_p_devices == NULL) {
		unregister_chrdev_region(firstdev, scull_p_nr_devs);
		return 0;
	}
	memset(scull_p_devices, 0, scull_p_nr_devs * sizeof(struct scull_pipe));
	for (i = 0; i < scull_p_nr_devs; i++) {
		init_waitqueue_head(&(scull_p_devices[i].inq));
		init_waitqueue_head(&(scull_p_devices[i].outq));
		sema_init(&scull_p_devices[i].sem,1);
		scull_p_setup_cdev(scull_p_devices + i, i);
	}
#ifdef SCULL_DEBUG
	create_proc_read_entry("scullpipe", 0, NULL, scull_read_p_mem, NULL);
#endif
	return scull_p_nr_devs;
}

/*
 * Tear down the pipe devices: remove the /proc entry, delete each cdev
 * and free each buffer, then release the device array and the reserved
 * device numbers.  Safe to call even if scull_p_init failed early.
 */
void scull_p_cleanup(void)
{
	int i;

#ifdef SCULL_DEBUG
	remove_proc_entry("scullpipe", NULL);
#endif

	if (!scull_p_devices)
		return; /* nothing else to release */

	for (i = 0; i < scull_p_nr_devs; i++) {
		cdev_del(&scull_p_devices[i].cdev);
		kfree(scull_p_devices[i].buffer);
	}
	kfree(scull_p_devices);
	unregister_chrdev_region(scull_p_devno, scull_p_nr_devs);
	scull_p_devices = NULL; /* pedantic */
}



main.c

 

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/fs.h>		/* everything... */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/proc_fs.h>
#include <linux/fcntl.h>	/* O_ACCMODE */
#include <linux/seq_file.h>
#include <linux/cdev.h>

#include <asm/system.h>		/* cli(), *_flags */
#include <asm/uaccess.h>	/* copy_*_user */

#include "scull.h"		/* local definitions */

 int scull_major=SCULL_MAJOR;
int scull_minor=0;
int scull_nr_devs=SCULL_NR_DEVS;
int scull_quantum=SCULL_QUANTUM;
int scull_qset=SCULL_QSET;

module_param(scull_major,int,S_IRUGO);
module_param(scull_nr_devs,int,S_IRUGO);
module_param(scull_quantum,int,S_IRUGO);
MODULE_LICENSE("Dual BSD/GPL");


struct scull_dev *scull_devices;//scull_devices ½á¹¹Êý×飬Óкܶà¸öscull_dev½á¹¹

/*
 * Free every quantum, every quantum pointer array and every list node,
 * then reset the device to its empty default state.  Must be called
 * with the device semaphore held (or before the device is live).
 */
int scull_trim(struct scull_dev *dev)
{
	struct scull_qset *node, *following;
	int qset = dev->qset;	/* current array size */
	int i;

	for (node = dev->data; node; node = following) {
		if (node->data) {
			for (i = 0; i < qset; i++)
				kfree(node->data[i]);
			kfree(node->data);	/* the pointer array itself */
			node->data = NULL;
		}
		following = node->next;
		kfree(node);
	}
	dev->size = 0;
	dev->quantum = scull_quantum;
	dev->qset = scull_qset;
	dev->data = NULL;
	return 0;
}

#ifdef SCULL_DEBUG
/*
 * Legacy read_proc implementation: dump the status of every scull
 * device, its qset list, and the contents of the last (populated) node.
 *
 * Bug fixes vs. original:
 *  - the inner for loop used a comma where the second semicolon
 *    belongs, so it did not parse/iterate as intended;
 *  - "%nDevice" used the dangerous %n conversion instead of "\n";
 *  - the "dump last item" test was `qs && !qs->data` and then
 *    dereferenced qs->data[j] — i.e. it printed only nodes with a NULL
 *    array and crashed doing so; the correct test is a populated node
 *    with no successor (`qs->data && !qs->next`);
 *  - up() was called as scull_devices[i]->sem although scull_devices[i]
 *    is a struct, not a pointer;
 *  - trailing "n" in the format strings restored to "\n".
 */
int scull_read_procmem(char *buf,char **start,off_t offset,
			int count,int *eof,void *data)
{
	int i ,j ,len=0;
	int limit=count-80;	/* don't print more than this */

	for(i=0;i<scull_nr_devs&&len<=limit;i++)
		{
			struct scull_dev *d=&scull_devices[i];
			struct scull_qset *qs=d->data;

			if(down_interruptible(&d->sem))
				return -ERESTARTSYS;
			len+=sprintf(buf+len,"\nDevice %i: qset %i, q %i, sz %li\n",
				i,d->qset,d->quantum,d->size);
			for(;qs&&len<=limit;qs=qs->next)	/* scan the list */
				{
					len+=sprintf(buf+len,"  item at %p, qset at %p\n",
						qs,qs->data);
					if(qs->data&&!qs->next)	/* dump only the last item */
						{
							for(j=0;j<d->qset;j++)
								{
									if(qs->data[j])
										len+=sprintf(buf+len,
											"    %4i: %8p\n",j,qs->data[j]);
								}
						}
				}
			up(&d->sem);
		}
	*eof=1;
	return len;
}
/*
 * seq_file start(): hand back a pointer to the *pos-th device, or NULL
 * once the iterator has run past the last device.
 */
static void *scull_seq_start(struct seq_file *s,loff_t *pos)
{
	if(*pos>=scull_nr_devs)
		return NULL;	/* no more to read */
	return scull_devices+*pos;
}
/*
 * seq_file next(): advance the iterator to the next device.
 *
 * Bug fix vs. original: `*pos++` increments the POINTER (postfix ++
 * binds tighter than unary *), so the position never advanced.
 * `(*pos)++` increments the position itself.
 */
static void *scull_seq_next(struct seq_file *s,void *v ,loff_t *pos)
{
	(*pos)++;
	if(*pos>=scull_nr_devs)
		return NULL;
	return scull_devices+*pos;
}
/*
 * seq_file stop(): nothing to undo, since start() takes no locks.
 *
 * Bug fix vs. original: declared `void *` with no return statement;
 * seq_operations.stop returns void.
 */
static void scull_seq_stop(struct seq_file *s,void *v)
{
	/* Actually, there's nothing to do here */
}
/*
 * seq_file show(): print one scull device, its qset list, and the
 * contents of the last (populated) qset node.
 *
 * Bug fixes vs. original:
 *  - return type was `void *` although seq_operations.show returns int;
 *  - the first seq_printf was missing the comma after its format string
 *    (a syntax error);
 *  - trailing "n" in the format strings restored to "\n".
 */
static int scull_seq_show(struct seq_file *s ,void *v)
{
	struct scull_dev *dev=(struct scull_dev *)v;
	struct scull_qset *d;
	int i;

	if(down_interruptible(&dev->sem))
		return -ERESTARTSYS;
	/* which device is this? deduce it from the array base */
	seq_printf(s,"\nDevice %i: qset %i, quantum %i, sz %li\n",
			(int)(dev-scull_devices),dev->qset,dev->quantum,dev->size);
	for(d=dev->data;d;d=d->next)	/* list every qset node */
		{
			seq_printf(s,"  item at %p, qset at %p\n",d,d->data);
			if(d->data&&!d->next)	/* dump only the last item */
				for(i=0;i<dev->qset;i++)
					{
						if(d->data[i])
							seq_printf(s,"    %4i: %8p\n",i,d->data[i]);
					}
		}
	up(&dev->sem);
	return 0;
}
/* Iterator operations wiring the seq_file interface to our devices. */
static struct seq_operations scull_seq_ops={
			.start=scull_seq_start,
			.next=scull_seq_next,
			.stop=scull_seq_stop,
			.show=scull_seq_show
};
/*
 * /proc open(): connect the file to our seq_file iterator.
 *
 * Bug fix vs. original: the return type was omitted (implicit int is
 * invalid since C99 and rejected by modern compilers).
 */
static int scull_proc_open(struct inode *inode,struct file *file)
{
	return seq_open(file,&scull_seq_ops);
}
/* File operations for the seq_file-based /proc/scullseq entry; the
 * read/llseek/release methods come straight from the seq_file library. */
static struct file_operations scull_proc_ops={
		.owner=THIS_MODULE,
		.open=scull_proc_open,
		.read=seq_read,
		.llseek=seq_lseek,
		.release=seq_release
	};
//ÓÃÁ½ÖÖ·½·¨¸÷×Ô´´½¨ÁËprocÎļþ
static scull_create_proc(void)
{
	struct proc_dir_entry *entry;
	//ÀÏ·½·¨´´½¨µÄ
	//
	create_proc_read_entry("scullmem",0,NULL,scull_read_procmem,NULL);
}

/*
 * Remove both /proc entries.  remove_proc_entry completes cleanly for
 * entries that were successfully registered.
 */
static void scull_remove_proc(void)
{
	remove_proc_entry("scullmem",NULL);
	remove_proc_entry("scullseq",NULL);
}

#endif

/*
 * open(): locate our scull_dev from the cdev embedded in the inode,
 * stash it in filp->private_data for the other methods, and truncate
 * the device when it is opened write-only.
 */
int scull_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev; /* device information */

	dev = container_of(inode->i_cdev, struct scull_dev, cdev);
	/*
	 * Notes:
	 *  1. what we want is our own device structure, not the bare char
	 *     device embedded inside it;
	 *  2. inode->i_cdev hands us that embedded struct cdev;
	 *  3. container_of (from <linux/kernel.h>) maps the cdev back to
	 *     the enclosing scull_dev.
	 */

	/* read(), write() etc. retrieve dev from private_data */
	filp->private_data = dev; /* for other methods */


	/* now trim to 0 the length of the device if open was write-only */
	if ( (filp->f_flags & O_ACCMODE) == O_WRONLY) {
		if (down_interruptible(&dev->sem))
			return -ERESTARTSYS;
		scull_trim(dev); /* ignore errors */
		up(&dev->sem);
	}
	return 0;          /* success */
}

/*
 * release(): nothing to do — scull keeps its data between opens, so
 * there is nothing to free or flush here.
 */
int scull_release(struct inode *inode, struct file *filp)
{
	return 0;
}

/*
 * Follow the qset list to the n-th node, allocating zeroed nodes along
 * the way as needed (holes created by seeks must still be reachable).
 * Returns the node, or NULL on allocation failure.
 *
 * Bug fixes vs. original:
 *  - the function fell off the end without `return qs` — undefined
 *    behavior for every caller that uses the result;
 *  - in the allocation branch, qs was advanced BEFORE the memset, so
 *    memset(qs->next, ...) wrote through an uninitialized pointer and
 *    the list was then advanced twice per iteration.
 */
struct scull_qset *scull_follow(struct scull_dev *dev,int n)
{
	struct scull_qset *qs=dev->data;

	/* Allocate the first qset explicitly if need be */
	if(!qs)
		{
			qs=dev->data=kmalloc(sizeof(struct scull_qset),GFP_KERNEL);
			if(!qs)
				return NULL;
			memset(qs,0,sizeof(struct scull_qset));
		}
	/* Then follow the list */
	while(n--)
		{
			if(!qs->next)
				{
					qs->next=kmalloc(sizeof(struct scull_qset),GFP_KERNEL);
					if(!qs->next)
						return NULL;
					memset(qs->next,0,sizeof(struct scull_qset));
				}
			qs=qs->next;
		}
	return qs;
}

/*
 * Read from a scull device.  Locates the qset node and quantum holding
 * *f_pos and copies at most one quantum's worth to user space (a short
 * read — callers loop for the rest).
 */
ssize_t scull_read(struct file *filp, char __user *buf, size_t count,
                loff_t *f_pos)
{
	struct scull_dev *dev = filp->private_data; 
	struct scull_qset *dptr;	/* the first listitem */
	int quantum = dev->quantum, qset = dev->qset;
	int itemsize = quantum * qset; /* how many bytes in the listitem */
	int item, s_pos, q_pos, rest;
	ssize_t retval = 0;

	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;
	if (*f_pos >= dev->size) /* position is at or beyond EOF */
		goto out;
	if (*f_pos + count > dev->size) /* request crosses EOF */
		count = dev->size - *f_pos;	/* trim the request */

	/* find listitem, qset index, and offset in the quantum */
	item = (long)*f_pos / itemsize; /* which list node */
	rest = (long)*f_pos % itemsize; /* offset within that node */
	s_pos = rest / quantum;		/* index into the node's data[] array */
	 q_pos = rest % quantum; /* offset inside that quantum */

	/* follow the list up to the right position (defined elsewhere) */
	dptr = scull_follow(dev, item);

	if (dptr == NULL || !dptr->data || ! dptr->data[s_pos])
		goto out; /* don't fill holes */

	/* read only up to the end of this quantum (simplifies the code;
	 * the caller comes back for the rest) */
	if (count > quantum - q_pos)
		count = quantum - q_pos;

	/* the actual transfer: kernel quantum -> user buffer, starting at
	 * dptr->data[s_pos] + q_pos as computed above */
	if (copy_to_user(buf, dptr->data[s_pos] + q_pos, count)) {
		retval = -EFAULT;
		goto out;
	}
	*f_pos += count; /* advance the file position */
	retval = count;

  out:
	up(&dev->sem);
	return retval;
}

/*
 * Write to a scull device (mirrors scull_read).  Locates the qset node
 * and quantum for *f_pos, allocating the node, the pointer array, and
 * the quantum on demand, then copies at most one quantum's worth from
 * user space and grows dev->size if the file got longer.
 */
ssize_t scull_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
	struct scull_dev *dev = filp->private_data;
	struct scull_qset *dptr;
	int quantum = dev->quantum, qset = dev->qset;
	int itemsize = quantum * qset;
	int item, s_pos, q_pos, rest;
	ssize_t retval = -ENOMEM; /* value used in "goto out" statements */

	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	/* find listitem, qset index and offset in the quantum */
	item = (long)*f_pos / itemsize;
	rest = (long)*f_pos % itemsize;
	s_pos = rest / quantum; q_pos = rest % quantum;

	/* follow the list up to the right position */
	dptr = scull_follow(dev, item);
	if (dptr == NULL)
		goto out;
	if (!dptr->data) {
		/* first write into this node: allocate the pointer array */
		dptr->data = kmalloc(qset * sizeof(char *), GFP_KERNEL);
		if (!dptr->data)
			goto out;
		memset(dptr->data, 0, qset * sizeof(char *));
	}
	if (!dptr->data[s_pos]) {
		/* allocate the quantum itself on first touch */
		dptr->data[s_pos] = kmalloc(quantum, GFP_KERNEL);
		if (!dptr->data[s_pos])
			goto out;
	}
	/* write only up to the end of this quantum */
	if (count > quantum - q_pos)
		count = quantum - q_pos;

	if (copy_from_user(dptr->data[s_pos]+q_pos, buf, count)) {
		retval = -EFAULT;
		goto out;
	}
	*f_pos += count;
	retval = count;

        /* update the size */
	if (dev->size < *f_pos)
		dev->size = *f_pos;

  out:
	up(&dev->sem);
	return retval;
}

/*
 * The ioctl() implementation
 */

long  scull_ioctl(struct inode *inode,struct file *filp,unsigned int cmd,unsigned long arg)
{
	int err=0,tmp;
	int retval=0;
	if(_IOC_TYPE(cmd)!=SCULL_IOC_MAGIC) return -ENOTTY;
	if(_IOC_NR(cmd)>SCULL_IOC_MAXNR)return -ENOTTY;
	if(_IOC_DIR(cmd)&_IOC_READ)
		err=!access_ok(VERIFY_WRITE,(void __user *)arg,_IOC_SIZE(cmd));
	if(err) return -EFAULT;

	switch(cmd)
		{
			case SCULL_IOCRESET:
		scull_quantum = SCULL_QUANTUM;
		scull_qset = SCULL_QSET;
		break;
        
	  case SCULL_IOCSQUANTUM: /* Set: arg points to the value */
		if (! capable (CAP_SYS_ADMIN))
			return -EPERM;
		retval = __get_user(scull_quantum, (int __user *)arg);
		break;

	  case SCULL_IOCTQUANTUM: /* Tell: arg is the value */
		if (! capable (CAP_SYS_ADMIN))
			return -EPERM;
		scull_quantum = arg;
		break;

	  case SCULL_IOCGQUANTUM: /* Get: arg is pointer to result */
		retval = __put_user(scull_quantum, (int __user *)arg);
		break;

	  case SCULL_IOCQQUANTUM: /* Query: return it (it's positive) */
		return scull_quantum;

	  case SCULL_IOCXQUANTUM: /* eXchange: use arg as pointer */
		if (! capable (CAP_SYS_ADMIN))
			return -EPERM;
		tmp = scull_quantum;
		retval = __get_user(scull_quantum, (int __user *)arg);
		if (retval == 0)
			retval = __put_user(tmp, (int __user *)arg);
		break;

	  case SCULL_IOCHQUANTUM: /* sHift: like Tell + Query */
		if (! capable (CAP_SYS_ADMIN))
			return -EPERM;
		tmp = scull_quantum;
		scull_quantum = arg;
		return tmp;
        
	  case SCULL_IOCSQSET:
		if (! capable (CAP_SYS_ADMIN))
			return -EPERM;
		retval = __get_user(scull_qset, (int __user *)arg);
		break;

	  case SCULL_IOCTQSET:
		if (! capable (CAP_SYS_ADMIN))
			return -EPERM;
		scull_qset = arg;
		break;

	  case SCULL_IOCGQSET:
		retval = __put_user(scull_qset, (int __user *)arg);
		break;

	  case SCULL_IOCQQSET:
		return scull_qset;

	  case SCULL_IOCXQSET:
		if (! capable (CAP_SYS_ADMIN))
			return -EPERM;
		tmp = scull_qset;
		retval = __get_user(scull_qset, (int __user *)arg);
		if (retval == 0)
			retval = put_user(tmp, (int __user *)arg);
		break;

	  case SCULL_IOCHQSET:
		if (! capable (CAP_SYS_ADMIN))
			return -EPERM;
		tmp = scull_qset;
		scull_qset = arg;
		return tmp;

        /*
         * The following two change the buffer size for scullpipe.
         * The scullpipe device uses this same ioctl method, just to
         * write less code. Actually, it's the same driver, isn't it?
         */

	  case SCULL_P_IOCTSIZE:
		scull_p_buffer = arg;
		break;

	  case SCULL_P_IOCQSIZE:
		return scull_p_buffer;


	  default:  /* redundant, as cmd was checked against MAXNR */
		return -ENOTTY;
		}
	return retval;
}


/*
 * llseek: scull devices are fully seekable.  Pick the base position
 * from `whence`, add the offset, and refuse a negative result.
 */
loff_t scull_llseek(struct file *filp, loff_t off, int whence)
{
	struct scull_dev *dev = filp->private_data;
	loff_t base;

	if (whence == 0)		/* SEEK_SET */
		base = 0;
	else if (whence == 1)		/* SEEK_CUR */
		base = filp->f_pos;
	else if (whence == 2)		/* SEEK_END */
		base = dev->size;
	else				/* can't happen */
		return -EINVAL;

	if (base + off < 0)
		return -EINVAL;
	filp->f_pos = base + off;
	return filp->f_pos;
}


/* [Tag007] Bundle the bare-scull operations into one fops object. */
struct file_operations scull_fops = {
	.owner =    THIS_MODULE,
	.llseek =   scull_llseek,
	.read =     scull_read,
	.write =    scull_write,
	.unlocked_ioctl = scull_ioctl,
	.open =     scull_open,
	.release =  scull_release,
};

/*
 * Module exit (also used as the error-unwind path of init): free all
 * device data, delete the cdevs, remove /proc entries, release the
 * device number region, and tear down the friend devices.
 */
void scull_cleanup_module(void)
{
	int i;
	dev_t devno = MKDEV(scull_major, scull_minor);

	/* Get rid of our char dev entries */
	if (scull_devices) {
		for (i = 0; i < scull_nr_devs; i++) {
			scull_trim(scull_devices + i);
			cdev_del(&scull_devices[i].cdev);	/* unregister the cdev from the kernel */
		}
		kfree(scull_devices);
	}

#ifdef SCULL_DEBUG /* use proc only if debugging */
	scull_remove_proc();
#endif

	/* cleanup_module is never called if registering failed */
	unregister_chrdev_region(devno, scull_nr_devs);

	/* and call the cleanup functions for friend devices */
	scull_p_cleanup();
	scull_access_cleanup();

}


/*
 * Set up one struct cdev and register it with the kernel.
 */
static void scull_setup_cdev(struct scull_dev *dev, int index)
{
	int err, devno = MKDEV(scull_major, scull_minor + index);
    
   // [1]
	cdev_init(&dev->cdev, &scull_fops);	/* initialize the cdev and attach our method set */
	
	/* fill in the remaining cdev members */
	dev->cdev.owner = THIS_MODULE;		/* module refcounting */
	dev->cdev.ops = &scull_fops;		/* our operations */
	
//	[2]
	err = cdev_add (&dev->cdev, devno, 1);
	/*
	 * cdev_add(cdev, devno, count):
	 *  cdev  -- the char device structure we are handing to the kernel;
	 *  devno -- first device number (built with MKDEV from the global
	 *           major and this device's minor);
	 *  1     -- how many consecutive device numbers map to this device
	 *           (usually one number per device).
	 */
	/*
	 * Note: as soon as cdev_add returns, the device is "live" and its
	 * operations can be invoked by the kernel — never call it before
	 * the driver is fully ready to handle requests.
	 */
	/* Fail gracefully if need be */
	if (err)
		printk(KERN_NOTICE "Error %d adding scull%d", err, index);
}

/*
 * [Tag000] Module init: reserve the device number range, allocate and
 * register the scull devices, then initialize the friend devices
 * (pipes, access-control variants) and the /proc entries.
 *
 * Bug fix vs. original: the failure printk ended in a literal "n"
 * instead of "\n".
 */
int scull_init_module(void)
{
	int result, i;
	dev_t dev = 0;

/* [Tag001] */
/* [1] obtain device numbers */
/*
 * Get a range of minor numbers to work with, asking for a dynamic
 * major unless directed otherwise at load time.
 */
	if (scull_major) {	/* major specified at load time */
		dev = MKDEV(scull_major, scull_minor);
		result = register_chrdev_region(dev, scull_nr_devs, "scull");
	} else {		/* dynamic major: the device nodes must be
				 * created after load (the major isn't known
				 * beforehand — the LDD3 scull_load script
				 * handles this) */
		result = alloc_chrdev_region(&dev, scull_minor, scull_nr_devs,
				"scull");
		scull_major = MAJOR(dev);
	}
	if (result < 0) {
		printk(KERN_WARNING "scull: can't get major %d\n", scull_major);
		return result;
	}

    /* [2] instantiate the device objects */
        /* 
	 * allocate the devices -- we can't have them static, as the number
	 * can be specified at load time
	 */
	scull_devices = kmalloc(scull_nr_devs * sizeof(struct scull_dev), GFP_KERNEL);
	if (!scull_devices) {
		result = -ENOMEM;
		goto fail;  /* Make this more graceful */
	}
	memset(scull_devices, 0, scull_nr_devs * sizeof(struct scull_dev));

/* [3] per-device init; the cdev registration happens in scull_setup_cdev */
        /* Initialize each device. */
	for (i = 0; i < scull_nr_devs; i++) {
		scull_devices[i].quantum = scull_quantum;	/* quantum/qset sizes are
								 * tunable at insmod time */
		scull_devices[i].qset = scull_qset;
		sema_init(&scull_devices[i].sem,1);
		scull_setup_cdev(&scull_devices[i], i);
	}

        /* At this point call the init function for any friend device */
	dev = MKDEV(scull_major, scull_minor + scull_nr_devs);
	dev += scull_p_init(dev);
	dev += scull_access_init(dev);

#ifdef SCULL_DEBUG /* only when debugging */
	scull_create_proc();
#endif

	return 0; /* succeed */

  fail:
	scull_cleanup_module();
	return result;
}

module_init(scull_init_module);		/* entry point, run at insmod */
module_exit(scull_cleanup_module);	/* exit point, run at rmmod */

access.c

#include <linux/kernel.h> /* printk() */
#include <linux/module.h>
#include <linux/slab.h>   /* kmalloc() */
#include <linux/fs.h>     /* everything... */
#include <linux/errno.h>  /* error codes */
#include <linux/types.h>  /* size_t */
#include <linux/fcntl.h>
#include <linux/cdev.h>
#include <linux/tty.h>
#include <asm/atomic.h>
#include <linux/list.h>

#include <linux/sched.h>
#include <linux/spinlock_types.h>

#include "scull.h"        /* local definitions */

/* Compatibility shim: the old SPIN_LOCK_UNLOCKED static initializer was
 * removed from newer kernels; recreate it via __SPIN_LOCK_UNLOCKED. */
#define SPIN_LOCK_UNLOCKED	__SPIN_LOCK_UNLOCKED(old_style_spin_init)

static dev_t scull_a_firstdev;  /* Where our range begins */

/*
 * These devices fall back on the main scull operations. They only
 * differ in the implementation of open() and close()
 */



/************************************************************************
 *
 * The first device is the single-open one,
 *  it has an hw structure and an open count
 */

static struct scull_dev scull_s_device;
static atomic_t scull_s_available = ATOMIC_INIT(1);

/*
 * Single-open device: atomically claim the one available slot; any
 * concurrent second open gets -EBUSY.
 */
static int scull_s_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev = &scull_s_device; /* device information */

	/* atomic_dec_and_test succeeds for exactly one opener at a time */
	if (! atomic_dec_and_test (&scull_s_available)) {
		atomic_inc(&scull_s_available);	/* undo our decrement */
		return -EBUSY; /* already open */
	}

	/* then, everything else is copied from the bare scull device */
	if ( (filp->f_flags & O_ACCMODE) == O_WRONLY)
		scull_trim(dev);	/* truncate on write-only open */
	filp->private_data = dev;
	return 0;          /* success */
}

/* Single-open release: return the slot so the next open can succeed. */
static int scull_s_release(struct inode *inode, struct file *filp)
{
	atomic_inc(&scull_s_available); /* release the device */
	return 0;
}


/*
 * The other operations for the single-open device come from the bare device
 */
/*
 * The other operations for the single-open device come from the bare
 * device; only open/release are specialized.
 */
struct file_operations scull_sngl_fops = {
	.owner =	THIS_MODULE,
	.llseek =     	scull_llseek,
	.read =       	scull_read,
	.write =      	scull_write,
	.unlocked_ioctl =      	scull_ioctl,
	.open =       	scull_s_open,
	.release =    	scull_s_release,
};


/************************************************************************
 *
 * Next, the "uid" device. It can be opened multiple times by the
 * same user, but access is denied to other users if the device is open
 */

static struct scull_dev scull_u_device;
static int scull_u_count;	/* initialized to 0 by default */
static uid_t scull_u_owner;	/* initialized to 0 by default */
static spinlock_t scull_u_lock = SPIN_LOCK_UNLOCKED;

/*
 * "uid" device open: the same user (or whoever did su, or root via
 * CAP_DAC_OVERRIDE) may open it any number of times; everyone else
 * gets -EBUSY while it is open.  The first opener records its uid.
 * NOTE(review): current->cred->uid is a plain uid_t only on older
 * kernels; newer kernels use kuid_t and require uid_eq() — confirm
 * against the target kernel version.
 */
static int scull_u_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev = &scull_u_device; /* device information */

	spin_lock(&scull_u_lock);
	if (scull_u_count && 
			(scull_u_owner != current->cred->uid) &&  /* allow user */
			(scull_u_owner != current->cred->euid) && /* allow whoever did su */
			!capable(CAP_DAC_OVERRIDE)) { /* still allow root */
		spin_unlock(&scull_u_lock);
		return -EBUSY;   /* -EPERM would confuse the user */
	}

	if (scull_u_count == 0)
		scull_u_owner = current->cred->uid; /* grab it */

	scull_u_count++;
	spin_unlock(&scull_u_lock);

/* then, everything else is copied from the bare scull device */

	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
		scull_trim(dev);
	filp->private_data = dev;
	return 0;          /* success */
}

/* "uid" device release: drop the open count under the lock. */
static int scull_u_release(struct inode *inode, struct file *filp)
{
	spin_lock(&scull_u_lock);
	scull_u_count--; /* nothing else */
	spin_unlock(&scull_u_lock);
	return 0;
}



/*
 * The other operations for the device come from the bare device
 */
/*
 * The other operations for the "uid" device come from the bare device;
 * only open/release are specialized.
 */
struct file_operations scull_user_fops = {
	.owner =      THIS_MODULE,
	.llseek =     scull_llseek,
	.read =       scull_read,
	.write =      scull_write,
	.unlocked_ioctl =      scull_ioctl,
	.open =       scull_u_open,
	.release =    scull_u_release,
};


/************************************************************************
 *
 * Next, the device with blocking-open based on uid
 */

static struct scull_dev scull_w_device;
static int scull_w_count;	/* initialized to 0 by default */
static uid_t scull_w_owner;	/* initialized to 0 by default */
static DECLARE_WAIT_QUEUE_HEAD(scull_w_wait);
static spinlock_t scull_w_lock = SPIN_LOCK_UNLOCKED;

/*
 * True when the calling process may open scull_w_device: it is unused,
 * already owned by this uid/euid, or the caller has CAP_DAC_OVERRIDE.
 * NOTE(review): cred->uid is kuid_t on newer kernels — confirm.
 */
static inline int scull_w_available(void)
{
	return scull_w_count == 0 ||
		scull_w_owner == current->cred->uid ||
		scull_w_owner == current->cred->euid ||
		capable(CAP_DAC_OVERRIDE);
}


/*
 * "wuid" device open: like sculluid, but a non-owner blocks until the
 * device is free (unless O_NONBLOCK) instead of getting -EBUSY.
 * Classic pattern: test under the lock, drop it to sleep, retest.
 */
static int scull_w_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev = &scull_w_device; /* device information */

	spin_lock(&scull_w_lock);
	while (! scull_w_available()) {
		spin_unlock(&scull_w_lock);	/* never sleep holding a spinlock */
		if (filp->f_flags & O_NONBLOCK) return -EAGAIN;
		if (wait_event_interruptible (scull_w_wait, scull_w_available()))
			return -ERESTARTSYS; /* tell the fs layer to handle it */
		spin_lock(&scull_w_lock);	/* reacquire and recheck */
	}
	if (scull_w_count == 0)
		scull_w_owner = current->cred->uid; /* grab it */
	scull_w_count++;
	spin_unlock(&scull_w_lock);

	/* then, everything else is copied from the bare scull device */
	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
		scull_trim(dev);
	filp->private_data = dev;
	return 0;          /* success */
}

/*
 * "wuid" device release: drop the open count and, when the last opener
 * of this uid goes away, wake processes from other uids blocked in
 * scull_w_open.
 */
static int scull_w_release(struct inode *inode, struct file *filp)
{
	int remaining;

	spin_lock(&scull_w_lock);
	remaining = --scull_w_count;
	spin_unlock(&scull_w_lock);

	if (remaining == 0)
		wake_up_interruptible_sync(&scull_w_wait); /* awake other uid's */
	return 0;
}


/*
 * The other operations for the device come from the bare device
 */
/*
 * The other operations for the "wuid" device come from the bare device;
 * only open/release are specialized.
 */
struct file_operations scull_wusr_fops = {
	.owner =      THIS_MODULE,
	.llseek =     scull_llseek,
	.read =       scull_read,
	.write =      scull_write,
	.unlocked_ioctl =      scull_ioctl,
	.open =       scull_w_open,
	.release =    scull_w_release,
};

/************************************************************************
 *
 * Finally the `cloned' private device. This is trickier because it
 * involves list management, and dynamic allocation.
 */

/* The clone-specific data structure includes a key field */

/*
 * Clone-device list node: one scull_dev per controlling tty, keyed by
 * that tty's device number, linked into scull_c_list.
 */
struct scull_listitem {
	struct scull_dev device;
	dev_t key;
	struct list_head list;
    
};

/* The list of devices, and a lock to protect it */
static LIST_HEAD(scull_c_list);
static spinlock_t scull_c_lock = SPIN_LOCK_UNLOCKED;

/* A placeholder scull_dev which really just holds the cdev stuff. */
static struct scull_dev scull_c_device;   

/* Look for a device or create one if missing */
/*
 * Look up the device keyed by `key` in the clone list, creating it if
 * missing.  Called with scull_c_lock (a spinlock) held.
 *
 * Bug fix vs. original: the allocation used GFP_KERNEL, which may
 * sleep, but the caller (scull_c_open) holds the scull_c_lock spinlock
 * — sleeping in atomic context can deadlock.  GFP_ATOMIC never sleeps.
 */
static struct scull_dev *scull_c_lookfor_device(dev_t key)
{
	struct scull_listitem *lptr;

	list_for_each_entry(lptr, &scull_c_list, list) {
		if (lptr->key == key)
			return &(lptr->device);
	}

	/* not found */
	lptr = kmalloc(sizeof(struct scull_listitem), GFP_ATOMIC);
	if (!lptr)
		return NULL;

	/* initialize the device */
	memset(lptr, 0, sizeof(struct scull_listitem));
	lptr->key = key;
	scull_trim(&(lptr->device)); /* initialize it */
	sema_init(&(lptr->device.sem),1);

	/* place it in the list */
	list_add(&lptr->list, &scull_c_list);

	return &(lptr->device);
}

/*
 * Open for the "cloned" private device: each controlling tty gets its
 * own scull device, keyed on the tty's device number.
 */
static int scull_c_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev;
	dev_t key;

	if (!current->signal->tty) {
		/*
		 * BUGFIX: the format string had unescaped quotes and a bare
		 * 'n' ("Process "%s" has no ctl ttyn") — it would not even
		 * compile with SCULL_DEBUG enabled.
		 */
		PDEBUG("Process \"%s\" has no ctl tty\n", current->comm);
		return -EINVAL;
	}
	key = tty_devnum(current->signal->tty);

	/* look for a scullc device in the list */
	spin_lock(&scull_c_lock);
	dev = scull_c_lookfor_device(key);
	spin_unlock(&scull_c_lock);

	if (!dev)
		return -ENOMEM;

	/* then, everything else is copied from the bare scull device */
	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
		scull_trim(dev);
	filp->private_data = dev;
	return 0;          /* success */
}

/* Release for the cloned device; always succeeds. */
static int scull_c_release(struct inode *inode, struct file *filp)
{
	/*
	 * Nothing to do, because the device is persistent.
	 * A `real' cloned device should be freed on last close
	 * (the list items are only reclaimed in scull_access_cleanup).
	 */
	return 0;
}



/*
 * The other operations for the device come from the bare device
 */
/*
 * NOTE(review): scull_ioctl is declared in scull.h with the legacy
 * .ioctl signature (extra struct inode * first argument), which does
 * not match the .unlocked_ioctl prototype used here — verify the
 * actual definition.
 */
struct file_operations scull_priv_fops = {
	.owner =    THIS_MODULE,
	.llseek =   scull_llseek,
	.read =     scull_read,
	.write =    scull_write,
	.unlocked_ioctl =    scull_ioctl,
	.open =     scull_c_open,
	.release =  scull_c_release,
};

/************************************************************************
 *
 * And the init and cleanup functions come last
 */

/*
 * Table of the four access-control devices.  The name string is used
 * for the cdev kobject and in registration log messages.
 */
static struct scull_adev_info {
	char *name;                    /* device name, e.g. "scullsingle" */
	struct scull_dev *sculldev;    /* backing device structure */
	struct file_operations *fops;  /* fops variant enforcing the policy */
} scull_access_devs[] = {
	{ "scullsingle", &scull_s_device, &scull_sngl_fops },
	{ "sculluid", &scull_u_device, &scull_user_fops },
	{ "scullwuid", &scull_w_device, &scull_wusr_fops },
	{ "scullpriv", &scull_c_device, &scull_priv_fops } /* BUGFIX: was "sullpriv" (typo) */
};
#define SCULL_N_ADEVS 4

/*
 * Set up a single access-control device: fill in the scull_dev fields
 * and register its cdev at @devno under the name from @devinfo.
 * Failure is reported but not propagated (the device is simply absent).
 */
static void scull_access_setup (dev_t devno, struct scull_adev_info *devinfo)
{
	struct scull_dev *dev = devinfo->sculldev;
	int err;

	/* Initialize the device structure */
	dev->quantum = scull_quantum;
	dev->qset = scull_qset;
	sema_init(&dev->sem, 1);

	/* Do the cdev stuff. */
	cdev_init(&dev->cdev, devinfo->fops);
	kobject_set_name(&dev->cdev.kobj, devinfo->name);
	dev->cdev.owner = THIS_MODULE;
	err = cdev_add(&dev->cdev, devno, 1);
	/* Fail gracefully if need be */
	if (err) {
		/* BUGFIX: both messages were missing their '\n' terminators */
		printk(KERN_NOTICE "Error %d adding %s\n", err, devinfo->name);
		kobject_put(&dev->cdev.kobj);
	} else
		printk(KERN_NOTICE "%s registered at %x\n", devinfo->name, devno);
}


/*
 * Register the device-number range starting at @firstdev and set up all
 * SCULL_N_ADEVS access-control devices.
 *
 * Returns the number of devices set up (0 if the region could not be
 * registered — the caller treats that as "no access devices").
 */
int scull_access_init(dev_t firstdev)
{
	int result, i;

	/* Get our number space */
	result = register_chrdev_region (firstdev, SCULL_N_ADEVS, "sculla");
	if (result < 0) {
		/* BUGFIX: the message was missing its '\n' terminator */
		printk(KERN_WARNING "sculla: device number registration failed\n");
		return 0;
	}
	scull_a_firstdev = firstdev;

	/* Set up each device. */
	for (i = 0; i < SCULL_N_ADEVS; i++)
		scull_access_setup (firstdev + i, scull_access_devs + i);
	return SCULL_N_ADEVS;
}

/*
 * This is called by cleanup_module or on failure.
 * It is required to never fail, even if nothing was initialized first.
 */
void scull_access_cleanup(void)
{
	struct scull_listitem *item, *tmp;
	int i;

	/* Tear down the four static access-control devices. */
	for (i = 0; i < SCULL_N_ADEVS; i++) {
		cdev_del(&scull_access_devs[i].sculldev->cdev);
		scull_trim(scull_access_devs[i].sculldev);
	}

	/* Then free every cloned device still on the list. */
	list_for_each_entry_safe(item, tmp, &scull_c_list, list) {
		list_del(&item->list);
		scull_trim(&item->device);
		kfree(item);
	}

	/* Free up our number space */
	unregister_chrdev_region(scull_a_firstdev, SCULL_N_ADEVS);
}


scull.h

#ifndef _SCULL_H_
#define _SCULL_H_

#include <linux/ioctl.h> /* needed for the _IOW etc stuff used later */

/*
 * Macros to help debugging
 */

#undef PDEBUG             /* undef it, just in case */
#ifdef SCULL_DEBUG
#  ifdef __KERNEL__
     /* This one if debugging is on, and kernel space */
#    define PDEBUG(fmt, args...) printk( KERN_DEBUG "scull: " fmt, ## args)
#  else
     /* This one for user space */
#    define PDEBUG(fmt, args...) fprintf(stderr, fmt, ## args)
#  endif
#else
   /* Debugging off: PDEBUG expands to nothing, so calls cost nothing. */
#  define PDEBUG(fmt, args...) /* not debugging: nothing */
#endif

#undef PDEBUGG
/* Rename a PDEBUG call to PDEBUGG to silence it without deleting it. */
#define PDEBUGG(fmt, args...) /* nothing: it's a placeholder */

#ifndef SCULL_MAJOR
#define SCULL_MAJOR 0   /* dynamic major by default */
#endif

#ifndef SCULL_NR_DEVS
#define SCULL_NR_DEVS 4    /* scull0 through scull3 */
#endif

#ifndef SCULL_P_NR_DEVS
#define SCULL_P_NR_DEVS 4  /* scullpipe0 through scullpipe3 */
#endif

/*
 * The bare device is a variable-length region of memory.
 * Use a linked list of indirect blocks.
 *
 * "scull_dev->data" points to an array of pointers, each
 * pointer refers to a memory area of SCULL_QUANTUM bytes.
 *
 * The array (quantum-set) is SCULL_QSET long.
 */
#ifndef SCULL_QUANTUM
#define SCULL_QUANTUM 4000		/* each quantum pointer refers to a 4000-byte area */
#endif

#ifndef SCULL_QSET
#define SCULL_QSET    1000		/* one qset is an array of 1000 quantum pointers */
#endif

/*
 * The pipe device is a simple circular buffer. Here its default size
 */
#ifndef SCULL_P_BUFFER
#define SCULL_P_BUFFER 4000
#endif

/*
 * Representation of scull quantum sets.
 * Ò»¸öÁ´±íÏî
 */
struct scull_qset {
	void **data;              /* array of SCULL_QSET quantum pointers */
	struct scull_qset *next;  /* next node in the linked list of qsets */
};

/* Our device structure (embeds the basic cdev char-device structure). */
struct scull_dev {
	struct scull_qset *data;  /* Pointer to first quantum set (list head) */
	int quantum;              /* the current quantum size */
	int qset;                 /* the current array size */
	unsigned long size;       /* amount of data stored here */
	unsigned int access_key;  /* used by sculluid and scullpriv */
	struct semaphore sem;     /* mutual exclusion semaphore     */
	struct cdev cdev;	  /* Char device structure		*/
};

/*
 * Split minors in two parts
 * (presumably the high nibble selects the device variant and the low
 * nibble the instance — TODO confirm against the minor assignments in
 * main.c, which is not visible here).
 */
#define TYPE(minor)	(((minor) >> 4) & 0xf)	/* high nibble */
#define NUM(minor)	((minor) & 0xf)		/* low  nibble */


/*
 * The different configurable parameters
 */
extern int scull_major;     /* main.c */
extern int scull_nr_devs;
extern int scull_quantum;
extern int scull_qset;

extern int scull_p_buffer;	/* pipe.c */


/*
 * Prototypes for shared functions
 */

int     scull_p_init(dev_t dev);
void    scull_p_cleanup(void);
int     scull_access_init(dev_t dev);
void    scull_access_cleanup(void);

int     scull_trim(struct scull_dev *dev);

ssize_t scull_read(struct file *filp, char __user *buf, size_t count,
                   loff_t *f_pos);
ssize_t scull_write(struct file *filp, const char __user *buf, size_t count,
                    loff_t *f_pos);
loff_t  scull_llseek(struct file *filp, loff_t off, int whence);
/*
 * BUGFIX: scull_ioctl is installed as .unlocked_ioctl in every fops
 * table, whose prototype is long (*)(struct file *, unsigned int,
 * unsigned long).  The old declaration kept the struct inode * first
 * parameter from the legacy .ioctl interface and would not type-check
 * against the initializers.  (Confirm the definition in main.c matches.)
 */
long    scull_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);


/*
 * Ioctl definitions
 */

/* Use 'k' as magic number */
#define SCULL_IOC_MAGIC  'k'
/* Please use a different 8-bit number in your code */

#define SCULL_IOCRESET    _IO(SCULL_IOC_MAGIC, 0)

/*
 * S means "Set" through a ptr,
 * T means "Tell" directly with the argument value
 * G means "Get": reply by setting through a pointer
 * Q means "Query": response is on the return value
 * X means "eXchange": switch G and S atomically
 * H means "sHift": switch T and Q atomically
 */
#define SCULL_IOCSQUANTUM _IOW(SCULL_IOC_MAGIC,  1, int)
#define SCULL_IOCSQSET    _IOW(SCULL_IOC_MAGIC,  2, int)
#define SCULL_IOCTQUANTUM _IO(SCULL_IOC_MAGIC,   3)
#define SCULL_IOCTQSET    _IO(SCULL_IOC_MAGIC,   4)
#define SCULL_IOCGQUANTUM _IOR(SCULL_IOC_MAGIC,  5, int)
#define SCULL_IOCGQSET    _IOR(SCULL_IOC_MAGIC,  6, int)
#define SCULL_IOCQQUANTUM _IO(SCULL_IOC_MAGIC,   7)
#define SCULL_IOCQQSET    _IO(SCULL_IOC_MAGIC,   8)
#define SCULL_IOCXQUANTUM _IOWR(SCULL_IOC_MAGIC, 9, int)
#define SCULL_IOCXQSET    _IOWR(SCULL_IOC_MAGIC,10, int)
#define SCULL_IOCHQUANTUM _IO(SCULL_IOC_MAGIC,  11)
#define SCULL_IOCHQSET    _IO(SCULL_IOC_MAGIC,  12)

/*
 * The other entities only have "Tell" and "Query", because they're
 * not printed in the book, and there's no need to have all six.
 * (The previous stuff was only there to show different ways to do it.
 */
#define SCULL_P_IOCTSIZE _IO(SCULL_IOC_MAGIC,   13)
#define SCULL_P_IOCQSIZE _IO(SCULL_IOC_MAGIC,   14)
/* ... more to come */

/* Keep this equal to the highest command number defined above. */
#define SCULL_IOC_MAXNR 14

#endif /* _SCULL_H_ */


先 make,再切换到 root 权限,使用 ./scull_load 来加载模块

卸载时使用./scull_unload

测试scull_pipe的时候开两个终端

一个终端 输入 cat /dev/scullpipe0,会阻塞(设备节点名为 scullpipe0 ~ scullpipe3)

另一个终端 输入 echo hello > /dev/scullpipe0(注意需要重定向符 >,否则 echo 只会把路径当作普通参数打印)

另一个窗口会有东西显示出来

解析:第一个终端。调用open打开设备,然后调用read,poll阻塞,释放信号量

第二个终端,获得信号量,再写数据,写完数据通知设备的文件描述符的读等待事件队列,让其进程继续执行读数据

便可以读到数据



申明:驱动程序及本博客一切内容归本博主所有,转载请注意来源,不得用于商业用途

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值