raid0

# Comment/uncomment the following line to disable/enable debugging
#DEBUG = y


# Add your debugging flag (or not) to CFLAGS
ifeq ($(DEBUG),y)
  DEBFLAGS = -O -g -DNSS_MD_DEBUG # "-O" is needed to expand inlines
else
  DEBFLAGS = -O2
endif

EXTRA_CFLAGS += $(DEBFLAGS)
EXTRA_CFLAGS += -I..

ifneq ($(KERNELRELEASE),)
# call from kernel build system

obj-m	:= nss_md.o
	
else

KERNELDIR ?= /lib/modules/$(shell uname -r)/build
PWD       := $(shell pwd)

default:
	$(MAKE) -C $(KERNELDIR) M=$(PWD) modules

endif



clean:
	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions

depend .depend dep:
	$(CC) $(EXTRA_CFLAGS) -M *.c > .depend


ifeq (.depend,$(wildcard .depend))
include .depend
endif
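A typical build/load sequence for the Makefile above (assuming the headers of the running kernel are installed; the module parameters defined further down in the driver are all optional):

	make
	sudo insmod nss_md.ko ndevices=1
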
typedef struct request_queue request_queue_t;  //this kernel does not provide this typedef, so add it here ourselves
struct member_disk
{
	struct block_device* bdev;
	struct member_disk * next;
};
struct nss_md_dev{
	char *name;
	unsigned char *data;
	short users;
	unsigned long  long size;
	unsigned long  array_sectors; 
	int level;
	int chunk_sectors;		//number of sectors in each chunk
	int raid_disks;
	struct list_head disks;
	short media_change;
	spinlock_t lock;
	struct request_queue* queue;
	struct gendisk *gd;
	struct timer_list timer; 	
	struct member_disk * mdisk;
	int status;					//marks whether the device is ready
};
typedef struct nss_md_dev nss_md_dev_t;
struct strip_zone
{
	sector_t zone_end;
	sector_t dev_start;
	int nb_dev;
};
typedef struct __Disk_T
{
	int major;
	int minor;
	int number;
	int raid_disks;
}Disk_T;

typedef struct array_info_s
{
	int level;
	int raid_disks;
	int chunk_size;
	unsigned long  sectors;
}array_info_t;
#define READY		0x0005
#define NOT_READY	0x0006
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/mutex.h>
#include <linux/math64.h>
#include "nss_md.h"
MODULE_LICENSE("Dual BSD/GPL");
static int nss_md_major=0;
module_param(nss_md_major,int,0);
static int hardsect_size = 512;
module_param(hardsect_size,int,0);
static int nsectors = 1024;
module_param(nsectors, int, 0);
static int ndevices = 1;
module_param(ndevices,int,0);
enum{
	RM_SIMPLE  = 0,
	RM_FULL    = 1,
	RM_NOQUEUE = 2,
};
static int request_mode = RM_SIMPLE;
module_param(request_mode,int,0);
#define NSS_MD_MINORS   16

#define KERNEL_SECTOR_SIZE 512
#define INVALIDATE_DELAY 	30*HZ

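/* custom ioctl command numbers understood by nss_md_ioctl(); user space must
 * pass the same numeric values (see the user-space sketch after the module code) */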
#define TEST_CMD 				0x000666
#define ADD_NEW_DISK 			0x000667
#define SET_ARRAY_INFO 			0x000668
#define GET_ARRAY_INFO			0x000669
#define GET_DISK_INFO			0x000670
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
static struct nss_md_dev *Devices = NULL;

//dev_t dev0,dev1,dev2;
//struct block_device * bdev0,*bdev1,*bdev2;
/*static void nss_md_transfer(struct nss_md_dev *dev,unsigned long sector,
			unsigned long nsect,char *buffer,int write)
{
	printk("nss_md_transfer begin\n");
	unsigned long offset =sector *KERNEL_SECTOR_SIZE;
	unsigned long nbytes =nsect*KERNEL_SECTOR_SIZE;
	if((offset+nbytes)>dev->size){
		printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n",offset,nbytes);
		return ;
	}
	if(write)
		memcpy(dev->data+offset,buffer,nbytes);
	else
		memcpy(buffer,dev->data+offset,nbytes);
	printk("nss_md_transfer finished\n");
}*/

/*static void nss_md_request(request_queue_t *q)
{
	printk("nss_md_request begin\n");
	struct request *req;
	req=blk_fetch_request(q);
	while(req){
		struct nss_md_dev *dev=req->rq_disk->private_data;
		if(!blk_fs_request(req)){
			printk(KERN_NOTICE "Skip non-fs request\n");
			blk_end_request_all(req,0);
			
			continue;
		}
//		nss_md_transfer(dev,blk_rq_pos(req),blk_rq_cur_sectors(req),
//				req->buffer,rq_data_dir(req));
		unsigned long offset =blk_rq_pos(req)<<9;		//byte offset of the request
		unsigned long nbytes=blk_rq_cur_bytes(req);		//bytes remaining in the current segment
		int err=0;
		if((offset+nbytes)>dev->size){
			printk("bad access:block=%lu,count=%u\n",
				blk_rq_pos(req),blk_rq_cur_sectors(req));
			err=-EIO;
			goto done;
		}
		if(rq_data_dir(req)==WRITE)
		{
			memcpy(dev->data+offset,req->buffer,nbytes);
			printk(" WRITE finished\n");
		}
		else
		{
			memcpy(req->buffer,dev->data+offset,nbytes);
			printk("READ finished\n");
		}
		done:
			if(!__blk_end_request_cur(req,err))
				req=blk_fetch_request(q);
	}
	printk("nss_md_request finished\n");
}*/

/*static int nss_md_xfer_bio(struct nss_md_dev *dev,struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	sector_t sector = bio->bi_sector;
	bio_for_each_segment(bvec,bio,i){
		char *buffer = __bio_kmap_atomic(bio,i,KM_USER0);
		nss_md_transfer(dev,sector,bio_cur_bytes(bio)>>9,
					buffer,bio_data_dir(bio)==WRITE);
		sector +=bio_cur_bytes(bio)>>9;
		__bio_kunmap_atomic(bio,KM_USER0);
	}
	return 0;
}*/

/*static int nss_md_xfer_request(struct nss_md_dev *dev,struct request *req)
{
	struct bio *bio;
	int nsect =0;
	__rq_for_each_bio(bio,req){
		nss_md_xfer_bio(dev,bio);
		nsect +=bio->bi_size/KERNEL_SECTOR_SIZE;
	}
	return nsect;
}*/

/*static void nss_md_full_request(request_queue_t *q)
{
	struct request *req;
	int sectors_xferred;
	struct nss_md_dev *dev=q->queuedata;
	while((req=blk_fetch_request(q))!=NULL){
		if(!blk_fs_request(req)){
			printk(KERN_NOTICE "Skip non-fs request\n");
			blk_end_request_all(req,0);
			continue;
		}
		sectors_xferred=nss_md_xfer_request(dev,req);
		if (!__blk_end_request_cur(req, 0)) {
			blk_start_request(req);
			blk_fetch_request(q);
		}	
	}
}*/

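/* nonzero when the whole bio fits inside a single chunk, so it can be
 * remapped to a member disk without being split */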
static inline int is_io_in_chunk_boundary(struct nss_md_dev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else{
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}

static int nss_md_make_request(request_queue_t *q,struct bio *bio)
{
	nss_md_dev_t *dev=q->queuedata;
	unsigned int chunk_sects;
	const int rw =bio_data_dir(bio);
	int cpu,tmp;
	struct member_disk* tmp_disk;
	
	sector_t  strip=0;
	unsigned int   disknum=0;
	sector_t chunk_num=0;
	unsigned int  chunk_offset=0;
	
	printk("nss_md_make_request begin\n");
	printk("bio->bi_sector:%llu\n",bio->bi_sector);
	printk("dev->status:%d\n",dev->status);
	if(dev->status==NOT_READY)
	{
		printk("status of device is not READY\n");
		bio_io_error(bio);
		return 0;
	}
	
	/*if(unlikely(bio_barrier(bio)))
	{
		bio_endio(bio,-EOPNOTSUPP);
		return 0;
	}*/
	
	cpu=part_stat_lock();
	part_stat_inc(cpu,&dev->gd->part0,ios[rw]);
	part_stat_add(cpu,&dev->gd->part0,sectors[rw],bio_sectors(bio));
	part_stat_unlock();
	chunk_sects=dev->chunk_sectors;
	printk("chunk_sects:%d\n",chunk_sects);
	if(unlikely(!is_io_in_chunk_boundary(dev,chunk_sects,bio)))
	{
		sector_t sector=bio->bi_sector;
		struct bio_pair *bp;
		if(bio->bi_vcnt!=1 || bio->bi_idx!=0)
			goto bad_map;
		if(likely(is_power_of_2(chunk_sects)))
			bp=bio_split(bio,chunk_sects-(sector & (chunk_sects-1)));
		else
			bp=bio_split(bio,chunk_sects-
				sector_div(sector,chunk_sects));
		if(nss_md_make_request(q,&bp->bio1))
			generic_make_request(&bp->bio1);
		if(nss_md_make_request(q,&bp->bio2))
			generic_make_request(&bp->bio2);
		bio_pair_release(bp);
		return 0;
	}
//	sector_t tmp=bio->bi_sector;
//	chunk_num=div_u64(tmp,128);
//	chunk_num=tmp>>7;
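	/*
	 * RAID0 mapping, worked example (assuming chunk_sects = 128 and
	 * raid_disks = 3, the values used by the commented-out code above):
	 *   bi_sector = 300 -> chunk_num = 300/128 = 2, chunk_offset = 44
	 *   strip = 2/3 = 0, disknum = 2
	 * so the bio is redirected to member disk 2 at sector 0*128 + 44 = 44.
	 * Note that add_new_disk() prepends to the mdisk list, so the loop
	 * below walks the members in reverse order of addition.
	 */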
	chunk_num=div_u64_rem(bio->bi_sector,chunk_sects,&chunk_offset);
	printk("bio->bi_sector:%llu,chunk_offset:%u,chunk_num:%llu\n",bio->bi_sector,chunk_offset,chunk_num);
//	strip=div_u64(chunk_num,3);
	strip=div_u64_rem(chunk_num,dev->raid_disks,&disknum);
	printk("chunk_offset:%u---strip:%llu---disknum:%u\n",chunk_offset,strip,disknum);
	for(tmp_disk=dev->mdisk,tmp=0;tmp<disknum;tmp++)
	{
		tmp_disk=tmp_disk->next;
	}
	bio->bi_bdev=tmp_disk->bdev;
	bio->bi_sector=(strip*chunk_sects)+chunk_offset;
/*	if(disknum==0)
	{
		bio->bi_bdev=bdev0;
//		bio->bi_sector=(strip<<7)+chunk_offset;
	}
	else if(disknum==1)
	{
		bio->bi_bdev=bdev1;
//		bio->bi_sector=(strip<<7)+chunk_offset;
	}
	else
	{
		bio->bi_bdev=bdev2;
//		bio->bi_sector=(strip<<7)+chunk_offset;

	}*/
//	bio->bi_bdev=bdev1;
//	generic_make_request(bio);
//	printk("nss_md_make_request finished\n");
	return 1;
bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static int nss_md_open(struct block_device *bdev,fmode_t  mode)
{
	struct nss_md_dev* dev=bdev->bd_disk->private_data;
	printk("open\n");
	printk("open2\n");
	del_timer_sync(&dev->timer);
	printk("open3\n");
	printk("open4\n");
	spin_lock(&dev->lock);
	printk("open5\n");
	if(!dev->users)
	{
		printk("open6\n");
		check_disk_change(bdev);
		printk("open7\n");
	}
	dev->users++;
	spin_unlock(&dev->lock);
	printk("open8\n");
	return 0;
}

static int nss_md_release(struct gendisk *gd ,fmode_t mode)
{
	struct nss_md_dev *dev=gd->private_data;
	spin_lock(&dev->lock);
	dev->users--;
	if(!dev->users){
		dev->timer.expires=jiffies+INVALIDATE_DELAY;
		add_timer(&dev->timer);
	}
	spin_unlock(&dev->lock);
	return 0;
}

int nss_md_media_changed(struct gendisk *gd)
{
	struct nss_md_dev *dev=gd->private_data;
	return dev->media_change;
}

int nss_md_revalidate(struct gendisk *gd)
{
	struct nss_md_dev *dev =gd->private_data;
	if(dev->media_change){
		dev->media_change=0;
		memset(dev->data,0,dev->size);
	}
	return 0;
}

void nss_md_invalidate(unsigned long ldev)
{
	struct nss_md_dev *dev=(struct nss_md_dev *)ldev;
	spin_lock(&dev->lock);
	if(dev->users ||!dev->data)
		printk(KERN_WARNING "nss_md:timer sanity check failed\n");
	else
		dev->media_change=1;
	spin_unlock(&dev->lock);
}
static int set_array_info(nss_md_dev_t* dev, array_info_t*array)
{
	dev->level=array->level;
	dev->chunk_sectors=array->chunk_size>>9;
	dev->raid_disks=array->raid_disks;
	dev->array_sectors=array->sectors;
	return 0;
}
static int add_new_disk(nss_md_dev_t* dev, Disk_T* disk)
{
	dev_t kdev;
	struct block_device * bdev;
	struct member_disk* memdisk;
	printk("come to add_new_disk()\n");
	kdev=MKDEV(disk->major,disk->minor);
	printk("after mkdev\n");
	if(disk->major!=MAJOR(kdev) || disk->minor!=MINOR(kdev))
	{
		printk("overflow\n");
		return -EOVERFLOW;
	}
	bdev=open_by_devnum(kdev,FMODE_READ|FMODE_WRITE);
	if(IS_ERR(bdev))
		return PTR_ERR(bdev);
	memdisk=kmalloc(sizeof(struct member_disk),GFP_KERNEL);
	memdisk->bdev=bdev;
	memdisk->next=(*dev).mdisk;
	(*dev).mdisk=memdisk;
	printk("after operation of list\n");
	if(disk->number==disk->raid_disks)
	{
		set_capacity(dev->gd,dev->array_sectors);
		(*dev).status=READY;
		printk("status set to READY\n");
	}
	printk("add new disk finished\n");
	return 0;
}
int nss_md_ioctl(struct block_device *bdev,fmode_t mode,
			unsigned int cmd,unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct nss_md_dev *dev = bdev->bd_disk->private_data;
	array_info_t array;
	struct member_disk * tmp_mdisk;
	Disk_T disk;
	switch(cmd){
/*		case HDIO_GETGEO:
			printk("received cmd :HDIO_GETGEO\n");
			size =dev->size*(hardsect_size/KERNEL_SECTOR_SIZE);
			geo.cylinders=(size &~0x3f)>>6;
			geo.heads =4;
			geo.sectors =16;
			geo.start=4;
			if(copy_to_user((void __user *)arg,&geo,sizeof(geo)))
				return -EFAULT;
			return 0;*/
		case TEST_CMD:
			printk("cmd TEST has been received~\n");
			break;
		case SET_ARRAY_INFO:
			printk("cmd SET_ARRAY_INFO has been received~\n");
			if(copy_from_user(&array,argp,sizeof(array)))
				printk("copy from user error\n");
			else
			{
				printk("level:%d,raid_disks:%d,chunk-size:%d,size:%lu\n",array.level,array.raid_disks,array.chunk_size,array.sectors);
				set_array_info(dev,&array);
			}
			break;
		case ADD_NEW_DISK:
			printk("cmd ADD_NEW_DISK has been received~\n");
			if(copy_from_user(&disk,argp,sizeof(disk)))
				printk("copy from user error\n");
			else
			{
				printk("major:%d,minor:%d,number:%d,raid_disks:%d\n",disk.major,disk.minor,disk.number,disk.raid_disks);
				if(disk.number>disk.raid_disks)
				{
					printk("the number of disks is more than raid_disks\n");
					break;
				}
				else
				{
					add_new_disk(dev,&disk);
					for(tmp_mdisk=dev->mdisk;tmp_mdisk!=NULL;)
					{
						printk("dev:%s\n",tmp_mdisk->bdev->bd_disk->disk_name);	
						tmp_mdisk=tmp_mdisk->next;
					}
				}
			}
			break;
//		case GET_ARRAY_INFO:
//			break;
//		case GET_DISK_INFO:
//			break;
		default:
			return -ENOTTY;		/* not a recognized command */
	}
	return 0;
}

/*
* the device operations structure.
*/
static struct block_device_operations nss_md_ops={
	.owner			=THIS_MODULE,
	.open			=nss_md_open,
	.release			=nss_md_release,
	.media_changed	=nss_md_media_changed,
	.revalidate_disk	=nss_md_revalidate,
	.ioctl			=nss_md_ioctl
};

/*static int  get_diskinfo(void)
{
	dev0=MKDEV(8,16);
	if(8!=MAJOR(dev0) || 16!=MINOR(dev0))
		return -EOVERFLOW;
	bdev0=open_by_devnum(dev0,FMODE_READ|FMODE_WRITE);
	printk("disk0 name:%s\n",bdev0->bd_disk->disk_name);

	dev1=MKDEV(8,32);
	if(8!=MAJOR(dev1) || 32!=MINOR(dev1))
		return -EOVERFLOW;
	bdev1=open_by_devnum(dev1,FMODE_READ|FMODE_WRITE);
	printk("disk1 name:%s\n",bdev1->bd_disk->disk_name);

	dev2=MKDEV(8,48);
	if(8!=MAJOR(dev2) || 48!=MINOR(dev2))
		return -EOVERFLOW;
	bdev2=open_by_devnum(dev2,FMODE_READ|FMODE_WRITE);
	printk("disk2 name:%s\n",bdev2->bd_disk->disk_name);
	return 0;
}*/
static void setup_device(struct nss_md_dev *dev,int which)
{
	printk(KERN_ALERT "hello2\n");
	
//	get_diskinfo();
	memset(dev,0,sizeof(struct nss_md_dev));
//	dev->size=nsectors*hardsect_size;					//set the device size from the module parameters
	dev->size=8ULL<<30;					//8 GiB; a plain 8<<30 would overflow a 32-bit int
//	dev->data =vmalloc(dev->size);
	printk(KERN_ALERT "hello3\n");
//	if(dev->data ==NULL){
//		printk(KERN_NOTICE "vmalloc failure.\n");
//		return;
//	}
	printk(KERN_ALERT "hello4\n");
	spin_lock_init(&dev->lock);
	printk(KERN_ALERT "hello5\n");
	init_timer(&dev->timer);
	printk(KERN_ALERT "hello6\n");
	dev->timer.data=(unsigned long)dev;
	dev->timer.function=nss_md_invalidate;
/*	switch(request_mode){
		case RM_NOQUEUE:*/
			dev->queue=blk_alloc_queue(GFP_KERNEL);
			if(dev->queue==NULL)
				goto out_vfree;
			blk_queue_make_request(dev->queue,nss_md_make_request);
//			break;
/*		case RM_FULL:
			dev->queue=blk_init_queue(nss_md_full_request,&dev->lock);
			if(dev->queue==NULL)
				goto out_vfree;
			break;
		case RM_SIMPLE:
			printk(KERN_ALERT "hello7\n");
			dev->queue=blk_init_queue(nss_md_request,&dev->lock);
			printk(KERN_ALERT "hello8\n");
			if(dev->queue==NULL)
				goto out_vfree;*/
//			break;
//		default:
//			printk(KERN_NOTICE "Bad request mode %d,using simple\n",request_mode);
//		}
		printk(KERN_ALERT "hello9\n");
		blk_queue_logical_block_size(dev->queue,hardsect_size);
		printk(KERN_ALERT "hello10\n");
		dev->status=NOT_READY;
		printk("dev->status:%d\n",dev->status);
		dev->queue->queuedata=dev;
		dev->gd=alloc_disk(NSS_MD_MINORS);
		printk(KERN_ALERT "hello11\n");
		if(!dev->gd){
			printk(KERN_NOTICE "alloc_disk failure\n");
			goto out_vfree;
		}
		dev->gd->major=nss_md_major;
		dev->gd->first_minor=which*NSS_MD_MINORS;
		dev->gd->fops=&nss_md_ops;
		dev->gd->queue =dev->queue;
		dev->gd->private_data=dev;
		printk(KERN_ALERT "hello12\n");
		snprintf(dev->gd->disk_name,32,"nss_md%d",which);
		printk(KERN_ALERT "hello13\n");
//		set_capacity(dev->gd,nsectors*(hardsect_size/KERNEL_SECTOR_SIZE));
//		set_capacity(dev->gd,8<<21);
		dev->gd->flags |= GENHD_FL_EXT_DEVT;
		printk(KERN_ALERT "hello14\n");
		add_disk(dev->gd);
		printk(KERN_ALERT "hello15\n");
		return;
		
	out_vfree:
		if(dev->data)
			vfree(dev->data);
}

static int __init nss_md_init(void)
{
	int i;
	printk(KERN_ALERT "nss_md_init begin\n");
	nss_md_major=register_blkdev(nss_md_major,"nss_md");
	if(nss_md_major<=0){
		printk(KERN_WARNING "nss_md:unable to get major number\n");
		return -EBUSY;
	}
	Devices=kmalloc(ndevices *sizeof(struct nss_md_dev),GFP_KERNEL);
	if(Devices==NULL)
		goto out_unregister;
	for(i=0;i<ndevices;i++)
		setup_device(Devices+i,i);
	printk(KERN_ALERT "nss_md_init finished\n");
	return 0;
out_unregister:
	printk(KERN_ALERT "out_unregister\n");
	unregister_blkdev(nss_md_major,"nss_md");
	return -ENOMEM;
	
}
static void __exit nss_md_exit(void)
{
	int i;
	printk(KERN_ALERT  "goodbye\n");
	for(i=0;i<ndevices;i++){
		struct nss_md_dev *dev=Devices +i;
		printk(KERN_ALERT  "goodbye2\n");
		del_timer_sync(&dev->timer);
		printk(KERN_ALERT  "goodbye3\n");
		if(dev->gd)
		{
			printk(KERN_ALERT  "goodbye4\n");
			del_gendisk(dev->gd);
			printk(KERN_ALERT  "goodbye5\n");
			put_disk(dev->gd);
			printk(KERN_ALERT  "goodbye6\n");
		}
		if(dev->queue)
		{
			printk(KERN_ALERT  "goodbye7\n");
			blk_cleanup_queue(dev->queue);
			printk(KERN_ALERT  "goodbye8\n");
				
		}
		if(dev->data)
		{
			printk(KERN_ALERT  "goodbye9\n");
			vfree(dev->data);
			printk(KERN_ALERT  "goodbye10\n");
		}
	}
	printk(KERN_ALERT  "goodbye11\n");
	unregister_blkdev(nss_md_major,"nss_md");
	printk(KERN_ALERT  "goodbye12\n");
	kfree(Devices);
}
module_init(nss_md_init);
module_exit(nss_md_exit);
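
For completeness, here is a minimal user-space sketch of how such an array could be assembled through the custom ioctls defined above: one SET_ARRAY_INFO call, then one ADD_NEW_DISK per member, after which the driver marks the device READY. The device node /dev/nss_md0 and the member major/minor numbers (8:16, 8:32, 8:48, as in the commented-out get_diskinfo()) are assumptions for illustration; the structs simply mirror array_info_t and Disk_T above.

/* assemble_nss_md.c - hypothetical user-space helper, not part of the module */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define ADD_NEW_DISK	0x000667	/* must match the driver's values */
#define SET_ARRAY_INFO	0x000668

typedef struct array_info_s {		/* mirrors the kernel-side array_info_t */
	int level;
	int raid_disks;
	int chunk_size;
	unsigned long sectors;
} array_info_t;

typedef struct __Disk_T {		/* mirrors the kernel-side Disk_T */
	int major;
	int minor;
	int number;
	int raid_disks;
} Disk_T;

int main(void)
{
	array_info_t array = {
		.level      = 0,
		.raid_disks = 3,
		.chunk_size = 64 * 1024,	/* bytes -> 128 sectors per chunk */
		.sectors    = 6291456,		/* 3 GiB in 512-byte sectors, illustrative */
	};
	Disk_T disk;
	int i, fd;

	fd = open("/dev/nss_md0", O_RDWR);	/* node name assumed */
	if (fd < 0) {
		perror("open /dev/nss_md0");
		return 1;
	}
	if (ioctl(fd, SET_ARRAY_INFO, &array) < 0)
		perror("SET_ARRAY_INFO");

	/* member disks 8:16, 8:32, 8:48 are purely illustrative */
	for (i = 1; i <= array.raid_disks; i++) {
		disk.major      = 8;
		disk.minor      = 16 * i;
		disk.number     = i;	/* driver sets READY when number == raid_disks */
		disk.raid_disks = array.raid_disks;
		if (ioctl(fd, ADD_NEW_DISK, &disk) < 0)
			perror("ADD_NEW_DISK");
	}
	close(fd);
	return 0;
}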

Reposted from: https://my.oschina.net/taojianrong/blog/145870
