因此,偶也写了这样一个例程,可以动态的将内核空间的物理地址和大小传给用户空间。
整个内核模块,在模块插入时建立proc文件,分配内存。卸载模块的时候将用户空间写入的内容打印出来。
以下是内核模块的代码和用户空间的测试代码。
and pass the physical address to userspace through proc file.*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#define PROC_MEMSHARE_DIR "memshare"
#define PROC_MEMSHARE_PHYADDR "phymem_addr"
#define PROC_MEMSHARE_SIZE "phymem_size"
/*alloc one page. 4096 bytes*/
#define PAGE_ORDER 0
/*this value can get from PAGE_ORDER*/
#define PAGES_NUMBER 1
/* /proc/memshare directory handle; the two entries below are created under it. */
struct proc_dir_entry *proc_memshare_dir ;
/* Kernel virtual address of the shared page block (0 until allocated). */
unsigned long kernel_memaddr = 0;
/* Size in bytes of the shared region (0 until allocated). */
unsigned long kernel_memsize= 0;
/* /proc/memshare/phymem_addr read handler: report the physical address
   of the shared block as zero-padded hex. ('/n' corruption fixed to '\n'.) */
static int proc_read_phymem_addr(char *page, char **start, off_t off, int count)
{
    return sprintf(page, "%08lx\n", __pa(kernel_memaddr));
}
/* /proc/memshare/phymem_size read handler: report the region size in
   decimal bytes. ('/n' corruption fixed to '\n'.) */
static int proc_read_phymem_size(char *page, char **start, off_t off, int count)
{
    return sprintf(page, "%lu\n", kernel_memsize);
}
/* Module init: create /proc/memshare/{phymem_addr,phymem_size} and
   allocate the page that will be shared with userspace via /dev/mem. */
static int __init init(void)
{
    /* Build proc dir "memshare" and the two info entries inside it. */
    proc_memshare_dir = proc_mkdir(PROC_MEMSHARE_DIR, NULL);
    if (!proc_memshare_dir)
        return -ENOMEM;
    create_proc_info_entry(PROC_MEMSHARE_PHYADDR, 0, proc_memshare_dir, proc_read_phymem_addr);
    create_proc_info_entry(PROC_MEMSHARE_SIZE, 0, proc_memshare_dir, proc_read_phymem_size);

    /* Allocate 2^PAGE_ORDER pages (one page here). */
    kernel_memaddr = __get_free_pages(GFP_KERNEL, PAGE_ORDER);
    if (!kernel_memaddr) {
        printk("Allocate memory failure!\n");
        /* Fail the load instead of returning 0: otherwise fini() would
           later print/free a NULL block at rmmod time. */
        remove_proc_entry(PROC_MEMSHARE_PHYADDR, proc_memshare_dir);
        remove_proc_entry(PROC_MEMSHARE_SIZE, proc_memshare_dir);
        remove_proc_entry(PROC_MEMSHARE_DIR, NULL);
        return -ENOMEM;
    }
    /* Reserve the page so remapping it through /dev/mem is permitted
       and it is not reclaimed while userspace has it mapped. */
    SetPageReserved(virt_to_page(kernel_memaddr));
    kernel_memsize = PAGES_NUMBER * PAGE_SIZE;
    printk("Allocate memory success!. The phy mem addr=%08lx, size=%lu\n",
           __pa(kernel_memaddr), kernel_memsize);
    return 0;
}
/* Module exit: dump what userspace wrote, free the shared page, and
   tear down the proc entries. */
static void __exit fini(void)
{
    if (kernel_memaddr) {   /* guard against a failed/partial init */
        printk("The content written by user is: %s\n", (unsigned char *) kernel_memaddr);
        ClearPageReserved(virt_to_page(kernel_memaddr));
        free_pages(kernel_memaddr, PAGE_ORDER);
    }
    remove_proc_entry(PROC_MEMSHARE_PHYADDR, proc_memshare_dir);
    remove_proc_entry(PROC_MEMSHARE_SIZE, proc_memshare_dir);
    remove_proc_entry(PROC_MEMSHARE_DIR, NULL);
}
/* Register module entry/exit points and metadata. */
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Godbach ( nylzhaowei@163.com)");
MODULE_DESCRIPTION("Kernel memory share module.");
用户空间的测试代码:
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mman.h>
/* Userspace side of the memshare demo: read the physical address and
   size the kernel module published under /proc/memshare, mmap that
   physical range through /dev/mem, and write argv[1] into it.
   NOTE(review): this file uses printf but does not include <stdio.h>;
   the include should be added at the top of the file. */
int main(int argc, char* argv[])
{
    unsigned long phymem_addr, phymem_size;
    char *map_addr;
    char s[256];
    ssize_t n;
    int fd;

    if (argc != 2) {
        printf("Usage: %s string\n", argv[0]);
        return 0;
    }

    /* Get the physical address of the memory allocated in the kernel. */
    fd = open("/proc/memshare/phymem_addr", O_RDONLY);
    if (fd < 0) {
        printf("cannot open file /proc/memshare/phymem_addr\n");
        return 0;
    }
    n = read(fd, s, sizeof(s) - 1);
    s[n > 0 ? n : 0] = '\0';            /* read() does not NUL-terminate */
    sscanf(s, "%lx", &phymem_addr);
    close(fd);

    /* Get the size of the allocated region. */
    fd = open("/proc/memshare/phymem_size", O_RDONLY);
    if (fd < 0) {
        printf("cannot open file /proc/memshare/phymem_size\n");
        return 0;
    }
    n = read(fd, s, sizeof(s) - 1);
    s[n > 0 ? n : 0] = '\0';
    sscanf(s, "%lu", &phymem_size);
    close(fd);
    printf("phymem_addr=%lx, phymem_size=%lu\n", phymem_addr, phymem_size);

    /* Map the physical range into this process. */
    int map_fd = open("/dev/mem", O_RDWR);
    if (map_fd < 0) {
        printf("cannot open file /dev/mem\n");
        return 0;
    }
    map_addr = mmap(0, phymem_size, PROT_READ|PROT_WRITE, MAP_SHARED, map_fd, phymem_addr);
    if (map_addr == MAP_FAILED) {       /* mmap failure was not checked before */
        printf("mmap /dev/mem failed\n");
        close(map_fd);
        return 0;
    }
    /* Bound the copy: strcpy of an oversized argv[1] would overflow the page. */
    if (strlen(argv[1]) >= phymem_size)
        printf("input longer than shared region (%lu bytes), not written\n", phymem_size);
    else
        strcpy(map_addr, argv[1]);
    munmap(map_addr, phymem_size);
    close(map_fd);
    return 0;
}
测试的内核是2.6.25.以下是执行结果。
debian:/home/km/memshare# ./memshare_user 'hello,world!'
phymem_addr=e64e000, phymem_size=4096
debian:/home/km/memshare# cat /proc/memshare/phymem_addr
0e64e000
debian:/home/km/memshare# cat /proc/memshare/phymem_size
4096
debian:/home/km/memshare# rmmod memshare_kernel
debian:/home/km/memshare# tail /var/log/messages
Sep 27 18:14:24 debian kernel: [50527.567931] Allocate memory success!. The phy mem addr=0e64e000, size=4096
Sep 27 18:15:31 debian kernel: [50592.570986] The content written by user is: hello,world!
仓促之间,有些地方处理的还是比较简单。希望高手多多指正。需要了解这方面实现的可以参考一下,共同学习。
=============================================================================================================================
通过共享内存(文件)的方式可行
/*user.c*/
#include <stdio.h>
#include <stdlib.h>     /* atol */
#include <string.h>     /* memcpy */
#include <strings.h>    /* bzero */
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#define PAGES 512
#define MEM_WIDTH 1500

/* Ring-buffer header stored in slot 0 of the shared block;
   slots 1..length-1 each hold one MEM_PACKET. */
struct MEM_DATA
{
    //int key;
    unsigned short width;   /* slot width in bytes */
    unsigned short length;  /* number of slots in the ring */
    //unsigned short wtimes; /* writer count, reserved for multi-writer use */
    //unsigned short rtimes; /* reader count, reserved for multi-reader use */
    unsigned short wi;      /* write index */
    unsigned short ri;      /* read index */
} * mem_data;

/* One fixed-size slot: 4-byte length followed by the payload.
   len == 0 marks the slot empty. */
struct MEM_PACKET
{
    unsigned int len;
    unsigned char packetp[MEM_WIDTH - 4];/*sizeof(unsigned int) == 4*/
};

/* Pop the next packet from the shared ring rooted at aMem: copy its
   payload into aBuf, store the payload length in *size, free the slot
   and advance the read index.  Returns the slot number consumed, or 0
   when the whole ring is empty (*size is left untouched then). */
int get_mem(char *aMem, char *aBuf, unsigned int *size)
{
    struct MEM_PACKET *pkt;
    int slot, probe, stride, nslots;

    mem_data = (void *)aMem;
    stride = mem_data[0].width;
    nslots = mem_data[0].length;
    slot = mem_data[0].ri;

    /* Start at the read index; worst case scans every slot once. */
    for (probe = 0; probe < nslots; probe++) {
        if (probe > 0) {
            slot++;
            if (slot >= nslots)
                slot = 1;       /* slot 0 is the header: wrap past it */
        }
        pkt = (struct MEM_PACKET *)(aMem + stride * slot);
        if (pkt->len == 0)      /* empty slot, keep scanning */
            continue;
        memcpy(aBuf, pkt->packetp, pkt->len);
        *size = pkt->len;
        pkt->len = 0;           /* release the slot back to the writer */
        mem_data[0].ri = (slot + 1 >= nslots) ? 1 : slot + 1;
        return slot;
    }
    return 0;
}
/* Reader side of the shared-ring demo: fetch the block's physical
   address from /proc/nf_addr, map it via /dev/mem, then drain packets
   with get_mem() until the ring is empty. */
int main()
{
    char *shm_base;
    char receive[1500];
    int i, j = 0;
    int fd, fd_procaddr;
    unsigned int size;
    char addr[32];              /* was [9]: too small and never NUL-terminated */
    unsigned long ADDR;
    ssize_t n;

    /* Open device 'mem' as the window onto physical RAM. */
    fd = open("/dev/mem", O_RDWR);
    if (fd < 0) {
        perror("open /dev/mem");
        return 1;
    }
    fd_procaddr = open("/proc/nf_addr", O_RDONLY);
    if (fd_procaddr < 0) {
        perror("open /proc/nf_addr");
        close(fd);
        return 1;
    }
    n = read(fd_procaddr, addr, sizeof(addr) - 1);
    addr[n > 0 ? n : 0] = '\0'; /* atol() needs a terminated string */
    ADDR = atol(addr);
    close(fd_procaddr);
    printf("%lu[%8lx]\n", ADDR, ADDR);  /* %lu: ADDR is unsigned long */

    /* Map the kernel block into user space. */
    shm_base = mmap(0, PAGES * 4 * 1024, PROT_READ|PROT_WRITE, MAP_SHARED, fd, ADDR);
    perror("mmap");
    if (shm_base == MAP_FAILED) {
        close(fd);
        return 1;
    }
    while (1) {
        bzero(receive, 1500);
        i = get_mem(shm_base, receive, &size);
        if (i != 0) {
            j++;
            printf("%d:%s[size = %u]\n", j, receive, size);
        } else {
            printf("there have no data\n");
            munmap(shm_base, PAGES * 4 * 1024);
            close(fd);
            break;
        }
    }
    /* The original spins forever here, presumably to keep the process
       alive for inspection — preserved deliberately. */
    while (1)
        ;
}
Top
11 楼IControlWorld(大師)回复于 2005-11-07 17:40:05 得分 0
/*corem.c for zero-copy*/
#define __KERNEL__
#define MODULE
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wrapper.h>
#include <asm/page.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
#endif
#define PAGES_ORDER 9
#define PAGES 512
#define MEM_WIDTH 1500
/* Ring-buffer header stored in slot 0 of the shared block;
   slots 1..length-1 each hold one MEM_PACKET. */
struct MEM_DATA
{
//int key;
unsigned short width;/* slot width in bytes */
unsigned short length;/* number of slots in the ring */
//unsigned short wtimes;/* writer count, reserved for future multi-writer use */
//unsigned short rtimes;/* reader count, reserved for future multi-reader use */
unsigned short wi;/* write index */
unsigned short ri;/* read index */
} *mem_data;
/* One fixed-size slot: 4-byte length then payload; len == 0 means empty. */
struct MEM_PACKET
{
unsigned int len;
unsigned char packetp[MEM_WIDTH - 4];/*sizeof(unsigned int) == 4*/
};
/* Kernel virtual address of the block returned by __get_free_pages(). */
unsigned long su1_2;
/* Undo init_mem(): drop the reserved flag on every page of the shared
   block, then return the pages to the allocator. */
void del_mem()
{
    char *page_addr = (char *)su1_2;
    int n;

    for (n = 0; n < PAGES; n++, page_addr += PAGE_SIZE)
        mem_map_unreserve(virt_to_page(page_addr));
    free_pages(su1_2, PAGES_ORDER);
}
/* Allocate and initialize the shared ring: 2^PAGES_ORDER pages, slot 0
   holds the MEM_DATA header, slots 1..length-1 are packet slots. */
void init_mem()
{
    int i;
    int pages = 0;
    char *addr;
    char *buf;
    struct MEM_PACKET *curr_pack;

    su1_2 = __get_free_pages(GFP_KERNEL, PAGES_ORDER);
    if (!su1_2) {                       /* allocation failure was not checked */
        printk("init_mem: __get_free_pages failed\n");
        return;
    }
    printk("[%lx]\n", su1_2);           /* %lx: su1_2 is unsigned long */

    /* Reserve every page so userspace may map them through /dev/mem. */
    addr = (char *)su1_2;
    while (pages <= PAGES - 1) {
        mem_map_reserve(virt_to_page(addr));
        addr = addr + PAGE_SIZE;
        pages++;
    }

    mem_data = (struct MEM_DATA *)su1_2;
    mem_data[0].ri = 1;
    mem_data[0].wi = 1;
    mem_data[0].length = PAGES * 4 * 1024 / MEM_WIDTH;
    mem_data[0].width = MEM_WIDTH;

    /* Mark every usable slot empty.  Was `i <= length`, which also wrote
       a header one slot past the range the ring indices can ever reach. */
    for (i = 1; i < mem_data[0].length; i++) {
        buf = (void *)((char *)su1_2 + MEM_WIDTH * i);
        curr_pack = (struct MEM_PACKET *)buf;
        curr_pack->len = 0;
    }
}
/* Push one packet of pack_size bytes into the shared ring starting at
   the write index.  Returns the slot used, or 0 when the ring is full
   (or the payload does not fit a slot). */
int put_mem(char *aBuf, unsigned int pack_size)
{
    register int s, i, width, length, mem_i;
    char *buf;
    struct MEM_PACKET *curr_pack;

    s = 0;
    mem_data = (struct MEM_DATA *)su1_2;
    width = mem_data[0].width;
    length = mem_data[0].length;

    /* Reject payloads that would overflow a slot's data area. */
    if (pack_size > sizeof(curr_pack->packetp))
        return 0;

    mem_i = mem_data[0].wi;
    buf = (void *)((char *)su1_2 + width * mem_i);
    for (i = 1; i < length; i++) {
        curr_pack = (struct MEM_PACKET *)buf;
        if (curr_pack->len == 0) {      /* empty slot found: claim it */
            memcpy(curr_pack->packetp, aBuf, pack_size);
            curr_pack->len = pack_size; /* stray ';;' removed */
            s = mem_i;
            mem_i++;
            if (mem_i >= length)
                mem_i = 1;              /* slot 0 is the header: wrap past it */
            mem_data[0].wi = mem_i;
            break;
        }
        mem_i++;
        if (mem_i >= length) {
            mem_i = 1;
            buf = (void *)((char *)su1_2 + width);
        }
        else buf = (char *)su1_2 + width * mem_i;
    }
    if (i >= length)    /* scanned the whole ring without a free slot */
        s = 0;
    return s;
}
/* /proc/nf_addr read handler: report the physical address of the shared
   block as a decimal string. */
int read_procaddr(char *buf, char **start, off_t offset, int count, int *eof, void *data)
{
    /* %lu: __pa() yields unsigned long (was %u — format mismatch). */
    int len = sprintf(buf, "%lu\n", __pa(su1_2));
    *eof = 1;
    return len;     /* actual byte count, not a hard-coded 9 */
}
/* Module init: build the shared ring, pre-load a few demo packets for
   the userspace reader, and publish the block's physical address.
   (Dropped `put_pkt2mem_n = 0;` — that variable is declared nowhere.) */
int init_module(void)
{
    init_mem();
    put_mem("data1dfadfaserty", 16);
    put_mem("data2zcvbnm", 11);
    put_mem("data39876543210poiuyt", 21);
    create_proc_read_entry("nf_addr", 0, NULL, read_procaddr, NULL);
    return 0;
}
/* Module exit: free the shared pages and remove the proc entry.
   (Stray forum text "Top" fused onto the closing brace removed.) */
void cleanup_module(void)
{
    del_mem();
    remove_proc_entry("nf_addr", NULL);
}
12 楼IControlWorld(大師)回复于 2005-11-07 17:42:42 得分 0
上面是两部分代码,分别是用户空间和内核空间程序的代码,只拷一些重要的,编译一下,这个两个程序运行在不同的空间就可以通信,好好研究一下
===================================================================================================
操,直接操作/dev/kmem吧
QQ:28286880
======================================================================================================
3.2.2 netlink 套接字
在 Linux 2.4 版以后版本的内核中,几乎全部的中断过程与用户态进程的通信都是使用 netlink 套接字实现的,同时还使用 netlink 实现了 ip queue 工具,但 ip queue 的使用有其局限性,不能自由地用于各种中断过程。内核的帮助文档和其他一些 Linux 相关文章都没有对 netlink 套接字在中断过程和用户空间通信的应用上作详细的说明,使得很多用户对此只有一个模糊的概念。
netlink 套接字的通信依据是一个对应于进程的标识,一般定为该进程的 ID。当通信的一端处于中断过程时,该标识为 0。当使用 netlink 套接字进行通信,通信的双方都是用户态进程,则使用方法类似于消息队列。但通信双方有一端是中断过程,使用方法则不同。netlink 套接字的最大特点是对中断过程的支持,它在内核空间接收用户空间数据时不再需要用户自行启动一个内核线程,而是通过另一个软中断调用用户事先指定的接收函数。工作原理如图【3】。
图【3】
很明显,这里使用了软中断而不是内核线程来接收数据,这样就可以保证数据接收的实时性。
当 netlink 套接字用于内核空间与用户空间的通信时,在用户空间的创建方法和一般套接字使用类似,但内核空间的创建方法则不同。图【4】是 netlink 套接字实现此类通信时创建的过程。
图【4】
以下举一个 netlink 套接字的应用示例。示例实现了从 netfilter 的 NF_IP_PRE_ROUTING 点截获的 ICMP 数据报,在将数据报的相关信息传递到一个用户态进程,由用户态进程将信息打印在终端上。源码在文件 imp2.tar.gz中。内核模块代码(分段详解):
(一)模块初始化与卸载
/* In-kernel netlink socket used to talk to the userspace listener. */
static struct sock *nlfd;
/* PID of the registered userspace process (0 = none), guarded by lock. */
struct
{
__u32 pid;
rwlock_t lock;
}user_proc;
/* Hook descriptor: run get_icmp() at netfilter's NF_IP_PRE_ROUTING point. */
static struct nf_hook_ops imp2_ops =
{
.hook = get_icmp, /* netfilter hook function */
.pf = PF_INET,
.hooknum = NF_IP_PRE_ROUTING,
.priority = NF_IP_PRI_FILTER -1,
};
/* Module init: create the in-kernel netlink socket on our private
   protocol NL_IMP2 (kernel_receive() handles userspace messages), then
   hook get_icmp() into netfilter's NF_IP_PRE_ROUTING point. */
static int __init init(void)
{
    int ret;

    rwlock_init(&user_proc.lock);
    nlfd = netlink_kernel_create(NL_IMP2, kernel_receive);
    if (!nlfd) {
        printk("can not create a netlink socket\n");
        return -1;
    }
    ret = nf_register_hook(&imp2_ops);
    if (ret < 0) {
        /* Don't leak the netlink socket when hook registration fails. */
        sock_release(nlfd->socket);
        nlfd = NULL;
    }
    return ret;
}
/* Module exit.  Unregister the netfilter hook FIRST so no CPU can still
   enter send_to_user() (which uses nlfd) while the socket is torn down;
   the original released the socket before unhooking — a use-after-free
   window on SMP. */
static void __exit fini(void)
{
    nf_unregister_hook(&imp2_ops);
    if (nlfd)
    {
        sock_release(nlfd->socket);
    }
}
module_init(init);
module_exit(fini);
其实片断(一)的工作很简单,模块加载阶段先在内核空间创建一个 netlink 套接字,再将一个函数挂接在 netfilter 框架的 NF_IP_PRE_ROUTING 钩子点上。卸载时释放套接字所占的资源并注销之前在 netfilter 上挂接的函数。
(二)接收用户空间的数据
DECLARE_MUTEX(receive_sem);
01: static void kernel_receive(struct sock *sk, int len)
02: {
03: do
04: {
05: struct sk_buff *skb;
06: if(down_trylock(&receive_sem))
07: return;
08:
09: while((skb = skb_dequeue(&sk-<receive_queue)) != NULL)
10: {
11: {
12: struct nlmsghdr *nlh = NULL;
13: if(skb-<len <= sizeof(struct nlmsghdr))
14: {
15: nlh = (struct nlmsghdr *)skb-<data;
16: if((nlh-<nlmsg_len <= sizeof(struct nlmsghdr))
17: && (skb-<len <= nlh-<nlmsg_len))
18: {
19: if(nlh-<nlmsg_type == IMP2_U_PID)
20: {
21: write_lock_bh(&user_proc.pid);
22: user_proc.pid = nlh-<nlmsg_pid;
23: write_unlock_bh(&user_proc.pid);
24: }
25: else if(nlh-<nlmsg_type == IMP2_CLOSE)
26: {
27: write_lock_bh(&user_proc.pid);
28: if(nlh-<nlmsg_pid == user_proc.pid) user_proc.pid = 0;
29: write_unlock_bh(&user_proc.pid);
30: }
31: }
32: }
33: }
34: kfree_skb(skb);
35: }
36: up(&receive_sem);
37: }while(nlfd && nlfd-<receive_queue.qlen);
38: }
如果读者看过 ip_queue.c 或 rtnetlink.c中的源码会发现片断(二)中的 03~18 和 31~38 是 netlink socket 在内核空间接收数据的框架。在框架中主要是从套接字缓存中取出全部的数据,然后分析是不是合法的数据报,合法的 netlink 数据报必须有nlmsghdr 结构的报头。在这里笔者使用了自己定义的消息类型:IMP2_U_PID(消息为用户空间进程的ID),IMP2_CLOSE(用户空间进程关闭)。因为考虑到 SMP,所以在这里使用了读写锁来避免不同 CPU 访问临界区的问题。kernel_receive() 函数的运行在软中断环境。
(三)截获 IP 数据报
/* Netfilter hook at NF_IP_PRE_ROUTING: for every ICMP datagram, if a
   userspace listener is registered, forward its source/destination
   addresses via send_to_user().  Always accepts the packet. */
static unsigned int get_icmp(unsigned int hook,
                             struct sk_buff **pskb,
                             const struct net_device *in,
                             const struct net_device *out,
                             int (*okfn)(struct sk_buff *))
{
    struct iphdr *iph = (*pskb)->nh.iph;
    struct packet_info info;
    int have_listener;

    if (iph->protocol != IPPROTO_ICMP)
        return NF_ACCEPT;

    /* Snapshot the listener PID under the read lock, then send outside it
       (same ordering as before: the lock is dropped before send_to_user). */
    read_lock_bh(&user_proc.lock);
    have_listener = (user_proc.pid != 0);
    read_unlock_bh(&user_proc.lock);

    if (have_listener) {
        info.src = iph->saddr;      /* record source address */
        info.dest = iph->daddr;     /* record destination address */
        send_to_user(&info);
    }
    return NF_ACCEPT;
}
(四)发送数据
/* Build a netlink message carrying one packet_info record and unicast
   it to the registered userspace process.  Returns netlink_unicast()'s
   result, or -1 on allocation/assembly failure. */
static int send_to_user(struct packet_info *info)
{
    int ret;
    int size;
    unsigned char *old_tail;
    struct sk_buff *skb;
    struct nlmsghdr *nlh;
    struct packet_info *packet;

    size = NLMSG_SPACE(sizeof(*info));
    skb = alloc_skb(size, GFP_ATOMIC);
    if (!skb)           /* GFP_ATOMIC can fail: was dereferenced unchecked */
        return -1;
    old_tail = skb->tail;
    /* NLMSG_PUT jumps to nlmsg_failure when the skb has no room. */
    nlh = NLMSG_PUT(skb, 0, 0, IMP2_K_MSG, size - sizeof(*nlh));
    packet = NLMSG_DATA(nlh);
    memset(packet, 0, sizeof(struct packet_info));
    packet->src = info->src;
    packet->dest = info->dest;
    /* Record the real (aligned) length of what was appended. */
    nlh->nlmsg_len = skb->tail - old_tail;
    NETLINK_CB(skb).dst_groups = 0;

    read_lock_bh(&user_proc.lock);
    ret = netlink_unicast(nlfd, skb, user_proc.pid, MSG_DONTWAIT);
    read_unlock_bh(&user_proc.lock);
    return ret;

nlmsg_failure:          /* assembly failed: drop the socket buffer */
    if (skb)
        kfree_skb(skb);
    return -1;
}
片断(四)中所使用的宏参考如下:
/* Round a length up to NLMSG_ALIGNTO bytes. */
#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
/* Total message length including the nlmsghdr header. */
#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(sizeof(struct nlmsghdr)))
/* Aligned total message length. */
#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
/* Reserve room and fill a message header; jumps to the caller-defined
   nlmsg_failure label on overflow.  The '/' line continuations from the
   corrupted paste are restored to '\'. */
#define NLMSG_PUT(skb, pid, seq, type, len) \
({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) goto nlmsg_failure; \
   __nlmsg_put(skb, pid, seq, type, len); })
/* Append a netlink message header to skb and fill it in.  'len' is the
   payload length; the header records the unaligned total while skb_put
   advances the tail by the aligned size. */
static __inline__ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
{
struct nlmsghdr *nlh;
int size = NLMSG_LENGTH(len);
nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = 0;
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
return nlh;
}
/* Skip the header and return a pointer to the message payload. */
#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
/* Access the netlink control block stashed in skb->cb. */
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
运行示例时,先编译 imp2_k.c 模块,然后使用 insmod 将模块加载入内核。再运行编译好的 imp2_u 命令,此时就会显示出本机当前接收的 ICMP 数据报的源地址和目的地址。用户可以使用 Ctrl+C 来终止用户空间的进程,再次启动也不会带来问题。
4 总结
本文从内核态代码的不同运行环境来实现不同方法的内核空间与用户空间的通信,并分析了它们的实际效果。最后推荐使用 netlink 套接字实现中断环境与用户态进程通信,因为 netlink 套接字是专为此类通信定制的。
Top
18 楼mrelay(whxzl)回复于 2005-11-09 14:32:04 得分 0
通过驱动程序吧。Top
19 楼bekars(涡轮增压:没有解决不了的问题,因为根本就没有问题)回复于 2005-11-09 17:56:56 得分 0
看了通过NetLink获取内核中网络接口名称的一个实现,各个参数设置的眼花缭乱:
/* Send a netlink dump request of the given message type for the given
   address family over the rtnetlink socket in nl. */
static int
netlink_request(struct nl_handle *nl, int family, int type)
{
	int status;
	struct sockaddr_nl snl;
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg g;
	} req;

	/* Cleanup the room */
	memset(&snl, 0, sizeof (snl));
	snl.nl_family = AF_NETLINK;

	/* Zero the request too: padding bytes would otherwise leak stack
	   contents to the kernel (and confuse strict parsers). */
	memset(&req, 0, sizeof (req));
	req.nlh.nlmsg_len = sizeof (req);
	req.nlh.nlmsg_type = type;
	req.nlh.nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
	req.nlh.nlmsg_pid = 0;
	req.nlh.nlmsg_seq = ++nl->seq;
	req.g.rtgen_family = family;

	status = sendto(nl->fd, (void *) &req, sizeof (req)
			, 0, (struct sockaddr *) &snl, sizeof (snl));
	if (status < 0) {
		FW_LOG("Netlink: sendto() failed: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}
/* Our netlink parser */
/* Receive loop for a netlink dump: read datagrams from nl->fd, walk
   every nlmsghdr in each, and feed it to the caller's filter callback.
   Stops on NLMSG_DONE / NLMSG_ERROR / EOF, or when a non-blocking read
   would block.  Only the corrupted '/n' sequences in the log strings
   are fixed; the parsing logic is unchanged. */
static int
netlink_parse_info(int (*filter) (struct sockaddr_nl *, struct nlmsghdr *),
struct nl_handle *nl)
{
int status;
int ret = 0;
int error;
while (1) {
char buf[4096];
struct iovec iov = { buf, sizeof buf };
struct sockaddr_nl snl;
struct msghdr msg =
{ (void *) &snl, sizeof snl, &iov, 1, NULL, 0, 0 };
struct nlmsghdr *h;
status = recvmsg(nl->fd, &msg, 0);
if (status < 0) {
if (errno == EINTR)
continue;	/* interrupted: retry */
if (errno == EWOULDBLOCK)
break;	/* non-blocking socket drained */
FW_LOG("Netlink: Received message overrun\n");
continue;
}
if (status == 0) {
FW_LOG("Netlink: EOF\n");
return -1;
}
if (msg.msg_namelen != sizeof snl) {
FW_LOG("Netlink: Sender address length error: length %d\n", msg.msg_namelen);
return -1;
}
/* Walk every netlink message packed into this datagram. */
for (h = (struct nlmsghdr *) buf; NLMSG_OK(h, status);
h = NLMSG_NEXT(h, status)) {
/* Finish of reading. */
if (h->nlmsg_type == NLMSG_DONE)
return ret;
/* Error handling. */
if (h->nlmsg_type == NLMSG_ERROR) {
struct nlmsgerr *err =
(struct nlmsgerr *) NLMSG_DATA(h);
if (h->nlmsg_len <
NLMSG_LENGTH(sizeof (struct nlmsgerr))) {
FW_LOG("Netlink: error: message truncated\n");
return -1;
}
FW_LOG("Netlink: error: %s, type=(%u), seq=%u, pid=%d\n",
strerror(-err->error),
err->msg.nlmsg_type,
err->msg.nlmsg_seq, err->msg.nlmsg_pid);
return -1;
}
error = (*filter) (&snl, h);
if (error < 0) {
FW_LOG("Netlink: filter function error\n");
ret = error;
}
}
/* After error care. */
if (msg.msg_flags & MSG_TRUNC) {
FW_LOG("Netlink: error: message truncated\n");
continue;
}
if (status) {
FW_LOG("Netlink: error: data remnant size %d\n", status);
return -1;
}
}
return ret;
}
/* Netlink interface link lookup filter */
/* RTM_NEWLINK filter: build an interface record from the message's
   rtattrs and queue it.  Skips loopback; returns 0 on success/skip,
   -1 on malformed input or allocation failure. */
static int
netlink_if_link_filter(struct sockaddr_nl *snl, struct nlmsghdr *h)
{
	struct ifinfomsg *ifi;
	struct rtattr *tb[IFLA_MAX + 1];
	interface *ifp;
	int i, len;
	char *name;

	ifi = NLMSG_DATA(h);
	if (h->nlmsg_type != RTM_NEWLINK)
		return 0;
	len = h->nlmsg_len - NLMSG_LENGTH(sizeof (struct ifinfomsg));
	if (len < 0)
		return -1;
	/* Interface name lookup */
	memset(tb, 0, sizeof (tb));
	parse_rtattr(tb, IFLA_MAX, IFLA_RTA(ifi), len);
	if (tb[IFLA_IFNAME] == NULL)
		return -1;
	name = (char *) RTA_DATA(tb[IFLA_IFNAME]);
	/* Return if loopback */
	if (ifi->ifi_type == ARPHRD_LOOPBACK)
		return 0;
	/* Fill the interface structure */
	ifp = (interface *) MALLOC(sizeof (interface));
	if (ifp == NULL)	/* allocation can fail */
		return -1;
	/* Zero the record so ifname ends up NUL-terminated after the copy
	   below (MALLOC's zeroing semantics are unknown here — TODO confirm,
	   as is whether `name` always fits in ifp->ifname). */
	memset(ifp, 0, sizeof (interface));
	memcpy(ifp->ifname, name, strlen(name));
	ifp->ifindex = ifi->ifi_index;
	ifp->flags = ifi->ifi_flags;
	if (tb[IFLA_MTU])	/* IFLA_MTU may be absent: was dereferenced unchecked */
		ifp->mtu = *(int *) RTA_DATA(tb[IFLA_MTU]);
	ifp->hw_type = ifi->ifi_type;
	if (tb[IFLA_ADDRESS]) {
		int hw_addr_len = RTA_PAYLOAD(tb[IFLA_ADDRESS]);
		if (hw_addr_len > IF_HWADDR_MAX)
			FW_LOG("MAC address for %s is too large: %d\n", name, hw_addr_len);
		else {
			ifp->hw_addr_len = hw_addr_len;
			memcpy(ifp->hw_addr, RTA_DATA(tb[IFLA_ADDRESS]), hw_addr_len);
			/* An all-zero MAC is treated as "no hardware address". */
			for (i = 0; i < hw_addr_len; i++)
				if (ifp->hw_addr[i] != 0)
					break;
			if (i == hw_addr_len)
				ifp->hw_addr_len = 0;
			else
				ifp->hw_addr_len = hw_addr_len;
		}
	}
	/* Queue this new interface */
	if_add_queue(ifp);
	return 0;
}
===============================================================================================================
之所以想写这篇文章,是有两个方面原因。其一是内核版有一个关于《内核可以从线性地址直接计算物理地址,用来做什么呢?》的讨论,偶说计算出物理地址可以共享给用户空间读写。dreamice兄说能否说一下详细的应用。其二是alb*版主提到wheelz曾经写过这样一个例程,拜读了一把,发现在传递物理地址和内存大小上,wheelz的例程还有些不够灵活。alb*版主提到可以通过文件的方式实现动态的传递。
因此,本人也写了这样一个例程,可以动态的将内核空间的物理地址和大小传给用户空间。
本文欢迎自由转载,但请标明作者,并保证本文的完整性。
整个内核模块,在模块插入时建立proc文件,分配内存。卸载模块的时候将用户空间写入的内容打印出来。
以下是内核模块的代码和用户空间的测试代码。
/*This program is used to allocate memory in kernel
and pass the physical address to userspace through proc file.*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#define PROC_MEMSHARE_DIR "memshare"
#define PROC_MEMSHARE_PHYADDR "phymem_addr"
#define PROC_MEMSHARE_SIZE "phymem_size"
/*alloc one page. 4096 bytes*/
#define PAGE_ORDER 0
/*this value can get from PAGE_ORDER*/
#define PAGES_NUMBER 1
struct proc_dir_entry *proc_memshare_dir ;
unsigned long kernel_memaddr = 0;
unsigned long kernel_memsize= 0;
/* /proc/memshare/phymem_addr read handler: report the physical address
   of the shared block as zero-padded hex. ('/n' corruption fixed to '\n'.) */
static int proc_read_phymem_addr(char *page, char **start, off_t off, int count)
{
    return sprintf(page, "%08lx\n", __pa(kernel_memaddr));
}
/* /proc/memshare/phymem_size read handler: report the region size in
   decimal bytes. ('/n' corruption fixed to '\n'.) */
static int proc_read_phymem_size(char *page, char **start, off_t off, int count)
{
    return sprintf(page, "%lu\n", kernel_memsize);
}
/* Module init: create /proc/memshare/{phymem_addr,phymem_size} and
   allocate the page that will be shared with userspace via /dev/mem.
   (The HTML residue on the kernel_memsize line is repaired back to
   `PAGES_NUMBER * PAGE_SIZE`.) */
static int __init init(void)
{
    /* Build proc dir "memshare" and the two info entries inside it. */
    proc_memshare_dir = proc_mkdir(PROC_MEMSHARE_DIR, NULL);
    if (!proc_memshare_dir)
        return -ENOMEM;
    create_proc_info_entry(PROC_MEMSHARE_PHYADDR, 0, proc_memshare_dir, proc_read_phymem_addr);
    create_proc_info_entry(PROC_MEMSHARE_SIZE, 0, proc_memshare_dir, proc_read_phymem_size);

    /* Allocate 2^PAGE_ORDER pages (one page here). */
    kernel_memaddr = __get_free_pages(GFP_KERNEL, PAGE_ORDER);
    if (!kernel_memaddr) {
        printk("Allocate memory failure!\n");
        /* Fail the load instead of returning 0: otherwise fini() would
           later print/free a NULL block at rmmod time. */
        remove_proc_entry(PROC_MEMSHARE_PHYADDR, proc_memshare_dir);
        remove_proc_entry(PROC_MEMSHARE_SIZE, proc_memshare_dir);
        remove_proc_entry(PROC_MEMSHARE_DIR, NULL);
        return -ENOMEM;
    }
    /* Reserve the page so remapping it through /dev/mem is permitted
       and it is not reclaimed while userspace has it mapped. */
    SetPageReserved(virt_to_page(kernel_memaddr));
    kernel_memsize = PAGES_NUMBER * PAGE_SIZE;
    printk("Allocate memory success!. The phy mem addr=%08lx, size=%lu\n",
           __pa(kernel_memaddr), kernel_memsize);
    return 0;
}
/* Module exit: dump what userspace wrote, free the shared page, and
   tear down the proc entries. */
static void __exit fini(void)
{
    if (kernel_memaddr) {   /* guard against a failed/partial init */
        printk("The content written by user is: %s\n", (unsigned char *) kernel_memaddr);
        ClearPageReserved(virt_to_page(kernel_memaddr));
        free_pages(kernel_memaddr, PAGE_ORDER);
    }
    remove_proc_entry(PROC_MEMSHARE_PHYADDR, proc_memshare_dir);
    remove_proc_entry(PROC_MEMSHARE_SIZE, proc_memshare_dir);
    remove_proc_entry(PROC_MEMSHARE_DIR, NULL);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Godbach ([email]nylzhaowei@163.com[/email])");
MODULE_DESCRIPTION("Kernel memory share module.");
文章出处:http://www.diybl.com/course/6_system/linux/Linuxjs/2008106/148265_2.html
文章出处:http://www.diybl.com/course/6_system/linux/Linuxjs/2008106/148265.html