linux_driver_model_devtmpfs

This document is only a set of development notes. It may serve as reference material for formal documentation, but it is not itself a formal document.

 

Written by wolfgang huang (stillinux@gmail.com)

 

Notes of this kind only record the key development points of each Linux 4.0 driver module, together with pointers to useful reference material, so that later developers can get up to speed quickly; they can also serve as introductory material for other interested readers.

 

Devtmpfs

 

First, let us borrow lwn.net's discussion of devtmpfs:

The drive for faster boot times has led to a number of changes in the kernel.

Changes like the recently proposed devtmpfs have a different set of challenges. While it may provide a good solution to reducing boot times, devtmpfs faces some fairly stiff resistance, at least partially because it reminds some folks of a feature previously excised from the kernel, namely devfs.

 

The basic idea is to create a tmpfs early in the kernel initialization before the driver core has initialized. Then, as each device registers with the driver core, its major and minor numbers and device name can be used to create an entry in that filesystem. Eventually, the root filesystem will be mounted and the populated tmpfs can be mounted at /dev.

This has a number of benefits, all of which derive from the fact that no user-space support is required to have a working /dev directory. With the current udev-based approach, there is a need for a reasonably functional user-space environment for udev to operate in. For simplified booting scenarios—like rescue tools or using the init=/bin/sh kernel boot parameter—a functional /dev directory is needed, in particular because of dynamic device numbers. It would also be useful for embedded devices that do not need or want a full-featured user space.

 

The main idea is to set up a preliminary /dev early during Linux kernel startup, so that ordinary boot programs do not have to wait for udev, thus shortening the GNU/Linux boot time.
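As a concrete illustration, this is roughly what an early init program or a rescue shell could do by hand to get a usable /dev without udev, assuming the kernel was built with CONFIG_DEVTMPFS (the /dev target path and the mode=0755 option are only the conventional choice, not something the kernel requires):

/* minimal user-space sketch: mount the kernel-populated devtmpfs at /dev,
 * equivalent to "mount -t devtmpfs devtmpfs /dev -o mode=0755" */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
    if (mount("devtmpfs", "/dev", "devtmpfs", MS_SILENT, "mode=0755") < 0) {
        perror("mount devtmpfs");
        return 1;
    }
    return 0;
}

With CONFIG_DEVTMPFS_MOUNT enabled the kernel can perform this mount itself right before running init, so even init=/bin/sh starts with a populated /dev.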

 

Below we analyze its source code:

int __init devtmpfs_init(void)
{
    /* note@wolfgang: register dev_fs_type on the file_system list */
    int err = register_filesystem(&dev_fs_type);
    if (err) {
        printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
               "type %i\n", err);
        return err;
    }

    /* note@wolfgang: create and run the devtmpfsd kernel thread */
    thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
    if (!IS_ERR(thread)) {
        /* note@wolfgang: block kthread_init until devtmpfsd has finished its setup */
        wait_for_completion(&setup_done);
    } else {
        err = PTR_ERR(thread);
        thread = NULL;
    }

    if (err) {
        printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
        unregister_filesystem(&dev_fs_type);
        return err;
    }

    printk(KERN_INFO "devtmpfs: initialized\n");
    return 0;
}
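The dev_fs_type registered above is what later allows devtmpfs to be mounted, both by devtmpfsd on its private root and by user space on /dev. Reconstructed from memory of drivers/base/devtmpfs.c (check your own tree for the exact definition), it looks roughly like this; depending on CONFIG_TMPFS the superblock is backed either by tmpfs or by ramfs:

static struct dentry *dev_mount(struct file_system_type *fs_type, int flags,
                                const char *dev_name, void *data)
{
#ifdef CONFIG_TMPFS
    return mount_single(fs_type, flags, data, shmem_fill_super);
#else
    return mount_single(fs_type, flags, data, ramfs_fill_super);
#endif
}

static struct file_system_type dev_fs_type = {
    .name = "devtmpfs",
    .mount = dev_mount,
    .kill_sb = kill_litter_super,
};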

 

 

 

So next, let us look at how the devtmpfsd kernel thread operates:

static int devtmpfsd(void *p)
{
    char options[] = "mode=0755";
    int *err = p;

    /* note@wolfgang: detach into a new mount namespace */
    *err = sys_unshare(CLONE_NEWNS);
    if (*err)
        goto out;
    /* mount a fresh devtmpfs instance over this namespace's root */
    *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
    if (*err)
        goto out;
    sys_chdir("/.."); /* will traverse into overmounted root */
    sys_chroot(".");

    /* wake kthread_init, which is blocked in devtmpfs_init() */
    complete(&setup_done);
    while (1) {
        spin_lock(&req_lock);
        while (requests) {
            struct req *req = requests;
            requests = NULL;
            spin_unlock(&req_lock);
            /* traverse the request list, handle each request and wake its requester */
            while (req) {
                struct req *next = req->next;
                req->err = handle(req->name, req->mode,
                                  req->uid, req->gid, req->dev);
                complete(&req->done);
                req = next;
            }
            spin_lock(&req_lock);
        }
        /* no pending requests: go to sleep until woken again */
        __set_current_state(TASK_INTERRUPTIBLE);
        spin_unlock(&req_lock);
        /* give the CPU to other tasks */
        schedule();
    }
    return 0;
out:
    complete(&setup_done);
    return *err;
}

Before kthread_init (the thread that started this kernel thread) may continue running, it must wait for the devtmpfsd kernel thread to establish its own working directory and mount devtmpfs over its root. Only then is kthread_init woken to continue. Later, when device_add() calls devtmpfs_create_node() or device_del() calls devtmpfs_delete_node(), the devtmpfsd kernel thread is woken up again to handle adding or removing the corresponding node.
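The handshake between devtmpfs_create_node()/devtmpfs_delete_node() and devtmpfsd is a simple producer/consumer list. Reconstructing the request record from the fields used in the code (the authoritative definition lives in drivers/base/devtmpfs.c), it looks roughly like:

static struct req {
    struct req *next;        /* singly linked list of pending requests */
    struct completion done;  /* the requester blocks here until handled */
    int err;                 /* result written back by handle() */
    const char *name;        /* node name relative to the devtmpfs root */
    umode_t mode;            /* 0 means "delete this node" */
    kuid_t uid;
    kgid_t gid;
    struct device *dev;
} *requests;                 /* list head, protected by req_lock */

Each requester pushes its req onto the list, wakes the thread and sleeps on req.done; devtmpfsd drains the whole list and completes each entry in turn.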

 

For now we will not analyze devtmpfs_create_node and devtmpfs_delete_node; let us keep following devtmpfsd's handling.

 

static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
                  struct device *dev)
{
    if (mode)
        return handle_create(name, mode, uid, gid, dev);
    else
        return handle_remove(name, dev);
}

Depending on mode (create or delete; 0 means delete), it calls the corresponding handler.

static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
                         kgid_t gid, struct device *dev)
{
    struct dentry *dentry;
    struct path path;
    int err;

    /* look up the node's parent directory (relative to the devtmpfs root);
     * if it does not exist yet, create it */
    dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
    if (dentry == ERR_PTR(-ENOENT)) {
        create_path(nodename);
        dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
    }
    if (IS_ERR(dentry))
        return PTR_ERR(dentry);

    /* create the device node */
    err = vfs_mknod(path.dentry->d_inode, dentry, mode, dev->devt);
    if (!err) {
        struct iattr newattrs;

        newattrs.ia_mode = mode;
        newattrs.ia_uid = uid;
        newattrs.ia_gid = gid;
        newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
        mutex_lock(&dentry->d_inode->i_mutex);
        /* set the node's owner and permissions */
        notify_change(dentry, &newattrs, NULL);
        mutex_unlock(&dentry->d_inode->i_mutex);

        /* mark as kernel-created inode */
        dentry->d_inode->i_private = &thread;
    }
    /* drop the references taken by kern_path_create() */
    done_path_create(&path, dentry);
    return err;
}
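When the node name contains subdirectories (for example input/event0), the first kern_path_create() fails with -ENOENT and handle_create() falls back to create_path(). A rough sketch of what create_path() does, reconstructed from memory of drivers/base/devtmpfs.c and assuming an internal helper dev_mkdir() that creates one directory inside the devtmpfs mount:

static int create_path(const char *nodepath)
{
    char *path;
    char *s;
    int err = 0;

    path = kstrdup(nodepath, GFP_KERNEL);
    if (!path)
        return -ENOMEM;

    /* create every "/"-separated prefix in turn: "input", "input/...", ... */
    s = path;
    for (;;) {
        s = strchr(s, '/');
        if (!s)
            break;
        s[0] = '\0';
        err = dev_mkdir(path, 0755);
        if (err && err != -EEXIST)
            break;
        s[0] = '/';
        s++;
    }
    kfree(path);
    return err;
}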

 

 

static int handle_remove(const char *nodename, struct device *dev)
{
    struct path parent;
    struct dentry *dentry;
    int deleted = 0;
    int err;

    /* get the node's dentry and lock its parent directory */
    dentry = kern_path_locked(nodename, &parent);
    if (IS_ERR(dentry))
        return PTR_ERR(dentry);
    /* the node must still have an inode */
    if (dentry->d_inode) {
        struct kstat stat;
        struct path p = {.mnt = parent.mnt, .dentry = dentry};
        err = vfs_getattr(&p, &stat);
        if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
            struct iattr newattrs;
            /*
             * before unlinking this node, reset permissions
             * of possible references like hardlinks
             */
            newattrs.ia_uid = GLOBAL_ROOT_UID;
            newattrs.ia_gid = GLOBAL_ROOT_GID;
            newattrs.ia_mode = stat.mode & ~0777;
            newattrs.ia_valid =
                ATTR_UID|ATTR_GID|ATTR_MODE;
            mutex_lock(&dentry->d_inode->i_mutex);
            notify_change(dentry, &newattrs, NULL);
            mutex_unlock(&dentry->d_inode->i_mutex);

            /* unlink the node */
            err = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
            if (!err || err == -ENOENT)
                deleted = 1;
        }
    } else {
        err = -ENOENT;
    }
    dput(dentry);
    mutex_unlock(&parent.dentry->d_inode->i_mutex);

    /* drop the reference on the parent path */
    path_put(&parent);
    /* clean up the node's parent directories */
    if (deleted && strchr(nodename, '/'))
        delete_path(nodename);
    return err;
}
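Note that handle_remove() only unlinks the node when dev_mynode() agrees that it is still the node devtmpfs itself created for this device. That check hinges on the i_private marker set in handle_create(); a rough sketch (reconstructed from memory, the real code is in drivers/base/devtmpfs.c):

static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
{
    /* not created by the kdevtmpfs thread => leave it alone */
    if (inode->i_private != &thread)
        return 0;

    /* the node's type (block/char) and dev_t must still match the device */
    if (is_blockdev(dev)) {
        if (!S_ISBLK(stat->mode))
            return 0;
    } else {
        if (!S_ISCHR(stat->mode))
            return 0;
    }
    if (stat->rdev != dev->devt)
        return 0;

    /* ours, safe to remove */
    return 1;
}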

Next let us look at device_add -> devtmpfs_create_node and device_del -> devtmpfs_delete_node.

int devtmpfs_create_node(struct device *dev)
{
    const char *tmp = NULL;
    struct req req;

    /* if the devtmpfsd kernel thread is not running, do nothing */
    if (!thread)
        return 0;

    req.mode = 0;
    req.uid = GLOBAL_ROOT_UID;
    req.gid = GLOBAL_ROOT_GID;
    /* get the node name relative to /dev (falling back to kobject->name),
     * which will be used to create /dev/xxx */
    req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
    if (!req.name)
        return -ENOMEM;

    if (req.mode == 0)
        req.mode = 0600;
    if (is_blockdev(dev))
        req.mode |= S_IFBLK;
    else
        req.mode |= S_IFCHR;

    req.dev = dev;

    /* initialize the request's completion */
    init_completion(&req.done);

    /* push the request onto the pending list */
    spin_lock(&req_lock);
    req.next = requests;
    requests = &req;
    spin_unlock(&req_lock);

    /* wake the devtmpfsd kernel thread and wait until it has handled this create request */
    wake_up_process(thread);
    wait_for_completion(&req.done);

    kfree(tmp);

    return req.err;
}

 

int devtmpfs_delete_node(struct device *dev)
{
    const char *tmp = NULL;
    struct req req;

    if (!thread)
        return 0;

    /* get the device's node name */
    req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
    if (!req.name)
        return -ENOMEM;

    /* mode == 0 tells handle() that this is a delete request */
    req.mode = 0;
    req.dev = dev;

    /* initialize the request's completion */
    init_completion(&req.done);

    /* push the request onto the pending list */
    spin_lock(&req_lock);
    req.next = requests;
    requests = &req;
    spin_unlock(&req_lock);

    /* wake the devtmpfsd kernel thread and wait until it has handled this delete request */
    wake_up_process(thread);
    wait_for_completion(&req.done);

    kfree(tmp);
    return req.err;
}
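Finally, to see these two entry points from a driver writer's perspective: any device registered through the driver core with a valid devt goes through device_add() -> devtmpfs_create_node() and, on removal, device_del() -> devtmpfs_delete_node(). A minimal character-device sketch against the Linux 4.0 era API (the names demo, demo0, demo_class and so on are made up for illustration):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>

static dev_t demo_devt;
static struct cdev demo_cdev;
static struct class *demo_class;

static const struct file_operations demo_fops = {
    .owner = THIS_MODULE,
};

static int __init demo_init(void)
{
    int err;

    err = alloc_chrdev_region(&demo_devt, 0, 1, "demo");
    if (err)
        return err;

    cdev_init(&demo_cdev, &demo_fops);
    err = cdev_add(&demo_cdev, demo_devt, 1);
    if (err)
        goto out_region;

    demo_class = class_create(THIS_MODULE, "demo");
    if (IS_ERR(demo_class)) {
        err = PTR_ERR(demo_class);
        goto out_cdev;
    }

    /* device_create() -> device_add() -> devtmpfs_create_node():
     * kdevtmpfs creates /dev/demo0 for us, no udev required */
    if (IS_ERR(device_create(demo_class, NULL, demo_devt, NULL, "demo0"))) {
        err = -ENODEV;
        goto out_class;
    }
    return 0;

out_class:
    class_destroy(demo_class);
out_cdev:
    cdev_del(&demo_cdev);
out_region:
    unregister_chrdev_region(demo_devt, 1);
    return err;
}

static void __exit demo_exit(void)
{
    /* device_destroy() -> device_del() -> devtmpfs_delete_node():
     * the /dev/demo0 node disappears again */
    device_destroy(demo_class, demo_devt);
    class_destroy(demo_class);
    cdev_del(&demo_cdev);
    unregister_chrdev_region(demo_devt, 1);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");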
