The Seven Linux Device Driver Models: Uevent

Preface

When a device is added to the system at run time (for example, a USB flash drive plugged into a PC), the kernel has to detect the new device and report the event to user space, so that user space can react accordingly, e.g. load the USB driver or update the UI. Delivering such events to user space requires a mechanism; the typical ones are hotplug/mdev and udev, both of which were explained in the previous article. The Linux implementation of the uevent mechanism is built on top of the device model and is carried out by the kobject_uevent function.

The earlier kset section introduced the interface for registering a kset; let us review it here.

/**
 * kset_register - initialize and add a kset.
 * @k: kset.
 */
int kset_register(struct kset *k)
{
	int err;

	if (!k)
		return -EINVAL;

	kset_init(k);
	err = kobject_add_internal(&k->kobj);
	if (err)
		return err;
	kobject_uevent(&k->kobj, KOBJ_ADD);
	return 0;
}

As you can see, kobject_uevent is called here to send an event whose action is KOBJ_ADD. This is also the main difference between a kobject and a kset: when a kset is registered, the event is reported to user space through kobject_uevent, whereas a standalone kobject that does not belong to any kset cannot report events to user space through the uevent mechanism.
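To make this concrete, below is a minimal, hedged module sketch modelled after samples/kobject/kset-example.c (the module, kset and kobject names are purely illustrative): the kobject is made a member of the kset before it is added, and that membership is what allows kobject_uevent to reach user space for it.

#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/slab.h>

static struct kset *demo_kset;
static struct kobject *demo_kobj;

static void demo_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type demo_ktype = {
	.release   = demo_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static int __init demo_init(void)
{
	int ret;

	/* creates /sys/kernel/demo_kset; kset_register() is called internally */
	demo_kset = kset_create_and_add("demo_kset", NULL, kernel_kobj);
	if (!demo_kset)
		return -ENOMEM;

	demo_kobj = kzalloc(sizeof(*demo_kobj), GFP_KERNEL);
	if (!demo_kobj) {
		kset_unregister(demo_kset);
		return -ENOMEM;
	}

	/* membership in a kset is what entitles this kobject to send uevents */
	demo_kobj->kset = demo_kset;
	ret = kobject_init_and_add(demo_kobj, &demo_ktype, NULL, "demo_obj");
	if (ret) {
		kobject_put(demo_kobj);
		kset_unregister(demo_kset);
		return ret;
	}

	/* delivered, because demo_kobj belongs to demo_kset; a kobject with
	 * no kset anywhere in its parent chain would get -EINVAL here */
	kobject_uevent(demo_kobj, KOBJ_ADD);
	return 0;
}

static void __exit demo_exit(void)
{
	kobject_uevent(demo_kobj, KOBJ_REMOVE);
	kobject_put(demo_kobj);
	kset_unregister(demo_kset);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");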

Data structures

struct kset_uevent_ops {
	int (* const filter)(struct kset *kset, struct kobject *kobj);
	const char *(* const name)(struct kset *kset, struct kobject *kobj);
	int (* const uevent)(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env);
};

kset_uevent_ops is the set of uevent handling callbacks that belongs to a kset.

filter: when a uevent is about to be reported, the kset uses the filter callback to suppress uevents it does not want to be sent.

name: returns the subsystem name to report for this kset; if no name is returned, the event is not reported either.

uevent: this callback is usually used to let the kset add its own, kset-specific environment variables to the event.
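As an illustration, here is a hedged sketch of what an implementation of these three callbacks might look like (all names are made up; the prototypes follow the struct shown above):

#include <linux/kobject.h>
#include <linux/string.h>

/* drop events for kobjects whose name starts with "internal" */
static int demo_uevent_filter(struct kset *kset, struct kobject *kobj)
{
	return strncmp(kobject_name(kobj), "internal", 8) != 0;
}

/* every object in this kset is reported with SUBSYSTEM=demo */
static const char *demo_uevent_name(struct kset *kset, struct kobject *kobj)
{
	return "demo";
}

/* add a kset-private key to the event's environment */
static int demo_uevent(struct kset *kset, struct kobject *kobj,
		       struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "DEMO_STATE=%s", "ready");
}

static const struct kset_uevent_ops demo_uevent_ops = {
	.filter = demo_uevent_filter,
	.name   = demo_uevent_name,
	.uevent = demo_uevent,
};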

struct kobj_uevent_env {
	char *argv[3];
	char *envp[UEVENT_NUM_ENVP];
	int envp_idx;
	char buf[UEVENT_BUFFER_SIZE];
	int buflen;
};

envp: holds the address of each environment variable string; up to 32 entries (UEVENT_NUM_ENVP) are supported.

envp_idx: index of the next free slot in envp.

buf: buffer that stores the environment strings; up to 2048 bytes (UEVENT_BUFFER_SIZE).

buflen: number of bytes of buf currently in use.
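These four fields work together in add_uevent_var(), the helper used throughout the code below to append "KEY=value" strings to an event. The following is a simplified sketch of its logic (the real function lives in lib/kobject_uevent.c; warning messages are omitted here):

int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
{
	va_list args;
	int len;

	/* all 32 envp slots already used */
	if (env->envp_idx >= ARRAY_SIZE(env->envp))
		return -ENOMEM;

	/* format the string directly into the free tail of buf */
	va_start(args, format);
	len = vsnprintf(&env->buf[env->buflen],
			sizeof(env->buf) - env->buflen, format, args);
	va_end(args);

	/* the 2048-byte buffer is exhausted */
	if (len >= sizeof(env->buf) - env->buflen)
		return -ENOMEM;

	/* record where this "KEY=value" string starts, account for the '\0' */
	env->envp[env->envp_idx++] = &env->buf[env->buflen];
	env->buflen += len + 1;
	return 0;
}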

Code analysis

/**
 * kobject_uevent - notify userspace by sending an uevent
 *
 * @action: action that is happening
 * @kobj: struct kobject that the action is happening to
 *
 * Returns 0 if kobject_uevent() is completed with success or the
 * corresponding error when it fails.
 */
int kobject_uevent(struct kobject *kobj, enum kobject_action action)
{
	return kobject_uevent_env(kobj, action, NULL);
}

As the comment says, this notifies user space by sending a uevent; action is the type of event that is happening. action is the following enumeration:

enum kobject_action {
	KOBJ_ADD,             
	KOBJ_REMOVE,
	KOBJ_CHANGE,
	KOBJ_MOVE,
	KOBJ_ONLINE,
	KOBJ_OFFLINE,
	KOBJ_MAX
};

KOBJ_ADD/KOBJ_REMOVE indicate that an object was added or removed.

KOBJ_ONLINE/KOBJ_OFFLINE indicate that a device went online or offline.
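From a driver's point of view, sending one of these actions is a one-liner. For example, a hedged sketch of reporting a state change on an existing device, with an extra environment variable (the function and variable names are illustrative):

#include <linux/device.h>
#include <linux/kobject.h>

static void report_error(struct device *dev)
{
	char *envp[] = { "ERROR=1", NULL };

	/* user space (udev/mdev) sees ACTION=change plus ERROR=1 */
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}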

/**
 * kobject_uevent_env - send an uevent with environmental data
 *
 * @action: action that is happening
 * @kobj: struct kobject that the action is happening to
 * @envp_ext: pointer to environmental data
 *
 * Returns 0 if kobject_uevent_env() is completed with success or the
 * corresponding error when it fails.
 */
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
		       char *envp_ext[])
{
	struct kobj_uevent_env *env;
	const char *action_string = kobject_actions[action];
	const char *devpath = NULL;
	const char *subsystem;
	struct kobject *top_kobj;
	struct kset *kset;
	const struct kset_uevent_ops *uevent_ops;
	int i = 0;
	int retval = 0;
#ifdef CONFIG_NET
	struct uevent_sock *ue_sk;
#endif

	pr_debug("kobject: '%s' (%p): %s\n",
		 kobject_name(kobj), kobj, __func__);

	/* search the kset we belong to */
	top_kobj = kobj;
	while (!top_kobj->kset && top_kobj->parent)            //walk up through the parents to find the kset this kobj belongs to
		top_kobj = top_kobj->parent;

	if (!top_kobj->kset) {                                 //a uevent can only be sent if there is a kset
		pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
			 "without kset!\n", kobject_name(kobj), kobj,
			 __func__);
		return -EINVAL;
	}

	kset = top_kobj->kset;                                //get the uevent_ops of the top-level kset
	uevent_ops = kset->uevent_ops;

	/* skip the event, if uevent_suppress is set*/
	if (kobj->uevent_suppress) {                           //if uevent_suppress is set, do not send the uevent
		pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
				 "caused the event to drop!\n",
				 kobject_name(kobj), kobj, __func__);
		return 0;
	}
	/* skip the event, if the filter returns zero. */
	if (uevent_ops && uevent_ops->filter)                 //run the filter callback; a return value of 0 means the top-level kset filtered this event out
		if (!uevent_ops->filter(kset, kobj)) {
			pr_debug("kobject: '%s' (%p): %s: filter function "
				 "caused the event to drop!\n",
				 kobject_name(kobj), kobj, __func__);
			return 0;
		}

	/* originating subsystem */
	if (uevent_ops && uevent_ops->name)                     //determine the subsystem via the name callback
		subsystem = uevent_ops->name(kset, kobj);
	else
		subsystem = kobject_name(&kset->kobj);
	if (!subsystem) {
		pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
			 "event to drop!\n", kobject_name(kobj), kobj,
			 __func__);
		return 0;
	}

	/* environment buffer */
	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);      //allocate the environment buffer
	if (!env)
		return -ENOMEM;

	/* complete object path */
	devpath = kobject_get_path(kobj, GFP_KERNEL);                 //get the sysfs path of this kobject
	if (!devpath) {
		retval = -ENOENT;
		goto exit;
	}

	/* default keys */
	retval = add_uevent_var(env, "ACTION=%s", action_string);         //add the ACTION, DEVPATH and SUBSYSTEM variables to the environment buffer
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "DEVPATH=%s", devpath);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
	if (retval)
		goto exit;

	/* keys passed in from the caller */
	if (envp_ext) {                                                       //add the variables supplied by the caller
		for (i = 0; envp_ext[i]; i++) {
			retval = add_uevent_var(env, "%s", envp_ext[i]);
			if (retval)
				goto exit;
		}
	}

	/* let the kset specific function add its stuff */                    //let the kset add its own private variables
	if (uevent_ops && uevent_ops->uevent) {
		retval = uevent_ops->uevent(kset, kobj, env);
		if (retval) {
			pr_debug("kobject: '%s' (%p): %s: uevent() returned "
				 "%d\n", kobject_name(kobj), kobj,
				 __func__, retval);
			goto exit;
		}
	}

	/*
	 * Mark "add" and "remove" events in the object to ensure proper
	 * events to userspace during automatic cleanup. If the object did
	 * send an "add" event, "remove" will automatically generated by
	 * the core, if not already done by the caller.
	 */
	if (action == KOBJ_ADD)
		kobj->state_add_uevent_sent = 1;
	else if (action == KOBJ_REMOVE)
		kobj->state_remove_uevent_sent = 1;

	mutex_lock(&uevent_sock_mutex);
	/* we will send an event, so request a new sequence number */                     //update the uevent sequence number
	retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
	if (retval) {
		mutex_unlock(&uevent_sock_mutex);
		goto exit;
	}

#if defined(CONFIG_NET)            //if CONFIG_NET is enabled, send the uevent over netlink
	/* send netlink message */
	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
		struct sock *uevent_sock = ue_sk->sk;
		struct sk_buff *skb;
		size_t len;

		if (!netlink_has_listeners(uevent_sock, 1))
			continue;

		/* allocate message with the maximum possible size */
		len = strlen(action_string) + strlen(devpath) + 2;
		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
		if (skb) {
			char *scratch;

			/* add header */
			scratch = skb_put(skb, len);
			sprintf(scratch, "%s@%s", action_string, devpath);

			/* copy keys to our continuous event payload buffer */
			for (i = 0; i < env->envp_idx; i++) {
				len = strlen(env->envp[i]) + 1;
				scratch = skb_put(skb, len);
				strcpy(scratch, env->envp[i]);
			}

			NETLINK_CB(skb).dst_group = 1;
			retval = netlink_broadcast_filtered(uevent_sock, skb,
							    0, 1, GFP_KERNEL,
							    kobj_bcast_filter,
							    kobj);
			/* ENOBUFS should be handled in userspace */
			if (retval == -ENOBUFS || retval == -ESRCH)
				retval = 0;
		} else
			retval = -ENOMEM;
	}
#endif
	mutex_unlock(&uevent_sock_mutex);

#ifdef CONFIG_UEVENT_HELPER             //if CONFIG_UEVENT_HELPER is enabled, also deliver the uevent via uevent_helper
	/* call uevent_helper, usually only enabled during early boot */
	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
		struct subprocess_info *info;

		retval = add_uevent_var(env, "HOME=/");
		if (retval)
			goto exit;
		retval = add_uevent_var(env,
					"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
		if (retval)
			goto exit;
		retval = init_uevent_argv(env, subsystem);
		if (retval)
			goto exit;

		retval = -ENOMEM;
		info = call_usermodehelper_setup(env->argv[0], env->argv,
						 env->envp, GFP_KERNEL,
						 NULL, cleanup_uevent_env, env);
		if (info) {
			retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
			env = NULL;	/* freed by cleanup_uevent_env */
		}
	}
#endif

exit:
	kfree(devpath);
	kfree(env);
	return retval;
}
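The CONFIG_NET branch above broadcasts the event on the NETLINK_KOBJECT_UEVENT family, multicast group 1. To see what arrives on the other side, here is a minimal, hedged user-space listener sketch (error handling trimmed for brevity): the payload is the "ACTION@DEVPATH" header followed by NUL-separated KEY=value strings, exactly as assembled in the kernel code above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,                 /* the uevent multicast group */
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	for (;;) {
		ssize_t i, len = recv(fd, buf, sizeof(buf), 0);

		if (len <= 0)
			break;
		/* print the header and every KEY=value string of one event */
		for (i = 0; i < len; i += strlen(&buf[i]) + 1)
			printf("%s\n", &buf[i]);
		printf("----\n");
	}
	close(fd);
	return 0;
}

udev works on exactly this netlink interface (with a larger receive buffer and credential checking), which is why it does not need the uevent_helper path described next.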

The uevent_helper mechanism

The kernel currently supports two delivery methods, netlink and uevent_helper; this section focuses on the implementation of uevent_helper.

uevent_helper is defined as follows:

#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif

CONFIG_UEVENT_HELPER_PATH can be found in the kernel config file:

CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"

So the default helper is /sbin/hotplug. Which program does this hotplug path actually point to? On an embedded device you will typically find a configuration like this in the etc startup scripts:

echo /sbin/mdev >/proc/sys/kernel/hotplug
/sbin/mdev -s

In other words, uevent_helper ends up invoking /sbin/mdev.
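The kernel invokes the helper with the subsystem as argv[1] and the event keys (ACTION, DEVPATH, SUBSYSTEM, ...) as environment variables, as the init_uevent_argv/call_usermodehelper_setup calls above show. A hedged sketch of a trivial helper that just logs what it receives (the log path is illustrative):

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	FILE *log = fopen("/tmp/hotplug.log", "a");
	const char *action  = getenv("ACTION");
	const char *devpath = getenv("DEVPATH");

	if (!log)
		return 1;
	/* argv[1] is the subsystem passed in by init_uevent_argv() */
	fprintf(log, "subsystem=%s action=%s devpath=%s\n",
		argc > 1 ? argv[1] : "?",
		action  ? action  : "?",
		devpath ? devpath : "?");
	fclose(log);
	return 0;
}

Pointing /proc/sys/kernel/hotplug at such a binary (instead of /sbin/mdev) is a quick way to observe which uevents the kernel actually reports.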

Now let us go back to kobject_uevent and continue the analysis.

The preparation work before the user helper is actually invoked is done first:

struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);                //initialize the work item
	sub_info->path = path;                                            //fill subprocess_info from the parameters
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
  out:
	return sub_info;
}

Next, call_usermodehelper_exec is called to start a user-mode application.

int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);                            //initialize a completion object on the stack
	int retval = 0;

	if (!sub_info->path) {                                      //if there is no path, just free the info and bail out
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();                                               //increment the atomic counter running_helpers
	if (!khelper_wq || usermodehelper_disabled) {                //bail out if khelper_wq does not exist or usermode helpers are disabled
		retval = -EBUSY;
		goto out;
	}
	/*
	 * Worker thread must not wait for khelper thread at below
	 * wait_for_completion() if the thread was created with CLONE_VFORK
	 * flag, for khelper thread is already waiting for the thread at
	 * wait_for_completion() in do_fork().
	 */
	if (wait != UMH_NO_WAIT && current == kmod_thread_locker) {     
		retval = -EBUSY;
		goto out;
	}

	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);                               //queue the work item on the khelper workqueue
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */          //with UMH_NO_WAIT, return immediately
		goto unlock;

	if (wait & UMH_KILLABLE) {                                
		retval = wait_for_completion_killable(&done);                   //wait, but allow the wait to be interrupted by a fatal signal
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);                                      //otherwise wait unconditionally; wait for what? For someone to call complete() on it.
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}

So when is this wait woken up? Naturally, once the task queued on the workqueue has finished, complete() is triggered and the waiter wakes up.

/* This is run by khelper thread  */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully We need the data structures to stay around
	 * until that is done.  */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else {
		pid = kernel_thread(call_helper, sub_info,
				    CLONE_VFORK | SIGCHLD);
		/* Worker thread stopped blocking khelper thread. */
		kmod_thread_locker = NULL;
	}

	if (pid < 0) {
		sub_info->retval = pid;
		umh_complete(sub_info);
	}
}

Here a kernel thread is created; when it gets scheduled it runs call_helper, which in turn calls ____call_usermodehelper. That function finally invokes:

	retval = do_execve(getname_kernel(sub_info->path),
			   (const char __user *const __user *)sub_info->argv,
			   (const char __user *const __user *)sub_info->envp);

which executes the application from kernel space. In the end, umh_complete calls complete() and wakes up the waiter.

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}
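As a side note, drivers rarely call the setup/exec pair directly; they normally go through the call_usermodehelper() convenience wrapper, which chains the two functions analyzed above. A hedged usage sketch (the helper path and arguments are made up):

#include <linux/kmod.h>

static int run_user_helper(void)
{
	char *argv[] = { "/sbin/my_helper", "start", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	/* UMH_WAIT_PROC: block until the user-space process has exited */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}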

This concludes the analysis.

Original author: DragonKingZhu

Original source: "Linux设备驱动模型-Uevent", Tencent Cloud Developer Community (copyright belongs to the original author; contact for removal in case of infringement).
