Binder Native Layer Source Code Analysis (5): How servicemanager Receives and Processes Data

We start from servicemanager's main function.

main and binder_loop

// frameworks/native/cmds/servicemanager/service_manager.c
int main()
{
    struct binder_state *bs;

    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
//...... SELinux-related code, skipped
    binder_loop(bs, svcmgr_handler);

    return 0;
}

In servicemanager's main function, it first opens the Binder driver, then makes itself the default context manager of the system, and then enters binder_loop. Note that the svcmgr_handler function is passed into binder_loop as an argument.
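
For reference, here is a minimal sketch of what binder_open(128*1024) and binder_become_context_manager() boil down to. This is not the real frameworks code (error handling and the BINDER_VERSION check are omitted), and it assumes the kernel uapi header <linux/android/binder.h>:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android/binder.h>

static int become_context_manager_sketch(void)
{
    /* open the Binder driver */
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return -1;

    /* map 128 KiB; the driver copies incoming transaction payloads into this area */
    if (mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0) == MAP_FAILED)
        return -1;

    /* register as context manager: from now on, handle 0 in every other
     * process resolves to this process's binder_node */
    return ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);
}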

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;//write_size is 0, so this ioctl performs no write
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
//binder_thread_write's handling of this command only marks the thread's looper state; not important, skipped
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));
//enter the read loop
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//read data from the driver

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        //hand the data to binder_parse
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

In binder_loop, bwr.write_size is 0 and read_size is greater than 0; in other words, the function keeps reading requests in the loop and hands them to binder_parse for processing.
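
For contrast, the binder_write() call above that delivered BC_ENTER_LOOPER is the mirror image of this pure read: it fills in only the write half of binder_write_read. Shown roughly below, condensed from memory from servicemanager's binder.c (struct binder_state and the BC_/BINDER_ constants are assumed to come from servicemanager's binder.h), so treat it as a sketch:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* struct binder_write_read, BINDER_WRITE_READ */

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;               /* write_size > 0: the driver runs binder_thread_write */
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;                  /* read_size == 0: no binder_thread_read on this ioctl */
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}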

In the driver's ioctl handler, since read_size > 0 and write_size == 0, execution enters binder_thread_read.

binder_thread_read

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;//i.e. readbuf passed in from binder_loop
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;
//wait_for_proc_work is true when transaction_stack is NULL and the todo list is also empty
	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
//a transaction has already been queued on the todo list, so wait_for_proc_work is false and this if is skipped
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
		//(this wait only stalls the thread on user errors; the normal sleep waiting for work happens in binder_wait_for_work below)
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}
	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}
	//clear the thread's WAITING looper state
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

From the previous post we know the driver has already put a transaction on the todo list, so the thread will not go to sleep here. Let's keep reading.

	while(1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		//pick the todo list to process
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;
//handle the binder_work according to its type. Here we only care about how the
//BINDER_WORK_TRANSACTION_COMPLETE tcomplete queued to the sending thread and the
//BINDER_WORK_TRANSACTION binder_work queued to the target thread are processed
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
//binder_thread_write queued the address of t->work; here t is recovered from that address
			t = container_of(w, struct binder_transaction, work);
		} break;
//since ioctl writes first and then reads, once the sending process finishes
//binder_thread_write it falls into this branch of binder_thread_read and handles the
//tcomplete work queued during the write, without waiting for another ioctl call.
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))//writing BR_TRANSACTION_COMPLETE into ptr (the sender's mIn) signals that the send succeeded
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
//...

In the switch-case, handling BINDER_WORK_TRANSACTION does nothing more than recover t; the actual processing of t comes afterwards:

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
		//get the binder_node corresponding to sm
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;
		//from the analysis in the previous post, ptr and cookie point to the service object
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;//note that the cookie is copied here
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;//note this is BR_TRANSACTION, not BC_TRANSACTION
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		//convert the kernel address into a user-space address
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));
	//write the BR_TRANSACTION command into read_buf
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		//write the binder_transaction_data tr into read_buf
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(tr);
		//...

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			//save the previous transaction_stack
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;//this thread's transaction_stack now points to t
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

The code above is straightforward: a binder_transaction_data tr is rebuilt from t and written into the read_buf we saw earlier in binder_loop. Note that the transmitted data was already copied, in binder_thread_write, into the communication buffer allocated to this process, so here tr.data.ptr.buffer is simply set to the address of that buffer (converted into a user-space address). That is what completes the cross-process data transfer.
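
A quick illustration of that last point, with hypothetical numbers (every address below is made up purely to show the arithmetic): the receiver's mmap()ed region and the kernel mapping of the same pages differ by a constant, which is exactly what binder_alloc_get_user_buffer_offset() returns.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t kernel_vm_start = 0xffffffc012300000ull; /* hypothetical: kernel start of sm's buffer area */
    uint64_t user_vm_start   = 0x0000007f80000000ull; /* hypothetical: start of sm's mmap()ed region   */
    uint64_t user_buffer_offset = user_vm_start - kernel_vm_start; /* the per-process constant */

    uint64_t t_buffer_data = 0xffffffc012340000ull;   /* hypothetical: t->buffer->data (kernel view)   */
    uint64_t tr_buffer = t_buffer_data + user_buffer_offset;

    /* prints 0x7f80040000: the same bytes, seen through sm's own mapping */
    printf("tr.data.ptr.buffer = 0x%llx\n", (unsigned long long)tr_buffer);
    return 0;
}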

Also note the assignment tr.cookie = target_node->cookie: the reference to the service associated with the target service node is stored into tr.

The rest of the code just updates bwr.read_consumed, and with that binder_thread_read is done.
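
So, for this path, when binder_thread_read returns, readbuf holds the following (a sketch, assuming no other work was queued on the todo list):

    readbuf:  | BR_NOOP | BR_TRANSACTION | struct binder_transaction_data tr |
                4 bytes     4 bytes          sizeof(tr) bytes

    bwr.read_consumed = 8 + sizeof(struct binder_transaction_data)

This is exactly the buffer that binder_parse walks next.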

binder_parse

After the ioctl call returns, binder_loop enters the binder_parse function.

//res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
//read the cmd first
        uint32_t cmd = *(uint32_t *) ptr;//i.e. BR_TRANSACTION is read out here (after the leading BR_NOOP)
        ptr += sizeof(uint32_t);
        switch(cmd) {
//...
        case BR_TRANSACTION: {
//just as waitForResponse does when handling the TRANSACTION and REPLY protocols,
//the following bytes are cast to binder_transaction_data; the only difference is
//that the variable here is named txn, while in waitForResponse it is named tr
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {//func is the svcmgr_handler function
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;//note reply is a binder_io, not a Parcel
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);//initialize reply
                bio_init_from_txn(&msg, txn);//see below
                res = func(bs, txn, &msg, &reply);//hand off to svcmgr_handler
                if (txn->flags & TF_ONE_WAY) {//flags is 0, so this branch is not taken
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {//this branch is taken
                //send the reply
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ptr += sizeof(*txn);
            break;
        }
//...
}

The binder_io type and bio_init_from_txn

When binder_parse handles the BR_TRANSACTION protocol, it first initializes a binder_io msg from the binder_transaction_data txn it just read out. Let's look at the initialization function and the binder_io data structure.

struct binder_io
{
    char *data;            /* pointer to read/write from */
    binder_size_t *offs;   /* array of offsets */
    size_t data_avail;     /* bytes available in data buffer */
    size_t offs_avail;     /* entries available in offsets array */

    char *data0;           /* start of data buffer */
    binder_size_t *offs0;  /* start of offsets buffer */
    uint32_t flags;
    uint32_t unused;
};

A binder_io is split into two areas: the data area and the offs area. data0/offs0 are the start addresses of the areas, while data/offs are the current positions. The offs area records the offsets of the objects embedded in the data.

void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;
}

The code is simple: it extracts the data portion of txn and discards the fields of txn that are used only for transport, such as pid, uid, and code.
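
To see how svcmgr_handler will later pull values back out of msg, here is a simplified sketch of the bio_get-style helpers (condensed and renamed, not the exact frameworks code; it assumes the struct binder_io definition shown above): every read advances data and shrinks data_avail, while the offs table is only consulted when an object is expected (see _bio_get_obj below).

#include <stdint.h>
#include <stddef.h>

/* simplified sketch, assuming the struct binder_io shown above */
static void *bio_get_sketch(struct binder_io *bio, size_t size)
{
    size = (size + 3) & ~(size_t)3;   /* entries in the data area are 4-byte aligned */
    if (bio->data_avail < size) {
        bio->data_avail = 0;          /* the real helper also sets BIO_F_OVERFLOW here */
        return NULL;
    }
    void *ptr = bio->data;
    bio->data += size;                /* advance the cursor... */
    bio->data_avail -= size;          /* ...and shrink what is left */
    return ptr;
}

static uint32_t bio_get_uint32_sketch(struct binder_io *bio)
{
    uint32_t *p = bio_get_sketch(bio, sizeof(uint32_t));
    return p ? *p : 0;
}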

svcmgr_handler

With the two binder_io structures in hand, binder_parse enters the key function that actually handles the client's request, namely sm's svcmgr_handler.

//res = func(bs, txn, &msg, &reply);
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%p code=%d pid=%d uid=%d\n",
    //      (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);
//check that the target is correct
    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
//check that the interface token is android.os.IServiceManager. This is the
//InterfaceToken written into the Parcel first, at the end of part 1 of this series
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }
//now the actual request handling begins
    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
//...
    case SVC_MGR_ADD_SERVICE:
//...
    case SVC_MGR_LIST_SERVICES: {
//...
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}

Four kinds of requests are handled. From the end of part 1 of this series we know the code passed in is ADD_SERVICE_TRANSACTION, which, judging by the name, corresponds to SVC_MGR_ADD_SERVICE above.
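
The correspondence is simply numeric: both sides agree on the same code values (the values below are assumed from AOSP, where GET_SERVICE_TRANSACTION equals IBinder::FIRST_CALL_TRANSACTION, i.e. 1):

enum {
    SVC_MGR_GET_SERVICE = 1,  /* GET_SERVICE_TRANSACTION   */
    SVC_MGR_CHECK_SERVICE,    /* CHECK_SERVICE_TRANSACTION */
    SVC_MGR_ADD_SERVICE,      /* ADD_SERVICE_TRANSACTION   */
    SVC_MGR_LIST_SERVICES,    /* LIST_SERVICES_TRANSACTION */
};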

Handling the SVC_MGR_ADD_SERVICE request

    case SVC_MGR_ADD_SERVICE:
    //read the string; from part 1, the second thing written into the Parcel
    //was String16("media.player"), i.e. the service name
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

After obtaining the service name and the handle value, do_add_service is called. Let's first look at how the handle is obtained.

bio_get_ref and _bio_get_obj

uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    obj = _bio_get_obj(bio);//read out the obj
    if (!obj)
        return 0;
	//from binder_translate_binder in part 4 we know the obj's type was
	//changed to BINDER_TYPE_HANDLE
    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->handle;

    return 0;//otherwise return 0
}

static struct flat_binder_object *_bio_get_obj(struct binder_io *bio)
{
    size_t n;
    size_t off = bio->data - bio->data0;

    /* TODO: be smarter about this? */
    //if some entry in the offs area equals the current offset, read an obj from the current position
    for (n = 0; n < bio->offs_avail; n++) {
        if (bio->offs[n] == off)
            return bio_get(bio, sizeof(struct flat_binder_object));
    }

    bio->data_avail = 0;
    bio->flags |= BIO_F_OVERFLOW;
    return NULL;
}

In essence, it recovers the flat_binder_object from the offs area (the counterpart of binder_transaction_data.data.ptr.offsets and Parcel.mObjects) and returns its handle value.
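
For reference, the object record recovered by _bio_get_obj() has roughly this shape (a sketch only; in newer kernel uapi headers the type field sits inside a struct binder_object_header, whereas the code above uses the flat layout):

#include <stdint.h>

struct flat_binder_object_sketch {
    uint32_t type;       /* BINDER_TYPE_BINDER when written by the sender,
                            rewritten to BINDER_TYPE_HANDLE by binder_translate_binder */
    uint32_t flags;
    union {
        uint64_t binder; /* sender side: address of the local object */
        uint32_t handle; /* receiver side: key of the binder_ref in sm's binder_proc */
    };
    uint64_t cookie;     /* sender side: pointer to the local service; cleared by the driver when translated */
};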

From the previous post we know that this handle value is the key of the binder_ref in sm's binder_proc red-black tree. With this handle we can find the corresponding binder_ref in sm's binder_proc, and through it MediaPlayerService's binder_node, as shown below:
[Figure: handle → binder_ref in sm's binder_proc → MediaPlayerService's binder_node]

do_add_service

Once the handle value is obtained, it is handed to do_add_service.
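
For context, the svclist that do_add_service manipulates is just a singly linked list of svcinfo records. Roughly, as a sketch condensed from service_manager.c and servicemanager's binder.h (field order and details may differ slightly between versions):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct binder_state;                      /* opaque here */

struct binder_death {                     /* death-notification record, as declared in binder.h */
    void (*func)(struct binder_state *bs, void *ptr);
    void *ptr;
};

struct svcinfo
{
    struct svcinfo *next;                 /* next entry in svclist */
    uint32_t handle;                      /* key of the service's binder_ref in sm's binder_proc */
    struct binder_death death;            /* set up at the end of do_add_service */
    int allow_isolated;
    size_t len;                           /* name length in 16-bit chars */
    uint16_t name[0];                     /* UTF-16 service name, e.g. "media.player" */
};

struct svcinfo *svclist;                  /* head of the registered-service list */

/* linear search by name, as called at the top of do_add_service() */
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return NULL;
}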

//do_add_service(bs, s, len, handle, txn->sender_euid,allow_isolated, txn->sender_pid)
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    //ALOGI("add_service('%s',%x,%s) uid=%d\n", str8(s, len), handle,
    //        allow_isolated ? "allow_isolated" : "!allow_isolated", uid);

    if (!handle || (len == 0) || (len > 127))
        return -1;

    if (!svc_can_register(s, len, spid, uid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
             str8(s, len), handle, uid);
        return -1;
    }
//look in svclist to see whether the service already exists
    si = find_svc(s, len);
    if (si) {//the service already exists in svclist
    //if it already has a handle, i.e. an old binder_ref for this service exists in the binder_proc, discard it
        if (si->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s, len), handle, uid);
            svcinfo_death(bs, si);//drop the reference to the service behind the old handle
        }
        si->handle = handle;//update the handle value
    } else {//the service does not exist in svclist yet
    	//allocate a new svcinfo and insert it into svclist
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                 str8(s, len), handle, uid);
            return -1;
        }
        si->handle = handle;//record the binder_ref key
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;//insert into svclist
        svclist = si;
    }

    binder_acquire(bs, handle);//take a reference on the service behind this handle
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}

This code involves several protocol commands; it only makes full sense when read together with how binder_thread_write and executeCommand handle those commands.

When the service being added already exists, svcinfo_death() is called, which sends the Binder driver a BC_RELEASE command whose payload is the old handle value.

At the end of the function, binder_acquire() is called, which sends the Binder driver a BC_ACQUIRE command whose payload is the new handle value.
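
Both helpers are thin wrappers around binder_write(): they push two 32-bit words into the write buffer, a BC_ command followed by the handle. Shown roughly below, reproduced from memory from servicemanager's binder.c (struct binder_state and the BC_ constants are assumed to come from its headers), so treat it as a sketch:

void binder_acquire(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_ACQUIRE;   /* take a strong reference on... */
    cmd[1] = target;       /* ...the binder_ref with this handle */
    binder_write(bs, cmd, sizeof(cmd));
}

void binder_release(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_RELEASE;   /* drop the strong reference on... */
    cmd[1] = target;       /* ...the binder_ref with this handle */
    binder_write(bs, cmd, sizeof(cmd));
}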

Let's look at how the Binder driver handles these commands.

binder_thread_write's handling of BC_RELEASE and BC_ACQUIRE

		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;
		//read out the handle value
			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {//target (the handle) is non-zero, so this if is not entered
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)//ret is non-zero, so this if is entered
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}

The handle value is read out and passed to binder_update_ref_for_handle. Note that proc here is servicemanager's binder_proc.

binder_update_ref_for_handle and the binder_ref type

static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{//desc is the handle value
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	//find the binder_ref in proc's red-black tree of binder_refs by the handle value
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);//increment the reference count
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);//decrement the reference count

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

It looks up the reference corresponding to the handle in servicemanager's binder_proc, and then increments or decrements its reference count depending on the arguments.

We already went through binder_inc_ref_olocked in the previous post; in short, it increments binder_ref->data.strong, and when that count was previously 0 it also increments the corresponding binder_node->internal_strong_refs.

binder_dec_ref_olocked is the inverse: it decrements binder_ref->data.strong, and when the count drops to 0 it also decrements binder_node->internal_strong_refs.
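
A much-simplified model of that rule (strong references only; the real code also tracks weak references, uses locking, and frees the ref only when both counts reach zero):

struct node_model { int internal_strong_refs; };
struct ref_model  { struct node_model *node; int strong; };

static void ref_inc_strong(struct ref_model *ref)
{
    if (ref->strong == 0)                  /* 0 -> 1: the node gains one referencing process */
        ref->node->internal_strong_refs++;
    ref->strong++;
}

/* returns nonzero when the caller may free the ref (cf. delete_ref above) */
static int ref_dec_strong(struct ref_model *ref)
{
    ref->strong--;
    if (ref->strong == 0) {
        ref->node->internal_strong_refs--; /* 1 -> 0: the node loses this process */
        return 1;
    }
    return 0;
}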

Summary

At this point, service manager has completed the registration of MediaPlayerService. In essence, a binder_node is created in the service's process, and a binder_ref pointing to the service's binder_node is created in sm's process; the handle value is the key of that binder_ref, used to locate the service's binder_ref in sm's red-black tree of binder_refs.

Recall from the previous post that when the service process communicated with sm, it obtained sm's binder_node directly from the binder context. Now that the binder_ref has been created, sm can use the handle value to obtain the service's binder_ref, and through it the binder_node, which gives sm the ability to send communication data to the service process.

In the next post we will analyze how, when a client queries sm for a service, sm hands the server's information over to the client so that the client can communicate with the server. Once that is clear, the material on Binder's low-level implementation is essentially complete.
