Adreno Source Code Series (6): Starting KGSL

/* Nice level for the higher priority GPU start thread */
int adreno_wake_nice = -7;

/**
 * adreno_start() - Power up and initialize the GPU
 * @device: Pointer to the KGSL device to power up
 * @priority:  Boolean flag to specify if the start should be scheduled in a low
 * latency work queue
 *
 * Power up the GPU and initialize it.  If priority is specified then elevate
 * the thread priority for the duration of the start operation
 */
int adreno_start(struct kgsl_device *device, int priority)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
    // nice value of the current task
	int nice = task_nice(current);
	int ret;

    // If a low-latency start was requested and the current nice value is greater than -7 (i.e. lower priority), temporarily boost the thread to adreno_wake_nice
	if (priority && (adreno_wake_nice < nice))
		set_user_nice(current, adreno_wake_nice);

    // [see Section 1]
	ret = _adreno_start(adreno_dev);

	if (priority)
		set_user_nice(current, nice);

	return ret;
}

1. _adreno_start

/**
 * _adreno_start - Power up the GPU and prepare to accept commands
 * @adreno_dev: Pointer to an adreno_device structure
 *
 * The core function that powers up and initializes the GPU.  This function is
 * called at init and after coming out of SLUMBER
 */
static int _adreno_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int status;
	unsigned int state = device->state;
	bool regulator_left_on;

	/* make sure ADRENO_DEVICE_STARTED is not set here */
	WARN_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv));

	regulator_left_on = regulators_left_on(device);

	/* Clear any GPU faults that might have been left over */
	adreno_clear_gpu_fault(adreno_dev);

	/* Put the GPU in a responsive state */
    // Transition the GPU to the KGSL_STATE_AWARE power state
	status = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
	if (status)
		goto error_pwr_off;

	/* Set any stale active contexts to NULL */
	adreno_set_active_ctxs_null(adreno_dev);

	/* Set the bit to indicate that we've just powered on */
	set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);

	/* Soft reset the GPU if a regulator is stuck on*/
	if (regulator_left_on)
		_soft_reset(adreno_dev);

	adreno_ringbuffer_set_global(adreno_dev, 0);

    // Calls kgsl_iommu_start (defined in kgsl_iommu_ops) to start the IOMMU
	status = kgsl_mmu_start(device);
	if (status)
		goto error_pwr_off;

	adreno_get_bus_counters(adreno_dev);

	adreno_clear_dcvs_counters(adreno_dev);

	/* Restore performance counter registers with saved values */
	adreno_perfcounter_restore(adreno_dev);

	/* Start the GPU */
    // Calls a6xx_start (defined in adreno_a6xx_gpudev) to start the GPU [see Section 2]
	gpudev->start(adreno_dev);

	/* Re-initialize the coresight registers if applicable */
	adreno_coresight_start(adreno_dev);

	adreno_irqctrl(adreno_dev, 1);

    // Start the performance counters [see Section 3]
	adreno_perfcounter_start(adreno_dev);

	/* Clear FSR here in case it is set from a previous pagefault */
	kgsl_mmu_clear_fsr(&device->mmu);

    // Calls a6xx_rb_start (defined in adreno_a6xx_gpudev) to start the adreno_ringbuffer [see Section 4]
	status = gpudev->rb_start(adreno_dev);
	if (status)
		goto error_pwr_off;

	/*
	 * At this point it is safe to assume that we recovered. Setting
	 * this field allows us to take a new snapshot for the next failure
	 * if we are prioritizing the first unrecoverable snapshot.
	 */
	if (device->snapshot)
		device->snapshot->recovered = true;

	/* Start the dispatcher */
    // Kick off adreno_dispatcher scheduling [see Section 5]
	adreno_dispatcher_start(device);

	device->reset_counter++;

	set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);

	return 0;

error_pwr_off:
	/* set the state back to original state */
	kgsl_pwrctrl_change_state(device, state);

	return status;
}
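
For orientation, _adreno_start raises the device to KGSL_STATE_AWARE before touching any register, and on failure restores whatever state it started in; per the docstring it runs at init and when coming out of SLUMBER. Below is a hedged reference list of the KGSL power-state names used by the msm KGSL driver (the exact set and meanings can differ between kernel versions; descriptions are approximate):

/*
 * KGSL power states (approximate meanings, for orientation only):
 *   KGSL_STATE_NONE     - device not initialized
 *   KGSL_STATE_INIT     - probed but not yet started
 *   KGSL_STATE_AWARE    - powered up enough for register access
 *   KGSL_STATE_ACTIVE   - fully running and accepting commands
 *   KGSL_STATE_NAP      - clocks gated between submissions
 *   KGSL_STATE_SLUMBER  - powered down between periods of use
 *   KGSL_STATE_SUSPEND  - system suspend
 */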

2. a6xx_start

void a6xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
	unsigned int mal, mode, hbb_hi = 0, hbb_lo = 0;
	unsigned int uavflagprd_inv;
	unsigned int amsbc = 0;
	unsigned int rgb565_predicator = 0;
	static bool patch_reglist;

	/* Enable 64 bit addressing */
    // Each of the following address-mode registers is written with 0x1
	kgsl_regwrite(device, A6XX_CP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RB_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_PC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_SP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

	/* Set up VBIF registers from the GPU core definition */
    // Write the VBIF register list from the GPU core definition (e.g. a640_vbif_regs in adreno_gpu_core_a640)
	adreno_reglist_write(adreno_dev, a6xx_core->vbif,
		a6xx_core->vbif_count);

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW))
		kgsl_regwrite(device, A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/*
	 * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE effectively
	 * disabling L2 bypass
	 */
    // Program the UCHE (L2 cache) range/trap registers
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	/*
	 * Some A6xx targets no longer use a programmed UCHE GMEM base
	 * address so only write the registers if this address is
	 * non zero.
	 */
	if (adreno_dev->uche_gmem_base) {
		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
				adreno_dev->uche_gmem_base);
		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
				adreno_dev->uche_gmem_base +
				adreno_dev->gpucore->gmem_size - 1);
		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);
	}

	kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
	kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);

	/* ROQ sizes are twice as big on a640/a680 as on a630 */
	if ((ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640) &&
		!adreno_is_a702(adreno_dev)) {
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
	} else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev) ||
			adreno_is_a702(adreno_dev)) {
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
	} else {
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0);
		kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
	}

	if (adreno_is_a660(adreno_dev))
		kgsl_regwrite(device, A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);

	if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev)) {
		/* For A612 and A610 Mem pool size is reduced to 48 */
		kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 48);
		kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 47);
	} else if (adreno_is_a702(adreno_dev)) {
		kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 64);
		kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 63);
	} else {
        // Default CP memory pool size
		kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128);
	}

	/* Setting the primFifo thresholds values */
	kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL,
		a6xx_core->prim_fifo_threshold);

	/* Set the AHB default slave response to "ERROR" */
	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);

	/* Turn on the IFPC counter (countable 4 on XOCLK4) */
	if (gmu_core_isenabled(device))
		gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
			0xff, 0x4);
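
	/*
	 * Note: gmu_core_regrmw()/kgsl_regrmw() are read-modify-write helpers:
	 * bits covered by the mask (0xff here, the countable-select field) are
	 * cleared and the new value (0x4) is ORed in, leaving other bits alone.
	 */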

	/* Turn on GX_MEM retention */
	if (gmu_core_isenabled(device) && adreno_is_a612(adreno_dev)) {
		kgsl_regwrite(device, A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0x7FB);
		/* For CP IPC interrupt */
		kgsl_regwrite(device, A6XX_RBBM_INT_2_MASK, 0x00000010);
	}

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,min-access-length", &mal))
		mal = 32;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,ubwc-mode", &mode))
		mode = 0;

	switch (mode) {
	case KGSL_UBWC_1_0:
		mode = 1;
		break;
	case KGSL_UBWC_2_0:
		mode = 0;
		break;
	case KGSL_UBWC_3_0:
		mode = 0;
		amsbc = 1; /* Only valid for A640 and A680 */
		break;
	case KGSL_UBWC_4_0:
		mode = 0;
		rgb565_predicator = 1;
		amsbc = 1;
		break;
	default:
		break;
	}

	/*
	 * For the macrotiling change on a680, this will affect RB, SP and TP:
	 * 0 means UBWC 3.0, 1 means UBWC 3.1
	 */
	if (adreno_is_a680(adreno_dev))
		kgsl_regwrite(device, A6XX_RBBM_NC_MODE_CNTL, 1);

	if (!WARN_ON(!adreno_dev->highest_bank_bit)) {
		hbb_lo = (adreno_dev->highest_bank_bit - 13) & 3;
		hbb_hi = ((adreno_dev->highest_bank_bit - 13) >> 2) & 1;
	}

	mal = (mal == 64) ? 1 : 0;

	uavflagprd_inv = (adreno_is_a650_family(adreno_dev)) ? 2 : 0;

	kgsl_regwrite(device, A6XX_RB_NC_MODE_CNTL, (rgb565_predicator << 11)|
				(hbb_hi << 10) | (amsbc << 4) | (mal << 3) |
				(hbb_lo << 1) | mode);
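
	/*
	 * Worked example (illustrative values, not from any specific target):
	 * with highest_bank_bit = 15, UBWC 4.0 and qcom,min-access-length = 32:
	 *   hbb_lo = (15 - 13) & 3 = 2, hbb_hi = ((15 - 13) >> 2) & 1 = 0,
	 *   mal = 0, mode = 0, amsbc = 1, rgb565_predicator = 1
	 * so A6XX_RB_NC_MODE_CNTL above is written with
	 *   (1 << 11) | (1 << 4) | (2 << 1) = 0x814.
	 */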

	kgsl_regwrite(device, A6XX_TPL1_NC_MODE_CNTL, (hbb_hi << 4) |
				(mal << 3) | (hbb_lo << 1) | mode);

	kgsl_regwrite(device, A6XX_SP_NC_MODE_CNTL, (hbb_hi << 10) |
				(mal << 3) | (uavflagprd_inv << 4) |
				(hbb_lo << 1) | mode);

	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (mal << 23) |
		(hbb_lo << 21));

	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
				(1 << 30) | a6xx_core->hang_detect_cycles);

	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);

	/* Set weights for bicubic filtering */
	if (adreno_is_a650_family(adreno_dev)) {
		kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
		kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
			0x3FE05FF4);
		kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
			0x3FA0EBEE);
		kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
			0x3F5193ED);
		kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
			0x3F0243F0);
	}

	/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	/* Set the bit vccCacheSkipDis=1 to get rid of TSEskip logic */
	if (a6xx_core->disable_tseskip)
		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));

	/* Set the bit in HLSQ Cluster for A702 */
	if (adreno_is_a702(adreno_dev))
		kgsl_regwrite(device, A6XX_CP_CHICKEN_DBG, (1 << 24));

	/* Enable the GMEM save/restore feature for preemption */
    // Only when preemption is enabled
	if (adreno_is_preemption_enabled(adreno_dev))
		kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
			0x1);

	/*
	 * Enable GMU power counter 0 to count GPU busy. This is applicable to
	 * all a6xx targets
	 */
	if (adreno_is_a619_holi(adreno_dev)) {
		unsigned int val;

		adreno_write_gmu_wrapper(adreno_dev,
			A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
		adreno_read_gmu_wrapper(adreno_dev,
			A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, &val);
		adreno_write_gmu_wrapper(adreno_dev,
			A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
			(val & 0xff) | 0x20);
		adreno_write_gmu_wrapper(adreno_dev,
			A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);

	} else {
		kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
			0xff000000);
		kgsl_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
			0xff, 0x20);
		kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
			0x1);
	}
	a6xx_protect_init(adreno_dev);
	/*
	 * We start LM here because we want all the following to be up
	 * 1. GX HS
	 * 2. SPTPRAC
	 * 3. HFI
	 * At this point, we are guaranteed all.
	 */

	/* Configure LLCC */
	a6xx_llc_configure_gpu_scid(adreno_dev);
	a6xx_llc_configure_gpuhtw_scid(adreno_dev);

	a6xx_llc_enable_overrides(adreno_dev);

	if (adreno_is_a660(adreno_dev)) {
		kgsl_regwrite(device, A6XX_CP_CHICKEN_DBG, 0x1);
		kgsl_regwrite(device, A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);

		/* Set dualQ + disable afull for A660, A642 GPU but not for A642L */
		if (!adreno_is_a642l(adreno_dev))
			kgsl_regwrite(device, A6XX_UCHE_CMDQ_CONFIG, 0x66906);
	}

	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
		kgsl_regwrite(device, A6XX_CP_APRIV_CNTL, A6XX_APRIV_DEFAULT);

	a6xx_set_secvid(device);

	/*
	 * Enable hardware clock gating here to prevent any register access
	 * issue due to internal clock gating.
	 */
	a6xx_hwcg_set(adreno_dev, true);

	/*
	 * All registers must be written before this point so that we don't
	 * miss any register programming when we patch the power up register
	 * list.
	 */
	if (!patch_reglist && (adreno_dev->pwrup_reglist->gpuaddr != 0)) {
		a6xx_patch_pwrup_reglist(adreno_dev);
		patch_reglist = true;
	}

	/*
	 * During adreno_stop, GBIF halt is asserted to ensure
	 * no further transaction can go through GPU before GPU
	 * headswitch is turned off.
	 *
	 * This halt is deasserted once the headswitch goes off, but
	 * in case the headswitch doesn't go off, clear the GBIF halt
	 * here to ensure GPU wake-up doesn't fail because of
	 * halted GPU transactions.
	 */
	a6xx_deassert_gbif_halt(adreno_dev);
}

3. adreno_perfcounter_start

/**
 * adreno_perfcounter_start: Enable performance counters
 * @adreno_dev: Adreno device to configure
 *
 * Ensure that all allocated performance counters are enabled.  Since
 * the device was most likely stopped, we can't trust that the counters
 * are still valid so make it so.
 */
void adreno_perfcounter_start(struct adreno_device *adreno_dev)
{
	const struct adreno_perfcounters *counters =
		ADRENO_PERFCOUNTERS(adreno_dev);
	const struct adreno_perfcount_group *group;
	unsigned int i, j;

	if (counters == NULL)
		return;
	/* group id iter */
	for (i = 0; i < counters->group_count; i++) {
		group = &(counters->groups[i]);

		/* countable iter */
		for (j = 0; j < group->reg_count; j++) {
			if (!active_countable(group->regs[j].countable))
				continue;

			/*
			 * The GPU has to be idle before calling the perfcounter
			 * enable function, but since this function is called
			 * during start we already know the GPU is idle.
			 * Since the countable/counter pairs have already been
			 * validated, there is no way for _enable() to fail so
			 * no need to check the return code.
			 */
			adreno_perfcounter_enable(adreno_dev, i, j,
					  group->regs[j].countable);
		}
	}
}
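
active_countable() above simply filters out counter slots that have nothing assigned to them. A hedged sketch of what it looks like in the msm KGSL sources (the exact definition may differ by kernel version):

static inline int active_countable(unsigned int countable)
{
	/* Skip counters that were never assigned or are marked broken */
	return ((countable != KGSL_PERFCOUNTER_NOT_USED) &&
		(countable != KGSL_PERFCOUNTER_BROKEN));
}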

4. a6xx_rb_start

int a6xx_rb_start(struct adreno_device *adreno_dev)
{
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 cp_rb_cntl = A6XX_CP_RB_CNTL_DEFAULT |
		(ADRENO_FEATURE(adreno_dev, ADRENO_APRIV) ? 0 : (1 << 27));
	struct adreno_ringbuffer *rb;
	uint64_t addr;
	int ret, i;
	unsigned int *cmds;

	/* Clear all the ringbuffers */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		memset(rb->buffer_desc->hostptr, 0xaa, KGSL_RB_SIZE);
		kgsl_sharedmem_writel(device->scratch,
			SCRATCH_RPTR_OFFSET(rb->id), 0);

		rb->wptr = 0;
		rb->_wptr = 0;
		rb->wptr_preempt_end = ~0;
	}
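
	/*
	 * Note: the 0xaa fill above is presumably a poison pattern so that any
	 * stale or unwritten ringbuffer contents stand out in snapshots/dumps;
	 * the scratch RPTR and the software wptr copies are reset to zero so
	 * each ring starts out empty.
	 */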

	a6xx_preemption_start(adreno_dev);

	/* Set up the current ringbuffer */
	rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);

	kgsl_regwrite(device, A6XX_CP_RB_RPTR_ADDR_LO, lower_32_bits(addr));
	kgsl_regwrite(device, A6XX_CP_RB_RPTR_ADDR_HI, upper_32_bits(addr));

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
	 */
	kgsl_regwrite(device, A6XX_CP_RB_CNTL, cp_rb_cntl);
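
	/*
	 * Illustrative example (assumed size; check KGSL_RB_SIZE for the real
	 * value): a 32 KB ring is 8192 dwords = 4096 quadwords, so the size
	 * field folded into A6XX_CP_RB_CNTL_DEFAULT would be ilog2(4096) = 12.
	 */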

	kgsl_regwrite(device, A6XX_CP_RB_BASE,
		lower_32_bits(rb->buffer_desc->gpuaddr));

	kgsl_regwrite(device, A6XX_CP_RB_BASE_HI,
		upper_32_bits(rb->buffer_desc->gpuaddr));

	a6xx_unhalt_sqe(adreno_dev);

	ret = a6xx_send_cp_init(adreno_dev, rb);
	if (ret)
		return ret;

	ret = adreno_zap_shader_load(adreno_dev, a6xx_core->zap_name);
	if (ret)
		return ret;

	/*
	 * Take the GPU out of secure mode. Try the zap shader if it is loaded,
	 * otherwise just try to write directly to the secure control register
	 */
	if (!adreno_dev->zap_loaded)
		kgsl_regwrite(device, A6XX_RBBM_SECVID_TRUST_CNTL, 0);
	else {
		cmds = adreno_ringbuffer_allocspace(rb, 2);
		if (IS_ERR(cmds))
			return PTR_ERR(cmds);

		*cmds++ = cp_packet(adreno_dev, CP_SET_SECURE_MODE, 1);
		*cmds++ = 0;

		ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
		if (ret) {
			a6xx_spin_idle_debug(adreno_dev,
				"Switch to unsecure failed to idle\n");
			return ret;
		}
	}

    // Only takes effect when preemption is enabled
	return a6xx_post_start(adreno_dev);
}

5. adreno_dispatcher_start

/**
 * adreno_dispatcher_start() - activate the dispatcher
 * @device: pointer to the KGSL device structure
 *
 */
void adreno_dispatcher_start(struct kgsl_device *device)
{
	complete_all(&device->halt_gate);

	/* Schedule the work loop to get things going */
	adreno_dispatcher_schedule(device);
}

5.1 adreno_dispatcher_schedule

void adreno_dispatcher_schedule(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;

	kthread_queue_work(&kgsl_driver.worker, &dispatcher->work);
}
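
The dispatcher work item runs on KGSL's dedicated kthread worker rather than on the generic system workqueues. Below is a minimal, self-contained sketch of that kthread_worker pattern, which kgsl_driver.worker and dispatcher->work follow; the example_* names are illustrative only, and the real setup lives in the KGSL probe and dispatcher init code:

#include <linux/kthread.h>
#include <linux/err.h>

static struct kthread_worker *example_worker;
static struct kthread_work example_work;

/* Runs in the worker thread's context, like the dispatcher work function */
static void example_work_fn(struct kthread_work *work)
{
	/* dispatcher-style processing would go here */
}

static int example_init(void)
{
	/* Create the dedicated worker thread (KGSL does this once at probe) */
	example_worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	/* Bind the work item to its handler, like dispatcher->work */
	kthread_init_work(&example_work, example_work_fn);
	return 0;
}

static void example_schedule(void)
{
	/* Equivalent of adreno_dispatcher_schedule(): queue the work item */
	kthread_queue_work(example_worker, &example_work);
}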