Setting up Hadoop, JDK, Hive, ZooKeeper, HBase, and Spark on Linux

VMware Workstation Installation

Download: https://download3.vmware.com/software/wkst/file/VMware-workstation-full-16.2.2-19200509.exe

VMware Network Configuration

(screenshot)

CentOS Network Configuration

Enable the network at boot

# Edit the interface configuration file
vim /etc/sysconfig/network-scripts/ifcfg-ens32

ONBOOT=yes

Configure a Static IP

Configure the virtual network

(screenshot)

# Edit the interface configuration file
vim /etc/sysconfig/network-scripts/ifcfg-ens32

# Pick any IP address you like, as long as it is in the same subnet as the VMnet8 subnet IP
IPADDR=192.168.22.131
# Gateway; the same value as the gateway set earlier in the "NAT Settings..." dialog
GATEWAY=192.168.22.2
# Netmask
NETMASK=255.255.255.0
# DNS; also the same as the gateway set in the "NAT Settings..." dialog
DNS1=192.168.22.2

The configuration file after the changes

(screenshot)

Restart the network for the changes to take effect

sudo systemctl restart network

# Stop the firewall
sudo systemctl stop firewalld.service

# Disable the firewall at boot
sudo systemctl disable firewalld.service

# Restart the sshd service
systemctl restart sshd

# Check the OS version
cat /etc/redhat-release

# After editing environment variables, apply them immediately
source /etc/profile

If the host cannot ping the VM

(screenshot)

SELinux

Check the status

(screenshot)

Change the status

vim /etc/selinux/config

# SELINUX=enforcing
SELINUX=disabled

# Reboot the machine
reboot

Configure SSH

ssh-keygen -t rsa

(screenshot)

# Go to the .ssh directory
cd /root/.ssh

# Write the contents of id_rsa.pub into authorized_keys
cat id_rsa.pub > authorized_keys
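
The command above only authorizes the local key. For the three-node cluster set up below (master, slave1, slave2), the master's public key also has to end up in each slave's authorized_keys. A minimal sketch using ssh-copy-id, to be run once the slave VMs exist and their hostnames resolve:

# Copy the public key to each slave (password login must still be possible at this point)
ssh-copy-id -i /root/.ssh/id_rsa.pub root@slave1
ssh-copy-id -i /root/.ssh/id_rsa.pub root@slave2

# Verify passwordless login
ssh root@slave1 hostname
ssh root@slave2 hostname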

JDK 1.8 Installation and Configuration

Prepare the JDK

# Go to /usr/local
cd /usr/local
mkdir java

# Extract the JDK archive into /usr/local/java
tar -xvf jdk-8u221-linux-x64.tar.gz -C /usr/local/java

Configure the JDK Environment

Edit the /etc/profile file with vim

export JAVA_HOME=/usr/local/java/jdk1.8.0_221
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar 
export PATH=$JAVA_HOME/bin:$PATH

Apply and verify

# Apply
source /etc/profile

# Verify
java -version

Hadoop Installation

  • master [ 192.168.22.131 ]
  • slave1 [ 192.168.22.132 ]
  • slave2 [ 192.168.22.133 ]

Configure master

Change the hostname

hostnamectl set-hostname master

# Edit hosts
vim /etc/hosts

192.168.22.131 master
192.168.22.132 slave1
192.168.22.133 slave2

Configure Hadoop

# Extract the archive
tar -xvf hadoop-3.2.2.tar.gz -C /opt/
Configure environment variables
vim /etc/profile

export HADOOP_HOME=/opt/hadoop-3.2.2

export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH

# Apply
source /etc/profile
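
To confirm the environment variables took effect:

# Should report Hadoop 3.2.2
hadoop version
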
Create the hadoop directories
mkdir -p /root/hadoop
mkdir -p /root/hadoop/tmp
mkdir -p /root/hadoop/var
mkdir -p /root/hadoop/dfs
mkdir -p /root/hadoop/dfs/name
mkdir -p /root/hadoop/dfs/data
Edit core-site.xml

cd /opt/hadoop-3.2.2/etc/hadoop
vim core-site.xml

<configuration>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/root/hadoop/tmp</value>
                <description>Abase for other temporary directories.</description>
        </property>
        <property>
                <name>fs.default.name</name>
                <value>hdfs://master:9000</value>
        </property>
</configuration>
Edit hdfs-site.xml

vim hdfs-site.xml

<configuration>
        <property>
                <name>dfs.name.dir</name>
                <value>/root/hadoop/dfs/name</value>
                <description>Path on the local filesystem where the NameNode stores the namespace and transactions logs persistently.</description>
        </property>
        <property>
                <name>dfs.data.dir</name>
                <value>/root/hadoop/dfs/data</value>
                <description>Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.</description>
        </property>
        <property>
                <name>dfs.replication</name>
                <value>2</value>
        </property>
        <property>
                <name>dfs.permissions</name>
                <value>false</value>
                <description>need not permissions</description>
        </property>
</configuration>
Edit mapred-site.xml

vim mapred-site.xml

<configuration>
        <property>
                <name>mapred.job.tracker</name>
                <value>master:49001</value>
        </property>
        <property>
                <name>mapred.local.dir</name>
                <value>/root/hadoop/var</value>
        </property>
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
</configuration>
Edit workers

vim workers

# Remove localhost and add the worker nodes
slave1
slave2
Edit yarn-site.xml

vim yarn-site.xml

<configuration>

<!-- Site specific YARN configuration properties -->
	<property>
		<name>yarn.resourcemanager.hostname</name>
		<value>master</value>
	</property>

	<property>
		<description>The address of the applications manager interface in the RM.</description>
		<name>yarn.resourcemanager.address</name>
		<value>${yarn.resourcemanager.hostname}:8032</value>
	</property>

	<property>
		<description>The address of the scheduler interface.</description>
		<name>yarn.resourcemanager.scheduler.address</name>
		<value>${yarn.resourcemanager.hostname}:8030</value>
	</property>

	<property>
		<description>The http address of the RM web application.</description>
		<name>yarn.resourcemanager.webapp.address</name>
		<value>${yarn.resourcemanager.hostname}:8088</value>
	</property>

	<property>
		<description>The https address of the RM web application.</description>
		<name>yarn.resourcemanager.webapp.https.address</name>
		<value>${yarn.resourcemanager.hostname}:8090</value>
	</property>

	<property>
		<name>yarn.resourcemanager.resource-tracker.address</name>
		<value>${yarn.resourcemanager.hostname}:8031</value>
	</property>

	<property>
		<description>The address of the RM admin interface.</description>
		<name>yarn.resourcemanager.admin.address</name>
		<value>${yarn.resourcemanager.hostname}:8033</value>
	</property>
	
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>

	<property>
		<name>yarn.scheduler.maximum-allocation-mb</name>
		<value>1024</value>
		<description>Maximum memory allocation per container, in MB (default 8192)</description>
	</property>

	<property>
		<name>yarn.nodemanager.vmem-pmem-ratio</name>
		<value>2.1</value>
	</property>

	<property>
		<name>yarn.nodemanager.resource.memory-mb</name>
		<value>1024</value>
	</property>

	<property>
		<name>yarn.nodemanager.vmem-check-enabled</name>
		<value>false</value>
	</property>
</configuration>

Set the startup users

  • cd /opt/hadoop-3.2.2/sbin

start-dfs.sh
stop-dfs.sh
start-yarn.sh
stop-yarn.sh

start-dfs.sh

vim start-dfs.sh

# Add the following at the top of the file
HDFS_DATANODE_USER=root
HADOOP_SECURE_DN_USER=root
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
stop-dfs.sh

vim stop-dfs.sh

# Add the following at the top of the file
HDFS_DATANODE_USER=root
HADOOP_SECURE_DN_USER=root
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
Edit hadoop-env.sh
export JAVA_HOME=/usr/local/java/jdk1.8.0_221
start-yarn.sh

vim start-yarn.sh

# Add the following at the top of the file
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=root
YARN_NODEMANAGER_USER=root
stop-yarn.sh

vim stop-yarn.sh

# Add the following at the top of the file
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=root
YARN_NODEMANAGER_USER=root

Create slave1 and slave2 by cloning the master VM

(screenshots)

Configure slave1

Change the IP (the interface file name, ens32 or ens33, should match your system)
vim /etc/sysconfig/network-scripts/ifcfg-ens33

# Change the IP to
IPADDR=192.168.22.132

# Restart the network
systemctl restart network
Change the hostname
hostnamectl set-hostname slave1

# Edit hosts
vim /etc/hosts

192.168.22.131 master
192.168.22.132 slave1
192.168.22.133 slave2

Configure slave2

Change the IP
vim /etc/sysconfig/network-scripts/ifcfg-ens33

# Change the IP to
IPADDR=192.168.22.133

# Restart the network
systemctl restart network
Change the hostname
hostnamectl set-hostname slave2

# Edit hosts
vim /etc/hosts

192.168.22.131 master
192.168.22.132 slave1
192.168.22.133 slave2

Format HDFS on master

# Format the NameNode
hadoop namenode -format

Start Hadoop

# Start
start-all.sh

# Stop
stop-all.sh
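
After start-all.sh, a quick way to confirm the daemons are running (a sketch; the exact process list depends on each node's role):

# On master: expect NameNode, SecondaryNameNode and ResourceManager
jps

# On the slaves: expect DataNode and NodeManager
# (if jps is not found over ssh, log in and run it locally instead)
ssh slave1 jps
ssh slave2 jps

# HDFS cluster report: live DataNodes, capacity, replication
hdfs dfsadmin -report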

Access the web UIs

http://192.168.22.131:9870/dfshealth.html#tab-overview
(screenshot)
http://192.168.22.131:8088/
(screenshot)
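
Optionally, run the bundled example job to verify HDFS and YARN end to end (a sketch; the examples jar path below is the default location in the Hadoop 3.2.2 distribution):

# Estimate pi with 2 map tasks and 10 samples each
hadoop jar /opt/hadoop-3.2.2/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.2.jar pi 2 10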

Hive Installation

Download Hive

https://dlcdn.apache.org/hive/hive-2.3.9/apache-hive-2.3.9-bin.tar.gz

Configure Hive

# Extract the archive
tar -xvf apache-hive-2.3.9-bin.tar.gz -C /opt/
Configure environment variables
vim /etc/profile

export HIVE_HOME=/opt/apache-hive-2.3.9-bin

export PATH=$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH

# Apply
source /etc/profile
Make the Hive scripts executable
chmod u+x /opt/apache-hive-2.3.9-bin/bin/*
Copy conf/hive-default.xml.template to a new file: hive-site.xml, then adjust the following properties
  <property>
    <name>hive.exec.stagingdir</name>
    <value>/opt/apache-hive-2.3.9-bin/data/exec_logs</value>
    <description>Directory name that will be created inside table locations in order to support HDFS encryption. This replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans.</description>
  </property>
  <!-- Hive scratch directory on HDFS -->
  <property>
    <name>hive.exec.scratchdir</name>
    <value>/tmp/hive</value>
    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
  </property>

  <!-- Local scratch directory -->
  <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/opt/apache-hive-2.3.9-bin/data/scratch_dir</value>
    <description>Local scratch space for Hive jobs</description>
  </property>
  
  <!-- Local directory for downloaded resources -->
  <property>
    <name>hive.downloaded.resources.dir</name>
    <value>/opt/apache-hive-2.3.9-bin/data/resources_dir/</value>
    <description>Temporary local directory for added resources in the remote file system.</description>
  </property>

  <!-- Hive warehouse directory on HDFS -->
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/user/hive/warehouse</value>
    <description>location of default database for the warehouse</description>
  </property>

  <!-- Metastore database password -->
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>root</value>
    <description>password to use against metastore database</description>
  </property>

  <!-- Metastore database connection URL -->
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://127.0.0.1:3306/hive?serverTimezone=UTC&amp;useSSL=false&amp;allowPublicKeyRetrieval=true</value>
    <description>
      JDBC connect string for a JDBC metastore.
      To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.
      For example, jdbc:postgresql://myhost/db?ssl=true for postgres database.
    </description>
  </property>

  <!-- Auto-create the whole metastore schema -->
  <!-- Fixes the error: Required table missing : "DBS" in Catalog "" Schema "" -->
  <property>
    <name>datanucleus.schema.autoCreateAll</name>
    <value>true</value>
    <description>Auto creates necessary schema on a startup if one doesn't exist. Set this to false, after creating it once.To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not recommended for production use cases, run schematool command instead.</description>
  </property>
  <!-- Fixes: Caused by: MetaException(message:Version information not found in metastore.) -->
  <property>
    <name>hive.metastore.schema.verification</name>
    <value>false</value>
    <description>
      Enforce metastore schema version consistency.
      True: Verify that version information stored in is compatible with one from Hive jars.  Also disable automatic
            schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures
            proper metastore schema migration. (Default)
      False: Warn if the version information stored in metastore doesn't match with one from in Hive jars.
    </description>
  </property>

  <!-- Metastore JDBC driver -->
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.cj.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>

  <!-- Metastore database username -->
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
    <description>Username to use against metastore database</description>
  </property>

  <!-- Local directory for operation logs -->
  <property>
    <name>hive.server2.logging.operation.log.location</name>
    <value>/opt/apache-hive-2.3.9-bin/data/operation_logs</value>
    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
  </property>
Copy conf/hive-env.sh.template to a new file: hive-env.sh
export HADOOP_HOME=/opt/hadoop-3.2.2

# Hive Configuration Directory can be controlled by:
export HIVE_CONF_DIR=/opt/apache-hive-2.3.9-bin/conf

# Folder containing extra libraries required for hive compilation/execution can be controlled by:
export HIVE_AUX_JARS_PATH=/opt/apache-hive-2.3.9-bin/lib
Copy conf/hive-exec-log4j2.properties.template to a new file: hive-exec-log4j2.properties
Copy conf/hive-log4j2.properties.template to a new file: hive-log4j2.properties

Start the Hive Service

Initialize the Hive metastore database

schematool -initSchema -dbType mysql

(screenshot)

Fix for error 1
  1. Delete /opt/apache-hive-2.3.9-bin/lib/guava-14.0.1.jar
  2. rm -rf /opt/apache-hive-2.3.9-bin/lib/guava-14.0.1.jar
  3. Copy the guava jar shipped with Hadoop, guava-27.0-jre.jar
  4. cp /opt/hadoop-3.2.2/share/hadoop/common/lib/guava-27.0-jre.jar /opt/apache-hive-2.3.9-bin/lib/

(screenshot)

Fix for error 2

Connect to MySQL and create the hive database
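
Error 2 means the metastore database does not exist yet. A minimal sketch of creating it, assuming the local MySQL instance and the root credentials configured in hive-site.xml above:

# Create the database referenced by javax.jdo.option.ConnectionURL
mysql -uroot -p -e "CREATE DATABASE IF NOT EXISTS hive;"

# Then re-run the initialization
schematool -initSchema -dbType mysql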

Initialization succeeded

(screenshot)

Enter the Hive CLI

(screenshot)
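
A quick sanity check from the Linux shell (a sketch; the table name is arbitrary):

# Create a throwaway table, list tables, then drop it
hive -e "CREATE TABLE IF NOT EXISTS smoke_test (id INT); SHOW TABLES; DROP TABLE smoke_test;"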

ZooKeeper Installation

Download ZooKeeper

https://dlcdn.apache.org/zookeeper/zookeeper-3.7.0/apache-zookeeper-3.7.0-bin.tar.gz

Configure ZooKeeper

# Extract the archive
tar -xvf apache-zookeeper-3.7.0-bin.tar.gz -C /opt/
Configure environment variables
vim /etc/profile

export ZOOKEEPER_HOME=/opt/apache-zookeeper-3.7.0-bin

export PATH=$JAVA_HOME/bin:$ZOOKEEPER_HOME/bin:$PATH

# Apply
source /etc/profile
Copy conf/zoo_sample.cfg to a new file: zoo.cfg

Create two directories, data and logs, under /opt/apache-zookeeper-3.7.0-bin

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/opt/apache-zookeeper-3.7.0-bin/data
dataLogDir=/opt/apache-zookeeper-3.7.0-bin/logs
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1

## Metrics Providers
#
# https://prometheus.io Metrics Exporter
#metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
#metricsProvider.httpPort=7000
#metricsProvider.exportJvmInfo=true

server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
Copy the configured ZooKeeper to the other two servers
scp -r /opt/apache-zookeeper-3.7.0-bin slave1:/opt
scp -r /opt/apache-zookeeper-3.7.0-bin slave2:/opt
Configure the myid file

On each of the three servers, create a myid file in the dataDir configured above (/opt/apache-zookeeper-3.7.0-bin/data); its content is the number after server.N for that host (master → 1, slave1 → 2, slave2 → 3)
echo 1 > myid
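
Spelled out per host (a sketch, assuming the dataDir configured above):

# On master
echo 1 > /opt/apache-zookeeper-3.7.0-bin/data/myid
# On slave1
echo 2 > /opt/apache-zookeeper-3.7.0-bin/data/myid
# On slave2
echo 3 > /opt/apache-zookeeper-3.7.0-bin/data/myid

# After the ensemble is started, each node reports its role (leader/follower)
zkServer.sh status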

ZooKeeper cluster start/stop script
#!/bin/bash  

usage="Usage: $0 (start|stop|status)"

if [ $# -lt 1 ]; then
  echo $usage
  exit 1
fi

behave=$1

iparray=(master slave1 slave2)

path="/opt/apache-zookeeper-3.7.0-bin"

echo "$behave zkServer cluster"

for ip in ${iparray[*]}  
do

    echo "ssh to $ip"

    ssh $ip "$path/bin/zkServer.sh $behave $path/conf/zoo.cfg"

    sleep 2s

done

exit 0
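
Save the script, for example as zk-cluster.sh (the name is arbitrary), make it executable, and drive the whole ensemble from master:

chmod +x zk-cluster.sh

./zk-cluster.sh start     # starts ZooKeeper on master, slave1 and slave2
./zk-cluster.sh status    # one node should report leader, the other two follower
./zk-cluster.sh stop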

HBase Installation

Download HBase

https://dlcdn.apache.org/hbase/2.4.9/hbase-2.4.9-bin.tar.gz

Configure HBase

# Extract the archive
tar -xvf hbase-2.4.9-bin.tar.gz -C /opt/
Configure environment variables
vim /etc/profile

export HBASE_HOME=/opt/hbase-2.4.9

export PATH=$JAVA_HOME/bin:$HBASE_HOME/bin:$PATH

# Apply
source /etc/profile
conf/hbase-env.sh
export JAVA_HOME=/usr/local/java/jdk1.8.0_221

export HADOOP_HOME=/opt/hadoop-3.2.2

export HBASE_HOME=/opt/hbase-2.4.9

# Whether HBase manages its own bundled ZooKeeper (false: use the external cluster)
export HBASE_MANAGES_ZK=false

export HBASE_CLASSPATH=/opt/hbase-2.4.9/conf

export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HBASE_HOME/bin:$PATH
conf/hbase-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
  <!--
    The following properties are set for running HBase as a single process on a
    developer workstation. With this configuration, HBase is running in
    "stand-alone" mode and without a distributed file system. In this mode, and
    without further configuration, HBase and ZooKeeper data are stored on the
    local filesystem, in a path under the value configured for `hbase.tmp.dir`.
    This value is overridden from its default value of `/tmp` because many
    systems clean `/tmp` on a regular basis. Instead, it points to a path within
    this HBase installation directory.

    Running against the `LocalFileSystem`, as opposed to a distributed
    filesystem, runs the risk of data integrity issues and data loss. Normally
    HBase will refuse to run in such an environment. Setting
    `hbase.unsafe.stream.capability.enforce` to `false` overrides this behavior,
    permitting operation. This configuration is for the developer workstation
    only and __should not be used in production!__

    See also https://hbase.apache.org/book.html#standalone_dist
  -->
  <!-- Run in distributed mode -->
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <!-- HDFS directory where HBase stores its data; in distributed mode this must match fs.default.name in Hadoop's core-site.xml -->
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://master:9000/hbase</value>
  </property>
  <property>
    <name>hbase.tmp.dir</name>
    <value>/opt/hbase-2.4.9/tmp</value>
  </property>
  <!-- ZooKeeper quorum nodes -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>master,slave1,slave2</value>
  </property>
  <!-- ZooKeeper client port; default is 2181, must match your ZooKeeper cluster -->
  <property>
      <name>hbase.zookeeper.property.clientPort</name>
      <value>2181</value>
  </property>
  <!-- Where HBase keeps its ZooKeeper data; the default is under /tmp, which gets wiped on reboot -->
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/opt/hbase-2.4.9/zkdata</value>
  </property>
  <property>
    <name>hbase.unsafe.stream.capability.enforce</name>
    <value>false</value>
  </property>
  <property>
    <name>hbase.master.info.port</name>
    <value>60010</value>
  </property>
</configuration>
conf/regionservers
slave1
slave2
Copy the configured HBase to the other two servers
scp -r /opt/hbase-2.4.9 slave1:/opt
scp -r /opt/hbase-2.4.9 slave2:/opt
Start/stop HBase
# Start
start-hbase.sh

# Stop
stop-hbase.sh

# Start the Thrift service on port 9090
hbase-daemon.sh start thrift -p 9090
Check whether the startup succeeded

(screenshot)

Access the web UI

http://192.168.22.131:60010/master-status#userTables

(screenshot)
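
A quick functional check from the HBase shell (a sketch; table and column family names are arbitrary):

# Start the shell on master
hbase shell

# Inside the shell:
#   create 'smoke_test', 'cf'
#   put 'smoke_test', 'row1', 'cf:msg', 'hello'
#   scan 'smoke_test'
#   disable 'smoke_test'
#   drop 'smoke_test'
#   status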

Spark Installation

Download Spark

https://dlcdn.apache.org/spark/spark-3.2.1/spark-3.2.1-bin-hadoop3.2.tgz

Configure Spark

# Extract the archive
tar -xvf spark-3.2.1-bin-hadoop3.2.tgz -C /opt/
Configure environment variables
vim /etc/profile

export SPARK_HOME=/opt/spark-3.2.1-bin-hadoop3.2

export PATH=$JAVA_HOME/bin:$SPARK_HOME/bin:$PATH

# Apply
source /etc/profile
Copy conf/spark-env.sh.template to a new file: spark-env.sh
export JAVA_HOME=/usr/local/java/jdk1.8.0_221
export SPARK_MASTER_HOST=master
export SPARK_MASTER_PORT=7077
Copy conf/workers.template to a new file: workers
slave1
slave2
sbin/start-master.sh
# Change the web UI port to avoid a conflict
SPARK_MASTER_WEBUI_PORT=8085
Copy the configured Spark to the other two servers
scp -r /opt/spark-3.2.1-bin-hadoop3.2 slave1:/opt
scp -r /opt/spark-3.2.1-bin-hadoop3.2 slave2:/opt
Start/stop Spark
# Start
sbin/start-all.sh

# Stop
sbin/stop-all.sh
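
To confirm the standalone cluster accepts jobs, submit the bundled SparkPi example (a sketch; the examples jar name matches the Spark 3.2.1 / Scala 2.12 distribution, adjust if yours differs):

cd /opt/spark-3.2.1-bin-hadoop3.2

# Run SparkPi against the standalone master started above
bin/spark-submit \
  --master spark://master:7077 \
  --class org.apache.spark.examples.SparkPi \
  examples/jars/spark-examples_2.12-3.2.1.jar 100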