One-Click Hadoop Cluster Deployment

1. Background

Since I frequently need to set up Hadoop clusters, I wrote the following shell scripts to automate the deployment.

2. The scripts

2.1 With hostname mapping
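
The script takes three IP/hostname pairs as positional arguments and is meant to run as root on the first node, with jdk-8u65-linux-x64.tar.gz and hadoop-2.6.0-cdh5.14.0.tar.gz already sitting in root's home directory (the script extracts both from ~). A hypothetical invocation, assuming the script is saved as deploy_hadoop.sh (the script name and addresses are illustrative, not from the original post):

# arguments: ip1 hostname1 ip2 hostname2 ip3 hostname3
bash deploy_hadoop.sh 192.168.1.101 node1 192.168.1.102 node2 192.168.1.103 node3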

#!/bin/bash
# author : Cayon
# time : 2021-04-15
#1. Use a single, consistent user on every node
# Define variables: three (IP, hostname) pairs from the positional arguments
ip1=$1
hostname1=$2
ip2=$3
hostname2=$4
ip3=$5
hostname3=$6
#2. Create a common install directory
setUnifiedDirectory()
{
    mkdir -p /export/servers

    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to create the install directory!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mInstall directory created.\n\033[0m"
}
#3. Install the same JDK on every node
setJDK()
{
    cd ~
    echo "Extracting the JDK..."
    tar -zxvf jdk-8u65-linux-x64.tar.gz -C /export/servers/
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to extract the JDK!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mJDK extracted.\n\033[0m"
    cd /export/servers
    # Single quotes: write $PATH and $JAVA_HOME literally into /etc/profile
    # instead of expanding them now (JAVA_HOME is not set in this shell yet)
    echo 'export JAVA_HOME=/export/servers/jdk1.8.0_65
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile
    source /etc/profile
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to load environment variables!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mEnvironment variables loaded.\n\033[0m"

    java -version
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mJDK installation failed!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mJDK installation complete.\n\033[0m"
}
#4. Disable the firewall and SELinux
setFirewalld()
{
    systemctl stop firewalld.service
    systemctl disable firewalld.service
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to disable the firewall!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mFirewall disabled.\n\033[0m"
    # Set SELINUX=disabled wherever the line appears in the config
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to change the SELinux setting!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mSELinux disabled (takes effect after reboot).\n\033[0m"
}
setIP()
{
    echo -e "${ip1} ${hostname1} \n ${ip2} ${hostname2} \n ${ip3} ${hostname3} "  >> /etc/hosts
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to update /etc/hosts!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32m/etc/hosts updated.\n\033[0m"
    #echo -e "${hostname1} \n ${hostname2} \n ${hostname3} " >> /etc/hostname
    #if [ $? -ne 0 ] ; then
    #        echo -e "\033[0;31mFailed to change the hostname!\n\033[0m"
    #        exit 2
    #fi
    #echo -e "\033[0;32mHostname changed.\n\033[0m"
}
# Configure passwordless SSH
setSSH()
{
    ssh-keygen -t rsa 
    ssh-copy-id ${ip1}
    ssh-copy-id ${ip2}
    ssh-copy-id ${ip3}
}
# Install and configure Hadoop
installHadoop()
{
    cd ~
    ls -l | grep hadoop-2.6.0-cdh5.14.0.tar.gz
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;34mhadoop-2.6.0-cdh5.14.0.tar.gz not found\033[0m"
        # Without the tarball there is nothing to install, so bail out
        exit 2
    fi
    tar -zxvf hadoop-2.6.0-cdh5.14.0.tar.gz -C /export/servers/
    cd /export/servers/hadoop-2.6.0-cdh5.14.0/
    # Configure environment variables
    echo '#HADOOP_HOME
export HADOOP_HOME=/export/servers/hadoop-2.6.0-cdh5.14.0
export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile
    source /etc/profile
    sleep 1
    # Directory for Hadoop temporary data
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/tempDatas
    # Two directories for NameNode metadata (redundant copies)
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas1
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas2
    # Two directories for DataNode block storage
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas1
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas2
    # Directories for the NameNode edits log and SecondaryNameNode checkpoints
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/nn/edits
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/snn/name
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/snn/edits
    sleep 1.5
    # Configure Hadoop
    echo -e "\033[0;34mConfiguring Hadoop...\033[0m"
    sleep 1.5
    cd /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop
    # Edit hadoop-env.sh
    sed -i '25c export JAVA_HOME=/export/servers/jdk1.8.0_65' hadoop-env.sh
    # Edit mapred-env.sh
    sed -i '17c export JAVA_HOME=/export/servers/jdk1.8.0_65' mapred-env.sh
    # Edit yarn-env.sh
    sed -i '26c  JAVA_HOME=/export/servers/jdk1.8.0_65' yarn-env.sh
 
        mv mapred-site.xml.template mapred-site.xml
        echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<!--
  Licensed under the Apache License, Version 2.0 (the \"License\");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an \"AS IS\" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!--Default filesystem: the NameNode address-->
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://${hostname1}:8020</value>
</property>
<!--Hadoop temporary/working directory-->
<property>
    <name>hadoop.tmp.dir</name>
    <value>/export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/tempDatas</value>
</property>
<property>
    <name>io.file.buffer.size</name>
    <value>4096</value>
</property>
<property>
    <name>fs.trash.interval</name>
    <value>10080</value>
</property>
</configuration>" > core-site.xml

    echo "<?xml version=\"1.0\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<!--
  Licensed under the Apache License, Version 2.0 (the \"License\");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an \"AS IS\" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<!--Enable uber mode (small jobs run inside the ApplicationMaster JVM)-->
<property>
    <name>mapreduce.job.ubertask.enable</name>
    <value>true</value>
</property>
<!--Run MapReduce jobs on YARN-->
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>

</configuration>" > mapred-site.xml


    echo "<?xml version=\"1.0\"?>
<!--
  Licensed under the Apache License, Version 2.0 (the \"License\");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an \"AS IS\" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->

<!--Run the ResourceManager on the third machine-->
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>${hostname3}</value>
</property>
<!--NodeManager auxiliary service needed for the MapReduce shuffle-->
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>    
    <name>yarn.nodemanager.resource.memory-mb</name>    
    <value>8192</value>
</property>
<property>  
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
</property>
<property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>2.1</value>
</property>

</configuration>" > yarn-site.xml

     echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<!--
  Licensed under the Apache License, Version 2.0 (the \"License\");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an \"AS IS\" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!--HTTP address of the SecondaryNameNode-->
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>${hostname1}:50090</value>
</property>
<!--HTTP (web UI) address of the NameNode-->
<property>
    <name>dfs.namenode.http-address</name>
    <value>${hostname1}:50070</value>
</property>
<!--Local directories for NameNode metadata (two copies for redundancy)-->
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas1,file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas2</value>
</property>
<!--Local directories where the DataNode stores HDFS block data-->
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas1,file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas2</value>
</property>
<!--Directories for the edits log and SecondaryNameNode checkpoints-->
<property>
    <name>dfs.namenode.edits.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/nn/edits</value>
</property>
<property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/snn/name</value>
</property>
<property>
    <name>dfs.namenode.checkpoint.edits.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/snn/edits</value>
</property>
<!--Number of block replicas-->
<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>
<!--Disable HDFS permission checking-->
<property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
</property>
<!--Block size in bytes (128 MB)-->
<property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
</property>

</configuration>" > hdfs-site.xml

    echo -e "${hostname1}\n${hostname2}\n${hostname3}" >slaves
#        cd /export/servers/hadoop-2.6.0-cdh5.14.0
#        echo -e "\033[0;34mFormatting the NameNode...\033[0m"
#    hdfs namenode -format
#        echo -e "\033[0;34mStarting the NameNode and DataNode daemons...\033[0m"
#        sbin/start-dfs.sh        # start HDFS
#        echo -e "\033[0;34mStarting YARN...\033[0m"
#        sbin/start-yarn.sh       # start YARN
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mHadoop configuration failed\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mHadoop configured. After formatting the NameNode and starting the daemons, check http://${hostname1}:50070 and http://${hostname3}:8088/cluster\033[0m"
        sleep 1.5
}


main()
{       echo "Choose an option:"
        echo "1. Use a unified user"
        echo "2. Create the common install directory"
        echo "3. Install the JDK"
        echo "4. Disable the firewall and SELinux"
        echo "5. Configure the IP/hostname mappings"
        echo "6. Configure passwordless SSH login"
        echo "7. Configure clock synchronization"
        echo "8. Deploy distributed Hadoop"
        echo "q. Quit this script"
        read a
        case $a in
                2) setUnifiedDirectory;;
                3) setJDK;;
                4) setFirewalld;;
                5) setIP;;
                6) setSSH;;
                8) installHadoop;;
                q) exit 0;;
                *) echo -e "\033[0;31mNo such option\033[0m";;
        esac
}
main
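
Formatting and daemon startup are deliberately left commented out in installHadoop; they can be run by hand on the first node (${hostname1}) afterwards. A minimal sketch mirroring the commented block above (the jps check at the end is an extra verification step, not part of the original script):

cd /export/servers/hadoop-2.6.0-cdh5.14.0
bin/hdfs namenode -format    # first run only: initializes the NameNode metadata
sbin/start-dfs.sh            # starts NameNode, DataNodes, SecondaryNameNode
sbin/start-yarn.sh           # starts ResourceManager and NodeManagers
jps                          # each daemon should appear in the process list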
 

2.2 Without hostname mapping
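
This variant takes three IP addresses as positional arguments, writes them directly into the config files in place of hostnames, and scp's the JDK, Hadoop, and /etc/profile to the other two nodes. A hypothetical invocation, assuming it is saved as deploy_hadoop_ip.sh (name and addresses are illustrative):

bash deploy_hadoop_ip.sh 192.168.1.101 192.168.1.102 192.168.1.103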

#!/bin/bash
# author : Cayon
# time : 2021-04-15
#1. Use a single, consistent user on every node
# Define variables: three node IPs from the positional arguments
ip1=$1
ip2=$2
ip3=$3
#2. Create a common install directory
setUnifiedDirectory()
{
    mkdir -p /export/servers

    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to create the install directory!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mInstall directory created.\n\033[0m"
}
#3. Install the same JDK on every node
setJDK()
{
    cd ~
    echo "Extracting the JDK..."
    tar -zxvf jdk-8u65-linux-x64.tar.gz -C /export/servers/
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to extract the JDK!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mJDK extracted.\n\033[0m"
    cd /export/servers
    # Single quotes: write $PATH and $JAVA_HOME literally into /etc/profile
    # instead of expanding them now (JAVA_HOME is not set in this shell yet)
    echo 'export JAVA_HOME=/export/servers/jdk1.8.0_65
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile
    source /etc/profile
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to load environment variables!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mEnvironment variables loaded.\n\033[0m"

    java -version
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mJDK installation failed!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mJDK installation complete.\n\033[0m"
        scp -r /export/servers/jdk1.8.0_65 ${ip2}:/export/servers/
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to copy the JDK to ${ip2}!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mJDK copied to ${ip2}.\n\033[0m"
        scp -r /export/servers/jdk1.8.0_65 ${ip3}:/export/servers/
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to copy the JDK to ${ip3}!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mJDK copied to ${ip3}.\n\033[0m"
}
#4. Disable the firewall and SELinux
setFirewalld()
{
    systemctl stop firewalld.service
    systemctl disable firewalld.service
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to disable the firewall!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mFirewall disabled.\n\033[0m"
    # Set SELINUX=disabled wherever the line appears in the config
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
    if [ $? -ne 0 ] ; then
        echo -e "\033[0;31mFailed to change the SELinux setting!\n\033[0m"
        exit 2
    fi
    echo -e "\033[0;32mSELinux disabled (takes effect after reboot).\n\033[0m"
}
#setIP()    # not needed in this variant; kept for reference
#{
#    echo -e "${ip1} ${hostname1} \n ${ip2} ${hostname2} \n ${ip3} ${hostname3} "  >> /etc/hosts
#    if [ $? -ne 0 ] ; then
#        echo -e "\033[0;31mFailed to update /etc/hosts!\n\033[0m"
#        exit 2
#    fi
#    echo -e "\033[0;32m/etc/hosts updated.\n\033[0m"
#    #echo -e "${hostname1} \n ${hostname2} \n ${hostname3} " >> /etc/hostname
#    #if [ $? -ne 0 ] ; then
#    #        echo -e "\033[0;31mFailed to change the hostname!\n\033[0m"
#    #        exit 2
#    #fi
#    #echo -e "\033[0;32mHostname changed.\n\033[0m"
#}
# Configure passwordless SSH
setSSH()
{
    ssh-keygen -t rsa 
    ssh-copy-id ${ip1}
    ssh-copy-id ${ip2}
    ssh-copy-id ${ip3}
}
# Install and configure Hadoop
installHadoop()
{
    cd ~
        ls -l | grep hadoop-2.6.0-cdh5.14.0.tar.gz
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;34mhadoop-2.6.0-cdh5.14.0.tar.gz not found\033[0m"
                # Without the tarball there is nothing to install, so bail out
                exit 2
        fi
        tar -zxvf hadoop-2.6.0-cdh5.14.0.tar.gz -C /export/servers/
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to extract the Hadoop package!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mHadoop package extracted.\n\033[0m"
        cd /export/servers/hadoop-2.6.0-cdh5.14.0/
        cd /export/servers/hadoop-2.6.0-cdh5.14.0/
       # Configure environment variables
       echo '#HADOOP_HOME
export HADOOP_HOME=/export/servers/hadoop-2.6.0-cdh5.14.0
export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile
        source /etc/profile
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to load environment variables!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mEnvironment variables loaded.\n\033[0m"
        sleep 1
        # Directory for Hadoop temporary data
        mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/tempDatas
        # Two directories for NameNode metadata (redundant copies)
        mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas1
        mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas2
        # Two directories for DataNode block storage
        mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas1
        mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas2
        # Directories for the NameNode edits log and SecondaryNameNode checkpoints
        mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/nn/edits
        mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/snn/name
        mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/snn/edits
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to create the Hadoop data directories!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mHadoop data directories created.\n\033[0m"
 # Configure Hadoop
        echo -e "\033[0;34mConfiguring Hadoop...\033[0m"
        sleep 1.5
        cd /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop
        # Edit hadoop-env.sh
        sed -i '25c export JAVA_HOME=/export/servers/jdk1.8.0_65' hadoop-env.sh
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to configure hadoop-env.sh!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mhadoop-env.sh configured.\n\033[0m"
        # Edit mapred-env.sh
        sed -i '17c export JAVA_HOME=/export/servers/jdk1.8.0_65'  mapred-env.sh
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to configure mapred-env.sh!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mmapred-env.sh configured.\n\033[0m"
        # Edit yarn-env.sh
        sed -i '26c  JAVA_HOME=/export/servers/jdk1.8.0_65' yarn-env.sh
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to configure yarn-env.sh!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32myarn-env.sh configured.\n\033[0m"
 
        mv mapred-site.xml.template mapred-site.xml
        echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<!--
  Licensed under the Apache License, Version 2.0 (the \"License\");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an \"AS IS\" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!--Default filesystem: the NameNode address-->
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://${ip1}:8020</value>
</property>
<!--Hadoop temporary/working directory-->
<property>
    <name>hadoop.tmp.dir</name>
    <value>/export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/tempDatas</value>
</property>
<property>
    <name>io.file.buffer.size</name>
    <value>4096</value>
</property>
<property>
    <name>fs.trash.interval</name>
    <value>10080</value>
</property>
</configuration>" > core-site.xml
            if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to write core-site.xml!\n\033[0m"
                exit 2
            fi
            echo -e "\033[0;32mcore-site.xml configured.\n\033[0m"
 

    echo "<?xml version=\"1.0\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<!--
  Licensed under the Apache License, Version 2.0 (the \"License\");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an \"AS IS\" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<!--Enable uber mode (small jobs run inside the ApplicationMaster JVM)-->
<property>
    <name>mapreduce.job.ubertask.enable</name>
    <value>true</value>
</property>
<!--Run MapReduce jobs on YARN-->
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>

</configuration>" > mapred-site.xml

            if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to write mapred-site.xml!\n\033[0m"
                exit 2
            fi
            echo -e "\033[0;32mmapred-site.xml configured.\n\033[0m"

    echo "<?xml version=\"1.0\"?>
<!--
  Licensed under the Apache License, Version 2.0 (the \"License\");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an \"AS IS\" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->

<!--Run the ResourceManager on the third machine-->
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>${ip3}</value>
</property>
<!--NodeManager auxiliary service needed for the MapReduce shuffle-->
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>    
    <name>yarn.nodemanager.resource.memory-mb</name>    
    <value>8192</value>
</property>
<property>  
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
</property>
<property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>2.1</value>
</property>

</configuration>" > yarn-site.xml

                if [ $? -ne 0 ] ; then
                    echo -e "\033[0;31mFailed to write yarn-site.xml!\n\033[0m"
                    exit 2
                fi
                echo -e "\033[0;32myarn-site.xml configured.\n\033[0m"

     echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<!--
  Licensed under the Apache License, Version 2.0 (the \"License\");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an \"AS IS\" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!--HTTP address of the SecondaryNameNode-->
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>${ip1}:50090</value>
</property>
<!--HTTP (web UI) address of the NameNode-->
<property>
    <name>dfs.namenode.http-address</name>
    <value>${ip1}:50070</value>
</property>
<!--Local directories for NameNode metadata (two copies for redundancy)-->
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas1,file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas2</value>
</property>
<!--Local directories where the DataNode stores HDFS block data-->
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas1,file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas2</value>
</property>
<!--Directories for the edits log and SecondaryNameNode checkpoints-->
<property>
    <name>dfs.namenode.edits.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/nn/edits</value>
</property>
<property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/snn/name</value>
</property>
<property>
    <name>dfs.namenode.checkpoint.edits.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/snn/edits</value>
</property>
<!--Number of block replicas-->
<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>
<!--Disable HDFS permission checking-->
<property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
</property>
<!--Block size in bytes (128 MB)-->
<property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
</property>

</configuration>" > hdfs-site.xml
                    if [ $? -ne 0 ] ; then
                            echo -e "\033[0;31mFailed to write hdfs-site.xml!\n\033[0m"
                            exit 2
                    fi
                    echo -e "\033[0;32mhdfs-site.xml configured.\n\033[0m"

    echo -e "${ip1}\n${ip2}\n${ip3}" >slaves
                    if [ $? -ne 0 ] ; then
                            echo -e "\033[0;31mFailed to write slaves!\n\033[0m"
                            exit 2
                    fi
                    echo -e "\033[0;32mslaves configured.\n\033[0m"
                    
        scp -r /export/servers/hadoop-2.6.0-cdh5.14.0 ${ip2}:/export/servers/
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to copy Hadoop to ${ip2}!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mHadoop copied to ${ip2}.\n\033[0m"
        scp -r /export/servers/hadoop-2.6.0-cdh5.14.0 ${ip3}:/export/servers/
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to copy Hadoop to ${ip3}!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mHadoop copied to ${ip3}.\n\033[0m"
        
        
        # Copy the environment variables into /etc/profile on the other nodes
        # so they are actually picked up at login
        scp /etc/profile ${ip2}:/etc/profile
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to copy environment variables to ${ip2}!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mEnvironment variables copied to ${ip2}.\n\033[0m"
        scp /etc/profile ${ip3}:/etc/profile
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to copy environment variables to ${ip3}!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mEnvironment variables copied to ${ip3}.\n\033[0m"

#        cd /export/servers/hadoop-2.6.0-cdh5.14.0
#        echo -e "\033[0;34mFormatting the NameNode...\033[0m"
#    hdfs namenode -format
#        echo -e "\033[0;34mStarting the NameNode and DataNode daemons...\033[0m"
#        sbin/start-dfs.sh        # start HDFS
#        echo -e "\033[0;34mStarting YARN...\033[0m"
#        sbin/start-yarn.sh       # start YARN
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mHadoop configuration failed\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mHadoop configured successfully\033[0m"
        #echo -e "\033[0;32mHadoop configured and started; check http://${ip1}:50070 and http://${ip3}:8088/cluster\033[0m"
        sleep 1.5
}


main()
{       echo "Choose an option:"
        echo "1. Use a unified user"
        echo "2. Create the common install directory"
        echo "3. Install the JDK"
        echo "4. Disable the firewall and SELinux"
        echo "5. Configure passwordless SSH login"
        echo "6. Configure clock synchronization"
        echo "7. Deploy distributed Hadoop"
        echo "q. Quit this script"
        read a
        case $a in
                2) setUnifiedDirectory;;
                3) setJDK;;
                4) setFirewalld;;
                5) setSSH;;
                7) installHadoop;;
                q) exit 0;;
                *) echo -e "\033[0;31mNo such option\033[0m";;
        esac
}
main
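
After this variant finishes, the copies on ${ip2} and ${ip3} can be spot-checked over the passwordless SSH set up earlier. A hypothetical check, not part of the original script:

ssh ${ip2} 'ls /export/servers'                  # should list jdk1.8.0_65 and hadoop-2.6.0-cdh5.14.0
ssh ${ip3} '. /etc/profile && java -version'     # confirms the JDK and environment variables landed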
 

3. Single-machine deployment
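
This variant takes no arguments: it reads the machine's IP from the ens192 network interface (hard-coded in the script; adjust it to match your NIC) and uses the fixed hostname hadoop. A hypothetical invocation, assuming it is saved as deploy_hadoop_single.sh:

bash deploy_hadoop_single.sh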

#!/bin/bash
# bash rather than sh: the script relies on echo -e
#1. Use a single, consistent user
#2. Create a common install directory
setUnifiedDirectory()
{
    mkdir -p /export/servers

 if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to create /export/servers!\n\033[0m"
                exit 2
        fi
        echo -e "\033[0;32m/export/servers created.\n\033[0m"
}
#3. Install the JDK
setJDK()
{
    cd ~
    tar -zxvf jdk-8u65-linux-x64.tar.gz -C /export/servers
    cd /export/servers
    # Single quotes: write $PATH and $JAVA_HOME literally into /etc/profile
    # instead of expanding them now (JAVA_HOME is not set in this shell yet)
    echo 'export JAVA_HOME=/export/servers/jdk1.8.0_65
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile
    source /etc/profile
    java -version
     if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mJDK installation failed!\n\033[0m"
                exit 2
     fi
        echo -e "\033[0;32mJDK installation complete.\n\033[0m"
}
#4. Disable the firewall and SELinux
setFirewalld()
{
    systemctl stop firewalld.service
    systemctl disable firewalld.service
     if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to disable the firewall!\n\033[0m"
                exit 2
     fi
        echo -e "\033[0;32mFirewall disabled.\n\033[0m"
    # Set SELINUX=disabled wherever the line appears in the config
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
     if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to change the SELinux setting!\n\033[0m"
                exit 2
     fi
        echo -e "\033[0;32mSELinux disabled (takes effect after reboot).\n\033[0m"
}
setIP()
{
    # Map the machine IP to the hostname "hadoop" in /etc/hosts
    echo "$(ifconfig ens192 | grep 'inet' | awk '{print $2}' | awk 'NR==1') hadoop" >> /etc/hosts
    if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to update /etc/hosts!\n\033[0m"
                exit 2
    fi
        echo -e "\033[0;32m/etc/hosts updated.\n\033[0m"
    # Overwrite /etc/hostname rather than appending to it
    echo 'hadoop' > /etc/hostname
    if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mFailed to change the hostname!\n\033[0m"
                exit 2
    fi
        echo -e "\033[0;32mHostname changed.\n\033[0m"
}
# Configure passwordless SSH
setSSH()
{
    ssh-keygen -t rsa
    # Use command substitution so the detected IP is passed to ssh-copy-id
    ssh-copy-id "$(ifconfig ens192 | grep 'inet' | awk '{print $2}' | awk 'NR==1')"
}
# Install and configure Hadoop
installHadoop()
{
    cd ~
        ls -l | grep hadoop-2.6.0-cdh5.14.0.tar.gz
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;34mhadoop-2.6.0-cdh5.14.0.tar.gz not found\033[0m"
                # Without the tarball there is nothing to install, so bail out
                exit 2
        fi
        tar -zxvf hadoop-2.6.0-cdh5.14.0.tar.gz -C /export/servers/
        cd /export/servers/
       # Configure environment variables
       echo '#HADOOP_HOME
export HADOOP_HOME=/export/servers/hadoop-2.6.0-cdh5.14.0
export PATH=$PATH:$HADOOP_HOME/bin' >> /etc/profile
    source /etc/profile
    sleep 2
        # Directory for Hadoop temporary data
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/tempDatas
        # Two directories for NameNode metadata (redundant copies)
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas1
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas2
        # Two directories for DataNode block storage
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas1
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas2
        # Directories for the NameNode edits log and SecondaryNameNode checkpoints
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/nn/edits
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/snn/name
    mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/snn/edits
        sleep 1.5
 # Configure Hadoop
        echo -e "\033[0;34mConfiguring Hadoop...\033[0m"
        sleep 1.5
        cd /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop
        # Edit hadoop-env.sh
        sed -i '25c export JAVA_HOME=/export/servers/jdk1.8.0_65' hadoop-env.sh
        # Edit mapred-env.sh
        echo 'export JAVA_HOME=/export/servers/jdk1.8.0_65' >> mapred-env.sh
        # Edit yarn-env.sh
        sed -i '26c export JAVA_HOME=/export/servers/jdk1.8.0_65' yarn-env.sh
 
        mv mapred-site.xml.template mapred-site.xml
        echo '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!--Default filesystem: the NameNode address-->
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop:8020</value>
</property>
<!--Hadoop temporary/working directory-->
<property>
    <name>hadoop.tmp.dir</name>
    <value>/export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/tempDatas</value>
</property>
<property>
    <name>io.file.buffer.size</name>
    <value>4096</value>
</property>
<property>
    <name>fs.trash.interval</name>
    <value>10080</value>
</property>
</configuration>' > core-site.xml

    echo '<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<!--Enable uber mode (small jobs run inside the ApplicationMaster JVM)-->
<property>
    <name>mapreduce.job.ubertask.enable</name>
    <value>true</value>
</property>
<!--Run MapReduce jobs on YARN-->
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>

</configuration>' > mapred-site.xml


    echo '<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->

<!--Run the ResourceManager on this machine-->
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop</value>
</property>
<!--NodeManager auxiliary service needed for the MapReduce shuffle-->
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>    
    <name>yarn.nodemanager.resource.memory-mb</name>    
    <value>8192</value>
</property>
<property>  
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
</property>
<property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>2.1</value>
</property>

</configuration>' > yarn-site.xml

     echo '<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!--HTTP address of the SecondaryNameNode-->
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>hadoop:50090</value>
</property>
<!--HTTP (web UI) address of the NameNode-->
<property>
    <name>dfs.namenode.http-address</name>
    <value>hadoop:50070</value>
</property>
<!--Local directories for NameNode metadata (two copies for redundancy)-->
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas1,file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas2</value>
</property>
<!--Local directories where the DataNode stores HDFS block data-->
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas1,file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas2</value>
</property>
<!--Directories for the edits log and SecondaryNameNode checkpoints-->
<property>
    <name>dfs.namenode.edits.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/nn/edits</value>
</property>
<property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/snn/name</value>
</property>
<property>
    <name>dfs.namenode.checkpoint.edits.dir</name>
    <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/snn/edits</value>
</property>
<!--Number of block replicas (1 is enough on a single node)-->
<property>
    <name>dfs.replication</name>
    <value>1</value>
</property>
<!--Disable HDFS permission checking-->
<property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
</property>
<!--Block size in bytes (128 MB)-->
<property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
</property>

</configuration>' > hdfs-site.xml

    echo 'hadoop' >slaves
#        cd /export/servers/hadoop-2.6.0-cdh5.14.0
#        echo -e "\033[0;34mFormatting the NameNode...\033[0m"
#    hdfs namenode -format
#        echo -e "\033[0;34mStarting the NameNode and DataNode daemons...\033[0m"
#        sbin/start-dfs.sh        # start HDFS
#        echo -e "\033[0;34mStarting YARN...\033[0m"
#        sbin/start-yarn.sh       # start YARN
        if [ $? -ne 0 ] ; then
                echo -e "\033[0;31mHadoop configuration failed\033[0m"
                exit 2
        fi
        echo -e "\033[0;32mHadoop configured. After formatting the NameNode and starting the daemons, check http://hadoop:50070 and http://hadoop:8088/cluster\033[0m"
        sleep 1.5
}


main()
{       echo "Choose an option:"
        echo "1. Use a unified user"
        echo "2. Create the common install directory"
        echo "3. Install the JDK"
        echo "4. Disable the firewall and SELinux"
        echo "5. Configure the IP/hostname mapping"
        echo "6. Configure passwordless SSH login"
        echo "7. Configure clock synchronization"
        echo "8. Deploy Hadoop"
        echo "q. Quit this script"
        read a
        case $a in
                2) setUnifiedDirectory;;
                3) setJDK;;
                4) setFirewalld;;
                5) setIP;;
                6) setSSH;;
                8) installHadoop;;
                q) exit 0;;
                *) echo -e "\033[0;31mNo such option\033[0m";;
        esac
}
main
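
Startup is again left commented out. Once the script finishes (and after a reboot, so the hostname and SELinux changes take effect), a minimal smoke test might look like this (the hdfs dfs commands are extra verification, not part of the original script):

cd /export/servers/hadoop-2.6.0-cdh5.14.0
bin/hdfs namenode -format          # first run only
sbin/start-dfs.sh
sbin/start-yarn.sh
bin/hdfs dfs -mkdir /test          # create a test directory in HDFS
bin/hdfs dfs -ls /                 # should list /test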
 
