快速配置hadoop3.1.3

安装zookeeper(略)

关闭防火墙

systemctl stop firewalld
systemctl disable firewalld
chkconfig iptables off

每台机器生成公钥和私钥,并分发到所有节点(node01、node02、node03 全部都要执行)

ssh-keygen -t rsa

ssh-copy-id node01
ssh-copy-id node02
ssh-copy-id node03

安装jdk

查看自带的jdk

rpm -qa | grep openjdk

卸载自带的openjdk

rpm -e --nodeps java-1.8.0-openjdk-headless-1.8.0.161-2.b14.el7.x86_64
rpm -e --nodeps java-1.7.0-openjdk-1.7.0.171-2.6.13.2.el7.x86_64
rpm -e --nodeps java-1.8.0-openjdk-1.8.0.161-2.b14.el7.x86_64
rpm -e --nodeps java-1.7.0-openjdk-headless-1.7.0.171-2.6.13.2.el7.x86_64

jdk8下载地址

解压jdk

tar -zxvf jdk-8u212-linux-x64.tar.gz -C /opts/
mv /opts/jdk1.8.0_212 /opts/jdk

配置jdk环境变量

export JAVA_HOME=/opts/jdk
export PATH=$PATH:$JAVA_HOME/bin

保存后

source /etc/profile

分发jdk

cd /opts/jdk/
scp -r ../jdk/ node02:$PWD
scp -r ../jdk/ node03:$PWD

也可以这样配置环境变量

vim /etc/profile.d/my_env.sh

# JAVA_HOME
export JAVA_HOME=/opts/jdk
export PATH=$PATH:$JAVA_HOME/bin
# HADOOP_HOME
export HADOOP_HOME=/opts/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin

安装hadoop3.1.3

hadoop下载地址

解压

tar -zxvf hadoop-3.1.3.tar.gz -C /opts/
mv /opts/hadoop-3.1.3 /opts/hadoop

配置环境变量

export HADOOP_HOME=/opts/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin

保存后
source /etc/profile

分发Hadoop

cd /opts/hadoop/
scp -r ../hadoop/ node02:$PWD
scp -r ../hadoop/ node03:$PWD

配置Hadoop参数

配置文件所在目录

cd /opts/hadoop/etc/hadoop

core-site.xml

<configuration>
<!--	NameNode的位置	-->
<property>
	<name>fs.defaultFS</name>
	<value>hdfs://node01:8020</value>
	</property>
<!--	hadoop数据的目录	--> 
<property>
	<name>hadoop.tmp.dir</name>
	<value>/opts/hadoop/data</value>  
	</property>
<!--	网页默认登录账号	-->
<!-- <property>
	<name>hadoop.http.staticuser.user</name>
	<value>root</value>
	</property> -->
</configuration>

hdfs-site.xml

<configuration>
<!--	nn web端访问地址	-->
<property>
	<name>dfs.namenode.http-address</name>
	<value>node01:9870</value>
	</property> 
<!--	2nn web端访问地址	-->
<property>
	<name>dfs.namenode.secondary.http-address</name>
	<value>node02:9868</value>
	</property>
</configuration>

yarn-site.xml

<configuration>
<!-- Site specific YARN configuration properties -->
<!--	指定mr走shuffle	-->
<property>
	<name>yarn.nodemanager.aux-services</name>
	<value>mapreduce_shuffle</value>
	</property>
<!--	指定resourcemaneger的地址	--> 
<property>
	<name>yarn.resourcemanager.hostname</name>
	<value>node03</value>
	</property>
<!--	环境变量的继承	-->
<property>
	<name>yarn.nodemanager.env-whitelist</name>
<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
	</property>
</configuration>

mapred-site.xml

<configuration>
<!--	指定mr运行在yarn上	-->
<property>
	<name>mapreduce.framework.name</name>
	<value>yarn</value>
	</property>
</configuration>

workers

node01
node02
node03

分发配置文件

scp -r /opts/hadoop/etc/hadoop/ node02:/opts/hadoop/etc/
scp -r /opts/hadoop/etc/hadoop/ node03:/opts/hadoop/etc/

启动集群

只有第一次启动需要在NameNode的机器上格式化

hdfs namenode -format

正常启动集群

#!/bin/bash
# cluster.sh — start/stop helper for a 3-node (node01..node03) ZooKeeper/Kafka/Hadoop cluster.
# Usage: sh cluster.sh <app> <state>   e.g. sh cluster.sh hadoop start
source /etc/profile

# File that collects nohup output of remotely started daemons (see kafka start).
start_log=/tmp/start_log
echo "recording start log in ${start_log}"
# Expected to come from /etc/profile; uncomment to hard-code the install paths.
# ZOOKEEPER_HOME="/opt/apache-zookeeper-3.5.10-bin"
# HADOOP_HOME="/opt/hadoop-3.1.3"
RED_COLOR='\E[1;31m'   # red, for error messages (printed via echo -e)
RES='\E[0m'            # reset terminal colour

# Print the Java process list of every node.
# NOTE: this function shadows the local `jps` command name, but it only ever
# runs the real `jps` binary on the remote hosts, so that is harmless.
jps(){
	for i in node01 node02 node03
	do
		echo "======== $i ========"
		ssh "$i" "jps"
	done
}

# Run `zkServer.sh <action>` on every node.
# $1 - action: start | stop | status; anything else (including an empty
#      argument, which used to make the old `[ $1 = ... -o ... ]` test blow
#      up with "unary operator expected") prints an error and exits 100.
zookeeper(){
    if [[ "$1" == "start" || "$1" == "stop" || "$1" == "status" ]]; then
        for i in node01 node02 node03
        do
        echo  "======== $i $1ing zookeeper ========"
            ssh "$i" "${ZOOKEEPER_HOME}/bin/zkServer.sh $1"
        done
    else
        echo -e  "${RED_COLOR}Please enter a correct option ! ! ! (eg:start )${RES}"
        exit 100
    fi
}

# Start or stop the Kafka broker on every node.
# $1 - "start" or "stop"; anything else prints an error and exits 100.
kafka(){
    case "$1" in
    "start")
        for i in node01 node02 node03
        do
        echo  "======== $i $1ing kafka ========"
            ssh "$i" "nohup sh ${KAFKA_HOME}/bin/kafka-server-start.sh ${KAFKA_HOME}/config/server.properties >${start_log} 2>&1 &"
        done
        ;;
    "stop")
        for i in node01 node02 node03
        do
        echo  "======== $i $1ing kafka ========"
            # BUG FIX: the old `ssh $i ps -ef | grep kafka | awk ... | sh`
            # piped the REMOTE process list into a LOCAL shell, so it killed
            # local PIDs that happened to match remote process numbers.
            # Run the whole kill on the remote host instead; the '[k]afka'
            # pattern keeps pgrep from matching this command line itself.
            ssh "$i" "pgrep -f '[k]afka' | xargs -r kill -9"
        done
        ;;
    *)
        echo -e  "${RED_COLOR}Please enter a correct option ! ! ! (start or stop)${RES}"
        exit 100
        ;;
    esac
}

# Start or stop the Hadoop cluster: HDFS daemons from node01 (the NameNode
# host) and YARN daemons from node03 (the ResourceManager host).
# $1 - "start" or "stop"; anything else prints an error and exits 100.
hadoop(){
    case "$1" in
    "start")
        # typo fix: "clustar" -> "cluster"
        echo  "======== $1ing hadoop cluster ========"
        ssh node01 "${HADOOP_HOME}/sbin/start-dfs.sh"
        ssh node03 "${HADOOP_HOME}/sbin/start-yarn.sh"
        ;;
    "stop")
        echo  "======== $1ing hadoop cluster ========"
        ssh node01 "${HADOOP_HOME}/sbin/stop-dfs.sh"
        ssh node03 "${HADOOP_HOME}/sbin/stop-yarn.sh"
        ;;
    *)
        echo -e  "${RED_COLOR}Please enter a correct option ! ! ! (start or stop)${RES}"
        exit 100
        ;;
    esac
}

# app_state(){
#     echo
#     read -p "please chose start stop or else:    " state
# }

# Dispatch to the per-application helper.
# $1 - app: jps | zookeeper | zoo | kafka | hadoop | all
# $2 - state: start | stop (zookeeper also accepts status); ignored for jps
# Exits 100 on an unknown app, 2 on a bad state for "all".
operate_app(){
    app=$1
    state=$2
    case "$app" in
    "jps")
        jps
        ;;
    "zookeeper"|"zoo")   # merged the two identical arms
        zookeeper "$state"
        ;;
    "kafka")
        kafka "$state"
        ;;
    "hadoop")
        hadoop "$state"
        ;;
    "all")
        echo "$state"
        # quoted "$state" so an empty argument no longer breaks the [ ] test
        if [ "$state" = "start" ]; then
            # bring up dependencies first: zookeeper -> kafka -> hadoop
            zookeeper "$state"
            kafka "$state"
            hadoop "$state"
        elif [ "$state" = "stop" ]; then
            # tear down in the reverse order
            hadoop "$state"
            kafka "$state"
            zookeeper "$state"
        else exit 2
        fi
        ;;
    *)
        echo -e  "${RED_COLOR}Please enter a correct option ! ! ! ${RES}"
        exit 100
        ;;
    esac
}

echo "=============> sh cluster.sh  app  state <============="

app=$1
state=$2

# Quote the arguments so empty or whitespace-containing values reach
# operate_app intact instead of being dropped by word-splitting.
operate_app "$app" "$state"

 

mysql

rpm -Uvh http://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm
yum repolist enabled | grep "mysql.*-community.*"
yum -y install mysql-community-server
systemctl enable mysqld
systemctl start mysqld
mysql_secure_installation # 设置密码
  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值