Planning
10.9.236.150 hadoop0 NameNode (active) & DataNode & JournalNode & ZKFC
10.9.236.151 hadoop1 NameNode (standby) & DataNode & JournalNode & ZKFC
10.9.236.152 hadoop2 DataNode & JournalNode
10.9.236.153 zk1 ZooKeeper
10.9.236.154 zk2 ZooKeeper
10.9.236.155 zk3 ZooKeeper
Set up an extraction alias
echo "alias utar='tar -zxvf'">>/etc/profile
. /etc/profile
Configure the Java environment variables
cat >> /etc/profile << EOF
export JAVA_HOME=/root/jdk
export CLASSPATH=\$JAVA_HOME/lib
export PATH=\$PATH:\$JAVA_HOME/bin
EOF
. /etc/profile
Create the jdk symlink
ln -s jdk1.8.0_281/ jdk
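Quick check that the JDK is picked up:
java -version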
1 IP-to-hostname mapping
cat >> /etc/hosts << EOF
10.9.236.150 hadoop0
10.9.236.151 hadoop1
10.9.236.152 hadoop2
10.9.236.153 zk1
10.9.236.154 zk2
10.9.236.155 zk3
EOF
Install ZooKeeper
Extract ZooKeeper
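The extraction command isn't shown in the notes; assuming the tarball carries the same name as the directory it unpacks to, with the utar alias from above:
utar apache-zookeeper-3.6.2-bin.tar.gz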
Create a symlink
ln -s apache-zookeeper-3.6.2-bin zk
Create the zk data directory:
mkdir zkdata
On 10.9.236.153, 10.9.236.154, and 10.9.236.155 respectively, create a myid file in the zk data directory containing that node's id:
echo "1">zkdata/myid    # on zk1 (10.9.236.153)
echo "2">zkdata/myid    # on zk2 (10.9.236.154)
echo "3">zkdata/myid    # on zk3 (10.9.236.155)
Create zoo.cfg in the zk data directory (use zk/conf/zoo_sample.cfg as a template):
cp apache-zookeeper-3.6.2-bin/conf/zoo_sample.cfg zkdata/zoo.cfg
On each of the three zk hosts (10.9.236.153, 10.9.236.154, 10.9.236.155), write /root/zkdata/zoo.cfg with a heredoc that begins cat > /root/zkdata/zoo.cfg << EOF and whose body starts with tickTime=2000; a full example follows.
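The heredoc bodies are cut off above after tickTime=2000. A minimal per-host sketch, assuming the client ports 3001/4001/5001 implied by the ha.zookeeper.quorum value set later, stock initLimit/syncLimit values, and hypothetical peer/election ports:
# on zk1 (10.9.236.153); use clientPort=4001 on zk2 and clientPort=5001 on zk3
cat > /root/zkdata/zoo.cfg << EOF
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/root/zkdata
clientPort=3001
server.1=zk1:3002:3003
server.2=zk2:4002:4003
server.3=zk3:5002:5003
EOF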
Start zk on each node
/root/zk/bin/zkServer.sh start /root/zkdata/zoo.cfg
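To confirm the ensemble formed, check each node's role; one should report Mode: leader and the other two Mode: follower:
/root/zk/bin/zkServer.sh status /root/zkdata/zoo.cfg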
2 Configure passwordless SSH login (not needed on the zookeeper nodes)
ssh-keygen -t rsa
ssh-copy-id root@hadoop1
ssh-copy-id root@hadoop2
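start-dfs.sh also sshes from hadoop0 to itself, so it can help to copy the key to the local host as well (an extra step, not strictly required):
ssh-copy-id root@hadoop0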
3 Install dependencies (psmisc provides fuser, which the sshfence fencing method needs)
yum install psmisc -y
4 Extract Hadoop
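Again the command isn't shown; assuming the stock Apache tarball name, with the utar alias from above:
utar hadoop-2.10.1.tar.gz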
5 Configure the Hadoop environment variables
cat >> /etc/profile << EOF
export HADOOP_HOME=/root/hadoop-2.10.1
export PATH=\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin
EOF
. /etc/profile
6 Edit the Hadoop configuration
hadoop-env.sh
sed -i 's#\${JAVA_HOME}#/root/jdk#g' hadoop-2.10.1/etc/hadoop/hadoop-env.sh
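To confirm the substitution took effect, the export line should now point at /root/jdk:
grep 'export JAVA_HOME' hadoop-2.10.1/etc/hadoop/hadoop-env.sh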
core-site.xml
Remove the empty configuration block:
sed -i s#\</configuration\>##g hadoop-2.10.1/etc/hadoop/core-site.xml
sed -i s#\<configuration\>##g hadoop-2.10.1/etc/hadoop/core-site.xml
Append the configuration:
cat >> hadoop-2.10.1/etc/hadoop/core-site.xml << EOF
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/root/hadoop-2.10.1/data</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>zk1:3001,zk2:4001,zk3:5001</value>
  </property>
</configuration>
EOF
hdfs-site.xml
Remove the empty configuration block:
sed -i s#\</configuration\>##g hadoop-2.10.1/etc/hadoop/hdfs-site.xml
sed -i s#\<configuration\>##g hadoop-2.10.1/etc/hadoop/hdfs-site.xml
Append the configuration:
cat >> hadoop-2.10.1/etc/hadoop/hdfs-site.xml << EOF
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>ns</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.ns</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.ns.nn1</name>
    <value>hadoop0:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.ns.nn1</name>
    <value>hadoop0:50070</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.ns.nn2</name>
    <value>hadoop1:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.ns.nn2</name>
    <value>hadoop1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop0:8485;hadoop1:8485;hadoop2:8485/ns</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/root/journal</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.ns</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
      sshfence
      shell(true)
    </value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
</configuration>
EOF
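A quick sanity check that the XML parses and the HA keys are picked up (hdfs getconf reads the effective configuration):
hdfs getconf -confKey dfs.nameservices
hdfs getconf -confKey dfs.ha.namenodes.ns
These should print ns and nn1,nn2 respectively.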
slaves (use > rather than >> so the default localhost entry is replaced)
cat > hadoop-2.10.1/etc/hadoop/slaves << EOF
hadoop0
hadoop1
hadoop2
EOF
Sync the configuration to the other nodes
scp hadoop-2.10.1/etc/hadoop/* root@hadoop1:/root/hadoop-2.10.1/etc/hadoop/
scp hadoop-2.10.1/etc/hadoop/* root@hadoop2:/root/hadoop-2.10.1/etc/hadoop/
Start the cluster
On one of the two namenodes, initialize the HA state in ZooKeeper:
hdfs zkfc -formatZK
Start the journalnode daemon on each of hadoop0, hadoop1, and hadoop2:
/root/hadoop-2.10.1/sbin/hadoop-daemon.sh start journalnode
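jps on each of hadoop0, hadoop1, and hadoop2 should now list a JournalNode process:
jps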
Format the namenode on the node that will become active:
hdfs namenode -format
Start HDFS from the active node (start-dfs.sh brings up the namenode, datanodes, and ZKFC daemons):
start-dfs.sh
On the standby node, bootstrap its namenode metadata from the active one:
hdfs namenode -bootstrapStandby
Start the standby namenode:
hadoop-daemon.sh start namenode
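To verify the HA wiring, query each namenode's state using the nn1/nn2 IDs from hdfs-site.xml; one should report active and the other standby:
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2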