节点规划

| host  | 节点进程 |
|-------|----------|
| node1 | namenode,datanode,HRegionServer,NodeManager,DFSZKFailoverController |
| node2 | secondarynamenode,datanode,zookeeper,NodeManager,JournalNode,ResourceManager |
| node3 | namenode,datanode,HRegionServer,NodeManager,DFSZKFailoverController |
| node4 | zookeeper,datanode,HMaster,HRegionServer,NodeManager,JournalNode |
| node5 | zookeeper,datanode,HMaster,HRegionServer,NodeManager,JournalNode,ResourceManager |
基础环境搭建
配置host文件
192.168.234.100 node1
192.168.234.101 node2
192.168.234.102 node3
192.168.234.103 node4
192.168.234.104 node5
配置node1,node3 到 集群的免密登录
# Run this section on node1 AND on node3: both HA NameNode hosts need
# passwordless SSH to every node (required by the sshfence method in
# hdfs-site.xml and by the cluster start/stop scripts).
ssh-keygen -t rsa
for host in node1 node2 node3 node4 node5; do
  ssh-copy-id "$host"
done
jdk 安装
# JDK environment — append these exports to /etc/profile on node1 first.
export JAVA_HOME=/opt/jdk/jdk1.8.0_191
# NOTE(review): classic JDK8 setups use CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar;
# without '.' classes in the current directory are not found — confirm intent.
export CLASSPATH=$JAVA_HOME/lib/
export PATH=$PATH:$JAVA_HOME/bin

# Distribute the JDK and the updated profile to the rest of the cluster.
# (-r is only needed for the directory; /etc/profile is a plain file.)
for host in node2 node3 node4 node5; do
  scp -r /opt/jdk "$host":/opt/jdk
  scp /etc/profile "$host":/etc/profile
done
# Remember to run `source /etc/profile` (or re-login) on every node
# afterwards, otherwise JAVA_HOME/PATH are not picked up.
zookeeper 安装
# zoo.cfg — ZooKeeper 3.4.13 ensemble configuration.
# (Java properties format: comments must be on their own line; a trailing
#  '#' on a value line would become part of the value.)
# tickTime is the base time unit in ms; initLimit/syncLimit are counted in ticks.
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/opt/zookeeper/zookeeper-3.4.13/data
dataLogDir=/opt/zookeeper/zookeeper-3.4.13/logs
# Auto-purge old snapshots/txn logs: keep 500 snapshots, run every 24 hours.
autopurge.snapRetainCount=500
autopurge.purgeInterval=24
clientPort=2181
# Ensemble members: server.N=host:peerPort:leaderElectionPort.
# N must match the number written to <dataDir>/myid on that host.
server.1=node2:2888:3888
server.2=node4:2888:3888
server.3=node5:2888:3888
# Each ZooKeeper server needs a unique myid matching its server.N line in
# zoo.cfg. Run each command ON ITS OWN NODE ONLY — all three write the same
# path, so executing them in sequence on one host just overwrites the file.
echo "1" > /opt/zookeeper/zookeeper-3.4.13/data/myid
echo "2" > /opt/zookeeper/zookeeper-3.4.13/data/myid
echo "3" > /opt/zookeeper/zookeeper-3.4.13/data/myid
# ^ "1" on node2 (server.1), "2" on node4 (server.2), "3" on node5 (server.3).
hadoop 集群安装
在node1解压hadoop安装包到 /opt/hadoop/hadoop-2.7.7
hdfs-site.xml 配置
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>node1:9000</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>node3:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>node1:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>node3:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node2:8485;node4:8485;node5:8485/mycluster</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<!-- NOTE(review): root's home directory is /root, not /home/root; this must
     point at the key generated by ssh-keygen above or sshfence will fail. -->
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/hadoop/hadoop-2.7.7/data/journal/data</value>
</property>
<property>
<name