[hadoop@server1 ~]$ cd hadoop
[hadoop@server1 hadoop]$ cd etc/hadoop/
[hadoop@server1 hadoop]$ vim hadoop-env.sh
# The java implementation to use.
export JAVA_HOME=/home/hadoop/java
[hadoop@server1 hadoop]$ cd etc/hadoop/
[hadoop@server1 hadoop]$ vim core-site.xml
<configuration><property><name>fs.defaultFS</name><value>hdfs://172.25.120.1:9000</value></property></configuration>
[hadoop@server1 hadoop]$ vim slaves
172.25.120.1
[hadoop@server1 hadoop]$ vim hdfs-site.xml
<configuration><property><name>dfs.replication</name><value>1</value></property></configuration>
[hadoop@server4 tmp]$ cd ~/zookeeper-3.4.9
[hadoop@server4 zookeeper-3.4.9]$ bin/zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /home/hadoop/zookeeper-3.4.9/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
4、查看所有节点信息
[hadoop@server2 zookeeper-3.4.9]$ bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /home/hadoop/zookeeper-3.4.9/bin/../conf/zoo.cfg
Mode: follower
[hadoop@server3 zookeeper-3.4.9]$ bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /home/hadoop/zookeeper-3.4.9/bin/../conf/zoo.cfg
Mode: follower
[hadoop@server4 zookeeper-3.4.9]$ bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /home/hadoop/zookeeper-3.4.9/bin/../conf/zoo.cfg
Mode: leader
5、leader(server4)测试
[hadoop@server4 zookeeper-3.4.9]$ bin/zkCli.sh
Connecting to localhost:2181
WATCHER::
WatchedEvent state:SyncConnected type:None path:null
[zk: localhost:2181(CONNECTED) 0] ls
[zk: localhost:2181(CONNECTED) 1] ls /
[zookeeper]
[zk: localhost:2181(CONNECTED) 2] ls /zookeeper
[quota]
[zk: localhost:2181(CONNECTED) 3] ls /zookeeper/quota
[]
[zk: localhost:2181(CONNECTED) 5] quit
Quitting...
六、Zookeeper高可用
1、配置hadoop
配置slaves
[hadoop@server1 ~]$ cd hadoop/etc/
[hadoop@server1 etc]$ vim hadoop/slaves
172.25.120.2
172.25.120.3
172.25.120.4
配置core-site.xml
[hadoop@server1 etc]$ vim hadoop/core-site.xml
<configuration><property><name>fs.defaultFS</name><value>hdfs://masters</value></property><property><name>ha.zookeeper.quorum</name><value>172.25.120.2:2181,172.25.120.3:2181,172.25.120.4:2181</value></property></configuration>
配置hdfs-site.xml
[hadoop@server1 etc]$ vim hadoop/hdfs-site.xml
<configuration><property><name>dfs.replication</name><value>3</value></property><property><name>dfs.nameservices</name><value>masters</value></property><property><name>dfs.ha.namenodes.masters</name><value>h1,h2</value></property><property><name>dfs.namenode.rpc-address.masters.h1</name><value>172.25.120.1:9000</value></property><property><name>dfs.namenode.http-address.masters.h1</name><value>172.25.120.1:50070</value></property><property><name>dfs.namenode.rpc-address.masters.h2</name><value>172.25.120.5:9000</value></property><property><name>dfs.namenode.http-address.masters.h2</name><value>172.25.120.5:50070</value></property><property><name>dfs.namenode.shared.edits.dir</name><value>qjournal://172.25.120.2:8485;172.25.120.3:8485;172.25.120.4:8485/masters</value></property><property><name>dfs.journalnode.edits.dir</name><value>/tmp/journaldata</value></property><property><name>dfs.ha.automatic-failover.enabled</name><value>true</value></property><property><name>dfs.client.failover.proxy.provider.masters</name><value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value></property><property><name>dfs.ha.fencing.methods</name><value>
sshfence
shell(/bin/true)
</value></property><property><name>dfs.ha.fencing.ssh.private-key-files</name><value>/home/hadoop/.ssh/id_rsa</value></property><property><name>dfs.ha.fencing.ssh.connect-timeout</name><value>30000</value></property></configuration>
[hadoop@server1 etc]$ cd ~/hadoop
[hadoop@server1 hadoop]$ pwd
/home/hadoop/hadoop/etc/hadoop
[hadoop@server1 hadoop]$ vim mapred-site.xml
<configuration><property><name>mapreduce.framework.name</name><value>yarn</value></property></configuration>
配置yarn-site.xml
[hadoop@server1 hadoop]$ vim yarn-site.xml
<configuration><!-- Site specific YARN configuration properties --><property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property><property><name>yarn.resourcemanager.ha.enabled</name><value>true</value></property><property><name>yarn.resourcemanager.cluster-id</name><value>RM_CLUSTER</value></property><property><name>yarn.resourcemanager.ha.rm-ids</name><value>rm1,rm2</value></property><property><name>yarn.resourcemanager.hostname.rm1</name><value>172.25.120.1</value></property><property><name>yarn.resourcemanager.hostname.rm2</name><value>172.25.120.5</value></property><property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property><property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value></property><property><name>yarn.resourcemanager.zk-address</name><value>172.25.120.2:2181,172.25.120.3:2181,172.25.120.4:2181</value></property></configuration>
配置regionservers,启动hbase
[hadoop@server1 ~]$ cd hbase-1.2.4/conf/
[hadoop@server1 conf]$ vim regionservers
172.25.120.2
172.25.120.3
172.25.120.4
[hadoop@server1 hadoop]$ ../../sbin/start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /home/hadoop/hadoop-2.7.3/logs/yarn-hadoop-resourcemanager-server1.out
172.25.120.3: starting nodemanager, logging to /home/hadoop/hadoop-2.7.3/logs/yarn-hadoop-nodemanager-server3.out
172.25.120.4: starting nodemanager, logging to /home/hadoop/hadoop-2.7.3/logs/yarn-hadoop-nodemanager-server4.out
172.25.120.2: starting nodemanager, logging to /home/hadoop/hadoop-2.7.3/logs/yarn-hadoop-nodemanager-server2.out
[hadoop@server1 hadoop]$ jps
1927 Jps
1837 ResourceManager
1325 NameNode
1602 DFSZKFailoverController
2、server5主机
手动开启RM节点
[hadoop@server5 ~]$ cd hadoop
[hadoop@server5 hadoop]$ sbin/yarn-daemon.sh start resourcemanager
starting resourcemanager, logging to /home/hadoop/hadoop-2.7.3/logs/yarn-hadoop-resourcemanager-server5.out
[hadoop@server5 logs]$ jps
3250 Jps
1694 ResourceManager
1209 NameNode
3184 DFSZKFailoverController
[hadoop@server1 ~]$ tar zxf hbase-1.2.4-bin.tar.gz
[hadoop@server1 ~]$ cd hbase-1.2.4
[hadoop@server1 hbase-1.2.4]$ ls
bin conf hbase-webapps lib NOTICE.txt
CHANGES.txt docs LEGAL LICENSE.txt README.txt
[hadoop@server1 hbase-1.2.4]$ cd conf/
[hadoop@server1 conf]$ vim hbase-env.sh
[hadoop@server1 conf]$ vim regionservers
[hadoop@server1 conf]$ vim hbase-site.xml
2、启动Hbase
[hadoop@server1 hbase-1.2.4]$ bin/start-hbase.sh
starting master, logging to /home/hadoop/hbase-1.2.4/bin/../logs/hbase-hadoop-master-server1.out
172.25.120.3: starting regionserver, logging to /home/hadoop/hbase-1.2.4/bin/../logs/hbase-hadoop-regionserver-server3.out
172.25.120.4: starting regionserver, logging to /home/hadoop/hbase-1.2.4/bin/../logs/hbase-hadoop-regionserver-server4.out
172.25.120.2: starting regionserver, logging to /home/hadoop/hbase-1.2.4/bin/../logs/hbase-hadoop-regionserver-server2.out
[hadoop@server1 hbase-1.2.4]$ jps
1837 ResourceManager
2567 HMaster
1325 NameNode
1602 DFSZKFailoverController
2634 Jps