1. 解压
tar -xf Downloads/hbase-1.2.4-bin.tar.gz -C ~/
2. hbase-env.sh
export JAVA_HOME=/usr/java/jdk1.8/
export HBASE_MANAGES_ZK=false # Tell HBase whether it should manage its own instance of Zookeeper or not.
# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
#export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
#export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
3. hbase-site.xml
<configuration>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>/home/zkpk/hbase-1.2.4/tmp</value>
</property>
<property>
<name>hbase.rootdir</name>
<value>hdfs://ns1/hbase</value>
</property>
<property>
<name>zookeeper.session.timeout</name>
<value>120000</value>
</property>
<property>
<name>hbase.zookeeper.property.tickTime</name>
<value>6000</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>master,slave1,slave2</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/home/zkpk/zookeeper-3.4.6/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>hbase.master.maxclockskew</name>
<value>180000</value>
</property>
</configuration>
4. regionservers
slave1
slave2
5. 拷贝hbase到其他节点
cp hadoop-2.7.3/etc/hadoop/hdfs-site.xml hbase-1.2.4/conf/
cp hadoop-2.7.3/etc/hadoop/core-site.xml hbase-1.2.4/conf/
scp -r /home/zkpk/hbase-1.2.4 slave1:~/
scp -r /home/zkpk/hbase-1.2.4 slave2:~/
6. 配置环境变量(各节点)
vim ~/.bash_profile
export HBASE_HOME=/home/zkpk/hbase-1.2.4
export PATH=$PATH:$HBASE_HOME/bin
source ~/.bash_profile
7. 拷贝Hadoop 2.7.3的jar包到 ~/hbase-1.2.4/lib/,用以使已安装的Hadoop和HBase兼容
# 替换掉2.5.1版本的hadoop jar文件
[zkpk@master ~]$ ls hbase-1.2.4/lib/ | grep 2.5.1
hadoop-annotations-2.5.1.jar
hadoop-auth-2.5.1.jar
hadoop-client-2.5.1.jar
hadoop-common-2.5.1.jar
hadoop-hdfs-2.5.1.jar
hadoop-mapreduce-client-app-2.5.1.jar
hadoop-mapreduce-client-common-2.5.1.jar
hadoop-mapreduce-client-core-2.5.1.jar
hadoop-mapreduce-client-jobclient-2.5.1.jar
hadoop-mapreduce-client-shuffle-2.5.1.jar
hadoop-yarn-api-2.5.1.jar
hadoop-yarn-client-2.5.1.jar
hadoop-yarn-common-2.5.1.jar
hadoop-yarn-server-common-2.5.1.jar
cp ~/hadoop-2.7.3/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/tools/lib/hadoop-auth-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/common/hadoop-common-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/hdfs/hadoop-hdfs-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.3.jar . ## 注意:Hadoop 2.7.3的发行包中没有hadoop-client jar,故hadoop-client-2.5.1.jar无对应的新版本可拷贝
cp ~/hadoop-2.7.3/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/yarn/hadoop-yarn-api-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/yarn/hadoop-yarn-client-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/yarn/hadoop-yarn-common-2.7.3.jar .
cp ~/hadoop-2.7.3/share/hadoop/yarn/hadoop-yarn-server-common-2.7.3.jar .
# 删除HBase自带的Hadoop 2.5.1的jar包(已被上面拷贝的2.7.3版本替代)
[zkpk@master lib]$ rm -f *-2.5.1.jar
8. 启动验证
#启动
start-hbase.sh
#在backup master机(即slave1)上启动
hbase-daemon.sh start master
#可以利用jps验证,在slave1上有HMaster和HRegionServer两个进程
#浏览器访问
http://master:16010/master-status (Current Active Master)