特别提醒:(版本:hbase1.3.1)
单机版
前提:zookeeper集群,hadoop分布式
解压安装重命名环境变量source生效
[root@hadoop100 opt]# tar -zxvf hbase-1.3.1-bin.tar.gz -C ../
[root@hadoop100 opt]# cd ..
[root@hadoop100 opt]# mv hbase-1.3.1/ hbase
[root@hadoop100 opt]# vim /etc/profile.d/bigdata.sh
[root@hadoop100 opt]# source /etc/profile.d/bigdata.sh
hbase-env.sh
export JAVA_HOME=/opt/jdk ~>可以不用配置
export HBASE_MANAGES_ZK=true ~>启用HBase自带的zookeeper(注意:若外部zookeeper已在本机运行,二者会争用同一端口,启动时出现 BindException: Address already in use,见下文启动日志)
注意:若是JDK1.8,需要在脚本文件【hbase-env.sh】注释掉下述两行配置信息:
# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
hbase-site.xml
<configuration>
<!-- 用来定制hbase表中的数据最终存储的hdfs上相应的目录 -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://ns1/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>hadoop100</value>
</property>
</configuration>
启动zk、hadoop后启动hbase单机版
[root@hadoop100 opt]# myself-start-Hahadoop-all.sh
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
Starting namenodes on [hadoop100 hadoop101]
hadoop100: starting namenode, logging to /opt/hadoop/logs/hadoop-root-namenode-hadoop100.out
hadoop101: starting namenode, logging to /opt/hadoop/logs/hadoop-root-namenode-hadoop101.out
hadoop101: starting datanode, logging to /opt/hadoop/logs/hadoop-root-datanode-hadoop101.out
hadoop100: starting datanode, logging to /opt/hadoop/logs/hadoop-root-datanode-hadoop100.out
hadoop102: starting datanode, logging to /opt/hadoop/logs/hadoop-root-datanode-hadoop102.out
Starting journal nodes [hadoop100 hadoop101 hadoop102]
hadoop100: starting journalnode, logging to /opt/hadoop/logs/hadoop-root-journalnode-hadoop100.out
hadoop101: starting journalnode, logging to /opt/hadoop/logs/hadoop-root-journalnode-hadoop101.out
hadoop102: starting journalnode, logging to /opt/hadoop/logs/hadoop-root-journalnode-hadoop102.out
Starting ZK Failover Controllers on NN hosts [hadoop100 hadoop101]
hadoop100: starting zkfc, logging to /opt/hadoop/logs/hadoop-root-zkfc-hadoop100.out
hadoop101: starting zkfc, logging to /opt/hadoop/logs/hadoop-root-zkfc-hadoop101.out
starting yarn daemons
starting resourcemanager, logging to /opt/hadoop/logs/yarn-root-resourcemanager-hadoop101.out
hadoop101: starting nodemanager, logging to /opt/hadoop/logs/yarn-root-nodemanager-hadoop101.out
hadoop102: starting nodemanager, logging to /opt/hadoop/logs/yarn-root-nodemanager-hadoop102.out
hadoop100: starting nodemanager, logging to /opt/hadoop/logs/yarn-root-nodemanager-hadoop100.out
starting resourcemanager, logging to /opt/hadoop/logs/yarn-root-resourcemanager-hadoop102.out
[root@hadoop100 opt]# myself-show-all.sh
----------------------------- hadoop102 ----------------------------
1794 ResourceManager
1525 DataNode
1591 JournalNode
1943 Jps
1434 QuorumPeerMain
1724 NodeManager
----------------------------- hadoop101 ----------------------------
1905 ResourceManager
2338 Jps
1763 DFSZKFailoverController
1652 JournalNode
2020 NodeManager
1495 NameNode
1561 DataNode
1438 QuorumPeerMain
----------------------------- hadoop100 ----------------------------
1808 QuorumPeerMain
2720 Jps
2069 DataNode
2277 JournalNode
2581 NodeManager
1958 NameNode
2462 DFSZKFailoverController
[root@hadoop100 opt]# start-hbase.sh
hadoop100: starting zookeeper, logging to /opt/hbase/logs/hbase-root-zookeeper-hadoop100.out
hadoop100: SLF4J: Class path contains multiple SLF4J bindings.
hadoop100: SLF4J: Found binding in [jar:file:/opt/hbase/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
hadoop100: SLF4J: Found binding in [jar:file:/opt/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
hadoop100: SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
hadoop100: SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
hadoop100: java.net.BindException: Address already in use
hadoop100: at sun.nio.ch.Net.bind0(Native Method)
hadoop100: at sun.nio.ch.Net.bind(Net.java:433)
hadoop100: at sun.nio.ch.Net.bind(Net.java:425)
hadoop100: at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
starting master, logging to /opt/hbase/logs/hbase-root-master-hadoop100.out
starting regionserver, logging to /opt/hbase/logs/hbase-root-1-regionserver-hadoop100.out
[root@hadoop100 opt]# myself-show-all.sh
----------------------------- hadoop102 ----------------------------
1794 ResourceManager
1525 DataNode
1973 Jps
1591 JournalNode
1434 QuorumPeerMain
1724 NodeManager
----------------------------- hadoop101 ----------------------------
2368 Jps
1905 ResourceManager
1763 DFSZKFailoverController
1652 JournalNode
2020 NodeManager
1495 NameNode
1561 DataNode
1438 QuorumPeerMain
----------------------------- hadoop100 ----------------------------
1808 QuorumPeerMain
2069 DataNode
2277 JournalNode
2581 NodeManager
3061 HMaster
1958 NameNode
3500 Jps
2462 DFSZKFailoverController
3199 HRegionServer
[root@hadoop100 opt]#
hadoopweb端查看确认:
hbase master web端查看确认:
连接hbase测试:
[root@hadoop100 opt]# hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/opt/hbase/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/opt/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.3.1, r930b9a55528fe45d8edce7af42fef2d35e77677a, Thu Apr 6 19:36:54 PDT 2017
hbase(main):001:0> list
TABLE
0 row(s) in 0.1630 seconds
=> []
hbase(main):002:0> quit;
hbase(main):003:0* [root@hadoop100 opt]#
关闭hbase
[root@hadoop100 opt]# stop-hbase.sh
hbase分布式
集群规模规划
hadoop100 hadoop101 hadoop102
_________________________________________________________________
HMaster HRegionServer HRegionServer
hbase-env.sh
export JAVA_HOME=/opt/jdk
#屏蔽掉HBase自带的zookeeper,使用外置的zk分布式集群进行资源的调度
export HBASE_MANAGES_ZK=false
注意:若是JDK1.8,需要在脚本文件【hbase-env.sh】注释掉下述两行配置信息:
# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
hbase-site.xml
<configuration>
<!-- 用来定制hbase表中的数据最终存储的hdfs上相应的目录 -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://ns1/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>hadoop100,hadoop101,hadoop102</value>
</property>
</configuration>
regionservers
hadoop101
hadoop102
同步hbase到所有节点(hadoop102上也运行RegionServer,同样需要同步)
[root@hadoop100 opt]# scp -r hbase/ root@hadoop101:/opt/
[root@hadoop100 opt]# scp -r hbase/ root@hadoop102:/opt/
同步环境变量(source)
[root@hadoop100 opt]# scp -r /etc/profile.d/bigdata.sh root@hadoop101:/etc/profile.d/
[root@hadoop100 opt]# scp -r /etc/profile.d/bigdata.sh root@hadoop102:/etc/profile.d/
[root@hadoop100 opt]# ssh hadoop101 'source /etc/profile.d/bigdata.sh'
[root@hadoop100 opt]# ssh hadoop102 'source /etc/profile.d/bigdata.sh'
注意:
★如果已经配置过单机版,需要将hbase在hdfs上面的目录、以及hbase在zk中的目录清除,以免和集群版本操作冲突。
只在第一次搭建时执行以下清理操作;以后启动hbase集群时不需要。
zk
rmr /hbase
hdfs
hdfs dfs -rm -R /hbase
启动hbase集群版
[root@hadoop100 opt]# start-hbase.sh
starting master, logging to /opt/hbase/logs/hbase-root-master-hadoop100.out
hadoop102: starting regionserver, logging to /opt/hbase/logs/hbase-root-regionserver-hadoop102.out
hadoop101: starting regionserver, logging to /opt/hbase/logs/hbase-root-regionserver-hadoop101.out
[root@hadoop100 opt]# myself-hwo
-bash: myself-hwo: command not found
[root@hadoop100 opt]# myself-show-all.sh
----------------------------- hadoop102 ----------------------------
1794 ResourceManager
1525 DataNode
1591 JournalNode
2119 HRegionServer
2297 Jps
1434 QuorumPeerMain
----------------------------- hadoop101 ----------------------------
1905 ResourceManager
1763 DFSZKFailoverController
1652 JournalNode
2020 NodeManager
1495 NameNode
2808 HRegionServer
1561 DataNode
2988 Jps
1438 QuorumPeerMain
----------------------------- hadoop100 ----------------------------
1808 QuorumPeerMain
4513 HMaster
2069 DataNode
2277 JournalNode
2581 NodeManager
1958 NameNode
4790 Jps
2462 DFSZKFailoverController
[root@hadoop100 opt]#
验证可用性
zk验证
[root@hadoop100 opt]# zkCli.sh -server hadoop100:2181
Connecting to hadoop100:2181
2021-10-13 10:21:53,846 [myid:] - INFO [main:Environment@100] - Client environment:zookeeper.version=3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT
2021-10-13 10:21:53,848 [myid:] - INFO [main:Environment@100] - Client environment:host.name=hadoop100
2021-10-13 10:21:53,848 [myid:] - INFO [main:Environment@100] - Client environment:java.version=1.8.0_131
2021-10-13 10:21:53,849 [myid:] - INFO [main:Environment@100] - Client environment:java.vendor=Oracle Corporation
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:java.home=/opt/jdk/jre
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:java.class.path=/opt/zookeeper/bin/../build/classes:/opt/zookeeper/bin/../build/lib/*.jar:/opt/zookeeper/bin/../lib/slf4j-log4j12-1.6.1.jar:/opt/zookeeper/bin/../lib/slf4j-api-1.6.1.jar:/opt/zookeeper/bin/../lib/netty-3.10.5.Final.jar:/opt/zookeeper/bin/../lib/log4j-1.2.16.jar:/opt/zookeeper/bin/../lib/jline-0.9.94.jar:/opt/zookeeper/bin/../zookeeper-3.4.10.jar:/opt/zookeeper/bin/../src/java/lib/*.jar:/opt/zookeeper/bin/../conf:
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:java.io.tmpdir=/tmp
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:java.compiler=<NA>
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:os.name=Linux
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:os.arch=amd64
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:os.version=3.10.0-862.el7.x86_64
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:user.name=root
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:user.home=/root
2021-10-13 10:21:53,850 [myid:] - INFO [main:Environment@100] - Client environment:user.dir=/opt
2021-10-13 10:21:53,851 [myid:] - INFO [main:ZooKeeper@438] - Initiating client connection, connectString=hadoop100:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@506c589e
Welcome to ZooKeeper!
2021-10-13 10:21:53,867 [myid:] - INFO [main-SendThread(hadoop100:2181):ClientCnxn$SendThread@1032] - Opening socket connection to server hadoop100/192.168.5.100:2181. Will not attempt to authenticate using SASL (unknown error)
JLine support is enabled
2021-10-13 10:21:53,911 [myid:] - INFO [main-SendThread(hadoop100:2181):ClientCnxn$SendThread@876] - Socket connection established to hadoop100/192.168.5.100:2181, initiating session
2021-10-13 10:21:53,964 [myid:] - INFO [main-SendThread(hadoop100:2181):ClientCnxn$SendThread@1299] - Session establishment complete on server hadoop100/192.168.5.100:2181, sessionid = 0x647c77542a30000a, negotiated timeout = 30000
WATCHER::
WatchedEvent state:SyncConnected type:None path:null
[zk: hadoop100:2181(CONNECTED) 0] ls /
[zookeeper, yarn-leader-election, hadoop-ha, hbase]
[zk: hadoop100:2181(CONNECTED) 1] quit
Quitting...
2021-10-13 10:22:23,797 [myid:] - INFO [main:ZooKeeper@684] - Session: 0x647c77542a30000a closed
2021-10-13 10:22:23,799 [myid:] - INFO [main-EventThread:ClientCnxn$EventThread@519] - EventThread shut down for session: 0x647c77542a30000a
[root@hadoop100 opt]#
hdfs验证
Hmaster验证
hbase连接验证
[root@hadoop100 opt]# hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/opt/hbase/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/opt/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.3.1, r930b9a55528fe45d8edce7af42fef2d35e77677a, Thu Apr 6 19:36:54 PDT 2017
hbase(main):001:0> list
TABLE
0 row(s) in 0.1420 seconds
=> []
hbase(main):002:0> quit
[root@hadoop100 opt]#
hbase高可用
只需在上述的集群hbase的非Hmaster节点再启动一个Hmaster
我的集群规模是这样的
hadoop100 hadoop101 hadoop102
_________________________________________________________________
HMaster HMaster HRegionServer HRegionServer
[root@hadoop100 opt]# ssh hadoop101 'hbase-daemon.sh start master'
starting master, logging to /opt/hbase/logs/hbase-root-master-hadoop101.out
[root@hadoop100 opt]# myself-show-all.sh
----------------------------- hadoop102 ----------------------------
1794 ResourceManager
1525 DataNode
2438 Jps
1591 JournalNode
2119 HRegionServer
1434 QuorumPeerMain
----------------------------- hadoop101 ----------------------------
3440 Jps
1905 ResourceManager
3265 HMaster
1763 DFSZKFailoverController
1652 JournalNode
2020 NodeManager
1495 NameNode
2808 HRegionServer
1561 DataNode
1438 QuorumPeerMain
----------------------------- hadoop100 ----------------------------
1808 QuorumPeerMain
5088 Jps
4513 HMaster
2069 DataNode
2277 JournalNode
2581 NodeManager
1958 NameNode
2462 DFSZKFailoverController
[root@hadoop100 opt]#
验证
web
备用节点
验证高可用性
[root@hadoop100 opt]# hbase-daemon.sh stop master
stopping master.
[root@hadoop100 opt]# myself-show-all.sh
----------------------------- hadoop102 ----------------------------
1794 ResourceManager
1525 DataNode
1591 JournalNode
2119 HRegionServer
1434 QuorumPeerMain
2509 Jps
----------------------------- hadoop101 ----------------------------
1905 ResourceManager
3265 HMaster
1763 DFSZKFailoverController
1652 JournalNode
2020 NodeManager
1495 NameNode
2808 HRegionServer
1561 DataNode
3609 Jps
1438 QuorumPeerMain
----------------------------- hadoop100 ----------------------------
1808 QuorumPeerMain
5331 Jps
2069 DataNode
2277 JournalNode
2581 NodeManager
1958 NameNode
2462 DFSZKFailoverController
[root@hadoop100 opt]#
ok,搞定!!!