一、准备
1、版本
hbase-2.6.0-hadoop3
引用的hadoop安装参考:hadoop 3.3.6 HA安装_hadoop安装包3.3.6-CSDN博客
2、服务角色
| No. | ip | role1 | role2 | remark |
| --- | --- | --- | --- | --- |
| 1 | hadoop01 | hmaster | | |
| 2 | hadoop02 | hmaster | | |
| 3 | hadoop03 | regionserver | | |
| 4 | hadoop04 | regionserver | | |
| 5 | hadoop05 | regionserver | | |
| 6 | hadoop06 | regionserver | | |
| 7 | hadoop07 | regionserver | | |
| 8 | hadoop08 | regionserver | | |
3、创建用户
# Create the dedicated, unprivileged service account that owns and runs HBase
# (run this on every node listed in the role table).
useradd hbase
4、ssh免密
# Generate an SSH key pair for passwordless login between cluster nodes
# (needed by HBase's bin/*-cluster.sh helper scripts).
# Fix: the original used "-b 1024 -t rsa" — 1024-bit RSA is considered weak
# and is refused by modern OpenSSH builds. Use ed25519 instead (or RSA with
# -b 3072+ if ed25519 is unavailable). Afterwards distribute the key with
# ssh-copy-id to every node.
ssh-keygen -t ed25519
5、添加服务的DNS解析或hosts文件
二、服务部署
1、安装
# Unpack the distribution under the install tree, then point a stable
# symlink at it so configs and scripts can reference /BigData/run/hbase
# and future upgrades only need the symlink switched (-snf: replace an
# existing symlink atomically, never dereference it).
cd /BigData/install
tar xvf hbase-2.6.0-hadoop3-bin.tar.gz
ln -snf /BigData/install/hbase-2.6.0-hadoop3 /BigData/run/hbase
2、定义环境变量
# Write the login-shell profile fragment for HBase.
# Fix 1: the heredoc delimiter is quoted ('EOF') so $HBASE_HOME and $PATH are
# written LITERALLY and only expand when the profile is sourced at login.
# With the original unquoted EOF they expanded at write time — and since
# HBASE_HOME is still unset here, the generated PATH entry would have been
# "/bin:<current PATH>", i.e. broken.
# Fix 2: use '>' instead of '>>' so re-running this step does not append
# duplicate copies of the fragment.
cat > /etc/profile.d/hbase.sh << 'EOF'
#!/bin/bash
#HBASE
export HBASE_HOME=/BigData/run/hbase
export PATH=$HBASE_HOME/bin:$PATH
EOF
3、修改配置
hbase-env.sh
[root@hadoop01 conf]# more hbase-env.sh | grep -vE '^$|^#'
# GC logging and crash diagnostics shared by all HBase server processes.
# NOTE(review): PrintGCTimeStamps / PrintHeapAtGC / UseGCLogFileRotation etc.
# are JDK 8-only flags; on JDK 11+ the JVM refuses to start on unknown -XX
# options (compare the +UseG1GC startup failure documented in section five).
# Confirm the target JDK before reusing these settings.
export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCTimeStamps -XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintHeapAtGC -XX:+PrintGCDateStamps -XX:+PrintAdaptiveSizePolicy -XX:+PrintTenuringDistribution -XX:PrintSafepointStatisticsCount=1 -XX:PrintFLSStatistics=1 -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/data/logs/hbase/java.dump -XX:ErrorFile=/data/logs/hbase/err_pid%p.log"
# NOTE(review): "-verbose:gc" looks misplaced in the JAAS options, which
# normally carry -Djava.security.auth.login.config=...; verify the intent.
export HBASE_SERVER_JAAS_OPTS="-verbose:gc"
# Local convenience variable (deliberately not exported).
HBASE_LIB_DIR=$HBASE_HOME/lib
# Common JMX settings for master and regionserver: remote JMX with SSL and
# authentication disabled — restrict access at the network level.
export HBASE_MASTER_JMX_BASE="-Djava.rmi.server.hostname=$HOSTNAME -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
export HBASE_REGIONSERVER_JMX_BASE="-Djava.rmi.server.hostname=$HOSTNAME -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
# Expose JMX: master on port 2084, regionserver on 2085.
export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_MASTER_JMX_BASE -Dcom.sun.management.jmxremote.port=2084"
export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_REGIONSERVER_JMX_BASE -Dcom.sun.management.jmxremote.port=2085"
# SSH port used by the bundled start/stop-cluster scripts.
export HBASE_SSH_OPTS="-p 22"
export HBASE_LOG_DIR=/data/logs/hbase
# Master heap 12g (Xms=Xmx to avoid resizing), 5g young gen, CMS collector.
# NOTE(review): UseConcMarkSweepGC was removed in JDK 14 — JDK 8/11 only.
export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:+UseConcMarkSweepGC -XX:MaxMetaspaceSize=1024M -XX:MetaspaceSize=512M -XX:MaxTenuringThreshold=15 -Xmx12g -Xms12g -Xmn5g -Xloggc:${HBASE_LOG_DIR}/hmaster_gc_%p_.log"
# RegionServer: 8g heap plus 5g direct memory (must cover the off-heap
# bucket cache configured in hbase-site.xml). The G1* tuning flags here
# assume the G1 collector is in effect; section five records that an
# explicit -XX:+UseG1GC had to be removed for the JVM to start.
# NOTE(review): /script/killparent.sh must exist on every regionserver.
export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:+UnlockExperimentalVMOptions -XX:MaxGCPauseMillis=100 -XX:InitiatingHeapOccupancyPercent=65 -XX:-ResizePLAB -XX:+ParallelRefProcEnabled -XX:ConcGCThreads=8 -XX:G1NewSizePercent=8 -XX:G1HeapWastePercent=10 -XX:MaxTenuringThreshold=1 -XX:G1HeapRegionSize=32m -XX:G1MixedGCCountTarget=16 -XX:G1OldCSetRegionThresholdPercent=10 -Xmx8g -Xms8g -XX:MaxDirectMemorySize=5g -XX:OnOutOfMemoryError=/script/killparent.sh -Xloggc:${HBASE_LOG_DIR}/regionserver_gc_%p_.log"
export HBASE_PID_DIR=/BigData/run/hbase
# ZooKeeper is an external ensemble; HBase must not manage it.
export HBASE_MANAGES_ZK=false
export ASYNC_PROFILER_HOME=/BigData/run/hbase/async-profiler-1.8.3-linux-x64
[root@hadoop01 conf]#
hbase-site.xml
[root@hadoop01 conf]# cat hbase-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Disable table sanity checks so non-standard table settings are accepted.
     NOTE(review): this also turns off safety validation on create/alter. -->
<property>
<name>hbase.table.sanity.checks</name>
<value>false</value>
</property>
<!-- HBase data root on HDFS; "jedy" must match the HDFS HA nameservice from
     the referenced Hadoop installation. -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://jedy/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>/data/tmp/hbase</value>
</property>
<!-- External ZooKeeper ensemble on a non-default client port. -->
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>12171</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>hadoop01.jedy.com.cn:12171,hadoop02.jedy.com.cn:12171,hadoop03.jedy.com.cn:12171</value>
</property>
<property>
<name>zookeeper.recovery.retry</name>
<value>3</value>
</property>
<!-- Load the aggregation coprocessor on all user regions. -->
<property>
<name>hbase.coprocessor.user.region.classes</name>
<value>org.apache.hadoop.hbase.coprocessor.AggregateImplementation</value>
</property>
<!-- Bind the RPC listeners on all interfaces. -->
<property>
<name>hbase.regionserver.ipc.address</name>
<value>0.0.0.0</value>
</property>
<property>
<name>hbase.master.ipc.address</name>
<value>0.0.0.0</value>
</property>
<!-- Enable cluster-to-cluster replication. -->
<property>
<name>hbase.replication</name>
<value>true</value>
</property>
<!-- 0 disables time-based major compaction; major compactions must be
     triggered manually or by an external scheduler. -->
<property>
<name>hbase.hregion.majorcompaction</name>
<value>0</value>
</property>
<!-- Minimum number of HFiles merged in one minor compaction. -->
<property>
<name>hbase.hstore.compaction.min</name>
<value>3</value>
</property>
<!-- Maximum number of HFiles merged in one minor compaction. -->
<property>
<name>hbase.hstore.compaction.max</name>
<value>30</value>
</property>
<!-- Thread pool size for small (minor) compactions. -->
<property>
<name>hbase.regionserver.thread.compaction.small</name>
<value>5</value>
</property>
<!-- Thread pool size for large (major) compactions. -->
<property>
<name>hbase.regionserver.thread.compaction.large</name>
<value>5</value>
</property>
<property>
<name>hbase.ipc.server.read.threadpool.size</name>
<value>20</value>
</property>
<property>
<name>hbase.regionserver.maxlogs</name>
<value>64</value>
</property>
<!-- Per-cluster ZooKeeper chroot so several clusters can share one ensemble. -->
<property>
<name>zookeeper.znode.parent</name>
<value>/hbase/jedy</value>
</property>
<!-- Updates to a region are blocked (and a flush forced) once its memstore
     reaches block.multiplier x flush.size — here 5 x 128MB = 640MB. -->
<property>
<name>hbase.hregion.memstore.block.multiplier</name>
<value>5</value>
</property>
<!-- In-memory compaction policy for memstores; NONE disables it. -->
<property>
<name>hbase.hregion.compacting.memstore.type</name>
<value>NONE</value>
</property>
<!-- Flush a memstore to disk once it exceeds this size (128MB). -->
<property>
<name>hbase.hregion.memstore.flush.size</name>
<value>134217728</value>
</property>
<!-- Periodic background flush of idle memstores, every 2 hours. -->
<property>
<name>hbase.regionserver.optionalcacheflushinterval</name>
<value>7200000</value>
</property>
<!-- Balance regions per table rather than across the whole cluster. -->
<property>
<name>hbase.master.loadbalance.bytable</name>
<value>true</value>
</property>
<!-- Short-circuit local reads via the HDFS domain socket (path must match
     the DataNode configuration). -->
<property>
<name>dfs.domain.socket.path</name>
<value>/data/store/hadoop/run/dfssocket</value>
</property>
<property>
<name>dfs.client.domain.socket.data.traffic</name>
<value>true</value>
</property>
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<!-- Number of threads flushing memstores to disk. -->
<property>
<name>hbase.hstore.flusher.count</name>
<value>3</value>
</property>
<!-- A store with at least this many HFiles becomes a minor-compaction
     candidate. -->
<property>
<name>hbase.hstore.compactionThreshold</name>
<value>3</value>
</property>
<!-- Number of WAL (hlog) asyncer/sync threads. -->
<property>
<name>hbase.hlog.asyncer.number</name>
<value>10</value>
</property>
<!-- NOTE(review): 0 presumably removes the per-store parallel-put limit —
     confirm against the HBase version in use. -->
<property>
<name>hbase.region.store.parallel.put.limit</name>
<value>0</value>
</property>
<!-- factor=1: one RPC call queue per handler (maximum queue isolation,
     higher memory use). -->
<property>
<name>hbase.ipc.server.callqueue.handler.factor</name>
<value>1</value>
</property>
<!-- 30% of the call queues are dedicated to reads, the remaining 70% to
     writes (the value is a ratio, not an equal read/write split). -->
<property>
<name>hbase.ipc.server.callqueue.read.ratio</name>
<value>0.3</value>
</property>
<!-- appended 2022/06/06 11:37 -->
<!-- Enable quotas; refresh cached quota settings every 120s. -->
<property>
<name>hbase.quota.enabled</name>
<value>true</value>
</property>
<property>
<name>hbase.quota.refresh.period</name>
<value>120000</value>
</property>
<property>
<name>hbase.server.compactchecker.interval.multiplier</name>
<value>1000</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.skip.checksum</name>
<value>true</value>
</property>
<!-- Skip major compaction of a region when its HDFS locality is below 0.7. -->
<property>
<name>hbase.hstore.min.locality.to.skip.major.compact</name>
<value>0.7</value>
</property>
<property>
<name>hbase.hstore.compaction.kv.max</name>
<value>100</value>
</property>
<!-- Compaction file-size bounds: max 8GB, min 64MB. -->
<property>
<name>hbase.hstore.compaction.max.size</name>
<value>8589934592</value>
</property>
<property>
<name>hbase.hstore.compaction.min.size</name>
<value>67108864</value>
</property>
<property>
<name>hbase.compaction.after.bulkload.enable</name>
<value>true</value>
</property>
<!-- Off-heap bucket cache of 4096MB; must fit within the direct memory
     budget (-XX:MaxDirectMemorySize=5g in hbase-env.sh). -->
<property>
<name>hbase.bucketcache.ioengine</name>
<value>offheap</value>
</property>
<property>
<name>hbase.bucketcache.size</name>
<value>4096</value>
</property>
<!-- Flush down to 85% of the global memstore limit once it is exceeded. -->
<property>
<name>hbase.regionserver.global.memstore.size.lower.limit</name>
<value>0.85</value>
</property>
</configuration>
三、服务管理
1、启动hmaster
# Start the HMaster as the dedicated hbase service user (run on hadoop01/02).
su - hbase -c 'hbase-daemon.sh start master'
2、启动regionserver
# Start the RegionServer on hadoop03..hadoop08.
# Fix: the original ran this as user "hdfs", which is inconsistent with the
# "hbase" account created in step 一.3 and used for the HMaster above — the
# regionserver should run under the same hbase service account.
su - hbase -c 'hbase-daemon.sh start regionserver'
四、访问Web UI
http://hadoop01.jedy.com.cn:16010
五、遇到的问题
1、压缩问题
hbase 2.3.7 + hadoop 3.3.1 存在snappy 压缩问题
2、RS无法启动
hbase 2.6.0 + hadoop 3.3.1 rs无法启动
报错:
RS 启不来 报Error: Could not create the Java Virtual Machine.
Error: A fatal exception has occurred. Program will exit.
解决方案:
移除hbase-env.sh中的 -XX:+UseG1GC