hbase集群安装

hbase集群安装

1      集群规划

Hadoop-master1

192.168.2.100

Hbase主

Hadoop-master2

192.168.2.101

Hbase备(Standby)

Hadoop-slave1~hadoop-slave3

 

Zookeeper集群

 

2      linux内核调整

2.1   用户限制

vim /etc/security/limits.conf

root   soft   nproc   50000

root   hard   nproc   50000

root   soft   nofile  25535

root   hard   nofile  25535

hadoop   soft   nproc   50000

hadoop   hard   nproc   50000

hadoop   soft   nofile  25535

hadoop   hard   nofile  25535

2.2   内核参数

vim /etc/sysctl.conf

net.ipv4.ip_forward= 0

net.ipv4.conf.default.rp_filter= 1

net.ipv4.conf.default.accept_source_route=0

kernel.core_uses_pid= 1

net.ipv4.tcp_syncookies= 1

net.bridge.bridge-nf-call-ip6tables= 0

net.bridge.bridge-nf-call-iptables= 0

net.bridge.bridge-nf-call-arptables= 0

kernel.msgmnb= 65536

kernel.msgmax= 65536

kernel.shmmax= 68719476736

kernel.shmall= 268435456

net.ipv4.tcp_max_syn_backlog= 65000

net.core.netdev_max_backlog= 32768

net.core.somaxconn= 32768

fs.file-max= 65000

net.core.wmem_default= 8388608

net.core.rmem_default= 8388608

net.core.rmem_max= 16777216

net.core.wmem_max= 16777216

net.ipv4.tcp_timestamps= 1

net.ipv4.tcp_synack_retries= 2

net.ipv4.tcp_syn_retries= 2

net.ipv4.tcp_mem= 94500000 915000000 927000000

net.ipv4.tcp_max_orphans= 3276800

net.ipv4.tcp_tw_reuse= 1

net.ipv4.tcp_tw_recycle= 1

net.ipv4.tcp_keepalive_time= 1200

net.ipv4.tcp_syncookies= 1

net.ipv4.tcp_fin_timeout= 10

net.ipv4.tcp_keepalive_intvl= 15

net.ipv4.tcp_keepalive_probes= 3

net.ipv4.ip_local_port_range= 1024 65535

net.ipv4.conf.em1.send_redirects= 0

net.ipv4.conf.lo.send_redirects= 0

net.ipv4.conf.default.send_redirects= 0

net.ipv4.conf.all.send_redirects= 0

net.ipv4.icmp_echo_ignore_broadcasts= 1

net.ipv4.conf.em1.accept_source_route= 0

net.ipv4.conf.lo.accept_source_route= 0

net.ipv4.conf.default.accept_source_route=0

net.ipv4.conf.all.accept_source_route= 0

net.ipv4.icmp_ignore_bogus_error_responses=1

kernel.core_pattern= /tmp/core

vm.overcommit_memory= 1

#sysctl -p

3      hbase安装

3.1   软件材料

hbase-1.2.4-bin.tar.gz

3.2   安装过程

3.2.1  上传资料

mkdir /home/hadoop/hbase

将hbase-1.2.4-bin.tar.gz上传到hbase目录并解压

3.2.2  修改环境变量

vim ~/.bash_profile

 

source ~/.bash_profile

3.2.3  hbase-env.sh(注:下方 XML 内容实为 yarn-site.xml 的 YARN 配置,疑为粘贴错误;hbase-env.sh 应是 shell 环境变量脚本,请核对原始配置)

<?xml version="1.0"?>

<!--

 Licensed under the Apache License, Version 2.0 (the "License");

  you may not use this file except in compliance with the License.

  You may obtain a copy of the License at

 

   http://www.apache.org/licenses/LICENSE-2.0

 

 Unless required by applicable law or agreed to in writing, software

 distributed under the License is distributed on an "AS IS" BASIS,

 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

  See the License for the specific language governing permissions and

 limitations under the License. See accompanying LICENSE file.

-->

<configuration>

         <property>

                   <name>yarn.nodemanager.aux-services</name>

                   <value>mapreduce_shuffle</value>

         </property>

         <property>

                   <name>yarn.web-proxy.address</name>

                   <value>hadoop-master2:8888</value>

         </property>

         <property>

                   <name>yarn.log-aggregation-enable</name>

                   <value>true</value>

         </property>

         <property>

                   <name>yarn.nodemanager.remote-app-log-dir</name>

                   <value>/logs</value>

         </property>

         <property>

                   <name>yarn.log-aggregation.retain-seconds</name>

                   <value>604800</value>

         </property>

         <property>

                   <name>yarn.nodemanager.resource.memory-mb</name>

                   <value>2048</value>

         </property>

         <property>

                   <name>yarn.nodemanager.resource.cpu-vcores</name>

                   <value>2</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.ha.enabled</name>

                   <value>true</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>

                   <value>true</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.cluster-id</name>

                   <value>yarncluster</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.ha.rm-ids</name>

                   <value>rm1,rm2</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.hostname.rm1</name>

                   <value>hadoop-master1</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.hostname.rm2</name>

                   <value>hadoop-master2</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.webapp.address.rm1</name>

                   <value>hadoop-master1:8088</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.webapp.address.rm2</name>

                   <value>hadoop-master2:8088</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.zk-address</name>

                   <value>hadoop-slave1:2181,hadoop-slave2:2181,hadoop-slave3:2181</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.zk-state-store.parent-path</name>

                   <value>/rmstore</value>

         </property>

         <property>

                   <name>yarn.resourcemanager.recovery.enabled</name>

                   <value>true</value>

         </property>

 

         <property>

                   <name>yarn.resourcemanager.store.class</name>

                   <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>

         </property>

         <property>

                   <name>yarn.nodemanager.recovery.enabled</name>

                   <value>true</value>

         </property>

         <property>

                   <name>yarn.nodemanager.address</name>

                   <value>0.0.0.0:45454</value>

         </property>

</configuration>

 

3.2.4  regionservers

vim ~/hbase/hbase-1.2.4/conf/regionservers

hadoop-slave1

hadoop-slave2

hadoop-slave3

3.2.5  backup-masters

vim ~/hbase/hbase-1.2.4/conf/backup-masters

hadoop-master2

3.2.6  创建目录

mkdir ~/hbase/data/tmp

mkdir ~/hbase/hbase-1.2.4/logs

mkdir ~/hbase/hbase-1.2.4/pids

3.2.7  将hbase工作目录同步到集群其它节点

scp -r ~/hbase hadoop-master2:~/

3.2.8  在集群各节点上修改用户环境变量

vim .bash_profile

source .bash_profile

3.2.9  删除hbase的slf4j-log4j12-1.7.5.jar,解决hbase和hadoop的SLF4J包冲突

cd ~/hbase/hbase-1.2.4/lib

mv slf4j-log4j12-1.7.5.jar slf4j-log4j12-1.7.5.jar.bk

4      集群启动

// 启动zookeeper集群(分别在slave1、slave2和slave3上执行)

$ zkServer.sh start

备注:此命令分别在slave1/slave2/slave3节点启动了QuorumPeerMain。

// 启动HDFS(在master1执行)

$ start-dfs.sh

备注:此命令分别在master1/master2节点启动了NameNode和ZKFC,分别在slave1/slave2/slave3节点启动了DataNode和JournalNode。

// 启动YARN(在master2执行)

$ start-yarn.sh

备注:此命令在master2节点启动了ResourceManager,分别在slave1/slave2/slave3节点启动了NodeManager。

// 启动YARN的另一个ResourceManager(在master1执行,用于容灾)

$ yarn-daemon.sh start resourcemanager

备注:此命令在master1节点启动了ResourceManager。

// 启动HBase(在master1执行)

$ start-hbase.sh

备注:此命令分别在master1/master2节点启动了HMaster,分别在slave1/slave2/slave3节点启动了HRegionServer

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值