Hadoop (Day 12) -- Hadoop HA

一.Cluster Design


      IP            Hostname  NameNode  DataNode  ZooKeeper  Hive       HBase
192.168.16.100      name1     Y         N         Y          N          Y
192.168.16.101      name2     Y         N         Y          N          Y
192.168.16.102      data1     N         Y         Y          Y (MySQL)  Y (Master)
192.168.16.103      data2     N         Y         Y          Y          Y
192.168.16.104      data3     N         Y         Y          N          Y


二.System Setting

1 Setting YUM

2 Install Prerequisite Packages
yum -y install openssh*
yum -y install man*
yum -y install compat-libstdc++-33*
yum -y install libaio-0.*
yum -y install libaio-devel*
yum -y install sysstat-9.*
yum -y install glibc-2.*
yum -y install glibc-devel-2.* glibc-headers-2.*
yum -y install ksh-2*
yum -y install libgcc-4.*
yum -y install libstdc++-4.*
yum -y install libstdc++-4.*.i686*
yum -y install libstdc++-devel-4.*
yum -y install gcc-4.*x86_64*
yum -y install gcc-c++-4.*x86_64*
yum -y install elfutils-libelf-0*x86_64* elfutils-libelf-devel-0*x86_64*
yum -y install elfutils-libelf-0*i686* elfutils-libelf-devel-0*i686*
yum -y install libtool-ltdl*i686*
yum -y install ncurses*i686*
yum -y install ncurses*
yum -y install readline*
yum -y install unixODBC*
yum -y install zlib
yum -y install zlib*
yum -y install openssl*
yum -y install patch
yum -y install git
yum -y install lzo-devel zlib-devel gcc autoconf automake libtool
yum -y install lzop
yum -y install lrzsz
yum -y install nc
yum -y install glibc
yum -y install gzip
yum -y install gcc
yum -y install gcc-c++
yum -y install make
yum -y install protobuf
yum -y install protoc
yum -y install cmake
yum -y install openssl-devel
yum -y install ncurses-devel
yum -y install unzip
yum -y install telnet
yum -y install telnet-server
yum -y install wget
yum -y install svn
yum -y install ntpdate
yum -y install tcl* expect*


3 Setting Hosts
192.168.16.100 name1
192.168.16.101 name2
192.168.16.102 data1
192.168.16.103 data2
192.168.16.104 data3


4 Time Synchronization
(1) Check whether the ntp package is installed:
rpm -qa | grep ntp


(2) Edit ntp.conf
Back up the original configuration first:
mv /etc/ntp.conf /etc/ntp.conf.bak

Use name1 as the time server for the cluster:
vi /etc/ntp.conf
# new ntp server
server 127.127.1.0 prefer  # prefer the local clock as the time source
restrict 192.168.16.0 mask 255.255.255.0 nomodify notrap  # only allow hosts on the 192.168.16.0/24 network
broadcastdelay 0.008


On the other nodes:
vi /etc/ntp.conf
#new ntp server
server 192.168.16.100 prefer
broadcastdelay 0.008


(3) Set the ntpd daemon options (name1):
vi /etc/sysconfig/ntpd
SYNC_HWCLOCK=yes  # keep the hardware clock synchronized with the system clock
OPTIONS="-x -u ntp:ntp -p /var/run/ntpd.pid"
#OPTIONS="-u ntp:ntp -p /var/run/ntpd.pid -g"


(4) Start the service (name1):
chkconfig ntpd on
service ntpd start

On the other nodes:
chkconfig ntpd off
service ntpd stop

Verify on the server (name1):
ntpq -p

Manual time-calibration command:
ntpdate name1

crontab -e
*/59 * * * * ntpdate name1

# fields: minute hour day-of-month month day-of-week (0-6)

5 Mutual Trust:

sh sshUserSetup.sh -user root -hosts "name1 name2 data1 data2 data3" -advanced -noPromptPassphrase
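
sshUserSetup.sh is the SSH-setup helper script shipped with Oracle's database installers. If you don't have it, a minimal manual equivalent is sketched below (it assumes password login for root is still enabled on every node):

#!/bin/bash
# generate a key pair on name1, then push the public key to every node
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for h in name1 name2 data1 data2 data3
do
 ssh-copy-id root@$h  # prompts once for each host's root password
done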


6 Disable Firewall and Unneeded Services
service iptables stop
service ip6tables stop
chkconfig autofs off
chkconfig acpid off
chkconfig sendmail off
chkconfig cups-config-daemon off
chkconfig cups off
chkconfig xfs off
chkconfig lm_sensors off
chkconfig gpm off
chkconfig openibd off
chkconfig pcmcia off
chkconfig cpuspeed off
chkconfig nfslock off
chkconfig iptables off
chkconfig ip6tables off
chkconfig rpcidmapd off
chkconfig apmd off
chkconfig arptables_jf off
chkconfig microcode_ctl off
chkconfig rpcgssd off


7 Setting the Java Environment
uname -m  # check whether the OS is 32-bit or 64-bit

rpm -qa | grep java  # check whether Java is already installed

If it is installed, remove it on every node:

#!/bin/bash
# remove any pre-installed JDK packages from every cluster node
# (sed '1,2d' skips the two localhost lines in /etc/hosts)
for h in `cat /etc/hosts|sed '1,2d'|awk '{print $2}'`
do
 ssh $h << "EOF"
  for i in `rpm -qa|grep java`
  do
   rpm -e $i --nodeps
  done
EOF
done
 

8 Install the JDK
tar xvf jdk-7u80-linux-x64.gz

mv jdk1.7.0_80 java  # jdk1.7.0_80 is the directory the 7u80 tarball extracts to

9 vi /etc/profile

export JAVA_HOME=/usr/java
export JRE_HOME=/usr/java/jre
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin

source /etc/profile

java -version 

10 Distribute java and /etc/profile to the other nodes:

#!/bin/bash
read -p "Enter the absolute path to transfer: " a
for i in `sed '1,3d' /etc/hosts|awk '{print $1}'`  # skip the localhost lines and name1 itself
do
echo
echo
echo "THIS IS $i now!!!"
scp -r $a $i:$a
scp /etc/profile $i:/etc/profile
echo
echo
done
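
Example run on name1 (the script name scp_all.sh is just illustrative):

sh scp_all.sh
Enter the absolute path to transfer: /usr/java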


11 Install ZooKeeper

tar xzvf zookeeper-3.4.8.tar.gz
 

mv zookeeper-3.4.8 zookeeper

vi /etc/profile

export ZOOKEEPER_HOME=/usr/local/zookeeper
export PATH=$PATH:$JAVA_HOME/bin:$ZOOKEEPER_HOME/bin

source /etc/profile
 

mv /usr/local/zookeeper/conf/zoo_sample.cfg /usr/local/zookeeper/conf/zoo.cfg

12 Create the dataDir directories
mkdir -p /usr/local/zookeeper/data
mkdir -p /usr/local/zookeeper/data/log

Then put the following content in zoo.cfg:

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/usr/local/zookeeper/data
dataLogDir=/usr/local/zookeeper/data/log
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=name1:2888:3888  
server.2=name2:2888:3888  
server.3=data1:2888:3888  
server.4=data2:2888:3888  
server.5=data3:2888:3888  

*Remember to copy the zookeeper directory and /etc/profile to the other nodes.
13 Generate the myid files:
#!/bin/bash
i=1
for j in `sed '1,2d' /etc/hosts|awk '{print $1}'`
do
ssh $j "echo $i > /usr/local/zookeeper/data/myid"
i=$(($i+1))
echo $i
done
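
A quick sanity check that every node received the id matching its server.N line in zoo.cfg (a sketch using the same /etc/hosts convention as the script above):

#!/bin/bash
# print each node's hostname next to its myid
for h in `sed '1,2d' /etc/hosts|awk '{print $2}'`
do
 echo -n "$h: "
 ssh $h cat /usr/local/zookeeper/data/myid
done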


14 Install Hadoop
tar xzvf hadoop-2.7.3.tar.gz

mv hadoop-2.7.3 hadoop

mkdir -p /var/hadoop/tmp/dfs/datanode
mkdir -p /var/hadoop/tmp/dfs/journal
mkdir -p /var/hadoop/tmp/dfs/namenode
mkdir -p /var/hadoop/tmp/dfs/tmp
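
The scp in step 22 copies the Hadoop directory itself, but these /var/hadoop/tmp directories must also exist on every node; a sketch to create them remotely from name1:

for h in name2 data1 data2 data3
do
 ssh $h "mkdir -p /var/hadoop/tmp/dfs/{datanode,journal,namenode,tmp}"
done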


15 Hadoop Environment Variables
export HADOOP_HOME=/usr/local/hadoop
export LD_LIBRARY_PATH=$HADOOP_HOME/lib/native
export PATH=$PATH:$JAVA_HOME/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin


##### The following operations are all performed under /usr/local/hadoop/etc/hadoop/ #####


16
vi hadoop-env.sh
export JAVA_HOME=/usr/java


17
vi slaves
data1
data2
data3

18
vi core-site.xml
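
The original post does not include the file body. A minimal HA core-site.xml sketch consistent with this cluster follows; the nameservice id "mycluster" is an assumption and must match hdfs-site.xml:

<configuration>
  <!-- the default filesystem points at the HA nameservice, not a single host -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
  </property>
  <!-- base directory for the dfs/* paths created in step 14 -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/var/hadoop/tmp</value>
  </property>
  <!-- ZooKeeper quorum used by the ZKFCs for automatic failover -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>name1:2181,name2:2181,data1:2181,data2:2181,data3:2181</value>
  </property>
</configuration>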



19
vi hdfs-site.xml
For the full file contents, see the hdfs-site.xml copy I gave you.
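
That copy is not reproduced here; the sketch below shows the HA-relevant properties, matching the nn1/nn2 ids used by the haadmin commands at the end and the directories created in step 14 (the service id "mycluster" is assumed):

<configuration>
  <property><name>dfs.nameservices</name><value>mycluster</value></property>
  <property><name>dfs.ha.namenodes.mycluster</name><value>nn1,nn2</value></property>
  <property><name>dfs.namenode.rpc-address.mycluster.nn1</name><value>name1:8020</value></property>
  <property><name>dfs.namenode.rpc-address.mycluster.nn2</name><value>name2:8020</value></property>
  <property><name>dfs.namenode.http-address.mycluster.nn1</name><value>name1:50070</value></property>
  <property><name>dfs.namenode.http-address.mycluster.nn2</name><value>name2:50070</value></property>
  <!-- journalnodes run on every node (see the initialization step) -->
  <property><name>dfs.namenode.shared.edits.dir</name><value>qjournal://name1:8485;name2:8485;data1:8485;data2:8485;data3:8485/mycluster</value></property>
  <property><name>dfs.journalnode.edits.dir</name><value>/var/hadoop/tmp/dfs/journal</value></property>
  <property><name>dfs.namenode.name.dir</name><value>/var/hadoop/tmp/dfs/namenode</value></property>
  <property><name>dfs.datanode.data.dir</name><value>/var/hadoop/tmp/dfs/datanode</value></property>
  <property><name>dfs.ha.automatic-failover.enabled</name><value>true</value></property>
  <property><name>dfs.client.failover.proxy.provider.mycluster</name><value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value></property>
  <!-- sshfence relies on the root SSH trust set up in step 5 -->
  <property><name>dfs.ha.fencing.methods</name><value>sshfence</value></property>
  <property><name>dfs.ha.fencing.ssh.private-key-files</name><value>/root/.ssh/id_rsa</value></property>
</configuration>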


20 vi mapred-site.xml
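
No file body in the original either; a minimal sketch consistent with the historyserver started on name1 in the initialization step:

<configuration>
  <property><name>mapreduce.framework.name</name><value>yarn</value></property>
  <!-- the jobhistory server runs on name1 below -->
  <property><name>mapreduce.jobhistory.address</name><value>name1:10020</value></property>
  <property><name>mapreduce.jobhistory.webapp.address</name><value>name1:19888</value></property>
</configuration>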


21 vi yarn-site.xml
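
The /yarn-leader-election znode removed in the Solution section implies ResourceManager HA across name1 and name2; a sketch (the cluster-id "yarncluster" and the rm1/rm2 ids are assumptions):

<configuration>
  <property><name>yarn.resourcemanager.ha.enabled</name><value>true</value></property>
  <property><name>yarn.resourcemanager.cluster-id</name><value>yarncluster</value></property>
  <property><name>yarn.resourcemanager.ha.rm-ids</name><value>rm1,rm2</value></property>
  <property><name>yarn.resourcemanager.hostname.rm1</name><value>name1</value></property>
  <property><name>yarn.resourcemanager.hostname.rm2</name><value>name2</value></property>
  <property><name>yarn.resourcemanager.zk-address</name><value>name1:2181,name2:2181,data1:2181,data2:2181,data3:2181</value></property>
  <property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
</configuration>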

22 Distribute Hadoop and /etc/profile to the other nodes:

for h in name2 data1 data2 data3
do
 scp -r /usr/local/hadoop $h:/usr/local/
 scp /etc/profile $h:/etc/profile
done


23 Initialization:

zkServer.sh start (Every node)

hadoop-daemon.sh start journalnode (Every node)

hdfs namenode -format (name1)

hdfs zkfc -formatZK (name1)

start-dfs.sh (name1)

hdfs namenode -bootstrapStandby (name2)

stop-dfs.sh (name1)

/usr/local/hadoop/sbin/start-dfs.sh (name1)

/usr/local/hadoop/sbin/start-yarn.sh (name2)

/usr/local/hadoop/sbin/yarn-daemon.sh start resourcemanager (name1)

/usr/local/hadoop/sbin/yarn-daemon.sh start proxyserver (name2)

/usr/local/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver (name1)
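
A quick way to confirm the HA pair came up correctly (nn1/nn2 are the namenode ids assumed in the hdfs-site.xml sketch above):

jps  # NameNode and DFSZKFailoverController on name1/name2; DataNode and NodeManager on data1-3;
     # JournalNode and QuorumPeerMain on every node
hdfs haadmin -getServiceState nn1  # expect "active"
hdfs haadmin -getServiceState nn2  # expect "standby"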




Solution


If the configuration went wrong, clean up the following before re-initializing:
rm -rf /usr/local/hadoop/logs/*
rm -rf /var/hadoop/tmp/*


zkCli.sh  # ZooKeeper client; remove the stale HA znodes:
rmr /yarn-leader-election
rmr /hadoop-ha




Cluster HA Test


Kill the namenode process on name1.


Then restart the namenode on name1:
hadoop-daemon.sh start namenode


Manually switch the active namenode:
#hdfs haadmin -transitionToActive nn1
hdfs haadmin -failover nn2 nn1
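
To confirm the switchover took effect (same assumed nn ids):

hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2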
