Installing a Hadoop + Hive Cluster on CentOS

  • Environment preparation

192.168.8.140 bigdata-node1
192.168.8.141 bigdata-node2
192.168.8.142 bigdata-node3
192.168.8.143 bigdata-node4
192.168.8.144 bigdata-node5
  • Disable the firewall (run on all nodes)

systemctl stop firewalld
systemctl disable firewalld    # keep it from starting again on reboot
  • Disable SELinux (run on all nodes)

vim /etc/selinux/config

SELINUX=disabled

The config change takes effect after a reboot; run setenforce 0 to switch to permissive mode immediately.
  • Install dependencies

sudo yum install -y bind-utils psmisc libxslt cyrus-sasl-plain cyrus-sasl-gssapi fuse portmap fuse-libs httpd mod_ssl openssl-devel python-psycopg2 MySQL-python /lib/lsb/init-functions libpq.so.5
wget http://www.percona.com/redir/downloads/Percona-XtraDB-Cluster/5.5.37-25.10/RPM/rhel6/x86_64/Percona-XtraDB-Cluster-shared-55-5.5.37-25.10.756.el6.x86_64.rpm
rpm -ivh Percona-XtraDB-Cluster-shared-55-5.5.37-25.10.756.el6.x86_64.rpm
yum install python27 
  • Edit the hosts file for hostname mapping (run on all nodes)

    vim /etc/hosts
    Append the following at the end of the file:
          192.168.8.140 bigdata-node1
          192.168.8.141 bigdata-node2
          192.168.8.142 bigdata-node3
          192.168.8.143 bigdata-node4
          192.168.8.144 bigdata-node5
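
A quick check that every mapping resolves (a hypothetical verification loop):

for h in bigdata-node1 bigdata-node2 bigdata-node3 bigdata-node4 bigdata-node5; do
    ping -c1 -W1 $h >/dev/null && echo "$h ok" || echo "$h FAILED"
done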
  • Set up passwordless SSH login (only needed on bigdata-node1)

    Run the following command and press Enter at every prompt; no input is required:
ssh-keygen -t dsa

Note: recent OpenSSH releases reject DSA keys by default; if you generate an RSA key instead (ssh-keygen -t rsa), update dfs.ha.fencing.ssh.private-key-files in hdfs-site.xml below to match.

Copy the local key to every machine in the cluster:

ssh-copy-id bigdata-node1
ssh-copy-id bigdata-node2
ssh-copy-id bigdata-node3
ssh-copy-id bigdata-node4
ssh-copy-id bigdata-node5
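
Verify that passwordless login now works for every node (BatchMode makes ssh fail instead of prompting for a password):

for h in bigdata-node1 bigdata-node2 bigdata-node3 bigdata-node4 bigdata-node5; do
    ssh -o BatchMode=yes $h hostname
done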
  • Time synchronization (run on all nodes)

 yum -y install ntp              # install ntp
 ntpdate time1.aliyun.com        # run a one-off synchronization
 hwclock -w                      # write the system time to the hardware clock (so it persists across reboots)
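
ntpdate is a one-shot sync; to keep the clocks aligned over time, either run ntpd or schedule a periodic sync, for example with a cron entry (a sketch; the 30-minute interval is arbitrary):

echo "*/30 * * * * /usr/sbin/ntpdate time1.aliyun.com >/dev/null 2>&1" >> /var/spool/cron/root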
  • Install MySQL

Reference: https://www.cnblogs.com/xuewenlong/p/12882039.html
  • Disable MySQL SSL

vim /etc/my.cnf
Add the following:
# disable_ssl
skip_ssl
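
Restart MySQL and confirm SSL is off (a hedged check; use whatever credentials you configured):

systemctl restart mysqld
mysql -uroot -p -e "SHOW VARIABLES LIKE 'have_ssl';"    # expect DISABLED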

  • Write helper scripts

vim /root/bin/xcall

#!/bin/bash

for host in bigdata-node1 bigdata-node2 bigdata-node3 bigdata-node4 bigdata-node5
do
        echo ------------- $host  -------------
        ssh $host "$*"
done

vim /root/bin/xrsync

#!/bin/bash

# Get the number of arguments; exit immediately if none were given
pcount=$#
if((pcount==0));then
echo no args;
exit;
fi

# Get the file name
p1=$1
fname=`basename $p1`
echo fname=$fname

# Resolve the parent directory to an absolute path
pdir=`cd -P $(dirname $p1);pwd`
echo pdir=$pdir

# Get the current user name
user=`whoami`

# Copy the file to every target machine
for host in bigdata-node1 bigdata-node2 bigdata-node3 bigdata-node4 bigdata-node5
do
        echo -----------$host --------------
        rsync -av $pdir/$fname $user@$host:$pdir
done

vim /root/bin/zk

#!/bin/bash

case $1 in
"start"){
        for i in bigdata-node3 bigdata-node4 bigdata-node5
        do
                echo ------------ $i  -------------
                ssh $i "/opt/bigdata/apache-zookeeper-3.6.2-bin/bin/zkServer.sh start"
        done
};;
"restart"){
        for i in bigdata-node3 bigdata-node4 bigdata-node5
        do
                echo ------------ $i  -------------
                ssh $i "/opt/bigdata/apache-zookeeper-3.6.2-bin/bin/zkServer.sh restart"
        done
};;
"stop"){
        for i in bigdata-node3 bigdata-node4 bigdata-node5
        do
                echo ------------ $i  -------------
                ssh $i "/opt/bigdata/apache-zookeeper-3.6.2-bin/bin/zkServer.sh stop"
        done
};;
"status"){
        for i in bigdata-node3 bigdata-node4 bigdata-node5
        do
                echo ------------ $i  -------------
                ssh $i "/opt/bigdata/apache-zookeeper-3.6.2-bin/bin/zkServer.sh status"
        done
};;
esac
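
Make all three scripts executable (assuming /root/bin is on root's PATH, which it is by default on CentOS):

chmod +x /root/bin/xcall /root/bin/xrsync /root/bin/zk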
  • Create directories and download the installation packages

cd /opt
mkdir bigdata install
cd install
wget https://download.oracle.com/otn/java/jdk/8u271-b09/61ae65e088624f5aaa0b1d2d801acb16/jdk-8u271-linux-x64.tar.gz
wget https://mirrors.tuna.tsinghua.edu.cn/apache/hive/hive-2.3.8/apache-hive-2.3.8-bin.tar.gz
wget http://archive.cloudera.com/cdh5/cdh/5/hadoop-2.6.0-cdh5.7.0.tar.gz
wget http://mirrors.hust.edu.cn/apache/zookeeper/zookeeper-3.6.2/apache-zookeeper-3.6.2-bin.tar.gz

  • Install the JDK

cd /opt/install
xrsync /opt/install
xcall tar -zxvf /opt/install/jdk-8u271-linux-x64.tar.gz -C /usr/local/
xcall ln -s /usr/local/jdk1.8.0_271 /usr/local/jdk
  • Set environment variables

vim /etc/profile.d/env.sh

JAVA_HOME=/usr/local/jdk
JRE_HOME=/usr/local/jdk/jre
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export JAVA_HOME JRE_HOME PATH CLASSPATH
  • Make the environment variables take effect

xrsync /etc/profile.d/env.sh
xcall source /etc/profile.d/env.sh 
  • Verify the JDK installation

java -version

java version "1.8.0_271"
Java(TM) SE Runtime Environment (build 1.8.0_271-b09)
Java HotSpot(TM) 64-Bit Server VM (build 25.271-b09, mixed mode)
  • Install ZooKeeper

[root@master ~]# cd /opt/install
[root@master install]# tar -zxf apache-zookeeper-3.6.2-bin.tar.gz -C ../bigdata/
[root@master ~]# cd /opt/bigdata/apache-zookeeper-3.6.2-bin/
[root@master apache-zookeeper-3.6.2-bin]# mkdir zkData
[root@master apache-zookeeper-3.6.2-bin]# cd zkData/
[root@master zkData]# touch myid
[root@master zkData]# ll
total 0
-rw-r--r--. 1 root root 0 Jun  9 22:15 myid
  • Configure zoo.cfg

cp /opt/bigdata/apache-zookeeper-3.6.2-bin/conf/zoo_sample.cfg /opt/bigdata/apache-zookeeper-3.6.2-bin/conf/zoo.cfg
vim /opt/bigdata/apache-zookeeper-3.6.2-bin/conf/zoo.cfg

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/opt/bigdata/apache-zookeeper-3.6.2-bin/zkData
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1

## Metrics Providers
#
# https://prometheus.io Metrics Exporter
#metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
#metricsProvider.httpPort=7000
#metricsProvider.exportJvmInfo=true
server.1=bigdata-node3:2287:3387
server.2=bigdata-node4:2287:3387
server.3=bigdata-node5:2287:3387
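
In server.N=host:2287:3387, the first port carries follower-to-leader traffic and the second is used for leader election; N must match the number written to that host's myid file in a later step.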
  • Configure ZooKeeper environment variables

vim /etc/profile.d/env.sh

JAVA_HOME=/usr/local/jdk
JRE_HOME=/usr/local/jdk/jre
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export JAVA_HOME JRE_HOME PATH CLASSPATH

export ZOOKEEPER_HOME=/opt/bigdata/apache-zookeeper-3.6.2-bin
export PATH=$PATH:$ZOOKEEPER_HOME/bin
  • Distribute ZooKeeper and the environment variables

[root@master ~]# xrsync /opt/bigdata/apache-zookeeper-3.6.2-bin/

[root@master ~]# xrsync /etc/profile.d/env.sh 

[root@master ~]# xcall source /etc/profile.d/env.sh 

  • Set the myid on bigdata-node3 through bigdata-node5

1. On bigdata-node3, edit myid to contain only the number 1, then save and exit
2. On bigdata-node4, edit myid to contain only the number 2, then save and exit
3. On bigdata-node5, edit myid to contain only the number 3, then save and exit
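
Equivalently, set them from bigdata-node1 over SSH (a small sketch using the paths above):

ssh bigdata-node3 "echo 1 > /opt/bigdata/apache-zookeeper-3.6.2-bin/zkData/myid"
ssh bigdata-node4 "echo 2 > /opt/bigdata/apache-zookeeper-3.6.2-bin/zkData/myid"
ssh bigdata-node5 "echo 3 > /opt/bigdata/apache-zookeeper-3.6.2-bin/zkData/myid"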
  • Start ZooKeeper

zk start
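
Then check the ensemble state; one of the three nodes should report Mode: leader and the other two Mode: follower:

zk status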
  • Install Hadoop

tar -xzvf /opt/install/hadoop-2.6.0-cdh5.7.0.tar.gz -C /opt/bigdata/

vim /opt/bigdata/hadoop-2.6.0-cdh5.7.0/etc/hadoop/hadoop-env.sh

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME.  All others are
# optional.  When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use.
# export JAVA_HOME=${JAVA_HOME}
export JAVA_HOME=/usr/local/jdk

# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol.  Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
#export JSVC_HOME=${JSVC_HOME}

export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}

# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done

# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""

# Extra Java runtime options.  Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"

export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"

export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"

# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"

# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol.  This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}

# Where log files are stored.  $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER

# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

###
# HDFS Mover specific parameters
###
# Specify the JVM options to be used when starting the HDFS Mover.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_MOVER_OPTS=""

###
# Advanced Users Only!
###

# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by 
#       the user that will run the hadoop daemons.  Otherwise there is the
#       potential for a symlink attack.
export HADOOP_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}

# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER

vim /opt/bigdata/hadoop-2.6.0-cdh5.7.0/etc/hadoop/mapred-env.sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
export JAVA_HOME=/usr/local/jdk

export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000

export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA

#export HADOOP_JOB_HISTORYSERVER_OPTS=
#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.

vim /opt/bigdata/hadoop-2.6.0-cdh5.7.0/etc/hadoop/yarn-env.sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# User for YARN daemons
export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}

# resolve links - $0 may be a softlink
export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"

# some Java parameters
# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
export JAVA_HOME=/usr/local/jdk

if [ "$JAVA_HOME" != "" ]; then
  #echo "run java in $JAVA_HOME"
  JAVA_HOME=$JAVA_HOME
fi
  
if [ "$JAVA_HOME" = "" ]; then
  echo "Error: JAVA_HOME is not set."
  exit 1
fi

JAVA=$JAVA_HOME/bin/java
JAVA_HEAP_MAX=-Xmx1000m 

# For setting YARN specific HEAP sizes please use this
# Parameter and set appropriately
# YARN_HEAPSIZE=1000

# check envvars which might override default args
if [ "$YARN_HEAPSIZE" != "" ]; then
  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
fi

# Resource Manager specific parameters

# Specify the max Heapsize for the ResourceManager using a numerical value
# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
# the value to 1000.
# This value will be overridden by an Xmx setting specified in either YARN_OPTS
# and/or YARN_RESOURCEMANAGER_OPTS.
# If not specified, the default value will be picked from either YARN_HEAPMAX
# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
#export YARN_RESOURCEMANAGER_HEAPSIZE=1000

# Specify the max Heapsize for the timeline server using a numerical value
# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
# the value to 1000.
# This value will be overridden by an Xmx setting specified in either YARN_OPTS
# and/or YARN_TIMELINESERVER_OPTS.
# If not specified, the default value will be picked from either YARN_HEAPMAX
# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
#export YARN_TIMELINESERVER_HEAPSIZE=1000

# Specify the JVM options to be used when starting the ResourceManager.
# These options will be appended to the options specified as YARN_OPTS
# and therefore may override any similar flags set in YARN_OPTS
#export YARN_RESOURCEMANAGER_OPTS=

# Node Manager specific parameters

# Specify the max Heapsize for the NodeManager using a numerical value
# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
# the value to 1000.
# This value will be overridden by an Xmx setting specified in either YARN_OPTS
# and/or YARN_NODEMANAGER_OPTS.
# If not specified, the default value will be picked from either YARN_HEAPMAX
# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
#export YARN_NODEMANAGER_HEAPSIZE=1000

# Specify the JVM options to be used when starting the NodeManager.
# These options will be appended to the options specified as YARN_OPTS
# and therefore may override any similar flags set in YARN_OPTS
#export YARN_NODEMANAGER_OPTS=

# so that filenames w/ spaces are handled correctly in loops below
IFS=


# default log directory & file
if [ "$YARN_LOG_DIR" = "" ]; then
  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
fi
if [ "$YARN_LOGFILE" = "" ]; then
  YARN_LOGFILE='yarn.log'
fi

# default policy file for service-level authorization
if [ "$YARN_POLICYFILE" = "" ]; then
  YARN_POLICYFILE="hadoop-policy.xml"
fi

# restore ordinary behaviour
unset IFS


YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
fi  
YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"

vim /opt/bigdata/hadoop-2.6.0-cdh5.7.0/etc/hadoop/slaves

bigdata-node3
bigdata-node4
bigdata-node5

vim /opt/bigdata/hadoop-2.6.0-cdh5.7.0/etc/hadoop/core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<property>
                <name>fs.defaultFS</name>
                <value>hdfs://cluster1</value>
        </property>
        <!-- Hadoop runtime storage path -->
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/opt/bigdata/hadoop-2.6.0-cdh5.7.0/hadoopdata</value>
        </property>
        <property>
                <name>hadoop.proxyuser.root.hosts</name>
                <value>*</value>
        </property>
        <property>
                <name>hadoop.proxyuser.root.groups</name>
                <value>*</value>
        </property>
        <!-- ZooKeeper quorum used for HDFS HA -->
        <property>
                <name>ha.zookeeper.quorum</name>
                <value>bigdata-node3:2181,bigdata-node4:2181,bigdata-node5:2181</value>
        </property>
</configuration>

vim /opt/bigdata/hadoop-2.6.0-cdh5.7.0/etc/hadoop/hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
	<!-- Replication factor (normally 3) -->
        <property>
                <name>dfs.replication</name>
                <value>3</value>
        </property>

        <!-- Nameservice ID; must match the value of fs.defaultFS. With NameNode HA there are two NameNodes, and cluster1 is the single logical entry point exposed to clients -->
        <property>
                <name>dfs.nameservices</name>
                <value>cluster1</value>
        </property>

        <!-- Name the two NameNodes in the cluster nn1 and nn2 -->
        <property>
                <name>dfs.ha.namenodes.cluster1</name>
                <value>nn1,nn2</value>
        </property>

        <!-- nn1 RPC address -->
        <property>
                <name>dfs.namenode.rpc-address.cluster1.nn1</name>
                <value>bigdata-node1:9000</value>
        </property>

        <!-- nn2 RPC address -->
        <property>
                <name>dfs.namenode.rpc-address.cluster1.nn2</name>
                <value>bigdata-node2:9000</value>
        </property>
                
        <!-- nn1 HTTP address -->
        <property>
                <name>dfs.namenode.http-address.cluster1.nn1</name>
                <value>bigdata-node1:50070</value>
        </property>

        <!-- nn2 HTTP address -->
        <property>
                <name>dfs.namenode.http-address.cluster1.nn2</name>
                <value>bigdata-node2:50070</value>
        </property>

        <!-- JournalNode configuration: where the NameNode edits metadata is stored on the JournalNodes; usually co-located with ZooKeeper -->
        <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://bigdata-node3:8485;bigdata-node4:8485;bigdata-node5:8485/cluster1</value>
        </property>

        <!-- Local disk path where each JournalNode stores the shared edits -->
        <property>
                <name>dfs.journalnode.edits.dir</name>
                <value>/opt/bigdata/hadoop-2.6.0-cdh5.7.0/journaldata/jn</value>
        </property>

        <!-- NameNode HA failover configuration -->

        <!-- Enable automatic NameNode failover -->
        <property>
                <name>dfs.ha.automatic-failover.enabled</name>
                <value>true</value>
        </property>

        <!-- Failover proxy provider: the Java class HDFS clients use to determine which NameNode is currently Active -->
        <property>
                <name>dfs.client.failover.proxy.provider.cluster1</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>

        <!-- Fencing method -->
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value>shell(/bin/true)</value>
        </property>

        <!-- SSH private key, required only when the sshfence fencing method is used -->
        <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/root/.ssh/id_dsa</value>
        </property>

        <!-- sshfence connect timeout; like the key above, only relevant when sshfence is used -->
        <property>
                <name>dfs.ha.fencing.ssh.connect-timeout</name>
                <value>30000</value>
        </property>

        <!-- Disable permission checking -->
        <property>
                <name>dfs.permissions.enabled</name>
                <value>false</value>
        </property>
</configuration>

Create the data directory for the JournalNode cluster:

cd /opt/bigdata/hadoop-2.6.0-cdh5.7.0
mkdir -p journaldata/jn

If mapred-site.xml does not exist yet, copy it from mapred-site.xml.template in the same directory.

vim /opt/bigdata/hadoop-2.6.0-cdh5.7.0/etc/hadoop/mapred-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
	<property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.address</name>
                <value>bigdata-node1:10020</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.webapp.address</name>
                <value>bigdata-node1:19888</value>
        </property>
</configuration>

vim /opt/bigdata/hadoop-2.6.0-cdh5.7.0/etc/hadoop/yarn-site.xml

<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->
        <!-- How reducers fetch data -->
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
                <value>org.apache.hadoop.mapred.ShuffleHandler</value>
        </property>

        <!-- Enable log aggregation -->
        <property>
                <name>yarn.log-aggregation-enable</name>
                <value>true</value>
        </property>

        <!-- Retain aggregated logs for 7 days -->
        <property>
                <name>yarn.log-aggregation.retain-seconds</name>
                <value>604800</value>
        </property>

        <!-- HDFS directory for aggregated logs -->
        <property>
                <name>yarn.nodemanager.remote-app-log-dir</name>
                <value>/data/hadoop/yarn-logs</value>
        </property>

        <!-- Retry interval for reconnecting to the ResourceManager -->
        <property>
                <name>yarn.resourcemanager.connect.retry-interval.ms</name>
                <value>2000</value>
        </property>

        <!-- Enable ResourceManager HA -->
        <property>
                <name>yarn.resourcemanager.ha.enabled</name>
                <value>true</value>
        </property>

        <!-- Name the YARN cluster yarn-rm-cluster -->
        <property>
                <name>yarn.resourcemanager.cluster-id</name>
                <value>yarn-rm-cluster</value>
        </property>

        <!-- Name the two ResourceManagers rm1 and rm2 -->
        <property>
                <name>yarn.resourcemanager.ha.rm-ids</name>
                <value>rm1,rm2</value>
        </property>

        <!-- Hostname for ResourceManager rm1 -->
        <property>
                <name>yarn.resourcemanager.hostname.rm1</name>
                <value>bigdata-node1</value>
        </property>

        <!-- Hostname for ResourceManager rm2 -->
        <property>
                <name>yarn.resourcemanager.hostname.rm2</name>
                <value>bigdata-node2</value>
        </property>

        <!-- Enable ResourceManager recovery -->
        <property>
                <name>yarn.resourcemanager.recovery.enabled</name>
                <value>true</value>
        </property>

        <!-- ZooKeeper quorum address -->
        <property>
                <name>yarn.resourcemanager.zk-address</name>
                <value>bigdata-node3:2181,bigdata-node4:2181,bigdata-node5:2181</value>
        </property>

        <!-- Store ResourceManager state in the ZooKeeper cluster (by default it is stored in a FileSystem-based store) -->
        <property>
                <name>yarn.resourcemanager.store.class</name>
                <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
        </property>
</configuration>

vim /etc/profile.d/env.sh

JAVA_HOME=/usr/local/jdk
JRE_HOME=/usr/local/jdk/jre
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export JAVA_HOME JRE_HOME PATH CLASSPATH

export ZOOKEEPER_HOME=/opt/bigdata/apache-zookeeper-3.6.2-bin
export PATH=$PATH:$ZOOKEEPER_HOME/bin

export HADOOP_HOME=/opt/bigdata/hadoop-2.6.0-cdh5.7.0
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin

  • Distribute Hadoop and the environment variable file

[root@master ~]# xrsync /opt/bigdata/hadoop-2.6.0-cdh5.7.0/

[root@master ~]# xrsync /etc/profile.d/env.sh

[root@master ~]# xcall source /etc/profile.d/env.sh
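
A quick sanity check that the Hadoop binaries and environment variables are visible on every node:

xcall hadoop version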

  • Start the Hadoop HA cluster
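
A typical first-start sequence for a QJM-based HA cluster with this layout (a hedged sketch using the node roles above; run each step where indicated):

# 1. Start the JournalNodes (bigdata-node3..5)
for h in bigdata-node3 bigdata-node4 bigdata-node5; do
    ssh $h "hadoop-daemon.sh start journalnode"
done

# 2. Format and start the first NameNode (on bigdata-node1)
hdfs namenode -format
hadoop-daemon.sh start namenode

# 3. Bootstrap the standby NameNode with the formatted metadata (on bigdata-node2)
ssh bigdata-node2 "hdfs namenode -bootstrapStandby"

# 4. Initialize the failover state in ZooKeeper (once, from bigdata-node1)
hdfs zkfc -formatZK

# 5. Start HDFS (NameNodes, DataNodes, ZKFCs) and YARN
start-dfs.sh
start-yarn.sh

# 6. In Hadoop 2.6, start-yarn.sh does not start the standby ResourceManager; start it manually
ssh bigdata-node2 "yarn-daemon.sh start resourcemanager"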
