Hadoop Cluster Installation

Hadoop application cluster configuration

1. Edit the environment configuration file

vim /etc/profile

export JAVA_HOME=/usr/local/bigdata/jdk
export HADOOP_HOME=/usr/local/bigdata/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin

source /etc/profile
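A quick sanity check that the variables took effect (this assumes the JDK and Hadoop have already been unpacked at the paths above):

echo $JAVA_HOME $HADOOP_HOME
java -version
hadoop version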


2. Configure the network
vim /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE="eth0"
BOOTPROTO="static"
HWADDR="00:0C:29:92:0C:09"
IPADDR=192.168.13.128
NETMASK=255.255.255.0
GATEWAY=192.168.13.1
#IPV6INIT="yes"
NM_CONTROLLED="yes"
ONBOOT="yes"
TYPE="Ethernet"
#UUID="9f8293fa-f522-4ea6-a3e6-3198b724f9fe"
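After saving, restart the network service so the new settings take effect (SysV init, matching the service/chkconfig commands used later in this guide):

service network restart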

If the NIC fails to come up, run ifconfig -a to view the interface hardware information:
eth0      Link encap:Ethernet  HWaddr 00:0C:29:92:0C:09  
          inet addr:192.168.13.128  Bcast:192.168.13.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fe92:c09/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:980 errors:0 dropped:0 overruns:0 frame:0
          TX packets:728 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:112210 (109.5 KiB)  TX bytes:156724 (153.0 KiB)
          Interrupt:19 Base address:0x2000 

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:46 errors:0 dropped:0 overruns:0 frame:0
          TX packets:46 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:3250 (3.1 KiB)  TX bytes:3250 (3.1 KiB)

The output above shows that the eth0 interface is in use.

Disable the host firewall:
service iptables stop
chkconfig iptables off
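To confirm the firewall is stopped now and stays disabled at boot:

service iptables status
chkconfig --list iptables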


If an interface fails to come up with the error: Bringing up interface eth1:  Error: No suitable device found: no device found for connection 'System eth1'.
Run cat /etc/udev/rules.d/70-persistent-net.rules to look up the MAC address recorded for eth1, update the HWADDR in ifcfg-eth1 to match, and reboot.

Planning:
Hostname  IP              Installed software       Running processes
pwrd01    192.168.13.128  JDK, Hadoop              NameNode, DFSZKFailoverController (active)
pwrd02    192.168.13.129  JDK, Hadoop              NameNode, DFSZKFailoverController (standby)
pwrd03    192.168.13.130  JDK, Hadoop              ResourceManager
pwrd04    192.168.13.131  JDK, Hadoop, ZooKeeper   DataNode, NodeManager, JournalNode, QuorumPeerMain
pwrd05    192.168.13.132  JDK, Hadoop, ZooKeeper   DataNode, NodeManager, JournalNode, QuorumPeerMain
pwrd06    192.168.13.133  JDK, Hadoop, ZooKeeper   DataNode, NodeManager, JournalNode, QuorumPeerMain



Edit the hosts file:
vim /etc/hosts

192.168.13.128 pwrd01
192.168.13.129 pwrd02
192.168.13.130 pwrd03
192.168.13.131 pwrd04
192.168.13.132 pwrd05
192.168.13.133 pwrd06
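A quick reachability check from any node once the hosts file is in place:

for h in pwrd01 pwrd02 pwrd03 pwrd04 pwrd05 pwrd06; do
  ping -c 1 $h
done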

Install ZooKeeper first

Install ZooKeeper on pwrd04, pwrd05, and pwrd06.
Create the base directory and extract the tarball into it:
mkdir -p /usr/local/bigdata
tar -zxvf zookeeper-3.4.8.tar.gz -C /usr/local/bigdata/
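Note that the tarball extracts to zookeeper-3.4.8, while the paths below use /usr/local/bigdata/zookeeper, so rename the extracted directory first (an assumed step, implied but not shown in the original):

mv /usr/local/bigdata/zookeeper-3.4.8 /usr/local/bigdata/zookeeper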
Create zoo.cfg from the sample:
cp /usr/local/bigdata/zookeeper/conf/zoo_sample.cfg /usr/local/bigdata/zookeeper/conf/zoo.cfg
Edit the configuration file:
vim zoo.cfg


# The number of milliseconds of each tick (the heartbeat interval)
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take -- the maximum number of heartbeats tolerated during initialization
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement -- the maximum number of heartbeats tolerated during synchronization
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage: data under /tmp is lost on reboot,
# so point dataDir at a persistent directory instead
#dataDir=/tmp/zookeeper
dataDir=/usr/local/bigdata/zookeeper/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1

# newly added: 2888 is the peer communication port, 3888 the leader election port
server.1=pwrd04:2888:3888
server.2=pwrd05:2888:3888
server.3=pwrd06:2888:3888
Save and quit.

Create the data directory:
mkdir -p /usr/local/bigdata/zookeeper/data
Create the myid file, which contains only the server number (1 for pwrd04, matching server.1 above):
vim /usr/local/bigdata/zookeeper/data/myid
1
Save and quit.
Use scp to copy the ZooKeeper directory to the other machines, pwrd05 and pwrd06.
Then set /usr/local/bigdata/zookeeper/data/myid to 2 on pwrd05 and to 3 on pwrd06.
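A minimal sketch of that copy step, assuming root SSH access between the nodes as used elsewhere in this guide:

scp -r /usr/local/bigdata/zookeeper root@pwrd05:/usr/local/bigdata/
scp -r /usr/local/bigdata/zookeeper root@pwrd06:/usr/local/bigdata/
ssh pwrd05 "echo 2 > /usr/local/bigdata/zookeeper/data/myid"
ssh pwrd06 "echo 3 > /usr/local/bigdata/zookeeper/data/myid"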

Start ZooKeeper on pwrd04, pwrd05, and pwrd06, then check its status:
/usr/local/bigdata/zookeeper/bin/zkServer.sh start

Check the running process with jps:
jps

Check the ZooKeeper role:

/usr/local/bigdata/zookeeper/bin/zkServer.sh status

There should be one leader and two followers.
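To check all three roles from a single machine, a sketch that assumes the passwordless SSH configured later in this guide (if the /etc/profile environment is not picked up over non-interactive SSH, run the status command locally on each node instead):

for h in pwrd04 pwrd05 pwrd06; do
  ssh $h /usr/local/bigdata/zookeeper/bin/zkServer.sh status
done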
---------------------------------Hadoop-----------------------------------------
Extract Hadoop:

tar -zxvf /usr/local/software/hadoop_*.tar.gz -C /usr/local/bigdata/

As with ZooKeeper, rename the extracted directory to /usr/local/bigdata/hadoop so the paths below resolve.

Edit the configuration files:
1. hadoop-env.sh
vim /usr/local/bigdata/hadoop/etc/hadoop/hadoop-env.sh
Set JAVA_HOME to the explicit path:
export JAVA_HOME=/usr/local/bigdata/jdk

2. core-site.xml
vim /usr/local/bigdata/hadoop/etc/hadoop/core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<!-- base directory for HDFS data files -->
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/bigdata/hadoop/tmp</value>
</property>
<!-- ZooKeeper quorum used for automatic NameNode failover -->
<property>
<name>ha.zookeeper.quorum</name>
<value>pwrd04:2181,pwrd05:2181,pwrd06:2181</value>
</property>
</configuration>


3. hdfs-site.xml

vim /usr/local/bigdata/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>pwrd01:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>pwrd01:50070</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>pwrd02:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>pwrd02:50070</value>
</property>
<!-- shared edits directory: the JournalNode quorum URI, suffixed with the nameservice ID -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://pwrd04:8485;pwrd05:8485;pwrd06:8485/ns1</value>
</property>
<!-- where the JournalNodes store edits on local disk -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/local/bigdata/hadoop/journal</value>
</property>
<!-- enable automatic NameNode failover -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- client failover proxy provider implementation -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- fencing method used during failover -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- sshfence requires passwordless SSH login -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
</configuration>

4. mapred-site.xml

cp /usr/local/bigdata/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/bigdata/hadoop/etc/hadoop/mapred-site.xml

vim /usr/local/bigdata/hadoop/etc/hadoop/mapred-site.xml

<configuration>
<!-- run MapReduce on the YARN framework -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>

5. yarn-site.xml

vim /usr/local/bigdata/hadoop/etc/hadoop/yarn-site.xml

<configuration>
<!-- ResourceManager address -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>pwrd03</value>
</property>
<!-- auxiliary service the NodeManager loads (the MapReduce shuffle service) -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>

6. slaves
vim /usr/local/bigdata/hadoop/etc/hadoop/slaves
Add the worker nodes:
pwrd04
pwrd05
pwrd06

Configure passwordless SSH login:

cd /root/.ssh/
rm -rf *
ssh-keygen -t rsa
ssh-copy-id -i pwrd01
ssh-copy-id -i pwrd02
ssh-copy-id -i pwrd03
ssh-copy-id -i pwrd04
ssh-copy-id -i pwrd05
ssh-copy-id -i pwrd06
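Each login should now work without a password prompt; a quick check:

for h in pwrd01 pwrd02 pwrd03 pwrd04 pwrd05 pwrd06; do
  ssh $h hostname
done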



Copy the Hadoop directory to each of the other servers:
scp -r /usr/local/bigdata/hadoop root@pwrd02:/usr/local/bigdata/
scp -r /usr/local/bigdata/hadoop root@pwrd03:/usr/local/bigdata/
scp -r /usr/local/bigdata/hadoop root@pwrd04:/usr/local/bigdata/
scp -r /usr/local/bigdata/hadoop root@pwrd05:/usr/local/bigdata/
scp -r /usr/local/bigdata/hadoop root@pwrd06:/usr/local/bigdata/



Start ZooKeeper on pwrd04, pwrd05, and pwrd06:
/usr/local/bigdata/zookeeper/bin/zkServer.sh start

On pwrd01, start the JournalNodes (hadoop-daemons.sh, with the plural "s", launches the daemon on every host listed in slaves, i.e. pwrd04-06):
/usr/local/bigdata/hadoop/sbin/hadoop-daemons.sh start journalnode

On pwrd01, format the NameNode:
hadoop namenode -format

Copy the formatted NameNode metadata to the standby NameNode:
scp -r /usr/local/bigdata/hadoop/tmp root@pwrd02:/usr/local/bigdata/hadoop

On pwrd01, format the failover controller state in ZooKeeper:
hdfs zkfc -formatZK

On pwrd01, start HDFS:
/usr/local/bigdata/hadoop/sbin/start-dfs.sh

On pwrd03, start YARN:
/usr/local/bigdata/hadoop/sbin/start-yarn.sh
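Finally, verify the cluster against the planning table. A sketch: jps on each node should show the planned daemons, and hdfs haadmin (a standard HDFS HA command, using the nn1/nn2 IDs configured above, run on pwrd01) should report one active and one standby NameNode:

for h in pwrd01 pwrd02 pwrd03 pwrd04 pwrd05 pwrd06; do
  ssh $h /usr/local/bigdata/jdk/bin/jps
done
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2

The NameNode web UI at http://pwrd01:50070 (the http-address set above) should likewise show the active/standby state.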