1. First, create three virtual machines.
2. Set the hostnames (run each command on its respective machine):
hostnamectl set-hostname master
hostnamectl set-hostname slave1
hostnamectl set-hostname slave2
3. Configure the IP address of each of the three hosts (for reference only):
vi /etc/sysconfig/network-scripts/ifcfg-ens33
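A minimal static configuration, as an example only: the addresses below assume a 192.168.1.0/24 network and must be adapted to your environment (use .11 and .12 for slave1 and slave2, then restart networking with systemctl restart network):
TYPE=Ethernet
BOOTPROTO=static
NAME=ens33
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.1.10
NETMASK=255.255.255.0
GATEWAY=192.168.1.2
DNS1=192.168.1.2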
4. Edit /etc/hosts (add the same entries on all three hosts):
vi /etc/hosts
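Assuming the example addresses above, the file gains three lines:
192.168.1.10 master
192.168.1.11 slave1
192.168.1.12 slave2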
5. Set up passwordless SSH by generating a key pair (on all three hosts):
ssh-keygen -t rsa
6. Create the passwordless-login public key file (on all three hosts):
[root@master ~]# cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
[root@slave1 ~]# cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
[root@slave2 ~]# cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
7. Copy the slaves' public keys to the master node, for example as shown below.
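One common way, assuming root SSH logins are allowed: ssh-copy-id appends the local ~/.ssh/id_rsa.pub to master's ~/.ssh/authorized_keys.
[root@slave1 ~]# ssh-copy-id master
[root@slave2 ~]# ssh-copy-id master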
8. Then distribute the master node's merged key file to the slave nodes:
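For example, on master (its authorized_keys now holds all three public keys):
[root@master ~]# scp ~/.ssh/authorized_keys slave1:~/.ssh/
[root@master ~]# scp ~/.ssh/authorized_keys slave2:~/.ssh/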
9. Test passwordless login:
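For example, from master; each login should succeed without a password prompt:
[root@master ~]# ssh slave1
[root@slave1 ~]# exit
[root@master ~]# ssh slave2
[root@slave2 ~]# exit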
10. Extract the Hadoop and JDK archives:
[root@master src]# tar zxvf /h3cu/jdk-8u151-linux-x64.tar.gz -C /usr/local/src/
[root@master src]# tar zxvf /h3cu/hadoop-2.6.5.tar.gz -C /usr/local/src/
11. Rename the extracted directories to short names, as shown below.
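The extracted directory names follow the archive versions used above:
[root@master src]# mv /usr/local/src/jdk1.8.0_151 /usr/local/src/jdk
[root@master src]# mv /usr/local/src/hadoop-2.6.5 /usr/local/src/hadoop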
12. Enter hadoop/etc/hadoop and configure the following files (a minimal sketch of their contents follows the list):
vi core-site.xml
vi hdfs-site.xml
vi mapred-site.xml
vi yarn-site.xml
vi hadoop-env.sh
vi yarn-env.sh
vi slaves
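A minimal sketch of the key properties, assuming the hostnames and install paths above; the tmp directory and replication factor are example values. In Hadoop 2.6.5, mapred-site.xml is first created with cp mapred-site.xml.template mapred-site.xml.

core-site.xml:
<property><name>fs.defaultFS</name><value>hdfs://master:9000</value></property>
<property><name>hadoop.tmp.dir</name><value>/usr/local/src/hadoop/tmp</value></property>

hdfs-site.xml:
<property><name>dfs.replication</name><value>2</value></property>

mapred-site.xml:
<property><name>mapreduce.framework.name</name><value>yarn</value></property>

yarn-site.xml:
<property><name>yarn.resourcemanager.hostname</name><value>master</value></property>
<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>

hadoop-env.sh and yarn-env.sh (set the JDK path):
export JAVA_HOME=/usr/local/src/jdk

slaves (one worker hostname per line):
slave1
slave2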
13. Distribute the JDK and Hadoop directories to the slave nodes:
scp -r /usr/local/src/jdk/ slave1:/usr/local/src/
scp -r /usr/local/src/jdk/ slave2:/usr/local/src/
scp -r /usr/local/src/hadoop/ slave1:/usr/local/src/
scp -r /usr/local/src/hadoop/ slave2:/usr/local/src/
14. Configure environment variables (on all three hosts), as sketched below:
vi /etc/profile
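A minimal sketch, assuming the install paths above; append these lines and reload with source /etc/profile:
export JAVA_HOME=/usr/local/src/jdk
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin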
15. Format the HDFS NameNode (on the master node only):
[root@master hadoop]# hdfs namenode -format
16. Start the cluster:
start-all.sh
Then run jps on every node to confirm the daemons started (master node vs. slave nodes).
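With the configuration sketched above, jps would typically report:
master: NameNode, SecondaryNameNode, ResourceManager, Jps
slave1 / slave2: DataNode, NodeManager, Jps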