1.前期准备
虚拟机:VMware Workstation Pro
Linux系统:CentOS-7.6-x86_64-bin-DVD1.iso
2.配置网络信息
# 编辑网卡(注意:CentOS 7 默认网卡名通常为 ens33 而非 eth0,请按 ip addr 查到的实际名称替换)
vi /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
UUID=c74441f5-a71d-4fcf-9a80-6bff1218010d
ONBOOT=yes //开机获取(修改项)
NM_CONTROLLED=yes
BOOTPROTO=static //静态获取(修改项)BOOTPROTO=dhcp 动态获取
HWADDR=00:0C:29:5C:4B:A8
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth0"
IPADDR=192.168.111.51 //新增项
NETMASK=255.255.255.0 //新增项
GATEWAY=192.168.111.2 //新增项
DNS1=192.168.111.2 //新增项
# 重启网络服务
service network restart
# 查看
ifconfig
# 检测网络
ping baidu.com
# 可设DNS服务
vi /etc/resolv.conf
nameserver 114.114.114.114
# 修改主机名
vi /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=master
克隆主机并修改网卡信息
# 修改新克隆的主机的网卡配置
vi /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth1 //eth0修改为eth1
TYPE=Ethernet
UUID=c74441f5-a71d-4fcf-9a80-6bff1218010d //删掉
ONBOOT=yes //开机获取(修改项)
NM_CONTROLLED=yes
BOOTPROTO=static //静态获取(修改项)BOOTPROTO=dhcp 动态获取
HWADDR=00:0C:29:5C:4B:A8 //删掉
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth1" //与DEVICE保持一致, eth0同步改为eth1
IPADDR=192.168.111.52 //修改IP, 不能与其他主机IP重复
NETMASK=255.255.255.0 //新增项
GATEWAY=192.168.111.2 //新增项
DNS1=192.168.111.2 //新增项
# 修改主机名
vi /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=slave
# CentOS 7 下修改主机名(推荐使用 hostnamectl, 立即生效)
hostnamectl set-hostname slave
# 或直接编辑该文件, 重启或重新登录后生效
vi /etc/hostname
# 配置主机映射(两台主机均需编辑 /etc/hosts, 追加以下内容)
vi /etc/hosts
192.168.111.51 master
192.168.111.52 slave
# 测试两主机是否连通
ping master
# 配置ssh免密码登录
# 生成密钥对(一路按回车键使用默认设置)
ssh-keygen -t rsa
# 将公钥分发到目标节点(把 hostname 替换为目标节点的主机名)
ssh-copy-id hostname
# 检测是否关闭了防火墙
service iptables status
service iptables stop
# 设置开机不启动防火墙
chkconfig iptables off
# centos7 对应命令
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld   # 开机禁用
# 安装JDK并配置环境变量
vi /etc/profile
export JAVA_HOME=/opt/jdk1.8.0_151
export PATH=$PATH:$JAVA_HOME/bin
# 生效配置
source /etc/profile
# 安装hadoop
流程参考:“Hadoop单节点伪分布式环境搭建”
配置masters和slaves
cd /opt/apache_hadoop/hadoop-2.7.3/etc/hadoop
touch masters
touch slaves
# 配置 masters 文件(注:Hadoop 2.x 中该文件实际指定的是 SecondaryNameNode 所在主机)
vi masters
填写主节点主机名称
hostname
# 配置从节点(datanode)
vi slaves
填写从节点主机名称
hostname1
hostname2
# 从主节点复制配置目录到从节点(注意: 目标路径写到上级目录 etc/, 若写成 etc/hadoop 会嵌套为 etc/hadoop/hadoop)
scp -r /opt/apache_hadoop/hadoop-2.7.3/etc/hadoop root@192.168.111.52:/opt/apache_hadoop/hadoop-2.7.3/etc/
# 仅在主节点(master)上格式化 NameNode, 且只在首次启动前执行一次(重复格式化会造成 NameNode 与 DataNode 的 clusterID 不一致)
cd /opt/apache_hadoop/hadoop-2.7.3
bin/hdfs namenode -format
# 在主节点启动集群(start-all.sh 会同时启动 HDFS 与 YARN; 只需 HDFS 时可改用 sbin/start-dfs.sh)
sbin/start-all.sh
# web客户端访问
http://192.168.111.51:50070