笔记整理 之 Hadoop 安装及环境配置
一、安装虚拟机
二、
1、配置网络 vi /etc/sysconfig/network-scripts/ifcfg-eth0
ONBOOT=yes
IPADDR=
GATEWAY=
NETMASK=
DNS1=114.114.114.114
DNS2=网关
2、修改主机名
vi /etc/sysconfig/network
3、修改hosts文件
192.168.170.91 hadoop01
192.168.170.92 hadoop02
4、关闭防火墙
service iptables stop
chkconfig iptables off
5、安装ssh客户端
yum -y install openssh-clients
6、安装jdk
解压jdk文件到 /usr/local/java下
7、安装hadoop
解压hadoop文件到 /usr/local/hadoop下
8、配置环境变量
vi /etc/profile
添加
export JAVA_HOME=/usr/local/hadoop/java/jdk1.8.0_102
export HADOOP_HOME=/usr/local/hadoop/hadoop/hadoop-2.7.3
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
9、修改hadoop配置文件
先进入hadoop 的etc目录下
vi hadoop-env.sh
# The java implementation to use.
export JAVA_HOME=/usr/local/hadoop/java/jdk1.8.0_102
vi core-site.xml
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop3801:9000</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/hadoop-2.7.3/tmp</value>
</property>
vi hdfs-site.xml
<property>
    <name>dfs.namenode.name.dir</name>
    <value>/usr/local/hadoop-2.7.3/data/name</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>/usr/local/hadoop-2.7.3/data/data</value>
</property>
<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>
<property>
    <name>dfs.secondary.http.address</name>
    <value>hadoop3801:50090</value>
</property>
cp mapred-site.xml.template mapred-site.xml
复制临时文件成为配置文件
vi mapred-site.xml
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
vi yarn-site.xml
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop3801</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
vi slaves
hadoop02
hadoop03
10、克隆虚拟机
11、修改网卡
vi /etc/sysconfig/network-scripts/ifcfg-eth0
删除 原有eth0 将 eth1改成eth0
12、修改hostname
vi /etc/sysconfig/network
13、修改ip
vi /etc/sysconfig/network-scripts/ifcfg-eth0
14、修改权限
启动免密登录脚本
hadoop01: starting nodemanager, logging to /usr/local/hadoop/hadoop/hadoop-2.7.3/logs/yarn-root-nodemanager-hadoop01.out
[root@hadoop01 hadoop]# vi /etc/profile
[root@hadoop01 hadoop]# ll
总用量 12
-rwxrwxrwx. 1 root root 995 10月 28 03:35 autossh.sh
drwxr-xr-x. 3 root root 4096 10月 28 03:31 hadoop
drwxr-xr-x. 3 root root 4096 10月 28 03:27 java
[root@hadoop01 hadoop]# vi autossh.sh
# PWD_1 is the login password for every target host; set it to your own.
PWD_1=123456

# Collect target IPs from /etc/hosts, skipping IPv6 (::) and loopback
# (127.0.0.1) entries. NOTE: the original used curly "smart quotes" around
# the grep patterns, so neither filter matched and whole lines (IP plus
# hostnames) were iterated; use ASCII quotes and keep only the IP column.
ips=$(grep -v "::" /etc/hosts | grep -v "127.0.0.1" | awk '{print $1}')

# Generate an RSA key pair non-interactively: empty file prompt and empty
# passphrase are accepted with bare returns; an existing key is preserved
# by answering 'n' to the overwrite prompt. (expect's send needs ASCII
# "--", not the en-dash the original contained.)
key_generate() {
  expect -c "set timeout -1;
    spawn ssh-keygen -t rsa;
    expect {
      {Enter file in which to save the key*} {send -- \r;exp_continue}
      {Enter passphrase*} {send -- \r;exp_continue}
      {Enter same passphrase again:} {send -- \r;exp_continue}
      {Overwrite (y/n)*} {send -- n\r;exp_continue}
      eof {exit 0;}
    };"
}

# Push the local public key to root@$1, answering the host-key
# confirmation with 'yes' and the password prompt with $2.
auto_ssh_copy_id() {
  expect -c "set timeout -1;
    spawn ssh-copy-id -i $HOME/.ssh/id_rsa.pub root@$1;
    expect {
      {Are you sure you want to continue connecting *} {send -- yes\r;exp_continue;}
      {*password:} {send -- $2\r;exp_continue;}
      eof {exit 0;}
    };"
}

# Start from a clean ~/.ssh, create a fresh key, then distribute it to
# every host gathered above.
rm -rf ~/.ssh
key_generate
for ip in $ips; do
  auto_ssh_copy_id "$ip" "$PWD_1"
done
通过 ssh hostname 检查免密登录是否成功
15、启动集群
初始化HDFS(在hadoop01进行操作)(操作一次就ok)
bin/hadoop namenode -format
启动HDFS
sbin/start-dfs.sh
启动YARN
sbin/start-yarn.sh
通过 hostname:50070
hostname:8088
检测是否启动成功