I. Create a unified working directory (on all three machines)
mkdir -p /export/server    # software installation path
mkdir -p /export/data      # data storage path
mkdir -p /export/software  # installation package path
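The three directories can also be created on all machines in one go. A minimal convenience sketch, assuming passwordless SSH as root to hadoop1/hadoop2/hadoop3 is already set up (a Hadoop cluster needs this anyway):
for host in hadoop1 hadoop2 hadoop3; do
    ssh root@$host "mkdir -p /export/server /export/data /export/software"
done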
II. Upload the JDK package to /export/software and extract it
cd /export/software
tar -zxvf jdk-8u162-linux-x64.tar.gz -C /export/server/
Create a symlink:
ln -s /export/server/jdk1.8.0_162/ /export/server/jdk
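A quick way to confirm the symlink resolves to the right place:
ls -l /export/server/jdk        # should show ... -> /export/server/jdk1.8.0_162/
readlink -f /export/server/jdk  # prints the resolved target path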
Configure the environment variables:
vim /etc/profile
export JAVA_HOME=/export/server/jdk
export PATH=$PATH:$JAVA_HOME/bin
Apply the environment variables:
source /etc/profile
Verify:
java -version
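If JAVA_HOME and PATH are picked up correctly, the output should look roughly like this (the exact build string depends on your JDK package):
java version "1.8.0_162"
Java(TM) SE Runtime Environment (build 1.8.0_162-...)
Java HotSpot(TM) 64-Bit Server VM (build ..., mixed mode)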
Configure the JDK on the other two machines
Use scp -r to recursively copy the whole directory:
scp -r /export/server/jdk1.8.0_162/ root@hadoop2:/export/server/
scp -r /export/server/jdk1.8.0_162/ root@hadoop3:/export/server/
On hadoop2:
Create the symlink:
ln -s /export/server/jdk1.8.0_162/ /export/server/jdk
On hadoop3:
Create the symlink:
ln -s /export/server/jdk1.8.0_162/ /export/server/jdk
Back on hadoop1:
Copy the environment variables over:
scp /etc/profile root@hadoop2:/etc/profile
scp /etc/profile root@hadoop3:/etc/profile
Then run source /etc/profile on hadoop2 and hadoop3 and verify:
java -version
III. Upload the Hadoop package to /export/software and extract it
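The extraction mirrors the JDK step. Assuming the archive is named hadoop-2.7.3.tar.gz (the hadoop-2.7.3 directory used below implies this):
cd /export/software
tar -zxvf hadoop-2.7.3.tar.gz -C /export/server/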
Configuration files:
1. hadoop-env.sh
cd /export/server/hadoop-2.7.3/etc/hadoop/
vim hadoop-env.sh
export JAVA_HOME=/export/server/jdk
2. core-site.xml
vim core-site.xml
<configuration>
    <!-- default NameNode communication (RPC) address -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop1:8020</value>
    </property>
    <!-- base path for the whole cluster's local data -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/export/data/hadoop-2.7.3</value>
    </property>
</configuration>
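Once the Hadoop environment variables further down are in place, an optional sanity check shows whether these values were actually picked up:
hdfs getconf -confKey fs.defaultFS     # expect hdfs://hadoop1:8020
hdfs getconf -confKey hadoop.tmp.dir   # expect /export/data/hadoop-2.7.3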
3. hdfs-site.xml
vim hdfs-site.xml
<configuration>
    <!-- SecondaryNameNode HTTP address -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hadoop2:9868</value>
    </property>
</configuration>
4. mapred-site.xml
Hadoop 2.x needs this extra step first (copy the template):
cp mapred-site.xml.template mapred-site.xml
vim mapred-site.xml
<configuration>
    <!-- run MapReduce jobs on the YARN framework -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
5. yarn-site.xml
vim yarn-site.xml
<configuration>
    <!-- host where the YARN master role (ResourceManager) runs -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop1</value>
    </property>
    <!-- auxiliary shuffle service -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
6. slaves (workers): the file is called slaves in Hadoop 2.x and workers in 3.x
vim slaves
hadoop1
hadoop2
hadoop3
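Before distributing Hadoop and before the start-up scripts try to reach these workers, it is worth confirming that passwordless SSH to every host listed above works. A small check, assuming root is the user that will run the cluster:
for host in hadoop1 hadoop2 hadoop3; do
    ssh root@$host hostname   # should print each hostname without prompting for a password
done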
Distribute to hadoop2 and hadoop3:
scp -r /export/server/hadoop-2.7.3/ root@hadoop2:/export/server/
scp -r /export/server/hadoop-2.7.3/ root@hadoop3:/export/server/
Configure the environment variables:
vim /etc/profile
export HADOOP_HOME=/export/server/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
scp /etc/profile root@hadoop2:/etc/profile
scp /etc/profile root@hadoop3:/etc/profile
Apply the changes on all three machines:
source /etc/profile
Verify:
hadoop version
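The first line of the output should report the installed release, roughly:
Hadoop 2.7.3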
To be honest I am not sure why the hadoop command was not found at first; rewriting /etc/profile as below and re-sourcing it fixed the problem:
export JAVA_HOME=/export/server/jdk
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_HOME=/export/server/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
Format operation (initialize HDFS)
Run this on hadoop1 only, and run it exactly once; never repeat it.
hdfs namenode -format
The format succeeded if the output contains the "successfully formatted" message (the part highlighted in red in the original screenshot).
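As an optional extra check, based on the hadoop.tmp.dir configured above (the NameNode metadata directory defaults to ${hadoop.tmp.dir}/dfs/name), the format should have created an initial fsimage:
ls /export/data/hadoop-2.7.3/dfs/name/current
# expect files such as VERSION, fsimage_0000000000000000000, seen_txid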