cd /opt
mkdir soft
mkdir data
tar -zxvf soft/jdk-8u202-linux-x64.tar.gz
tar -zxvf soft/hadoop-2.7.1.tar.gz
mv jdk1.8.0_202 jdk
mv hadoop-2.7.1 hadoop
vi /etc/profile.d/hadoop-eco.sh
export JAVA_HOME=/opt/jdk
export PATH=$JAVA_HOME/bin:$PATH
export HADOOP_HOME=/opt/hadoop
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
source /etc/profile.d/hadoop-eco.sh
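A quick sanity check confirms the new environment is active in the current shell (assuming the variables were exported as above):
echo $JAVA_HOME    # should print /opt/jdk
which hadoop       # should resolve to /opt/hadoop/bin/hadoop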
java -version
ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub root@localhost
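Passwordless login can be verified before continuing; if the command below still prompts for a password, the key was not installed correctly:
ssh root@localhost date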
mkdir hadoop-record
cd hadoop-record
mkdir data
mkdir name
mkdir tmp
mkdir secondary
cd ..
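Equivalently, the four storage directories can be created in a single command with brace expansion (bash is assumed here):
mkdir -p /opt/hadoop-record/{data,name,tmp,secondary}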
cd /opt/hadoop/etc/hadoop
vi hadoop-env.sh
export JAVA_HOME=/opt/jdk
vi yarn-env.sh
export JAVA_HOME=/opt/jdk
vi core-site.xml
<configuration>
  <!-- URI of the NameNode -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://localhost:9000</value>
  </property>
  <!-- Base directory for files Hadoop generates at runtime -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoop-record/tmp</value>
  </property>
</configuration>
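To verify that Hadoop picks up this file, the effective value can be queried once HADOOP_HOME/bin is on the PATH; the expected output is hdfs://localhost:9000:
hdfs getconf -confKey fs.defaultFS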
vi hdfs-site.xml
<configuration>
  <!-- Number of block replicas -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <!-- Local storage path for NameNode metadata -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///opt/hadoop-record/name</value>
  </property>
  <!-- Local storage path for DataNode blocks -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///opt/hadoop-record/data</value>
  </property>
</configuration>
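The HDFS settings can be checked the same way, e.g. hdfs getconf -confKey dfs.replication should print 1. Note that Hadoop 2.7.1 ships only a template for the MapReduce configuration, so the file edited next is typically created by copying that template first (editing a new empty file also works):
cp mapred-site.xml.template mapred-site.xml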
vi mapred-site.xml
<configuration>
  <!-- Framework MapReduce jobs run on; the default is local -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
vi yarn-site.xml
<configuration>
  <!-- Host that runs the ResourceManager -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>localhost</value>
  </property>
  <!-- Auxiliary service needed for the MapReduce shuffle -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>
hadoop version
hdfs namenode -format
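Format the NameNode only once. If it ever has to be re-run, clearing the storage directories configured above beforehand avoids a clusterID mismatch between the NameNode and the DataNode (a sketch, using the paths from this setup):
rm -rf /opt/hadoop-record/name/* /opt/hadoop-record/data/* /opt/hadoop-record/tmp/*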
systemctl stop firewalld.service
start-all.sh
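After start-all.sh returns, jps should list the five single-node daemons: NameNode, DataNode, SecondaryNameNode, ResourceManager and NodeManager. The default Hadoop 2.x web UIs are another quick check:
jps
# NameNode UI:        http://localhost:50070
# ResourceManager UI: http://localhost:8088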