On the master host, edit the hosts file:
[root@master /]# vim /etc/hosts
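The hosts file maps each hostname to its IP address. A minimal sketch of the required entries; the addresses below are placeholders and must be replaced with the real IPs of your three machines:
192.168.1.10 master
192.168.1.11 slave1
192.168.1.12 slave2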
Distribute the master's hosts file to slave1 and slave2:
[root@master /]# scp /etc/hosts root@slave1:/etc
[root@master /]# scp /etc/hosts root@slave2:/etc
Generate an SSH key pair, pressing Enter three times to accept the defaults:
[root@master /]# ssh-keygen
Copy the generated public key to slave1 and slave2. Then repeat this operation on slave1 and slave2: first generate a key pair, then copy the public key to the other two hosts.
[root@master hadoop]# ssh-copy-id slave1
[root@master hadoop]# ssh-copy-id slave2
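As an optional check that passwordless login now works, each host should be able to reach the others without a password prompt, for example:
[root@master hadoop]# ssh slave1 hostname
slave1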
Change into the Hadoop configuration directory and confirm the location:
[root@master hadoop]# pwd
/usr/cstor/hadoop/etc/hadoop
Now edit the configuration files.
hadoop-env.sh
Configure the JAVA_HOME path. First, check where the JDK is installed:
[root@master hadoop]# which java
/usr/local/jdk1.8.0_161/bin/java
Set JAVA_HOME in hadoop-env.sh; note that the path must not include /bin or anything after it:
[root@master hadoop]# vim hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_161
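To confirm the setting was written correctly, a quick optional check:
[root@master hadoop]# grep '^export JAVA_HOME' hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_161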
core-site.xml
<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/dtadmin/hadooptmp</value>
  </property>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:8020</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
</configuration>
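fs.defaultFS tells every client and daemon where the NameNode's RPC endpoint is (port 8020 here), and hadoop.tmp.dir is the base directory for Hadoop's temporary files. One optional way to confirm the value is being picked up, run from the installation root /usr/cstor/hadoop:
[root@master hadoop]# bin/hdfs getconf -confKey fs.defaultFS
hdfs://master:8020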
hdfs-site.xml
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/dtadmin/hadoopdata/namenode</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/dtadmin/hadoopdata/datanode</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
  </property>
</configuration>
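The name and data directories configured above should exist and be writable before formatting; a sketch of pre-creating them (run the datanode line on slave1 and slave2 as well):
[root@master hadoop]# mkdir -p /home/dtadmin/hadoopdata/namenode
[root@master hadoop]# mkdir -p /home/dtadmin/hadoopdata/datanode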
yarn-site.xml
<configuration>
  <!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>master:8032</value>
  </property>
  <property>
    <name>yarn.acl.enable</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.admin.acl</name>
    <value>*</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>master:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>master</value>
  </property>
</configuration>
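Only HDFS is started in the steps below; when you later want to run YARN jobs, the ResourceManager and NodeManagers can be started from the same directory:
[root@master hadoop]# sbin/start-yarn.sh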
slaves
slave1
slave2
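start-dfs.sh reads this file to decide on which hosts to launch DataNode daemons, so a quick sanity check of its contents is worthwhile:
[root@master hadoop]# cat /usr/cstor/hadoop/etc/hadoop/slaves
slave1
slave2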
Distribute the configured Hadoop directory to slave1 and slave2. Locate the Hadoop installation with which hadoop; everything under /usr/cstor/hadoop is copied, and the -r flag makes scp recurse through all files:
[root@master /]# which hadoop
/usr/cstor/hadoop/bin/hadoop
[root@master /]# scp -r /usr/cstor/hadoop/ root@slave1:/usr/cstor
[root@master /]# scp -r /usr/cstor/hadoop/ root@slave2:/usr/cstor
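Optionally verify that the configuration arrived intact on the slaves, for example by checking one of the edited files:
[root@master /]# ssh slave1 grep '^export JAVA_HOME' /usr/cstor/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_161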
Format the NameNode:
[root@master hadoop]# bin/hdfs namenode -format
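A successful format populates the NameNode storage directory configured in hdfs-site.xml; it should now contain a current/ directory with contents along these lines:
[root@master hadoop]# ls /home/dtadmin/hadoopdata/namenode/current
VERSION  fsimage_0000000000000000000  fsimage_0000000000000000000.md5  seen_txid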
Start the NameNode and DataNode daemons:
[root@master hadoop]# sbin/start-dfs.sh
Run jps to verify the daemons; the result looks like this:
[root@master hadoop]# jps
533 NameNode
2104 Jps
777 DataNode
1001 SecondaryNameNode
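To confirm that the DataNodes registered with the NameNode, ask for a cluster report, which lists the live datanodes; with default settings the HDFS web UI is also reachable at http://master:50070:
[root@master hadoop]# bin/hdfs dfsadmin -report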