系统:CentOS7
环境:jdk8
版本:hadoop-2.7.7
结构
hadoop01 | namenode、datanode |
hadoop02 | datanode |
hadoop03 | datanode |
配置 hadoop-2.7.7/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/opt/app/jdk1.8.0_221  # 这里是我的jdk安装目录(shell注释用 # 而不是 //)
配置 hadoop-2.7.7/etc/hadoop/core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop01:9000</value>
<final>true</final>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/kjctar/tmp</value> <!-- 临时目录 -->
</property>
<!-- i/o properties -->
<property>
<name>io.file.buffer.size</name>
<value>4096</value>
</property>
</configuration>
配置hadoop-2.7.7/etc/hadoop/hdfs-site.xml
<!-- /home/hadoopdata/ 是我定义的元数据目录 -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/home/hadoopdata/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/home/hadoopdata/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<!-- fs.checkpoint.dir 是1.x的旧属性名,2.x应使用 dfs.namenode.checkpoint.dir -->
<name>dfs.namenode.checkpoint.dir</name>
<value>/home/hadoopdata/checkpoint/dfs/cname</value>
</property>
<property>
<!-- dfs.http.address 已废弃,2.x应使用 dfs.namenode.http-address -->
<name>dfs.namenode.http-address</name>
<value>hadoop01:50070</value>
</property>
<property>
<!-- dfs.secondary.http.address 已废弃,2.x应使用 dfs.namenode.secondary.http-address -->
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop01:50090</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
<value>false</value>
</property>
</configuration>
配置 hadoop-2.7.7/etc/hadoop/mapred-site.xml (发行包中默认只有 mapred-site.xml.template,需先复制一份: cp mapred-site.xml.template mapred-site.xml)
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop01:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop01:19888</value>
<description>MapReduce JobHistory Server Web UI host:port</description>
</property>
</configuration>
配置hadoop-2.7.7/etc/hadoop/yarn-site.xml
<configuration>
<!-- yarn.nodemanager.aux-services 只能配置一次,值必须是 mapreduce_shuffle;
     不能写成主机名(如 hadoop01),否则 NodeManager 会启动失败 -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop01</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>hadoop01:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>hadoop01:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>hadoop01:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>hadoop01:8033</value>
</property>
<property>
<description>The http address of the RM web application.</description>
<name>yarn.resourcemanager.webapp.address</name>
<value>hadoop01:8088</value>
</property>
<!-- Site specific YARN configuration properties -->
</configuration>
然后切换到 hadoop-2.7.7/bin 目录执行如下命令(注意 -format 前有空格,且是普通连字符 - 而不是破折号 –):
./hdfs namenode -format
(旧写法 ./hadoop namenode -format 在2.x中已废弃;两条命令作用相同,只需执行一次,重复格式化会导致 namenode 与 datanode 的 clusterID 不一致)
再切换到 hadoop-2.7.7/sbin 启动
./start-dfs.sh
然后访问主机的50070端口,正常情况下会显示三个datanode节点
那么如果显示 0个节点 解决方法如下:
方案一https://blog.csdn.net/nengliweb/article/details/41208649
方案二https://blog.csdn.net/qq_25662627/article/details/81143117
方案三 如果主节点显示,而从节点不显示,多半是防火墙没有关,把主节点和从节点的防火墙都关闭:
systemctl stop firewalld
systemctl disable firewalld  # 禁止开机自启,否则重启后防火墙会再次开启