1. Install the JDK
Edit the profile:
vi /etc/profile
Add:
# java env
export JAVA_HOME=/java/jdk1.8.0_281
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
Apply the changes:
source /etc/profile
Verify:
java -version
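If the profile took effect, the shell should also resolve java from the new JAVA_HOME; a quick sanity check (expected output assumes the install path above and no other JDK earlier on the PATH):
which java        # should print /java/jdk1.8.0_281/bin/java
echo $JAVA_HOME   # should print /java/jdk1.8.0_281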
2. Passwordless SSH login
Test:
ssh localhost
If that fails, install SSH first:
yum install openssh-clients openssh-server
Generate a key with an empty passphrase so that no password prompt is needed:
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
Verify that login no longer asks for a password:
ssh localhost
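To confirm the login is truly non-interactive (rather than relying on a cached credential), ssh can be told to fail instead of prompting; a minimal check:
ssh -o BatchMode=yes localhost 'echo passwordless ok'   # prints "passwordless ok" only if no prompt was needed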
3. Install Hadoop
Edit the profile:
vi /etc/profile
Add:
# hadoop env
export HADOOP_HOME=/hadoop/hadoop-3.3.0
export PATH=$PATH:$HADOOP_HOME/bin
export HADOOP_HOME_WARN_SUPPRESS=1
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS=-Djava.library.path=$HADOOP_HOME/lib
Apply the changes:
source /etc/profile
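Verify that the hadoop binary is now on the PATH:
hadoop version   # should report Hadoop 3.3.0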
4. Edit the configuration files
cd /hadoop/hadoop-3.3.0/etc/hadoop
vi core-site.xml
<configuration>
    <!-- Address of the HDFS NameNode -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
    <!-- Hadoop temporary directory -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/hadoop/tmp</value>
    </property>
</configuration>
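Since hadoop.tmp.dir points at /hadoop/tmp, creating the directory up front avoids surprises (the format step in section 5 creates its subdirectories, so this is only a precaution):
mkdir -p /hadoop/tmp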
vi hdfs-site.xml
<configuration>
    <!--<property>
        <name>fs.default.name</name>
        <value>localhost:9000</value>
    </property>
    <property>
        <name>mapred.job.tracker</name>
        <value>localhost:9001</value>
    </property>-->
    <!-- The official docs configure only the replication factor -->
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <!-- Storage paths for the NameNode and DataNode -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/hadoop/tmp/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/hadoop/tmp/dfs/data</value>
    </property>
    <!-- Allow HDFS directories to be browsed over the web -->
    <!--<property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>-->
    <!-- Older releases served the NameNode web UI on port 50070 -->
    <!-- Version 3.3 serves it on port 9870 -->
    <!--<property>
        <name>dfs.namenode.http-address</name>
        <value>slave1:9870</value>
    </property>-->
</configuration>
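One more file is worth checking while in this directory: Hadoop's start scripts do not always inherit JAVA_HOME from the login shell, and the usual fix is to set it explicitly in hadoop-env.sh (the path below assumes the JDK location from section 1):
vi hadoop-env.sh
export JAVA_HOME=/java/jdk1.8.0_281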
5. Start Hadoop (and how to recover if it fails to start)
./sbin/stop-dfs.sh   # stop HDFS
rm -r ./tmp          # delete the tmp directory; note this erases all existing data in HDFS
./bin/hdfs namenode -format   # reformat the NameNode
./sbin/start-dfs.sh  # start again
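If the start succeeded, jps (shipped with the JDK) should list the HDFS daemons, and the NameNode web UI should answer on port 9870 as noted in section 4:
jps   # expect NameNode, DataNode, and SecondaryNameNode among the output
curl http://localhost:9870/   # the NameNode web UI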
Note: on Hadoop 3.x, running these scripts as root fails with errors such as "Attempting to operate on hdfs namenode as root ... there is no HDFS_NAMENODE_USER defined" until the daemon users are declared. Add the following at the top of the scripts:
vi start-dfs.sh
vi stop-dfs.sh
HDFS_DATANODE_USER=root
HADOOP_SECURE_DN_USER=hdfs   # newer 3.x releases warn that this is replaced by HDFS_DATANODE_SECURE_USER
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
vi start-yarn.sh
vi stop-yarn.sh
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn   # likewise deprecated in newer 3.x releases
YARN_NODEMANAGER_USER=root