- Configure SSH
# Install SSH
apt-get install ssh
# Generate a new SSH key with an empty passphrase, to enable password-less login
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# Test the connection
ssh localhost
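If ssh localhost still prompts for a password, the usual culprit is overly permissive permissions on ~/.ssh; a minimal fix, assuming the default home-directory layout:
chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys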
- Configure Hadoop
Move the configuration directory
mv /usr/local/hadoop/etc/hadoop/ /data/dfs/conf/
core-site.xml
<?xml version="1.0"?>
<configuration>
  <property>
    <!-- Base directory for files generated at Hadoop runtime -->
    <name>hadoop.tmp.dir</name>
    <value>file:/data/dfs/tmp</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.140.128:9000</value>
  </property>
  <property>
    <name>hadoop.http.filter.initializers</name>
    <value>org.apache.hadoop.security.AuthenticationFilterInitializer</value>
  </property>
  <property>
    <name>hadoop.http.authentication.type</name>
    <value>simple</value>
  </property>
  <property>
    <name>hadoop.http.authentication.token.validity</name>
    <value>3600</value>
  </property>
  <property>
    <name>hadoop.http.authentication.signature.secret.file</name>
    <value>/data/dfs/auth/hadoop-http-auth-signature-secret</value>
  </property>
  <property>
    <name>hadoop.http.authentication.cookie.domain</name>
    <value></value>
  </property>
  <property>
    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
    <value>false</value>
  </property>
  <!-- Hadoop proxy-user settings; uncomment if Flume needs to impersonate users -->
  <!--
  <property>
    <name>hadoop.proxyuser.flume.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.flume.groups</name>
    <value>*</value>
  </property>
  -->
</configuration>
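The signature secret file referenced above must exist before the daemons start, and because anonymous access is disabled every request to a Hadoop web UI needs a user.name query parameter. A minimal sketch (the random secret is just an illustration; any non-empty string works):
mkdir -p /data/dfs/auth
head -c 32 /dev/urandom | base64 > /data/dfs/auth/hadoop-http-auth-signature-secret
chmod 600 /data/dfs/auth/hadoop-http-auth-signature-secret
# Example request to the NameNode web UI (port 9870 on Hadoop 3.x)
curl "http://192.168.140.128:9870/?user.name=root"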
hdfs-site.xml
<?xml version="1.0"?>
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/data/dfs/namenode</value>
    <description>NameNode directory for namespace and transaction logs storage.</description>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/data/dfs/datanode</value>
    <description>DataNode directory</description>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>
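Creating the file: paths above up front, writable by the user that runs the daemons, avoids permission surprises at start-up:
mkdir -p /data/dfs/namenode /data/dfs/datanode /data/dfs/tmp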
mapred-site.xml
<?xml version="1.0"?>
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
yarn-site.xml
<?xml version="1.0"?>
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>192.168.140.128</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <description>The http address of the RM web application.</description>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>0.0.0.0:8088</value>
  </property>
</configuration>
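The HTTP authentication filter configured in core-site.xml applies to the Hadoop web consoles, so requests to the ResourceManager UI on port 8088 should also carry the user.name parameter, for example:
curl "http://192.168.140.128:8088/cluster?user.name=root"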
Add startup parameters
Note: on Hadoop 3.x the old HADOOP_SECURE_DN_USER variable is deprecated; start-up prints "HADOOP_SECURE_DN_USER has been replaced by HDFS_DATANODE_SECURE_USER. Using value of HADOOP_SECURE_DN_USER."
start-dfs.sh, stop-dfs.sh
#!/usr/bin/env bash
HDFS_DATANODE_USER=root
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
start-yarn.sh, stop-yarn.sh
#!/usr/bin/env bash
YARN_RESOURCEMANAGER_USER=root
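# Deprecated name, kept for compatibility; Hadoop 3 reports it as replaced by HDFS_DATANODE_SECURE_USER (see the note above)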
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
Set JAVA_HOME
/data/dfs/conf/hadoop-env.sh (the configuration directory moved above)
export JAVA_HOME=/usr/java/jdk1.8.0_181
添加环境变量
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_CONF_DIR=/data/dfs/conf
export PATH=$PATH:/usr/local/hadoop/bin:/usr/local/hadoop/sbin
source ~/.bashrc
- Format the HDFS filesystem
# Before using Hadoop for the first time, the filesystem must be formatted
hdfs namenode -format
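A quick check that formatting succeeded, given the dfs.namenode.name.dir configured above:
ls /data/dfs/namenode/current
# Should list VERSION, seen_txid and an initial fsimage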
- Start and stop the daemons
# Start (a jps sanity check follows after this block)
# Starts a namenode, a secondary namenode and a datanode (HDFS)
start-dfs.sh --config /data/dfs/conf
# Starts a resource manager and a node manager (YARN)
start-yarn.sh --config /data/dfs/conf
# Starts a history server (MapReduce)
# Note: deprecated on Hadoop 3.x in favor of: mapred --daemon start historyserver
mr-jobhistory-daemon.sh --config /data/dfs/conf start historyserver
# Stop
mr-jobhistory-daemon.sh stop historyserver
stop-yarn.sh
stop-dfs.sh
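After starting everything, jps gives a quick sanity check; on this single-node setup the expected Java processes look roughly like:
jps
# NameNode
# DataNode
# SecondaryNameNode
# ResourceManager
# NodeManager
# JobHistoryServer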
- Create the user directory
hadoop fs -mkdir -p /user/$USER
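With the user directory in place, a small end-to-end smoke test, assuming the stock tarball layout for the examples jar:
echo hello > /tmp/test.txt
hadoop fs -put /tmp/test.txt /user/$USER/
hadoop fs -ls /user/$USER
# Exercise YARN and MapReduce with the bundled pi example
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 2 5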
This article walks through configuring a Hadoop cluster in detail: setting up password-less SSH login, the key configuration files core-site.xml, hdfs-site.xml, mapred-site.xml and yarn-site.xml, and the steps to start and stop the Hadoop services.