Hadoop Cluster Setup

vim /etc/hosts

192.168.1.2 Master.Hadoop
192.168.1.3 Slave1.Hadoop
192.168.1.4 Slave2.Hadoop
192.168.1.5 Slave3.Hadoop

If every machine can be pinged by hostname, the entries just added to /etc/hosts are resolving names correctly within the LAN.
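A quick way to run that check from one node (a minimal sketch; the hostnames are the ones added to /etc/hosts above):

# ping each node once by hostname; any failure means /etc/hosts is wrong on this machine
for h in Master.Hadoop Slave1.Hadoop Slave2.Hadoop Slave3.Hadoop; do
    ping -c 1 "$h" && echo "$h resolves OK"
done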

hadoop:
https://dlcdn.apache.org/hadoop/common/hadoop-3.3.6/hadoop-3.3.6.tar.gz

jdk:
yum install java-1.8.0-openjdk  java-1.8.0-openjdk-devel
vim ~/.bashrc    # open .bashrc in vim and append the line below
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
source ~/.bashrc    # reload so the variable takes effect
echo $JAVA_HOME     # check the value
java -version
Make sure every server has the packages below installed and that the machines can initially log in to one another with password authentication.
yum install openssh-server openssh-clients    # install SSH (usually preinstalled on CentOS; the package there is not named "ssh")
yum install rsync    # rsync is a remote file-synchronization tool that can quickly sync files between hosts over a LAN/WAN
service sshd restart    # restart the SSH service
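Hadoop's start-dfs.sh / start-yarn.sh scripts log in to the worker nodes over SSH, so it is convenient to set up passwordless login from the master to every node (including itself). A minimal sketch, assuming the root user and the hostnames configured in /etc/hosts above:

# generate a key pair on the master (press Enter through the prompts)
ssh-keygen -t rsa -b 4096
# copy the public key to every node, the master itself included
for h in Master.Hadoop Slave1.Hadoop Slave2.Hadoop Slave3.Hadoop; do
    ssh-copy-id root@"$h"
done
# verify: this should no longer ask for a password
ssh Slave1.Hadoop hostname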
# Append the following at the end of the file (hadoop-env.sh under $HADOOP_HOME/etc/hadoop, or /etc/profile, are the usual places):
# Java installation path
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
# Hadoop installation path
export HADOOP_HOME=/usr/local/hadoop-3.3.6
# Hadoop HDFS configuration directory
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
# Hadoop YARN configuration directory
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
# Hadoop YARN log directory
export YARN_LOG_DIR=$HADOOP_HOME/logs/yarn
# Hadoop HDFS log directory
export HADOOP_LOG_DIR=$HADOOP_HOME/logs/hdfs

# Users that the Hadoop daemons start as
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
export YARN_PROXYSERVER_USER=root
cd /usr/local/hadoop-3.3.6/etc/hadoop/
vi core-site.xml
Add the following between <configuration> and </configuration>:
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://node12:8020</value>
    <description></description>
</property>
 
<property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
    <description></description>
</property>
<!-- Directory where Hadoop stores the files it generates at runtime -->
<property>
     <name>hadoop.tmp.dir</name>
     <value>/data/hadoop/tmp</value>
</property>
vi hdfs-site.xml
  <property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>700</value>
  </property>
 
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/data/hadoop/nn</value>
    <description>Path on the local filesystem where the NameNode stores the namespace and transactions logs persistently.</description>
  </property>
 
  <property>
    <name>dfs.namenode.hosts</name>
    <value>node12,node13,node15</value>
    <description>List of permitted DataNodes.</description>
  </property>
 
  <property>
    <name>dfs.blocksize</name>
    <value>268435456</value>
    <description></description>
  </property>
 
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
    <description></description>
  </property>
 
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/data/hadoop/dn</value>
  </property>
vi mapred-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0    # match the JDK path used above (java-8-openjdk-amd64 is the Debian/Ubuntu path)
export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
vi mapred-site.xml
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    <description></description>
  </property>
 
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>node12:10020</value>
    <description></description>
  </property>
 
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>node12:19888</value>
    <description></description>
  </property>
 
  <property>
    <name>mapreduce.jobhistory.intermediate-done-dir</name>
    <value>/data/hadoop/mr-history/tmp</value>
    <description></description>
  </property>
 
  <property>
    <name>mapreduce.jobhistory.done-dir</name>
    <value>/data/hadoop/mr-history/done</value>
    <description></description>
  </property>
  
<property>
  <name>yarn.app.mapreduce.am.env</name>
  <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value>
</property>

<property>
  <name>mapreduce.map.env</name>
  <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value>
</property>
<property>
  <name>mapreduce.reduce.env</name>
  <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value>
</property>
vi yarn-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0
export HADOOP_HOME=/usr/local/hadoop-3.3.6
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_LOG_DIR=$HADOOP_HOME/logs/yarn
export HADOOP_LOG_DIR=$HADOOP_HOME/logs/hdfs
vi yarn-site.xml
<property>
    <name>yarn.log.server.url</name>
    <value>http://node12:19888/jobhistory/logs</value>
    <description></description>
</property>
 
  <property>
    <name>yarn.web-proxy.address</name>
    <value>node12:8089</value>
    <description>proxy server hostname and port</description>
  </property>
  
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
    <description>Configuration to enable or disable log aggregation</description>
  </property>
 
  <property>
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>/tmp/logs</value>
<description>HDFS directory to which application logs are aggregated.</description>
  </property>
 
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>node12</value>
    <description></description>
  </property>
 
  <property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
    <description></description>
  </property>
 
  <property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/data/hadoop/nm-local</value>
    <description>Comma-separated list of paths on the local filesystem where intermediate data is written.</description>
  </property>
  
  <property>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/data/hadoop/nm-log</value>
    <description>Comma-separated list of paths on the local filesystem where logs are written.</description>
  </property>
 
  <property>
    <name>yarn.nodemanager.log.retain-seconds</name>
    <value>10800</value>
<description>Default time (in seconds) to retain log files on the NodeManager. Only applicable if log aggregation is disabled.</description>
  </property>
 
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
    <description>Shuffle service that needs to be set for Map Reduce applications.</description>
  </property>
Edit the workers file: vi workers
hadoop-node12
hadoop-node13
hadoop-node15

Distribute Hadoop to the other machines
scp -r /usr/local/hadoop-3.3.6 hadoop-node13:/usr/local
scp -r /usr/local/hadoop-3.3.6 hadoop-node15:/usr/local

Create the data directories on every machine
mkdir -p /data/hadoop/nn
mkdir -p /data/hadoop/dn
mkdir -p /data/hadoop/nm-log
mkdir -p /data/hadoop/nm-local
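With passwordless SSH in place, the same directories can be created on the other nodes from the master in one pass (a sketch; adjust the host list to your cluster):

for h in hadoop-node13 hadoop-node15; do
    ssh "$h" "mkdir -p /data/hadoop/nn /data/hadoop/dn /data/hadoop/nm-log /data/hadoop/nm-local"
done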
Edit the environment variables on every machine: vi /etc/profile

export HADOOP_HOME=/usr/local/hadoop-3.3.6
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile    # reload so the changes take effect
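After reloading, the hadoop command should be on the PATH; a quick check:

hadoop version    # should report Hadoop 3.3.6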
Start the cluster
Format the NameNode; run the format command on the master node, node12.
Run the format command only once.
hdfs namenode -format    # `hadoop namenode -format` still works but is deprecated in Hadoop 3
start-all.sh
stop-all.sh

Run jps on every node to verify that all processes started successfully:
node12 (master): should show NameNode, DataNode, and ResourceManager (plus NodeManager, since it is also listed in workers)
node13 and node15: should show DataNode and NodeManager
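With passwordless SSH set up, the same check can be run from the master in one go (a sketch; hostnames as listed in the workers file above):

for h in hadoop-node12 hadoop-node13 hadoop-node15; do
    echo "== $h =="
    ssh "$h" jps
done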
Verify HDFS: open http://node12:9870 (the NameNode host) in a browser; the web UI should load normally.
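Besides the web UI, the filesystem can be exercised from the command line with standard hdfs commands (the /tmp/smoke path below is just an example):

hdfs dfsadmin -report          # lists the live DataNodes and their capacity
hdfs dfs -mkdir -p /tmp/smoke
hdfs dfs -put /etc/hosts /tmp/smoke/
hdfs dfs -ls /tmp/smoke        # the uploaded file should appear here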

Open port 19888 (JobHistory server web UI):
Start the history server
mapred --daemon start historyserver
# to stop, replace start with stop
mapred --daemon stop historyserver
Start the web proxy server
yarn --daemon start proxyserver    # the old yarn-daemon.sh script is deprecated in Hadoop 3
# to stop, replace start with stop
yarn --daemon stop proxyserver
netstat -tpnl | grep java    # confirm the ports are listening
Port 8088 (the ResourceManager web UI) should be open as well.
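If the web UIs are unreachable from other machines, the ports may need to be opened in the system firewall. A sketch assuming firewalld on CentOS (on a trusted LAN, stopping firewalld is the simpler alternative):

firewall-cmd --permanent --add-port=9870/tcp     # NameNode web UI
firewall-cmd --permanent --add-port=8088/tcp     # ResourceManager web UI
firewall-cmd --permanent --add-port=19888/tcp    # JobHistory web UI
firewall-cmd --permanent --add-port=8089/tcp     # web proxy, per yarn-site.xml above
firewall-cmd --reload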

Fix for "wget: unable to resolve host address"
sudo vim /etc/resolv.conf
Change the name servers:

options timeout:2 attempts:3 rotate single-request-reopen
; generated by /usr/sbin/dhclient-script
#nameserver 100.100.2.138
#nameserver 100.100.2.136
nameserver 8.8.8.8
nameserver 8.8.4.4
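A quick check that name resolution works again (getent is available on any glibc system):

getent hosts dlcdn.apache.org    # should print one or more IP addresses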
Problem:
[root@hadoop-node12 html]# tar -zxvf HDP-3.1.5.0-centos7-rpm.tar.gz /var/www/html/
tar (child): HDP-3.1.5.0-centos7-rpm.tar.gz: Cannot open: No such file or directory
tar (child): Error is not recoverable: exiting now
tar: Child returned status 2
tar: Error is not recoverable: exiting now
Solution
tar cannot find the archive in the current directory, and the target directory must be given with -C rather than as a bare argument. Run the command from the directory that contains the archive (or pass its full path):
tar -zxvf HDP-3.1.5.0-centos7-rpm.tar.gz -C /var/www/html/

Problem: Ambari Confirm Hosts fails
Testing showed that ssh localhost on the host still prompted for a password, so the contents of id_rsa.pub need to be appended to authorized_keys so that the host can log in to itself without a password.
Run the following commands in order:

cd ~/.ssh
cat id_rsa.pub >> authorized_keys
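If login still prompts for a password, sshd may be rejecting the key because of loose permissions; tightening them is a common additional step (an assumption worth checking, not something the original error message confirms):

chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
ssh localhost hostname    # should now complete without a password prompt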

Problem: Server at https://hadoop-node20:8440 is not reachable
Make sure that server.hostname in /etc/ambari-agent/conf/ambari-agent.ini is set to the hostname or IP of the ambari-server node.