Hadoop 3.3.6 HA Distributed Installation
-
Install the Java environment
-
Install the ZooKeeper environment
-
Build Hadoop from source
-
Install Hadoop
cd /export/software
tar -zxvf hadoop-3.3.6.tar.gz -C ../server/
cd ../server/hadoop-3.3.6/
-
Configure hadoop-env.sh
export JAVA_HOME=/export/server/jdk1.8.0_241
export HADOOP_PID_DIR=/export/server/hadoop-3.3.6/hadoop_pid_dir_tmp
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
export HDFS_JOURNALNODE_USER=root
export HDFS_ZKFC_USER=root
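A quick sanity check at this point is to invoke the hadoop script directly from the unpacked directory; if JAVA_HOME in hadoop-env.sh is wrong, this fails immediately (a minimal sketch, assuming the install path above):
cd /export/server/hadoop-3.3.6
./bin/hadoop version   # should print "Hadoop 3.3.6" and the build information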
-
Check Hadoop's native-library dependencies in the new environment
[root@node00 bin]# ./hadoop checknative
2023-08-29 04:52:39,162 INFO bzip2.Bzip2Factory: Successfully loaded & initialized native-bzip2 library system-native
2023-08-29 04:52:39,164 INFO zlib.ZlibFactory: Successfully loaded & initialized native-zlib library
2023-08-29 04:52:39,198 INFO nativeio.NativeIO: The native code was built with PMDK support, and PMDK libs were loaded successfully.
Native library checking:
hadoop:  true /export/server/hadoop-3.3.6-src/hadoop-dist/target/hadoop-3.3.6/lib/native/libhadoop.so.1.0.0
zlib:    true /lib64/libz.so.1
zstd  :  true /lib64/libzstd.so.1
bzip2:   true /lib64/libbz2.so.1
openssl: true /lib64/libcrypto.so
ISA-L:   true /lib/libisal.so.2
PMDK:    true /usr/local/lib64/libpmem.so.1.0.0
# If any entry shows false, install the corresponding native library on all three machines first
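Since the libraries must be present on all three machines, one hedged way to spot gaps on the other nodes (assuming passwordless SSH between the hosts) is to list the shared libraries that checknative looks for:
# Hypothetical helper: list the shared libraries checknative depends on, per node
for h in node1 node2 node3; do
  echo "== $h =="
  ssh $h "ldconfig -p | grep -E 'libz\.so|libzstd|libbz2|libcrypto|libisal|libpmem'"
done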
-
Configure core-site.xml
<configuration>
  <!-- Default file system for Hadoop, specified by a URI -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://nns</value>
  </property>
  <!-- Base directory for Hadoop data; defaults to /tmp/hadoop-${user.name} -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/export/server/hadoop-3.3.6/hadoopDatas/tempDatas</value>
  </property>
  <!-- ZooKeeper quorum addresses -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
  </property>
  <!-- I/O buffer size in bytes; in production, tune according to server performance -->
  <property>
    <name>io.file.buffer.size</name>
    <value>4096</value>
  </property>
  <!-- Enable the HDFS trash; deleted data can be recovered from the trash within this interval (in minutes) -->
  <property>
    <name>fs.trash.interval</name>
    <value>10080</value>
  </property>
  <!-- Static user identity for the HDFS web UI -->
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>root</value>
  </property>
  <!-- Hosts from which the root user may act as a proxy -->
  <property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
  </property>
  <!-- Groups whose members root may impersonate -->
  <property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
  </property>
  <!-- Users that root may impersonate -->
  <property>
    <name>hadoop.proxyuser.root.users</name>
    <value>*</value>
  </property>
</configuration>
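Once the HADOOP_HOME/PATH variables further down are in place, the effective values can be read back with hdfs getconf, which is a quick way to catch typos in core-site.xml; a small sketch:
hdfs getconf -confKey fs.defaultFS        # expect hdfs://nns
hdfs getconf -confKey fs.trash.interval   # expect 10080
hdfs getconf -confKey ha.zookeeper.quorum # expect node1:2181,node2:2181,node3:2181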
-
hdfs-site.xml
<configuration>
  <!-- HDFS nameservice id (nns); must match the name used in core-site.xml -->
  <property>
    <name>dfs.nameservices</name>
    <value>nns</value>
  </property>
  <!-- NameNodes contained in the nameservice; the nns cluster has two NameNodes, nn1 and nn2 -->
  <property>
    <name>dfs.ha.namenodes.nns</name>
    <value>nn1,nn2</value>
  </property>
  <!-- RPC address and port of nn1; RPC is used to communicate with DataNodes. Default port: 9000 -->
  <property>
    <name>dfs.namenode.rpc-address.nns.nn1</name>
    <value>node1:9000</value>
  </property>
  <!-- RPC address and port of nn2; RPC is used to communicate with DataNodes. Default port: 9000 -->
  <property>
    <name>dfs.namenode.rpc-address.nns.nn2</name>
    <value>node2:9000</value>
  </property>
  <!-- HTTP address and port of nn1 (web UI) -->
  <property>
    <name>dfs.namenode.http-address.nns.nn1</name>
    <value>node1:9870</value>
  </property>
  <!-- HTTP address and port of nn2 (web UI) -->
  <property>
    <name>dfs.namenode.http-address.nns.nn2</name>
    <value>node2:9870</value>
  </property>
  <!-- Where NameNode metadata (edits) is stored on the JournalNodes; the other NameNode reads from this location to act as a hot standby -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://node1:8485;node2:8485;node3:8485/nns</value>
  </property>
  <!-- Automatic failover implementation: the proxy class clients use to find the Active NameNode. Default: org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider -->
  <property>
    <name>dfs.client.failover.proxy.provider.nns</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <!-- Fencing method used to prevent split-brain in HDFS HA. sshfence(user:port) may carry a username and port; passwordless SSH between the two NameNodes is required. Fencing guarantees only one NameNode is Active: if both become Active, the new one forcibly kills the old one -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <!-- Enable automatic NameNode failover -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <!-- Private key file used by the sshfence method above -->
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <!-- Local directory where the JournalNodes store their data -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/export/server/hadoop-3.3.6/hadoopDatas/journalnode</value>
  </property>
  <!-- Where the NameNode stores its metadata -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///export/server/hadoop-3.3.6/hadoopDatas/namenodeDatas</value>
  </property>
  <!-- Where DataNodes store block data -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///export/server/hadoop-3.3.6/hadoopDatas/datanodeDatas</value>
  </property>
  <!-- Where the NameNode stores its edits files -->
  <property>
    <name>dfs.namenode.edits.dir</name>
    <value>file:///export/server/hadoop-3.3.6/hadoopDatas/nn/edits</value>
  </property>
  <property>
    <name>dfs.namenode.checkpoint.edits.dir</name>
    <value>file:///export/server/hadoop-3.3.6/hadoopDatas/dfs/snn/edits</value>
  </property>
  <!-- Checkpoint directory -->
  <property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file:///export/server/hadoop-3.3.6/hadoopDatas/snn/name</value>
  </property>
  <!-- Number of block replicas -->
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <!-- HDFS file permission checking -->
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
  <!-- Block size: 128 MB -->
  <property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
  </property>
  <!-- File listing the permitted DataNodes -->
  <property>
    <name>dfs.hosts</name>
    <value>/export/server/hadoop-3.3.6/etc/hadoop/slaves</value>
  </property>
</configuration>
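Two prerequisites of this file are easy to verify up front: the nameservice must resolve to the two NameNodes, and sshfence needs passwordless SSH between node1 and node2 with the key configured above. A hedged check (run after the environment variables below are set):
hdfs getconf -namenodes                        # should list node1 and node2
ssh -i /root/.ssh/id_rsa root@node2 hostname   # from node1: must succeed without a password prompt
ssh -i /root/.ssh/id_rsa root@node1 hostname   # from node2: must succeed without a password prompt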
-
yarn-site.xml
<configuration>
  <!-- Site specific YARN configuration properties -->
  <!-- Whether to enable log aggregation. After an application finishes, the logs of each container are collected and moved to a file system such as HDFS. -->
  <!-- The destination can be set via "yarn.nodemanager.remote-app-log-dir" and "yarn.nodemanager.remote-app-log-dir-suffix" -->
  <!-- Users can access the aggregated logs through the application history server -->
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <!-- Enable ResourceManager HA -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <!-- Cluster id; ensures an RM does not become active for another cluster -->
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>mycluster</value>
  </property>
  <!-- IDs of the ResourceManagers in the cluster; the settings below reference these IDs -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <!-- Host running rm1 -->
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>node2</value>
  </property>
  <!-- Host running rm2 -->
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>node3</value>
  </property>
  <!-- Web UI address of ResourceManager 1 -->
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>node2:8088</value>
  </property>
  <!-- Web UI address of ResourceManager 2 -->
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>node3:8088</value>
  </property>
  <!-- ResourceManager RPC addresses for rm1 (node2) -->
  <property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>node2:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>node2:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
    <value>node2:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm1</name>
    <value>node2:8033</value>
  </property>
  <!-- ResourceManager RPC addresses for rm2 (node3) -->
  <property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>node3:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>node3:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
    <value>node3:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm2</name>
    <value>node3:8033</value>
  </property>
  <!-- Enable ResourceManager restart/recovery; default is false -->
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <!-- Set this to rm1 on node2 and rm2 on node3. Note: when copying the configuration to the other RM host, this value must be changed there; do not set it on the remaining machines -->
  <property>
    <name>yarn.resourcemanager.ha.id</name>
    <value>rm2</value>
    <description>If we want to launch more than one RM in single node, we need this configuration</description>
  </property>
  <!-- Class used to store ResourceManager state -->
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <!-- ZooKeeper quorum -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>node2:2181,node3:2181,node1:2181</value>
    <description>For multiple zk services, separate them with comma</description>
  </property>
  <!-- Enable automatic ResourceManager failover -->
  <property>
    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
    <value>true</value>
    <description>Enable automatic failover; By default, it is enabled only when HA is enabled.</description>
  </property>
  <property>
    <name>yarn.client.failover-proxy-provider</name>
    <value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value>
  </property>
  <!-- Maximum number of CPU vcores that can be allocated to a single container; default 8 -->
  <property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>4</value>
  </property>
  <!-- Memory available per node, in MB -->
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>4096</value>
  </property>
  <!-- Minimum memory a single container may request; default 1024 MB -->
  <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
  </property>
  <!-- Maximum memory a single container may request; default 8192 MB -->
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>4096</value>
  </property>
  <!-- How long aggregated logs are retained before deletion -->
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>2592000</value><!-- 30 days -->
  </property>
  <!-- How long (in seconds) user logs are retained; only applies when log aggregation is disabled -->
  <property>
    <name>yarn.nodemanager.log.retain-seconds</name>
    <value>604800</value><!-- 7 days -->
  </property>
  <!-- Compression type used for aggregated logs -->
  <property>
    <name>yarn.nodemanager.log-aggregation.compression-type</name>
    <value>gz</value>
  </property>
  <!-- NodeManager local storage directory -->
  <property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/export/server/hadoop-3.3.6/hadoopDatas/yarn/local</value>
  </property>
  <!-- Maximum number of completed applications the ResourceManager keeps -->
  <property>
    <name>yarn.resourcemanager.max-completed-applications</name>
    <value>1000</value>
  </property>
  <!-- Comma-separated list of auxiliary services; names may only contain a-zA-Z0-9_ and cannot start with a digit -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <!-- Whether to enforce virtual memory limits on containers -->
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
  <!-- Log aggregation server URL -->
  <property>
    <name>yarn.log.server.url</name>
    <value>http://node3:19888/jobhistory/logs</value>
  </property>
  <!-- Environment variables that containers inherit from the NodeManager; for MapReduce applications, HADOOP_MAPRED_HOME must be included in addition to the defaults -->
  <property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
  </property>
</configuration>
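Because yarn.resourcemanager.ha.id has to differ on the two ResourceManager hosts, it is worth re-checking the value after copying the configuration around; a simple hedged check:
# Expect rm1 on node2 and rm2 on node3
grep -A 2 "yarn.resourcemanager.ha.id" $HADOOP_HOME/etc/hadoop/yarn-site.xml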
-
mapred-site.xml
<configuration>
  <!-- Default execution framework for MR jobs: yarn (cluster mode) or local -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- JobHistory server address -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>node3:10020</value>
  </property>
  <!-- JobHistory server web UI address -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>node3:19888</value>
  </property>
  <!-- The directory where MapReduce stores control files. Default ${hadoop.tmp.dir}/mapred/system -->
  <property>
    <name>mapreduce.jobtracker.system.dir</name>
    <value>/export/server/hadoop-3.3.6/hadoopDatas/system/jobtracker</value>
  </property>
  <!-- The amount of memory to request from the scheduler for each map task. Default 1024 -->
  <property>
    <name>mapreduce.map.memory.mb</name>
    <value>1024</value>
  </property>
  <!-- <property> <name>mapreduce.map.java.opts</name> <value>-Xmx1024m</value> </property> -->
  <!-- The amount of memory to request from the scheduler for each reduce task. Default 1024 -->
  <property>
    <name>mapreduce.reduce.memory.mb</name>
    <value>1024</value>
  </property>
  <!-- <property> <name>mapreduce.reduce.java.opts</name> <value>-Xmx2048m</value> </property> -->
  <!-- Total amount of buffer memory, in megabytes, used while sorting files. By default it gives each merge stream 1 MB, which should minimize seeks. Default 100 -->
  <property>
    <name>mapreduce.task.io.sort.mb</name>
    <value>100</value>
  </property>
  <!-- <property> <name>mapreduce.jobtracker.handler.count</name> <value>25</value> </property> -->
  <!-- Number of streams merged at once while sorting files; this determines the number of open file handles. Default 10 -->
  <property>
    <name>mapreduce.task.io.sort.factor</name>
    <value>10</value>
  </property>
  <!-- Number of parallel transfers run by reduce during the copy (shuffle) phase. Default 5 -->
  <property>
    <name>mapreduce.reduce.shuffle.parallelcopies</name>
    <value>15</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.command-opts</name>
    <value>-Xmx2048m</value>
  </property>
  <!-- Total memory required by the MR ApplicationMaster. Default 1536 -->
  <property>
    <name>yarn.app.mapreduce.am.resource.mb</name>
    <value>1536</value>
  </property>
  <!-- Local directory where MapReduce stores intermediate data files; nonexistent directories are ignored. Default ${hadoop.tmp.dir}/mapred/local -->
  <property>
    <name>mapreduce.cluster.local.dir</name>
    <value>/export/server/hadoop-3.3.6/hadoopDatas/system/local</value>
  </property>
</configuration>
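After the cluster is up (see the startup steps below), the bundled example job is a convenient smoke test for the YARN and MapReduce memory settings; the jar path is the standard location inside the 3.3.6 binary distribution:
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.3.6.jar pi 2 10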
-
workers
node1
node2
node3
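One way to write the workers file on node1 (a sketch; the path follows the install layout used above):
cat > /export/server/hadoop-3.3.6/etc/hadoop/workers <<EOF
node1
node2
node3
EOF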
-
Configure environment variables on all three nodes
vim /etc/profile
# set hadoop environment
export HADOOP_HOME=/export/server/hadoop-3.3.6
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
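After editing /etc/profile on each machine, reload it and confirm the commands resolve; a minimal check:
source /etc/profile
hadoop version      # should print Hadoop 3.3.6
which start-dfs.sh  # should resolve to $HADOOP_HOME/sbin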
-
Distribute Hadoop
cd /export/server
scp -r hadoop-3.3.6/ node2:$PWD
scp -r hadoop-3.3.6/ node3:$PWD
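A hedged spot-check that the copies landed on node2 and node3 (assuming passwordless SSH between the hosts):
for h in node2 node3; do
  ssh $h "ls -d /export/server/hadoop-3.3.6 && du -sh /export/server/hadoop-3.3.6"
done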
-
Startup and initialization
# 1. Reboot all three machines
reboot
# 2. Start ZooKeeper on all three machines
zkServer.sh start
zkServer.sh status
# 3. Format the HA state in ZooKeeper (run on node1)
hdfs zkfc -formatZK
# 4. Start the JournalNodes (run from node1)
hadoop-daemons.sh start journalnode
# 5. Format HDFS (run on node1)
hdfs namenode -format
hdfs namenode -initializeSharedEdits -force
# 6. Start HDFS (run on node1)
start-dfs.sh
# 7. Bootstrap the NameNode on node2 as Standby and start it (run on node2)
hdfs namenode -bootstrapStandby
hadoop-daemon.sh start namenode
# 8. Start YARN on node2 (run on node2)
start-yarn.sh
# 9. Start YARN on node3 (run on node3)
start-yarn.sh
# 10. Check the ResourceManager states
yarn rmadmin -getServiceState rm1   # run on node2
yarn rmadmin -getServiceState rm2   # run on node3
# 11. Start the JobHistory server (run on node3)
mr-jobhistory-daemon.sh start historyserver
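Once everything is started, a few hedged checks confirm that HA actually came up: jps on each node should show the role-appropriate daemons, and the admin tools report which NameNode is active:
jps                                   # NameNode / DataNode / JournalNode / DFSZKFailoverController / ResourceManager / NodeManager, depending on the node
hdfs haadmin -getServiceState nn1     # one of nn1/nn2 should be active, the other standby
hdfs haadmin -getServiceState nn2
hdfs dfsadmin -report | head -n 20    # all three DataNodes should be listed as live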
-
Web UI addresses
# HDFS
http://node1:9870/dfshealth.html#tab-overview
http://node2:9870/dfshealth.html#tab-overview
# YARN
http://node2:8088/cluster
# JobHistory
http://node3:19888/jobhistory
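If a browser is not handy, reachability can be checked from the shell; a quick sketch:
curl -s -o /dev/null -w "%{http_code}\n" http://node1:9870/             # expect 200
curl -s -o /dev/null -w "%{http_code}\n" http://node2:8088/cluster      # expect 200, or a 30x redirect from the standby ResourceManager
curl -s -o /dev/null -w "%{http_code}\n" http://node3:19888/jobhistory  # expect 200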
-
Later, starting and stopping the cluster is much simpler
# First start ZooKeeper on all three machines
zkServer.sh start
zkServer.sh status
# Start Hadoop
start-all.sh
# Stop Hadoop
stop-all.sh
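If a single command for the routine start is preferred, a minimal wrapper sketch (a hypothetical helper script, assuming passwordless SSH from node1 and the role layout above):
#!/bin/bash
# start-cluster.sh: start ZooKeeper on every node, then HDFS/YARN, then the JobHistory server on node3
for h in node1 node2 node3; do
  ssh $h "source /etc/profile && zkServer.sh start"
done
start-all.sh
ssh node3 "source /etc/profile && mr-jobhistory-daemon.sh start historyserver"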