数据建设实践之大数据平台(三)安装hadoop

安装hadoop

上传安装文件到/opt/software目录并解压

[bigdata@node101 software]$ tar -zxvf hadoop-3.3.5.tar.gz -C /opt/services/

配置环境变量

[bigdata@node101 ~]$ sudo vim /etc/profile.d/bigdata_env.sh
export JAVA_HOME=/opt/services/jdk1.8.0_161
export ZK_HOME=/opt/services/zookeeper-3.5.7
export HADOOP_HOME=/opt/services/hadoop-3.3.5
export PATH=$PATH:$JAVA_HOME/bin:$ZK_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

分发环境变量

[bigdata@node101 bin]$ sudo ./xsync /etc/profile.d/bigdata_env.sh

刷新环境变量,5台机器上执行

[bigdata@node101 ~]$ source /etc/profile 

配置core-site.xml文件

<!-- Default filesystem: the HA nameservice ID (resolved via hdfs-site.xml). -->
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://mycluster</value>
</property>
<!-- Base directory for Hadoop's local working data. -->
<property>
  <name>hadoop.tmp.dir</name>
  <value>/opt/services/hadoop-3.3.5/data</value>
</property>
<!-- User shown as owner for web-UI file operations. -->
<property>
  <name>hadoop.http.staticuser.user</name>
  <value>bigdata</value>
</property>
<!-- Trash: keep deleted files 1440 minutes (1 day); checkpoint at the same interval. -->
<property>
  <name>fs.trash.interval</name>
  <value>1440</value>
</property>
<property>
  <name>fs.trash.checkpoint.interval</name>
  <value>1440</value>
</property>
<!-- ZooKeeper ensemble used by the ZKFC for automatic NameNode failover. -->
<property>
  <name>ha.zookeeper.quorum</name>
  <value>node101:2181,node102:2181,node103:2181</value>
</property>
<!-- Allow the "bigdata" user to impersonate any user from any host
     (needed by services such as HiveServer2). -->
<property>
  <name>hadoop.proxyuser.bigdata.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.bigdata.groups</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.bigdata.users</name>
  <value>*</value>
</property>

配置hdfs-site.xml文件

<!-- HDFS HA configuration for nameservice "mycluster" (NameNodes on node101/node102). -->
<property>
  <!-- FIX: the original used "dfs.namenode.data.dir", which is not a valid HDFS
       property and would be silently ignored; the NameNode metadata directory
       is configured with "dfs.namenode.name.dir". -->
  <name>dfs.namenode.name.dir</name>
  <value>file://${hadoop.tmp.dir}/name</value>
</property>
<!-- DataNode block storage: two directories (e.g. two disks) per node. -->
<property>
  <name>dfs.datanode.data.dir</name>
  <value>file://${hadoop.tmp.dir}/data1,file://${hadoop.tmp.dir}/data2</value>
</property>
<!-- Local directory where each JournalNode stores edit logs. -->
<property>
  <name>dfs.journalnode.edits.dir</name>
  <value>${hadoop.tmp.dir}/journal/</value>
</property>
<property>
  <name>dfs.nameservices</name>
  <value>mycluster</value>
</property>
<property>
  <name>dfs.ha.namenodes.mycluster</name>
  <value>nn1,nn2</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.nn1</name>
  <value>node101:8020</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.nn2</name>
  <value>node102:8020</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.nn1</name>
  <value>node101:9870</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.nn2</name>
  <value>node102:9870</value>
</property>
<!-- NOTE(review): only two JournalNodes are listed; a QJM quorum normally uses
     an odd number (typically 3) so it can tolerate a node failure — confirm. -->
<property>
  <name>dfs.namenode.shared.edits.dir</name>
  <value>qjournal://node101:8485;node102:8485/mycluster</value>
</property>
<property>
  <name>dfs.client.failover.proxy.provider.mycluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fencing: try sshfence first; shell(/bin/true) as a last resort so failover
     still proceeds when the old active host is unreachable via ssh. -->
<property>
  <name>dfs.ha.fencing.methods</name>
  <value>
    sshfence
    shell(/bin/true)
  </value>
</property>
<property>
  <name>dfs.ha.fencing.ssh.private-key-files</name>
  <value>/home/bigdata/.ssh/id_rsa</value>
</property>
<property>
  <name>dfs.ha.automatic-failover.enabled</name>
  <value>true</value>
</property>
<property>
  <name>dfs.replication</name>
  <value>3</value>
</property>
<!-- NameNode RPC handler threads. -->
<property>
  <name>dfs.namenode.handler.count</name>
  <value>21</value>
</property>
<!-- Host include/exclude lists for DataNode admission and decommissioning. -->
<property>
  <name>dfs.hosts</name>
  <value>/opt/services/hadoop-3.3.5/etc/hadoop/whitelist</value>
</property>
<property>
  <name>dfs.hosts.exclude</name>
  <value>/opt/services/hadoop-3.3.5/etc/hadoop/blacklist</value>
</property>

配置mapred-site.xml文件

<!-- Run MapReduce jobs on YARN. -->
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<!-- MapReduce JobHistory server (RPC and web UI) on node102. -->
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>node102:10020</value>
</property>
<property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>node102:19888</value>
</property>

配置yarn-site.xml文件

<!-- Shuffle services: MapReduce shuffle plus Spark's external shuffle service
     (the Spark YARN shuffle jar must be on the NodeManager classpath). -->
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle,spark_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
  <value>org.apache.spark.network.yarn.YarnShuffleService</value>
</property>
<!-- ResourceManager HA: rm1 on node101, rm2 on node102. -->
<property>
  <name>yarn.resourcemanager.ha.enabled</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.cluster-id</name>
  <value>yarn-cluster</value>
</property>
<property>
  <name>yarn.resourcemanager.ha.rm-ids</name>
  <value>rm1,rm2</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm1</name>
  <value>node101</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address.rm1</name>
  <value>node101:8088</value>
</property>
<property>
  <name>yarn.resourcemanager.address.rm1</name>
  <value>node101:8032</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.address.rm1</name>
  <value>node101:8030</value>
</property>
<property>
  <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
  <value>node101:8031</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm2</name>
  <value>node102</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address.rm2</name>
  <value>node102:8088</value>
</property>
<property>
  <name>yarn.resourcemanager.address.rm2</name>
  <value>node102:8032</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.address.rm2</name>
  <value>node102:8030</value>
</property>
<property>
  <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
  <value>node102:8031</value>
</property>
<!-- ZooKeeper ensemble for RM leader election and state storage. -->
<property>
  <name>yarn.resourcemanager.zk-address</name>
  <value>node101:2181,node102:2181,node103:2181</value>
</property>
<!-- Recover running applications after an RM restart/failover, using ZK. -->
<property>
  <name>yarn.resourcemanager.recovery.enabled</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.store.class</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<!-- Log aggregation: collect container logs to HDFS, keep for 7 days,
     and link the RM UI to the JobHistory log server. -->
<property>
  <name>yarn.log-aggregation-enable</name>
  <value>true</value>
</property>
<property>
  <name>yarn.log.server.url</name>
  <value>http://node102:19888/jobhistory/logs</value>
</property>
<property>
  <name>yarn.log-aggregation.retain-seconds</name>
  <value>604800</value>
</property>
<!-- Environment variables NodeManagers pass through to containers. -->
<property>
  <name>yarn.nodemanager.env-whitelist</name>
  <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
</property>
<property>
  <description>配置调度器</description>
  <name>yarn.resourcemanager.scheduler.class</name>
  <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
</property>
<property>
  <description>处理调度器的线程数量,一般不超过机器数*线程数,配置总数的2/3</description>
  <name>yarn.resourcemanager.scheduler.client.thread-count</name>
  <value>16</value>
</property>
<property>
  <description>是否让yarn自动检测硬件进行配置,如果手工配置,需要设置false</description>
  <name>yarn.nodemanager.resource.detect-hardware-capabilities</name>
  <value>false</value>
</property>
<property>
  <description>是否将虚拟核数当作cpu核数,如果机器配置一样采用false</description>
  <name>yarn.nodemanager.resource.count-logical-processors-as-cores</name>
  <value>false</value>
</property>
<property>
  <description>虚拟核数和物理核数的比例</description>
  <name>yarn.nodemanager.resource.pcores-vcores-multiplier</name>
  <value>1.0</value>
</property>
<property>
  <description>nodemanager可以使用内存大小</description>
  <name>yarn.nodemanager.resource.memory-mb</name>
  <value>8192</value>
</property>
<property>
  <description>nodemanager可以使用cup核数,一般一个container4个核8G内存</description>
  <name>yarn.nodemanager.resource.cpu-vcores</name>
  <value>8</value>
</property>
<property>
  <description>container最小内存</description>
  <name>yarn.scheduler.minimum-allocation-mb</name>
  <value>1024</value>
</property>
<property>
  <description>container最大内存</description>
  <name>yarn.scheduler.maximum-allocation-mb</name>
  <value>4096</value>
</property>
<property>
  <description>一个container最小核数</description>
  <name>yarn.scheduler.minimum-allocation-vcores</name>
  <value>1</value>
</property>
<property>
  <description>一个container最大核数</description>
  <name>yarn.scheduler.maximum-allocation-vcores</name>
  <value>4</value>
</property>
<property>
  <description>虚拟内存检查,修改为关闭</description>
  <name>yarn.nodemanager.vmem-check-enabled</name>
  <value>false</value>
</property>
<property>
  <description>设置任务优先级</description>
  <name>yarn.cluster.max-application-priority</name>
  <value>5</value>
</property>

配置capacity-scheduler.xml文件

<!-- Capacity Scheduler: two top-level queues, default (20%) and hive (80%). -->
<property>
  <name>yarn.scheduler.capacity.root.queues</name>
  <value>default,hive</value>
  <description>
    The queues at the this level (root is the root queue).
  </description>
</property>
<property>
  <name>yarn.scheduler.capacity.root.default.capacity</name>
  <value>20</value>
  <description>Default queue target capacity.</description>
</property>
<property>
  <name>yarn.scheduler.capacity.root.hive.capacity</name>
  <value>80</value>
  <description>hive queue target capacity.</description>
</property>
<property>
  <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
  <value>1</value>
  <description>
    Default queue user limit a percentage from 0.0 to 1.0.
  </description>
</property>
<property>
  <name>yarn.scheduler.capacity.root.hive.user-limit-factor</name>
  <value>1</value>
  <description>
    hive queue user limit a percentage from 0.0 to 1.0.
  </description>
</property>
<!-- Elasticity: each queue may grow past its target up to maximum-capacity. -->
<property>
  <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
  <value>80</value>
  <description>
    The maximum capacity of the default queue.
  </description>
</property>
<property>
  <name>yarn.scheduler.capacity.root.hive.maximum-capacity</name>
  <value>100</value>
  <description>
    The maximum capacity of the hive queue.
  </description>
</property>
<property>
  <name>yarn.scheduler.capacity.root.default.state</name>
  <value>RUNNING</value>
  <description>
    The state of the default queue. State can be one of RUNNING or STOPPED.
  </description>
</property>
<property>
  <name>yarn.scheduler.capacity.root.hive.state</name>
  <value>RUNNING</value>
  <description>
    The state of the hive queue. State can be one of RUNNING or STOPPED.
  </description>
</property>

配置hadoop-env.sh文件

# JVM options for the HDFS daemons: set the security-audit log appender
# and cap each daemon's heap at 1 GiB (-Xmx1024m).
export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS -Xmx1024m"
export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS -Xmx1024m"

配置workers文件,添加datanode节点

node103
node104
node105

配置whitelist文件

node103
node104
node105

配置blacklist文件

node104

分发hadoop到其他节点

[bigdata@node101 ~]$ xsync /opt/services/hadoop-3.3.5

编写hadoop启动停止脚本

[bigdata@node101 bin]$ vim hadoop.sh
#!/bin/bash
# hadoop.sh — start/stop the whole Hadoop cluster over ssh.
# Usage: hadoop.sh start|stop
#   start: HDFS (node101) -> YARN (node102) -> JobHistory server (node102)
#   stop:  reverse order.
# Relies on $HADOOP_HOME being set on the remote hosts' environment.

if [ $# -lt 1 ]; then
    # FIX: original used bare "exit", which returned 0 (echo's status),
    # so callers could not detect the usage error.
    echo "No Args Input...[start,stop]"
    exit 1
fi

case "$1" in
"start")
        echo " =================== 启动 hadoop集群 ==================="

        echo " --------------- 启动 hdfs -----------------------"
        ssh node101 "$HADOOP_HOME/sbin/start-dfs.sh"
        echo " --------------- 启动 yarn ------------------------"
        ssh node102 "$HADOOP_HOME/sbin/start-yarn.sh"
        echo " --------------- 启动 historyserver ---------------"
        ssh node102 "$HADOOP_HOME/bin/mapred --daemon start historyserver"
;;
"stop")
        echo " =================== 关闭 hadoop集群 ==================="
        echo " --------------- 关闭 historyserver -----------------"
        ssh node102 "$HADOOP_HOME/bin/mapred --daemon stop historyserver"
        echo " --------------- 关闭 yarn --------------------------"
        ssh node102 "$HADOOP_HOME/sbin/stop-yarn.sh"
        echo " --------------- 关闭 hdfs --------------------------"
        ssh node101 "$HADOOP_HOME/sbin/stop-dfs.sh"
;;
*)
    # FIX: also exit non-zero on an unrecognized argument.
    echo "Input Args Error...[start,stop]"
    exit 1
;;
esac

hadoop.sh授权

[bigdata@node101 bin]$ chmod +x hadoop.sh

分发hadoop.sh

[bigdata@node101 bin]$ xsync  hadoop.sh

编写jpsall.sh脚本

[bigdata@node101 bin]$ vim jpsall.sh 
#!/bin/bash
# jpsall.sh — print the Java process list (jps) of every cluster node.
for host in node101 node102 node103 node104 node105
do
    echo " ===================  $host ==================="
    # Quote the expansion (SC2086); defensive even though hostnames are simple.
    ssh "$host" jps
done

jpsall.sh授权

[bigdata@node101 bin]$ chmod +x jpsall.sh

分发jpsall.sh

[bigdata@node101 bin]$ xsync  jpsall.sh

注册hadoop

[bigdata@node101 bin]$ hdfs zkfc -formatZK

在node101,node102节点上启动journalnode

[bigdata@node101 bin]$hdfs --daemon start journalnode 

在node101节点格式化hadoop

[bigdata@node101 bin]$hdfs namenode -format 

在node101节点启动namenode

[bigdata@node101 bin]$hdfs --daemon start namenode 

在node102节点同步元数据并启动namenode

[bigdata@node102 bin]$ hdfs namenode -bootstrapStandby
[bigdata@node102 bin]$ hdfs --daemon start namenode

重启hadoop

[bigdata@node101 bin]$hadoop.sh stop                         
[bigdata@node101 bin]$hadoop.sh start                          

  • 3
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值