Hadoop: Related Configuration Files

Hadoop Configuration
This post collects the main Hadoop configuration files, sets up the history server, and enables log viewing.

  1. core-site.xml

    <!-- Hadoop data storage directory; created if it does not exist -->
    <property>
            <name>hadoop.tmp.dir</name>
            <value>/opt/hadoop/hadoop_data/tmp</value>
            <description>A base for other temporary directories.</description>
    </property>

    <!-- NameNode address. fs.defaultFS replaces the deprecated Hadoop 1.x
         key fs.default.name; only one of the two should be set, otherwise
         two conflicting filesystem URIs end up configured. -->
    <property>
            <name>fs.defaultFS</name>
            <value>hdfs://hadoop1:8020</value>
    </property>

    <!-- Static user for logging in to the HDFS web UI -->
    <property>
            <name>hadoop.http.staticuser.user</name>
            <value>root</value>
    </property>
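
A quick way to confirm that the default filesystem is actually picked up is to query the live configuration; a minimal check, assuming the Hadoop binaries are already on the PATH:

    # Should print hdfs://hadoop1:8020
    hdfs getconf -confKey fs.defaultFS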
    
    
  2. hdfs-site.xml

     	<!-- NameNode web UI address -->
    <property>
            <name>dfs.namenode.http-address</name>
            <value>hadoop1:9870</value>
    </property>
    	
    	<!-- SecondaryNameNode (2NN) web UI address -->
    <property>
            <name>dfs.namenode.secondary.http-address</name>
            <value>hadoop3:9868</value>
    </property>
    
    
    <property>
       <name>dfs.namenode.name.dir</name>
       <value>/opt/hadoop/hadoop_data/dfs/name</value>
       <description>Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently.</description>
    </property>

    <property>
       <name>dfs.datanode.data.dir</name>
       <value>/opt/hadoop/hadoop_data/dfs/data</value>
       <description>Comma-separated list of paths on the local filesystem of a DataNode where it should store its blocks.</description>
    </property>
    
    <property>
       <name>dfs.replication</name>
       <value>3</value>
    </property>
    
    <property>
          <name>dfs.permissions.enabled</name>
          <value>true</value>
          <description>Enable HDFS permission checking.</description>
    </property>
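
With hdfs-site.xml in place, the NameNode metadata directory must be formatted once before the very first start; a sketch of the one-time setup (run on hadoop1 only, and never on a cluster that already holds data, since formatting wipes the namespace):

    # One-time initialization on the NameNode host (fresh cluster only)
    hdfs namenode -format

    # Once the daemons are running, verify replication and live DataNodes
    hdfs dfsadmin -report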
    
    
  3. mapred-site.xml

    
    <!-- Run MapReduce jobs on YARN -->
    <property>
            <name>mapreduce.framework.name</name>
            <value>yarn</value>
    </property>

    <!-- Local scratch directory for intermediate MapReduce data.
         (The Hadoop 1.x mapred.job.tracker setting has been dropped:
         there is no JobTracker when running on YARN.) -->
    <property>
            <name>mapreduce.cluster.local.dir</name>
            <value>/opt/hadoop/hadoop_data/var</value>
    </property>
    <!-- JobHistory server RPC address -->
    <!-- The history server runs on the NameNode host (hadoop1) -->
    <property>
          <name>mapreduce.jobhistory.address</name>
           <value>hadoop1:10020</value>
    </property>
    <property>
          <name>mapreduce.jobhistory.webapp.address</name>
           <value>hadoop1:19888</value>
    </property>
    <property>
      <name>yarn.app.mapreduce.am.env</name>
      <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
    <property>
      <name>mapreduce.map.env</name>
      <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
    <property>
      <name>mapreduce.reduce.env</name>
      <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
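
To confirm that jobs really run on YARN and show up in the history server, the example jar bundled with Hadoop makes a quick smoke test; the exact jar file name below depends on the installed version:

    # Submit a tiny MapReduce job (jar name varies by Hadoop version)
    hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 2 10

    # The finished job should then be visible at http://hadoop1:19888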
    
  4. yarn-site.xml

    <!-- Shuffle auxiliary service for MapReduce -->
    <property>
    	<name>yarn.nodemanager.aux-services</name>
    	<value>mapreduce_shuffle</value>
    </property>
    <!-- ResourceManager host -->
    <property>
    	<name>yarn.resourcemanager.hostname</name>
    	<value>hadoop2</value>
    </property>
    
    <!-- Enable log aggregation -->
    <property>
    	<name>yarn.log-aggregation-enable</name>
    	<value>true</value>
    </property>
    <!-- URL of the log server (the JobHistory web UI) -->
    <property>
    	<name>yarn.log.server.url</name>
    	<value>http://hadoop1:19888/jobhistory/logs</value>
    </property>
    <!-- Keep aggregated logs for 7 days -->
    <property>
    	<name>yarn.log-aggregation.retain-seconds</name>
    	<value>604800</value>
    </property>
    
    
    <property>
    	<description>The address of the applications manager interface in the RM.</description>
    	<name>yarn.resourcemanager.address</name>
    	<value>${yarn.resourcemanager.hostname}:8032</value>
    </property>
    
    <property>
    	<description>The address of the scheduler interface.</description>
    	<name>yarn.resourcemanager.scheduler.address</name>
    	<value>${yarn.resourcemanager.hostname}:8030</value>
    </property>
    
    <property>
    	<description>The http address of the RM web application.</description>
    	<name>yarn.resourcemanager.webapp.address</name>
    	<value>${yarn.resourcemanager.hostname}:8088</value>
    </property>
    
    <property>
    	<description>The https address of the RM web application.</description>
    	<name>yarn.resourcemanager.webapp.https.address</name>
    	<value>${yarn.resourcemanager.hostname}:8090</value>
    </property>
    
    <property>
    	<description>The address NodeManagers use to report to the RM (resource tracker).</description>
    	<name>yarn.resourcemanager.resource-tracker.address</name>
    	<value>${yarn.resourcemanager.hostname}:8031</value>
    </property>
    
    <property>
    	<description>The address of the RM admin interface.</description>
    	<name>yarn.resourcemanager.admin.address</name>
    	<value>${yarn.resourcemanager.hostname}:8033</value>
    </property>
    
    
    <property>
    	<name>yarn.scheduler.maximum-allocation-mb</name>
    	<value>2048</value>
    	<description>Maximum memory a single container may request, in MB (default 8192 MB).</description>
    </property>
    
    <property>
    	<name>yarn.nodemanager.vmem-pmem-ratio</name>
    	<value>2.1</value>
    	<description>Ratio of virtual to physical memory allowed per container.</description>
    </property>

    <property>
    	<name>yarn.nodemanager.resource.memory-mb</name>
    	<value>2048</value>
    	<description>Physical memory, in MB, that this node makes available to containers.</description>
    </property>

    <property>
    	<name>yarn.nodemanager.vmem-check-enabled</name>
    	<value>false</value>
    	<description>Disable the virtual memory check, which often kills containers on small nodes.</description>
    </property>
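
With log aggregation enabled, the logs of a finished application can also be pulled from the command line; <application-id> below is a placeholder for the id printed when the job was submitted:

    # List applications, then fetch the aggregated logs for one of them
    yarn application -list -appStates ALL
    yarn logs -applicationId <application-id>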
    

Custom helper scripts

  1. xsync.bash

    #!/bin/bash
    # Distribute files to every node in the cluster
    # 1. Check the number of arguments
    if [ $# -lt 1 ]
    then
    	echo "Not Enough Arguments!"
    	exit 1
    fi

    # 2. Loop over every machine in the cluster
    for host in hadoop1 hadoop2 hadoop3
    do
    	echo "=================  $host  ================="
    	# 3. Send each file or directory in turn
    	for file in "$@"
    	do
    		# 4. Check that the file exists
    		if [ -e "$file" ]
    			then
    				# 5. Resolve the parent directory (following symlinks)
    				pdir=$(cd -P "$(dirname "$file")"; pwd)

    				# 6. Get the bare file name
    				fname=$(basename "$file")
    				ssh "$host" "mkdir -p $pdir"
    				rsync -av "$pdir/$fname" "$host:$pdir"
    			else
    				echo "$file does not exist!"
    		fi
    	done
    done
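
A typical use is pushing a changed configuration directory out to every node; for example (assuming the script is executable and on the PATH):

    xsync.bash /opt/hadoop/hadoop/etc/hadoop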
    
  2. jpsall

    #!/bin/bash
    for host in hadoop1 hadoop2 hadoop3
    do
    	echo =============== $host ===============
    	ssh $host jps
    done
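
Given the configuration above, jpsall should show the NameNode, a DataNode, and the JobHistoryServer on hadoop1; the ResourceManager plus a NodeManager and DataNode on hadoop2; and the SecondaryNameNode plus a NodeManager and DataNode on hadoop3 (exact worker placement depends on the workers file).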
    
  3. myhadoop.bash

    #!/bin/bash
    
    if [ $# -lt 1 ]
    then
    	echo "No args given..."
    	exit 1
    fi

    case $1 in
    "start")
    		echo "==================== Starting the Hadoop cluster ===================="

    		echo "----------------- Starting HDFS -----------------"
    		ssh hadoop1 "/opt/hadoop/hadoop/sbin/start-dfs.sh"
    		echo "----------------- Starting YARN -----------------"
    		ssh hadoop2 "/opt/hadoop/hadoop/sbin/start-yarn.sh"
    		echo "----------------- Starting the history server -----------------"
    		ssh hadoop1 "/opt/hadoop/hadoop/bin/mapred --daemon start historyserver"
    ;;
    "stop")
    		echo "==================== Stopping the Hadoop cluster ===================="

    		echo "----------------- Stopping the history server -----------------"
    		ssh hadoop1 "/opt/hadoop/hadoop/bin/mapred --daemon stop historyserver"
    		echo "----------------- Stopping YARN -----------------"
    		ssh hadoop2 "/opt/hadoop/hadoop/sbin/stop-yarn.sh"
    		echo "----------------- Stopping HDFS -----------------"
    		ssh hadoop1 "/opt/hadoop/hadoop/sbin/stop-dfs.sh"
    ;;
    *)
    	echo "Input Args Error..."
    ;;
    esac
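
After marking the script executable (chmod +x) and putting it on the PATH, usage is simply:

    myhadoop.bash start    # bring up HDFS, YARN and the history server
    myhadoop.bash stop     # stop everything in the reverse order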
    