Hadoop 2.7 Configuration

core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

<property>
    <name>fs.defaultFS</name>
    <value>hdfs://bigdata/</value>
</property>


<property>
    <name>ha.zookeeper.quorum</name>
    <value>192.168.56.70:2181,192.168.56.71:2181,192.168.56.72:2181</value>
</property>


<property>
    <name>hadoop.tmp.dir</name>
    <value>/export/data/hadoop/tmp</value>
</property>

<property>
    <name>fs.trash.interval</name>
    <value>1440</value>
</property>

<property> 
    <name>io.file.buffer.size</name> 
    <value>131072</value> 
</property>
<!-- Compression codecs -->
<property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.BZip2Codec</value>
</property>

<property>
    <name>net.topology.script.file.name</name>
    <value>/export/common/hadoop/conf/topo.sh</value>
</property>

<property>
    <name>net.topology.script.number.args</name>
    <value>1</value>
</property>

<property>
    <name>ha.health-monitor.rpc-timeout.ms</name>
    <value>180000</value>
</property>

<property>
    <name>hadoop.security.authorization</name>
    <value>true</value>
</property>

<property>
   <name>hadoop.security.authentication</name>
   <value>kerberos</value>
</property>

<property>
    <name>dfs.permissions.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.namenode.acls.enabled</name>
    <value>true</value>
</property>

<property> 
    <name>ipc.maximum.data.length</name> 
    <value>268435456</value> 
</property> 

<property>  
    <name>hadoop.proxyuser.httpfs.hosts</name>  
    <value>*</value>  
</property>  
<property>  
    <name>hadoop.proxyuser.httpfs.groups</name>  
    <value>*</value>  
</property>

<property>
    <name>hadoop.proxyuser.hdfs.hosts</name>
    <value>*</value> 
</property>

<property>
    <name>hadoop.proxyuser.hdfs.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hue.hosts</name>
    <value>*</value> 
</property>

<property>
    <name>hadoop.proxyuser.hue.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hive.hosts</name>
    <value>*</value> 
</property>

<property>
    <name>hadoop.proxyuser.hive.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.spark.hosts</name>
    <value>*</value> 
</property>

<property>
    <name>hadoop.proxyuser.spark.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.dwetl.hosts</name>
    <value>*</value> 
</property>

<property>
    <name>hadoop.proxyuser.dwetl.groups</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hbase.hosts</name>
    <value>*</value>
</property>

<property>
    <name>hadoop.proxyuser.hbase.groups</name>
    <value>*</value>
</property>

</configuration>
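
core-site.xml references a rack topology script at /export/common/hadoop/conf/topo.sh (net.topology.script.file.name) that is not reproduced here. A minimal sketch of such a script is shown below; the rack names are assumptions, and the only contract is that Hadoop passes node addresses as arguments (one at a time, per net.topology.script.number.args=1) and reads one rack path per argument from stdout. The script must be executable.

#!/bin/bash
# Hypothetical topo.sh: map each DataNode address to a rack path.
while [ $# -gt 0 ]; do
  case "$1" in
    192.168.56.70|192.168.56.71) echo "/rack01" ;;
    192.168.56.72)               echo "/rack02" ;;
    *)                           echo "/default-rack" ;;
  esac
  shift
done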

hadoop-env.sh

export JAVA_HOME=/export/java
export HADOOP_COMMON_LIB_NATIVE_DIR="${HADOOP_HOME}/lib/native"
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native/"

for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done

# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=2048
export HADOOP_HEAPSIZE=4096

# Extra Java runtime options.  Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"

# Command specific options appended to HADOOP_OPTS when specified
#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_NAMENODE_OPTS="-Xmx85g -Xms85g -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"

#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
export HADOOP_DATANODE_OPTS="-server -XX:+UseConcMarkSweepGC -XX:SurvivorRatio=3 -XX:MaxTenuringThreshold=10 -XX:CMSInitiatingOccupancyFraction=80 -XX:+ExplicitGCInvokesConcurrent -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime -Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
#export HADOOP_DATANODE_OPTS="-Xmx8g -Xms8g ${HADOOP_DATANODE_OPTS}"
export HADOOP_DATANODE_OPTS="-Xmx16g -Xms16g ${HADOOP_DATANODE_OPTS}"

export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"

export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"

# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"

export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}

# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

export HADOOP_PID_DIR=/export/hadoop/pids
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}

# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER

export LD_LIBRARY_PATH=/export/hadoop/lib
export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/export/hadoop/lib
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/export/hadoop/lib/*

TEZ_CONF_DIR=/export/common/hadoop/conf/tez-site.xml
TEZ_JARS=/export/tez
export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${TEZ_CONF_DIR}:${TEZ_JARS}/*:${TEZ_JARS}/lib/*
export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:/export/hadoop/lib/native/"
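
A quick sanity check after editing hadoop-env.sh (illustrative commands; the hdfs:hadoop owner is an assumption based on the group used elsewhere in this setup): create the PID and tmp directories referenced above and confirm that the native libraries under /export/hadoop/lib/native resolve.

mkdir -p /export/hadoop/pids /export/data/hadoop/tmp
chown -R hdfs:hadoop /export/hadoop/pids /export/data/hadoop/tmp
hadoop checknative -a    # should report the native hadoop library as true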

HDFS

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

<property>
    <name>dfs.nameservices</name>
    <value>bigdata</value>
</property>

<property>
    <name>dfs.ha.namenodes.bigdata</name>
    <value>nn1,nn2</value>
</property>

<property>
    <name>dfs.namenode.rpc-address.bigdata.nn1</name>
    <value>192.168.56.71:8020</value>
</property>
<property>
    <name>dfs.namenode.rpc-address.bigdata.nn2</name>
    <value>192.168.56.72:8020</value>
</property>
<property>
    <name>dfs.namenode.http-address.bigdata.nn1</name>
    <value>192.168.56.71:50070</value>
</property>
<property>
    <name>dfs.namenode.http-address.bigdata.nn2</name>
    <value>192.168.56.72:50070</value>
</property>
<property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://192.168.56.71:8485;192.168.56.72:8485;192.168.56.70:8485/bigdata</value>
</property>
<property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/export/data/hadoop/journal</value>
</property>
<property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.client.failover.proxy.provider.bigdata</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
 
<property>
    <name>dfs.namenode.name.dir</name>
    <value>/export/data/hadoop/namenode</value>
</property>

<property>
    <name>dfs.datanode.data.dir</name> 
    <value>/export/grid/01/hadoop/hdfs/data,/export/grid/02/hadoop/hdfs/data</value>
</property> 
<property>
    <name>dfs.datanode.address</name>
    <value>0.0.0.0:50010</value>
</property>

<property>
    <name>dfs.datanode.http.address</name>
    <value>0.0.0.0:50075</value>
</property>

<property>
    <name>dfs.datanode.ipc.address</name>
    <value>0.0.0.0:50020</value>
</property>

<property> 
    <name>dfs.replication</name> 
    <value>3</value> 
</property> 

<property> 
    <name>dfs.permissions</name> 
    <value>true</value> 
</property>  

<property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
</property>

<property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
</property>

<property>  
    <name>dfs.datanode.balance.bandwidthPerSec</name>  
    <value>41943040</value> 
</property>

<property>
    <name>dfs.datanode.max.transfer.threads</name>
    <value>8192</value>
</property>
<!-- NameNode handler thread pool: roughly 20 * ln(n), where n is the number of cluster nodes -->
<!-- Python: import math; print int(20*math.log(n)) -->
<property>
    <name>dfs.namenode.handler.count</name>
    <value>200</value>
</property>
<!-- DataNode handler thread pool -->
<property>
    <name>dfs.datanode.handler.count</name>
    <value>100</value>
</property>

<property>
    <name>dfs.datanode.max.xcievers</name>
    <value>65535</value>
</property>

<property>
    <name>dfs.namenode.name.dir.restore</name> 
    <value>false</value> 
</property>

<property>
    <name>dfs.namenode.checkpoint.period</name> 
    <value>6000</value> 
</property>

<property>
    <name>dfs.hosts</name>
    <value>/export/common/hadoop/conf/allowed_hosts</value>
</property>

<property>
    <name>dfs.hosts.exclude</name>
    <value>/export/common/hadoop/conf/exclude_datanode_hosts</value>
</property>

<property>
    <name>dfs.qjournal.write-txns.timeout.ms</name>
    <value>60000</value>
</property>

<property>
    <name>dfs.permissions.enabled</name>
    <value>true</value>
</property>

<property>
     <name>dfs.namenode.acls.enabled</name>
     <value>true</value>
</property>

<property>
    <name>dfs.ha.fencing.methods</name>
    <value>
        shell(/bin/true)
    </value>
</property>

<property>
    <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
    <value>true</value>
</property>

<property>
    <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
    <value>DEFAULT</value>
</property>

<property>
    <name>dfs.block.access.token.enable</name>
    <value>true</value>
</property>

<property>
    <name>dfs.namenode.keytab.file</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>dfs.namenode.kerberos.principal</name>
    <value>hdfs/_HOST@BIGDATA.COM</value>
</property>

<property>
    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
    <value>HTTP/_HOST@BIGDATA.COM</value>
</property>

<property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>700</value>
</property>


<property>
    <name>dfs.datanode.address</name>
    <value>0.0.0.0:2828</value>
</property>

<property>
    <name>dfs.datanode.http.address</name>
    <value>0.0.0.0:2829</value>
</property>

<property>
    <name>dfs.datanode.keytab.file</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>dfs.datanode.kerberos.principal</name>
    <value>hdfs/_HOST@BIGDATA.COM</value>
</property>

<!--journalnode hdfs HA -->
<property>
    <name>dfs.journalnode.keytab.file</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>dfs.journalnode.kerberos.principal</name>
    <value>hdfs/_HOST@BIGDATA.COM</value>
</property>

<property>
    <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
    <value>HTTP/_HOST@BIGDATA.COM</value>
</property>

<!-- WebHdfs secure -->
<property>
    <name>dfs.web.authentication.kerberos.principal</name>
    <value>HTTP/_HOST@BIGDATA.COM</value>
</property>

<property>
    <name>dfs.web.authentication.kerberos.keytab</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>dfs.http.policy</name>
    <value>HTTPS_ONLY</value>
</property>

<property>
    <name>dfs.data.transfer.protection</name>
    <value>integrity</value>
</property>

<property>
    <name>dfs.encrypt.data.transfer</name>
    <value>true</value>
</property>

<property>
    <name>mapreduce.jobtracker.handler.count</name>
    <value>40</value>
</property>

<!-- Enable HDFS permissions -->
<property>
    <name>dfs.permissions.enabled</name>
    <value>true</value>
</property>
<property>
    <name>jdjr.hadoop.path.permission.enable</name>
    <value>true</value>
</property>
<property>
    <name>dfs.namenode.inode.attributes.provider.class</name>
    <value>com.jdjr.flowyed.hadoop.permission.JdjrHdfsAuthorizer</value>
</property>
<property>
    <name>jdjr.hadoop.path.permission.file.path</name>
    <value>/export/common/hadoop/conf/hdfs-policies.json</value>
</property>
<property>
    <name>jdjr.hadoop.cluster.name</name>
    <value>agent-hadoop-dev</value>
</property>
<!-- End of HDFS permissions -->

</configuration>
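
With QJM-based HA and automatic failover configured above, the usual first-time bootstrap order is roughly the following (illustrative; run as the HDFS superuser, with a valid Kerberos ticket since hadoop.security.authentication=kerberos):

hadoop-daemon.sh start journalnode      # on 192.168.56.70, .71 and .72
hdfs namenode -format                   # on nn1 (192.168.56.71) only, first time only
hadoop-daemon.sh start namenode         # on nn1
hdfs namenode -bootstrapStandby         # on nn2 (192.168.56.72)
hdfs zkfc -formatZK                     # once; creates the HA znode in ZooKeeper
start-dfs.sh                            # starts NameNodes, DataNodes and ZKFCs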

allowed_hosts

(DataNode IP addresses)

192.168.56.70
192.168.56.71
192.168.56.72
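
Because dfs.hosts and dfs.hosts.exclude both point at these files, adding or retiring a DataNode only requires editing them and refreshing the NameNode. A typical workflow, shown as an illustration (the node addresses are examples):

echo "192.168.56.73" >> /export/common/hadoop/conf/allowed_hosts            # admit a new node
echo "192.168.56.72" >> /export/common/hadoop/conf/exclude_datanode_hosts   # retire a node
hdfs dfsadmin -refreshNodes
hdfs dfsadmin -report    # the retiring node moves to "Decommission in progress", then "Decommissioned"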

YARN

yarn-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

<property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>bigdata</value>
</property>
<property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
</property>
<property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>192.168.56.71</value>
</property>

<property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>192.168.56.72</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>192.168.56.71:8088</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>192.168.56.72:8088</value>
</property>
<property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>192.168.56.71:2181,192.168.56.72:2181,192.168.56.70:2181</value> 
</property>
<property>
    <name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
    <value>/hadoop-yarn-ha</value>
</property>
<property>
    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>192.168.56.71:8132</value>
</property>
<property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>192.168.56.72:8132</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>192.168.56.71:8130</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>192.168.56.72:8130</value>
</property>
<property>
    <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
    <value>192.168.56.71:8131</value>
</property>
<property>
    <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
    <value>192.168.56.72:8131</value>
</property>
<property> 
    <name>yarn.nodemanager.aux-services</name> 
    <value>mapreduce_shuffle</value> 
</property> 

<property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property> 
<!-- NodeManager memory in MB; increase for production -->
<property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>78848</value>
</property>

<property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>10</value>
</property>

<property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
</property>

<property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>78848</value>
</property>

<property>
    <name>yarn.app.mapreduce.am.resource.mb</name>
    <value>4096</value>
</property>

<property>
    <name>yarn.app.mapreduce.am.command-opts</name>
    <value>-Xmx3584M</value>
</property>
<!-- NodeManager CPU vcores; increase for production -->
<property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>76</value>
</property>

<property>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/export/grid/01/hadoop/yarn/log,/export/grid/02/hadoop/yarn/log,/export/grid/03/hadoop/yarn/log,/export/grid/04/hadoop/yarn/log,/export/grid/05/hadoop/yarn/log,/export/grid/06/hadoop/yarn/log,/export/grid/07/hadoop/yarn/log,/export/grid/08/hadoop/yarn/log,/export/grid/09/hadoop/yarn/log,/export/grid/10/hadoop/yarn/log,/export/grid/11/hadoop/yarn/log,/export/grid/12/hadoop/yarn/log</value>
</property>

<property>
    <name>yarn.acl.enable</name>
    <value>false</value>
</property>
<property>
    <name>yarn.admin.acl</name>
    <value>*</value>
</property>

<property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/export/grid/01/hadoop/yarn/local,/export/grid/02/hadoop/yarn/local</value>
</property>
<property>
    <name>yarn.log.server.url</name>
    <value>http://192.168.56.70:19888/jobhistory/logs</value>
</property>
<property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
</property>
<property>
    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
    <value>95</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
</property>
<property>
    <name>yarn.scheduler.fair.preemption</name>
    <value>true</value>
</property>
<property>
    <name>yarn.nodemanager.address</name>
    <value>${yarn.nodemanager.hostname}:65033</value>
</property>

<property>
    <name>yarn.resourcemanager.nodes.exclude-path</name>
    <value>/export/common/hadoop/conf/exclude_nodemanager_hosts</value>
</property>

<property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
</property>

<property>
    <name>yarn.resourcemanager.keytab</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>yarn.resourcemanager.principal</name>
    <value>hdfs/_HOST@BIGDATA.COM</value>
</property>

<!-- NodeManager security configs -->
<property>
    <name>yarn.nodemanager.keytab</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>
<property>
    <name>yarn.nodemanager.principal</name>
    <value>hdfs/_HOST@BIGDATA.COM</value>
</property>

<property>
    <name>yarn.nodemanager.linux-container-executor.group</name>
    <value>hadoop</value>
</property>

<property>
    <name>yarn.nodemanager.container-executor.class</name>
    <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
</property>

<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>spark_shuffle,mapreduce_shuffle</value>
</property>

<property>
    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
</property>

<property>
    <name>yarn.scheduler.fair.user-as-default-queue</name>
    <value>false</value>
</property>

<!-- Enable work-preserving ResourceManager restart -->
<property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
</property>
<property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
    <value>10000</value>
</property>

</configuration>
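
A few illustrative commands to verify the ResourceManager HA and node-exclude settings above once the cluster is up:

yarn rmadmin -getServiceState rm1    # expect "active" or "standby"
yarn rmadmin -getServiceState rm2
yarn node -list -all                 # NodeManagers should register on port 65033
yarn rmadmin -refreshNodes           # re-read exclude_nodemanager_hosts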

fair-scheduler.xml

<?xml version="1.0" encoding="utf-8"?>
<allocations>
	<userMaxAppsDefault>100</userMaxAppsDefault>
	<fairSharePreemptionTimeout>3000</fairSharePreemptionTimeout>
	<queue name="root">
		<aclSubmitApps>hdfs</aclSubmitApps>
		<aclAdministerApps>hdfs</aclAdministerApps>
		<queue name="default">
			<!-- maxResources: the maximum amount of resources the queue may use -->
			<maxResources>249311 mb, 147 vcores</maxResources>
			<!-- minResources: the guaranteed minimum, in the form "X mb, Y vcores". A queue below its guarantee gets resources ahead of its sibling queues. Under the fair policy only memory is considered (a queue using more memory than its minimum counts as satisfied); under the drf policy the dominant resource is used instead. -->
			<minResources>1024 mb, 1 vcores</minResources>
			<!-- maxRunningApps: the maximum number of applications running at once; limiting this keeps a flood of concurrent map tasks from filling the disks with intermediate output -->
			<maxRunningApps>1000</maxRunningApps>
			<weight>1.0</weight>
			<!-- aclSubmitApps: Linux users or groups allowed to submit to the queue; the default is *, meaning everyone -->
			<aclSubmitApps>hbase,hive</aclSubmitApps>
			<!-- aclAdministerApps: administrators of the queue, who can manage its resources and applications, e.g. kill any application -->
			<aclAdministerApps>hbase,hive</aclAdministerApps>
		</queue>
		<queue name="etl">
			<maxResources>598346 mb, 352 vcores</maxResources>
			<minResources>1024 mb, 1 vcores</minResources>
			<maxRunningApps>1000</maxRunningApps>
			<weight>1.0</weight>
			<aclSubmitApps>*</aclSubmitApps>
			<aclAdministerApps>hdfs</aclAdministerApps>
		</queue>
		<queue name="personal">
			<maxResources>149586 mb, 88 vcores</maxResources>
			<minResources>1024 mb, 1 vcores</minResources>
			<maxRunningApps>50</maxRunningApps>
			<weight>1.0</weight>
			<aclSubmitApps>*</aclSubmitApps>
			<aclAdministerApps>hdfs</aclAdministerApps>
		</queue>
	</queue>
</allocations>
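
Since yarn.scheduler.fair.user-as-default-queue is false, jobs that do not name a queue land in root.default; a specific queue is chosen per job. An illustrative submission to the etl queue with the bundled examples jar:

hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
    pi -Dmapreduce.job.queuename=root.etl 10 1000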

mapred-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>

<property>
    <name>mapreduce.framework.name</name> 
    <value>yarn</value> 
</property> 


<property>
    <name>mapreduce.jobhistory.address</name> 
    <value>192.168.56.70:10020</value> 
</property>

<property>
    <name>mapreduce.jobhistory.webapp.address</name> 
    <value>192.168.56.70:19888</value> 
</property>


<property>
    <name>mapreduce.map.memory.mb</name>
    <value>2048</value>
</property>

<property>
    <name>mapreduce.reduce.memory.mb</name>
    <value>4096</value>
</property>

<property>
    <name>mapred.child.java.opts</name>
    <value>-Xmx4096M</value>
</property>
<property>
    <name>mapreduce.map.java.opts</name>
    <value>-Xmx1536M</value>
</property>
<property>
    <name>mapreduce.reduce.java.opts</name>
    <value>-Xmx3276M</value>
</property>

<property>
    <name>mapreduce.map.output.compress</name>
    <value>true</value>
</property>

<property>
    <name>mapreduce.task.io.sort.mb</name>
    <value>200</value>
</property>

<property>
    <name>mapreduce.task.io.sort.factor</name>
    <value>50</value>
</property>

<property>
    <name>mapreduce.reduce.shuffle.parallelcopies</name>
    <value>50</value>
</property>

<property>
    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
    <value>0.3</value>
</property>

<property>
    <name>mapred.job.reuse.jvm.num.tasks</name>
    <value>5</value>
</property>


<property>
    <name>mapreduce.job.counters.group.name.max</name>
    <value>100000</value>
</property>

<property>
    <name>mapreduce.job.counters.counter.name.max</name>
    <value>100000</value>
</property>

<property>
    <name>mapreduce.job.counters.groups.max</name>
    <value>100000</value>
</property>

<property>
    <name>mapreduce.job.counters.max</name>
    <value>100000</value>
</property>

<property>
    <name>mapreduce.jobhistory.keytab</name>
    <value>/export/common/hadoop/conf/hdfs.keytab</value>
</property>

<property>
    <name>mapreduce.jobhistory.principal</name>
    <value>hdfs/_HOST@BIGDATA.COM</value>
</property>

<property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=/export/hadoop</value>
</property>

<property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=/export/hadoop</value>
</property>

<property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=/export/hadoop</value>
</property>

<property>
    <name>mapreduce.tasktracker.map.tasks.maximum</name>
    <value>34</value>
</property>

<property>
    <name>mapreduce.tasktracker.reduce.tasks.maximum</name>
    <value>18</value>
</property>

</configuration>
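
The mapreduce.jobhistory.* addresses above assume a JobHistory server on 192.168.56.70; it is started separately from HDFS and YARN with the stock Hadoop 2.7 script (run on that host):

mr-jobhistory-daemon.sh start historyserver
# RPC: 192.168.56.70:10020   Web UI: http://192.168.56.70:19888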

log4j.properties

hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log

log4j.rootLogger=${hadoop.root.logger}, EventCounter

log4j.threshold=ALL

log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender

hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}

log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}

log4j.appender.RFA.layout=org.apache.log4j.PatternLayout

log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}

log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n

hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12

log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}

log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}

log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd

hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}

mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}


log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR

log4j.logger.com.amazonaws=ERROR
log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN

log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter

hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log

yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}

log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
log4j.appender.RMSUMMARY.MaxFileSize=256MB
log4j.appender.RMSUMMARY.MaxBackupIndex=20
log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
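
hdfs.audit.logger defaults to INFO,NullAppender here, and the HADOOP_NAMENODE_OPTS line in hadoop-env.sh above reads HDFS_AUDIT_LOGGER from the environment, so NameNode audit logging can be switched on without editing this file (illustrative):

export HDFS_AUDIT_LOGGER=INFO,RFAAUDIT   # route NameNode audit events to hdfs-audit.log
hadoop-daemon.sh start namenode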