Ubuntu 16.04 + Hadoop 2.7.2 fully distributed cluster setup (quick reference)

# 1. hosts
/etc/hosts on every node. Note: Ubuntu adds a default `127.0.1.1 <hostname>` entry; it is removed on master here so the NameNode binds to 192.168.64.100 rather than loopback.

master
----------------------
127.0.0.1	localhost
192.168.64.100 master
192.168.64.101 slave1
192.168.64.102 slave2
192.168.64.103 slave3
192.168.64.104 slave4
-----------------------
slave1
-----------------------
127.0.0.1       localhost
127.0.1.1       slave1
192.168.64.100 master
192.168.64.101 slave1
192.168.64.102 slave2
192.168.64.103 slave3
192.168.64.104 slave4
-----------------------
slave2
-----------------------
127.0.0.1       localhost
127.0.1.1       slave2
192.168.64.100 master
192.168.64.101 slave1
192.168.64.102 slave2
192.168.64.103 slave3
192.168.64.104 slave4
-----------------------
slave3
-----------------------
127.0.0.1       localhost
127.0.1.1       slave3
192.168.64.100 master
192.168.64.101 slave1
192.168.64.102 slave2
192.168.64.103 slave3
192.168.64.104 slave4
-----------------------
slave4
-----------------------
127.0.0.1       localhost
192.168.64.100 master
192.168.64.101 slave1
192.168.64.102 slave2
192.168.64.103 slave3
192.168.64.104 slave4
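Before touching any Hadoop config it is worth confirming that every node resolves and reaches every other one. A quick check loop, assuming passwordless ssh for the Hadoop user has already been set up between the nodes:

-----------------------
for h in master slave1 slave2 slave3 slave4; do
    ping -c 1 "$h" > /dev/null && echo "$h: reachable" || echo "$h: FAILED"
    ssh "$h" hostname        # should print that node's own hostname
done
-----------------------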

# 2. environment

hadoop-env.sh
-----------------
export JAVA_HOME=/usr/local/jvm/jdk
-----------------
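A quick sanity check that the JAVA_HOME above actually points at a JDK (/usr/local/jvm/jdk is this guide's install path; adjust for your layout):

-----------------
/usr/local/jvm/jdk/bin/java -version   # should print the JDK version
-----------------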


slaves (slave4 is deliberately absent: it runs only the SecondaryNameNode, configured in hdfs-site.xml below):
-----------------
slave1
slave2
slave3
-----------------



core-site.xml:
--------------------

<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <!-- base dir for Hadoop scratch/metadata dirs; a plain local path, not a file: URI -->
        <value>/usr/local/hadoop/tmp</value>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:8020</value>
    </property>
</configuration>

-------------------------  
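Once this file is in place (and distributed to every node), the effective setting can be read back with the stock getconf tool:

--------------------
hdfs getconf -confKey fs.defaultFS   # expect: hdfs://master:8020
--------------------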


hdfs-site.xml:
------------------------

<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>slave4:50090</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/name1,file:/usr/local/hadoop/tmp/dfs/name2</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/data1,file:/usr/local/hadoop/tmp/dfs/data2</value>
    </property>
</configuration>

----------------------------------------------------------
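The format/startup steps below create these directories on demand, but creating them up front avoids permission surprises. A sketch, assuming the Hadoop user owns /usr/local/hadoop:

------------------------
# on master (NameNode metadata, two redundant copies)
mkdir -p /usr/local/hadoop/tmp/dfs/{name1,name2}
# on each of slave1..slave3 (DataNode block storage)
mkdir -p /usr/local/hadoop/tmp/dfs/{data1,data2}
------------------------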




mapred-site.xml (copy mapred-site.xml.template, then rename it):
----------------------------------------------------------

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
    </property>
</configuration>

----------------------
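The two jobhistory addresses above only take effect if the history server is actually running; in Hadoop 2.7 it is started separately, on master:

----------------------
$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh start historyserver
# web UI: http://master:19888
----------------------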



yarn-site.xml
---------------------

<configuration>
  <!-- Site specific YARN configuration properties -->
      <property>
          <name>yarn.resourcemanager.hostname</name>
          <value>master</value>
      </property>
      <property>
          <name>yarn.nodemanager.aux-services</name>
          <value>mapreduce_shuffle</value>
      </property>
</configuration>

-------------------------
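Once YARN is up (section 3 below), NodeManager registration can be checked from master with the standard CLI:

-------------------------
yarn node -list   # expect slave1..slave3 in RUNNING state
-------------------------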

# 3. Format the NameNode

Run once, on master only:

hdfs namenode -format
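With HDFS formatted, start the daemons from master (both scripts ship in $HADOOP_HOME/sbin). Re-running the format later without first wiping every DataNode's data directories leaves the DataNodes with a stale clusterID, and they will fail to register:

start-dfs.sh    # NameNode on master, DataNodes on slave1..slave3, SecondaryNameNode on slave4
start-yarn.sh   # ResourceManager on master, NodeManagers on slave1..slave3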

# 4. Shell scripts

## 1. Distribution script

xsync (copies a file or directory to the same absolute path on slave1..slave4):

#!/bin/bash
# xsync: rsync a file/directory to the same absolute path on slave1..slave4
pcount=$#
if (( pcount < 1 )); then
    echo "no args"
    exit 1
fi

p1=$1
fname=$(basename "$p1")

# resolve the absolute directory of the argument (-P follows symlinks)
pdir=$(cd -P "$(dirname "$p1")"; pwd)

cuser=$(whoami)
for (( host = 1; host < 5; host++ )); do
    echo "---------------- slave$host ----------------"
    rsync -rvl "$pdir/$fname" "$cuser@slave$host:$pdir"
done

xsync xxx

## 2. Cluster-wide command
xcall (runs the given command locally, then on slave1..slave4 over ssh):

#!/bin/bash
# xcall: run the given command on this node and on slave1..slave4
pcount=$#
if (( pcount < 1 )); then
    echo "no args"
    exit 1
fi

echo "-------- master --------"
"$@"

# note: ssh runs a non-interactive shell on the remote side,
# so commands like jps must be on the remote PATH
cuser=$(whoami)
for (( host = 1; host < 5; host++ )); do
    echo "-------- slave$host --------"
    ssh "$cuser@slave$host" "$@"
done

xcall rm -rf xxx
xcall jps
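Given the configuration above, xcall jps right after startup should show roughly: NameNode and ResourceManager on master; DataNode and NodeManager on each of slave1..slave3; SecondaryNameNode on slave4 (plus JobHistoryServer on master if it was started).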

Original post: 便利贴回收站
