Hadoop 2.0 YARN (Cloudera CDH 4.4.0) Installation and Configuration

1. Set the hostname on each node (shown here on yard02):

hadoop@hadoop-virtual-machine:~$ cat /etc/hostname 
yard02
hadoop@hadoop-virtual-machine:~$
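
The prompt above still shows the old name because /etc/hostname only takes effect at boot; a minimal sketch of applying the new name immediately (assumes sudo rights, repeat on yard03/yard04/yard05 with the matching name):

# run on yard02
sudo sh -c 'echo yard02 > /etc/hostname'
sudo hostname yard02
hostname    # should now print yard02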

2. Add every cluster node to /etc/hosts:

hadoop@hadoop-virtual-machine:~$ cat /etc/hosts
127.0.0.1	localhost
127.0.1.1	hadoop-virtual-machine

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.137.2 yard02
192.168.137.3 yard03
192.168.137.4 yard04
192.168.137.5 yard05
hadoop@hadoop-virtual-machine:~$
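
After copying the same host entries to every node, a quick sanity check that each name resolves and is reachable:

ping -c 1 yard03
ping -c 1 yard04
ping -c 1 yard05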

3. Edit the Hadoop configuration files under the conf directory ($HADOOP_CONF_DIR, i.e. etc/hadoop).

core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- fs.default.name for MRv1; fs.defaultFS for MRv2 (YARN) -->
<property>
     <name>fs.defaultFS</name>
     <value>hdfs://yard02</value>
</property>
<property>
	<name>fs.trash.interval</name>
	<value>10080</value>
</property>
<property>
	<name>fs.trash.checkpoint.interval</name>
	<value>10080</value>
</property>
</configuration>
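
fs.trash.interval is measured in minutes, so 10080 keeps deleted files recoverable for 7 days. A hypothetical restore looks like this (the path is only an illustration):

hadoop fs -rm /user/hadoop/input/profile                     # moved to trash, not deleted
hadoop fs -ls /user/hadoop/.Trash/Current/user/hadoop/input  # deleted files land here
hadoop fs -mv /user/hadoop/.Trash/Current/user/hadoop/input/profile /user/hadoop/input/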

hdfs-site.xml

<configuration>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/hadoop-${user.name}</value>
  </property>
  <property>
    <name>dfs.namenode.http-address</name>
    <value>yard02:50070</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>yard03:50090</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>
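
hadoop.tmp.dir points into the install tree, and that data directory is not part of a fresh tarball; to avoid ownership surprises it can be created up front on every node (a sketch, assuming the hadoop user owns the tree):

mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data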

yarn-site.xml

<configuration>
<!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>yard02:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>yard02:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>yard02:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>yard02:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>yard02:8088</value>
  </property>
  <property>
    <description>Classpath for typical applications.</description>
    <name>yarn.application.classpath</name>
    <value>$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,
    $HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
    $HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
    $YARN_HOME/share/hadoop/yarn/*,$YARN_HOME/share/hadoop/yarn/lib/*,
    $YARN_HOME/share/hadoop/mapreduce/*,$YARN_HOME/share/hadoop/mapreduce/lib/*</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce.shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/local</value>
  </property>
  <property>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/logs</value>
  </property>
  <property>
    <description>Where to aggregate logs</description>
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/logs</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.staging-dir</name>
    <value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0</value>
</property>
</configuration>
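
Similarly, the NodeManager local and log directories can be created in advance on every slave node (a sketch matching the paths above):

mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/local
mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/logs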

mapred-site.xml

<configuration>  
  <property>  
   <name>mapreduce.framework.name</name>  
   <value>yarn</value>  
  </property>  
  <property>  
    <name>mapreduce.jobhistory.address</name>  
    <value>yard02:10020</value>
  </property>  
  <property>  
    <name>mapreduce.jobhistory.webapp.address</name>  
    <value>yard02:19888</value>  
  </property>  
</configuration>

masters

yard02
yard03

slaves

yard02
yard03
yard04
yard05

.bashrc

export HADOOP_HOME=/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export HADOOP_YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export PATH=$PATH:$HOME/bin:$JAVA_HOME/bin:$HADOOP_HOME/sbin
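
Note that the PATH line only adds $HADOOP_HOME/sbin, which is why the bin commands are invoked with an explicit path later on. After editing, reload the file and verify:

source ~/.bashrc
$HADOOP_HOME/bin/hadoop version    # should report 2.0.0-cdh4.4.0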

Sync the installation and configuration files to each node:

scp  -r /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/* hadoop@yard03:/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/
scp  -r /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/* hadoop@yard04:/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/
scp  -r /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/* hadoop@yard05:/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/

scp /home/hadoop/.bashrc hadoop@yard03:/home/hadoop
scp /home/hadoop/.bashrc hadoop@yard04:/home/hadoop
scp /home/hadoop/.bashrc hadoop@yard05:/home/hadoop
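
scp will fail if the target directory does not exist yet, so it can be created on each node first (a sketch, assuming the same layout on every host):

ssh hadoop@yard03 'mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0'
ssh hadoop@yard04 'mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0'
ssh hadoop@yard05 'mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0'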

For passwordless SSH, this recipe works every time:

ssh-keygen -t rsa
cd ~/.ssh
cp id_rsa.pub authorized_keys   # the local machine needs this too
ssh localhost                   # make sure passwordless login to localhost works

scp authorized_keys  hadoop@yard03:/home/hadoop/.ssh
scp authorized_keys  hadoop@yard04:/home/hadoop/.ssh
scp authorized_keys  hadoop@yard05:/home/hadoop/.ssh
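
A quick check that passwordless login now works from the master (each command should print the remote hostname without asking for a password):

ssh yard03 hostname
ssh yard04 hostname
ssh yard05 hostname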

The start/stop scripts live in the sbin directory.
Format the NameNode:

hadoop namenode -format
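
In Hadoop 2 this spelling still works but prints a deprecation warning; the preferred equivalent is:

hdfs namenode -format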

Start everything (in Hadoop 2, start-all.sh is deprecated and simply invokes start-dfs.sh and start-yarn.sh):

start-all.sh

Start HDFS:

start-dfs.sh

Start YARN (MapReduce):

start-yarn.sh

Start the JobHistory server:

mr-jobhistory-daemon.sh start historyserver
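
If everything came up, jps on yard02 should list roughly the following daemons (DataNode and NodeManager appear because yard02 is also in slaves; the SecondaryNameNode runs on yard03):

jps
# NameNode
# ResourceManager
# DataNode
# NodeManager
# JobHistoryServer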

WordCount test (run from $HADOOP_HOME/bin):
./hadoop fs -put /etc/profile /user/hadoop/input
./hadoop jar ../share/hadoop/mapreduce/hadoop-mapreduce-examples-2.0.0-cdh4.4.0.jar wordcount input output
Then check the output.
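A sketch of doing so from HDFS, assuming the job succeeded (part-r-00000 is the usual reducer output file name, and the output directory must not exist before the job runs):

./hadoop fs -ls output
./hadoop fs -cat output/part-r-00000 | head
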
Web UIs:
http://yard02:50070/
http://yard03:50090
http://yard02:8088/cluster
http://yard02:8088/cluster/apps
http://yard02:8088/cluster/nodes
http://yard02:8088/cluster/scheduler

References

http://blog.csdn.net/qiaochao911/article/details/9143303

http://blog.javachen.com/hadoop/2013/03/24/manual-install-Cloudera-Hadoop-CDH/

http://archive.cloudera.com/cdh4/cdh/4/
