Hadoop 2.0 YARN (Cloudera CDH 4.4.0) Installation and Configuration

1. Set the hostname

hadoop@hadoop-virtual-machine:~$ cat /etc/hostname
yard02
hadoop@hadoop-virtual-machine:~$
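
Each VM gets its node name written into /etc/hostname (yard02 here, yard03 through yard05 on the others). A minimal sketch for doing that without a reboot, assuming an Ubuntu guest:

echo yard03 | sudo tee /etc/hostname   # pick the matching name on each node
sudo hostname yard03                   # apply immediately; new shells pick it up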

2. Map all node names in /etc/hosts

hadoop@hadoop-virtual-machine:~$ cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 hadoop-virtual-machine

# The following lines are desirable for IPv6 capable hosts

::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.137.2 yard02
192.168.137.3 yard03
192.168.137.4 yard04
192.168.137.5 yard05
hadoop@hadoop-virtual-machine:~$
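
Before moving on, it is worth confirming that every node resolves every other by name; a quick loop to run from any host:

for h in yard02 yard03 yard04 yard05; do ping -c 1 $h; done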

3. Edit the Hadoop configuration files

core-site.xml (in the configuration directory, ${HADOOP_HOME}/etc/hadoop)

<configuration>

<property>
<name>fs.defaultFS</name>
<value>hdfs://yard02</value>
</property>
<property>
<name>fs.trash.interval</name>
<value>10080</value>
</property>
<property>
<name>fs.trash.checkpoint.interval</name>
<value>10080</value>
</property>
</configuration>
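
A note on the trash settings: fs.trash.interval is measured in minutes, so 10080 keeps deleted files recoverable for 7 days. With trash enabled, fs -rm moves data into the per-user .Trash directory instead of deleting it; a quick illustration (the path is hypothetical):

./hadoop fs -rm /user/hadoop/somefile              # moved under /user/hadoop/.Trash, purged after 7 days
./hadoop fs -rm -skipTrash /user/hadoop/somefile   # deletes immediately, bypassing the trash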

hdfs-site.xml

<configuration>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/hadoop-${user.name}</value>
</property>
<property>
<name>dfs.namenode.http-address</name>
<value>yard02:50070</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>yard03:50090</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>
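
The data directory referenced by hadoop.tmp.dir must be creatable on every node before the NameNode is formatted; making the parent up front avoids permission surprises:

mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data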

yarn-site.xml

<configuration>

<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>yard02:8031</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>yard02:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>yard02:8030</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>yard02:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>yard02:8088</value>
</property>
<property>
<description>Classpath for typical applications.</description>
<name>yarn.application.classpath</name>
<value>$HADOOP_CONF_DIR,
$HADOOP_COMMON_HOME/share/hadoop/common/*,
$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,
$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
$YARN_HOME/share/hadoop/yarn/*,
$YARN_HOME/share/hadoop/yarn/lib/*,
$YARN_HOME/share/hadoop/mapreduce/*,
$YARN_HOME/share/hadoop/mapreduce/lib/*</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce.shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.nodemanager.local-dirs</name>
<value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/local</value>
</property>
<property>
<name>yarn.nodemanager.log-dirs</name>
<value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/logs</value>
</property>
<property>
<description>Where to aggregate logs</description>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/logs</value>
</property>
<property>
<name>yarn.app.mapreduce.am.staging-dir</name>
<value>/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0</value>
</property>
</configuration>
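
Likewise, the NodeManager local and log directories configured above should exist on every node:

mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/local
mkdir -p /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/data/yarn/logs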

mapred-site.xml

<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>yard02:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>yard02:19888</value>
</property>
</configuration>

masters

yard02
yard03

slaves

yard02
yard03
yard04
yard05

.bashrc

export HADOOP_HOME=/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export HADOOP_YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export PATH=$PATH:$HOME/bin:$JAVA_HOME/bin:$HADOOP_HOME/sbin
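
Reload the environment and sanity-check it (this assumes JAVA_HOME is already exported elsewhere in .bashrc, since PATH references it):

source ~/.bashrc
echo $HADOOP_CONF_DIR              # expect /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/etc/hadoop
$HADOOP_HOME/bin/hadoop version    # expect Hadoop 2.0.0-cdh4.4.0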

Sync the installation and configuration to the other nodes:

scp -r /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/* hadoop@yard03:/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/
scp -r /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/* hadoop@yard04:/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/
scp -r /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/* hadoop@yard05:/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/

scp /home/hadoop/.bashrc hadoop@yard03:/home/hadoop
scp /home/hadoop/.bashrc hadoop@yard04:/home/hadoop
scp /home/hadoop/.bashrc hadoop@yard05:/home/hadoop
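
The same sync can be written as a loop, which scales better as nodes are added:

for h in yard03 yard04 yard05; do
  scp -r /home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/* hadoop@$h:/home/hadoop/bigdata/hadoop-2.0.0-cdh4.4.0/
  scp /home/hadoop/.bashrc hadoop@$h:/home/hadoop
done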

For passwordless SSH, this recipe works every time:

ssh-keygen -t rsa
cd ~/.ssh
cp id_rsa.pub authorized_keys   # the local machine needs this too
ssh localhost                   # confirm passwordless login works locally first

scp authorized_keys hadoop@yard03:/home/hadoop/.ssh
scp authorized_keys hadoop@yard04:/home/hadoop/.ssh
scp authorized_keys hadoop@yard05:/home/hadoop/.ssh
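
Verify each hop really is passwordless before starting anything, since the start scripts depend on it:

for h in yard03 yard04 yard05; do ssh $h hostname; done   # each name should print with no password prompt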

The start and stop scripts now live in the sbin directory.
Format the NameNode first:

hadoop namenode -format

Start everything at once:

start-all.sh

Or start HDFS alone:

start-dfs.sh

Start YARN (which hosts MapReduce):

start-yarn.sh

Start the JobHistory server:

mr-jobhistory-daemon.sh start historyserver
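
Once everything is up, running jps on each node shows which daemons landed where. With the layout above you would expect NameNode, ResourceManager, and JobHistoryServer on yard02, SecondaryNameNode on yard03, and a DataNode plus NodeManager on every host listed in slaves:

jps   # run on every node and compare against the expected daemon list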

WordCount test
./hadoop fs -put /etc/profile /user/hadoop/input
./hadoop jar ../share/hadoop/mapreduce/hadoop-mapreduce-examples-2.0.0-cdh4.4.0.jar wordcount input output
View the result:
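
With the default single reducer, the counts land in part-r-00000:

./hadoop fs -ls output
./hadoop fs -cat output/part-r-00000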
Then check the web UIs:
http://yard02:50070/
http://yard03:50090
http://yard02:8088/cluster
http://yard02:8088/cluster/apps
http://yard02:8088/cluster/nodes
http://yard02:8088/cluster/scheduler

References

http://blog.csdn.net/qiaochao911/article/details/9143303

http://blog.javachen.com/hadoop/2013/03/24/manual-install-Cloudera-Hadoop-CDH/

http://archive.cloudera.com/cdh4/cdh/4/
