一步一步安装hadoop1.2.1

一步一步安装hadoop1.2.1

--主机基本配置
--配置主机
hostname node1
hostname node2
hostname node3

--配置ip
ifconfig
192.168.239.129
192.168.239.135
192.168.239.136



--配置映射
vi /etc/hosts
192.168.239.129 node1
192.168.239.135 node2
192.168.239.136 node3

--配置网络,3台都要配置,或者用setup命令配置
vi /etc/sysconfig/network
HOSTNAME=node1

vi /etc/sysconfig/network-scripts/ifcfg-eth0
IPADDR=192.168.239.129
NETMASK=255.255.255.0
GATEWAY=192.168.239.1


/sbin/service network restart  #重新启动网络服务

--配置防火墙
service iptables stop
chkconfig iptables off
chkconfig|grep iptables
iptables        0:off   1:off   2:off   3:off   4:off   5:off   6:off

reboot之后主机名生效




--增加hadoop用户
groupadd hadoop
useradd hadoop -g hadoop
passwd hadoop

[hadoop@node1 ~]$ cat /etc/profile
export JAVA_HOME=/home/hadoop/jdk1.7.0_67
export HADOOP_HOME=/home/hadoop/hadoop-1.2.1

source /etc/profile   #重新加载环境变量,使配置立即生效(source是加载,不是编译)

--配置免密码

node1,2,3中执行:
su - hadoop
ssh-keygen -q -t rsa -N "" -f /home/hadoop/.ssh/id_rsa
cd .ssh
cat id_rsa.pub >> authorized_keys
chmod go-wx  authorized_keys

node1中执行:
scp id_rsa.pub hadoop@node2:~
scp id_rsa.pub hadoop@node3:~

node2和node3中执行:
cat ~/id_rsa.pub>>~/.ssh/authorized_keys
cat ~/id_rsa.pub>>~/.ssh/authorized_keys

node1中执行:
ssh node2
ssh node3

----测试命令
/sbin/ifconfig
ping node1
ssh node1
jps
echo $JAVA_HOME
echo $HADOOP_HOME
hadoop



--上传解压
tar -zxvf hadoop-1.2.1.tar.gz
cd /home/hadoop/hadoop-1.2.1/share/hadoop/templates/conf   --模版的例子(注意目录名与解压后的 hadoop-1.2.1 保持一致)


-----------------hadoop基本配置------------------------
[hadoop@node1 conf]$ vi core-site.xml
[hadoop@node1 conf]$ vi hdfs-site.xml
[hadoop@node1 conf]$ vi slaves
[hadoop@node1 conf]$ vi masters
[hadoop@node1 conf]$ vi hadoop-env.sh       

scp conf/* hadoop@node2:~/hadoop-1.2.1/conf
scp conf/* hadoop@node3:~/hadoop-1.2.1/conf

--创建data目录,对应core-site.xml中hadoop.tmp.dir的配置(/home/hadoop/hadoop-1.2.1/data)
mkdir -p /home/hadoop/hadoop-1.2.1/data


--格式化
	NameNode Format
		bin/hadoop namenode -format		            --生成namenode目录
(注意:hadoop 1.x 没有 bin/hdfs 脚本,bin/hdfs namenode -format 是 hadoop 2.x 的用法)

--启动hadoop(hadoop 1.x 的启动脚本在 bin/ 目录下,sbin/ 是 hadoop 2.x 的布局)
		bin/hadoop-daemon.sh start namenode
		bin/hadoop-daemon.sh start datanode   --生成datanode目录

--监控
		http://node1:50070



--配置mapreduce
[hadoop@node1 conf]$ vi mapred-site.xml

scp conf/* hadoop@node2:~/hadoop-1.2.1/conf
scp conf/* hadoop@node3:~/hadoop-1.2.1/conf

--监控界面
http://node1:50070/dfshealth.jsp
http://node1:50030/jobtracker.jsp
http://node2:50060/tasktracker.jsp



--windows本机配置ip映射
C:\WINDOWS\system32\drivers\etc\hosts
192.168.239.129 node1
192.168.239.135 node2
192.168.239.136 node3

--登录控制台
http://node1:50070


---------------------------------------------------------------
--附配置:
---------------------------------------------------------------

[hadoop@node1 conf]$ vi core-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://node1:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/hadoop/hadoop-1.2.1/data</value>
    </property>
</configuration>

[hadoop@node1 conf]$ vi hdfs-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
            <name>dfs.replication</name>
            <value>3</value>
    </property>
</configuration>


[hadoop@node1 conf]$ vi mapred-site.xml

<configuration>
    <property>
            <name>mapred.job.tracker</name>
            <value>node1:9001</value>
    </property>
</configuration>

[hadoop@node1 conf]$ vi hadoop-env.sh
export JAVA_HOME=/home/hadoop/jdk1.7.0_67

[hadoop@node1 conf]$ vi slaves
node1
node2
node3

[hadoop@node1 conf]$ vi masters    --secondarynamenode
node2

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值