CentOS 7 Hadoop Install

Master: c1 10.0.9.1 NameNode, ResourceManager
Slave1: c2 10.0.9.2 DataNode, NodeManager
Slave2: c3 10.0.9.3 DataNode, NodeManager
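
If the hostnames c1, c2, and c3 should resolve on every node, one option is to map them in /etc/hosts on all three machines (a minimal sketch; adjust to your own naming):

# vi /etc/hosts

10.0.9.1 c1
10.0.9.2 c2
10.0.9.3 c3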

ssh passwordless login

make authorized keys (on Master, Slave1, and Slave2)

# vi /etc/ssh/sshd_config

RSAAuthentication yes
PubkeyAuthentication yes
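
If sshd_config was changed, restart the daemon so the settings take effect (the reboot at the end of this section would also pick them up):

# systemctl restart sshd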


# cd /root
# ssh-keygen -t rsa  (press Enter at every prompt to accept the defaults and an empty passphrase)
# cd /root/.ssh
# cat id_rsa.pub >> authorized_keys

merge authorized keys (on Master)

# cd /root/.ssh
# ssh root@10.0.9.2 cat ~/.ssh/id_rsa.pub >> authorized_keys
# ssh root@10.0.9.3 cat ~/.ssh/id_rsa.pub >> authorized_keys
# scp authorized_keys root@10.0.9.2:/root/.ssh
# scp authorized_keys root@10.0.9.3:/root/.ssh
# reboot
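
To confirm passwordless login works before continuing, a quick check from the Master (each command should print the remote hostname without asking for a password):

# ssh root@10.0.9.2 hostname
# ssh root@10.0.9.3 hostname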

prepare install packages

uncompress & install

# rpm -i jdk-8u121-linux-x64.rpm
# tar -zxvf apache-ant-1.10.1-bin.tar.gz
# tar -zxvf apache-maven-3.3.9-bin.tar.gz
# tar -zxvf hadoop-2.7.3.tar.gz

# mv apache-ant-1.10.1 /usr/local/
# mv apache-maven-3.3.9 /usr/local/
# mv hadoop-2.7.3 /usr/local/

set environment variables

# vi /etc/profile

export JAVA_HOME=/usr/java/default
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib
export ANT_HOME=/usr/local/apache-ant-1.10.1
export MAVEN_HOME=/usr/local/apache-maven-3.3.9
export HADOOP_HOME=/usr/local/hadoop-2.7.3

export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:$MAVEN_HOME/bin:$HADOOP_HOME/bin
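
Reload the profile and check that the tools are on the PATH (version output depends on what was installed):

# source /etc/profile
# java -version
# hadoop version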

configure hadoop

configure master (on Master)

# cd /usr/local/hadoop-2.7.3/etc/hadoop
# vi core-site.xml

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://10.0.9.1:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/data/hadoop/tmp</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
</configuration>
# vi hdfs-site.xml

<configuration>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/data/hadoop/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/data/hadoop/dfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>10.0.9.1:9001</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
</configuration>
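
The directories referenced above do not exist yet. One way to create them, assuming the same /data/hadoop layout on every node (the name directory is only used on the Master, the data directory only on the slaves):

# mkdir -p /data/hadoop/tmp /data/hadoop/dfs/name /data/hadoop/dfs/data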
# cp mapred-site.xml.template mapred-site.xml
# vi mapred-site.xml

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>10.0.9.1:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>10.0.9.1:19888</value>
    </property>
</configuration>
# vi yarn-site.xml

<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>10.0.9.1:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>10.0.9.1:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>10.0.9.1:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>10.0.9.1:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>10.0.9.1:8088</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>768</value>
    </property>
</configuration>
# vi hadoop-env.sh

export JAVA_HOME=/usr/java/default


# vi yarn-env.sh

export JAVA_HOME=/usr/java/default
# vi slaves

#localhost
10.0.9.2
10.0.9.3

copy the hadoop directory from Master to Slaves

# scp -r /usr/local/hadoop-2.7.3 10.0.9.2:/usr/local/
# scp -r /usr/local/hadoop-2.7.3 10.0.9.3:/usr/local/
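
The slaves also need the JDK and the same environment variables. Assuming the JDK rpm was installed on all three nodes in the prepare step and the slaves' /etc/profile has no local customizations, one option is to push the profile there too (otherwise re-apply the exports by hand):

# scp /etc/profile root@10.0.9.2:/etc/profile
# scp /etc/profile root@10.0.9.3:/etc/profile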

run hadoop

format the namenode (on Master)

# cd /usr/local/hadoop-2.7.3/bin
# ./hdfs namenode -format

start hadoop

# cd /usr/local/hadoop-2.7.3/sbin
# ./start-all.sh  (equivalent to running ./start-dfs.sh followed by ./start-yarn.sh)

# jps  (list the Java daemons running on each node)
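
Beyond jps, the HDFS report and the web UIs give a quick health check: the ResourceManager UI is at http://10.0.9.1:8088 (as configured above), and the NameNode UI defaults to http://10.0.9.1:50070 in Hadoop 2.x.

# cd /usr/local/hadoop-2.7.3/bin
# ./hdfs dfsadmin -report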

# ./stop-all.sh