Hadoop pseudo-distributed cluster configuration

/**
 * CentOS 6.5
 * hadoop-2.7.3
 */


--Set environment variables: vi /etc/profile
JAVA_HOME=/opt/jdk1.7.0_25
JRE_HOME=/opt/jdk1.7.0_25/jre
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
export JAVA_HOME JRE_HOME CLASSPATH PATH


source /etc/profile        --apply the changes
java -version              --verify the JDK
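
A quick sanity check that the variables took effect (a minimal sketch; paths match the profile above):
echo $JAVA_HOME      # should print /opt/jdk1.7.0_25
which java           # should resolve under $JAVA_HOME/bin if no other JDK shadows it on PATH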


chown -R hadoopa:hadoopa  /opt/server
chmod -R 755 /opt/server
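
The chown above assumes the hadoopa user and group already exist; if not, a sketch to create them first (run before the chown; the password is set interactively):
groupadd hadoopa                 # create the group
useradd -g hadoopa hadoopa       # create the user in that group
passwd hadoopa                   # set a password interactively
mkdir -p /opt/server             # ensure the install directory exists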


#CentOS: change the hostname and hosts file
vim /etc/sysconfig/network      #set HOSTNAME=hadoopa (persists across reboots)
hostname hadoopa                #takes effect immediately


vi /etc/hosts
192.168.61.139  hadoopa
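
A quick check that the name and mapping resolve (assumes the IP above belongs to this machine):
hostname              # should print hadoopa
ping -c 1 hadoopa     # should resolve to 192.168.61.139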


rpm -qa|grep ssh       #check that the SSH client and server are installed
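
If either is missing, it can be installed with yum (standard CentOS 6 package names):
yum install -y openssh-clients openssh-server
service sshd start          # start the SSH daemon
chkconfig sshd on           # start it on boot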




ssh localhost
cd ~/.ssh/                     # if this directory does not exist, run ssh localhost once first
ssh-keygen -t rsa              # accept every prompt by pressing Enter
cat id_rsa.pub >> authorized_keys  # authorize the key
chmod 600 ./authorized_keys    # restrict file permissions
exit                           # log out
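
After the key is authorized, logging in again should require no password (a minimal check):
ssh localhost    # should log in without a password prompt
exit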




--Set environment variables: vi /etc/profile
HADOOP_HOME=/opt/server/hadoop-2.7.3
PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
export HADOOP_HOME PATH
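
As with the JDK variables, reload the profile and verify (hadoop version is a standard subcommand):
source /etc/profile      # apply the changes
hadoop version           # should report Hadoop 2.7.3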


cd /opt/server/hadoop-2.7.3/etc/hadoop
vi hadoop-env.sh
export JAVA_HOME=/opt/jdk1.7.0_25


vi yarn-env.sh
export JAVA_HOME=/opt/jdk1.7.0_25


#core-site.xml
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/opt/server/hadoop-2.7.3/data/tmp</value>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
</configuration>


#hdfs-site.xml
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/opt/server/hadoop-2.7.3/data/tmp/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/opt/server/hadoop-2.7.3/data/tmp/dfs/data</value>
    </property>
</configuration>
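
Hadoop creates these directories on format/startup, but pre-creating them under the hadoopa user avoids permission surprises (a sketch matching the paths configured above):
mkdir -p /opt/server/hadoop-2.7.3/data/tmp/dfs/name
mkdir -p /opt/server/hadoop-2.7.3/data/tmp/dfs/data
chown -R hadoopa:hadoopa /opt/server/hadoop-2.7.3/data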






cp mapred-site.xml.template mapred-site.xml
#mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
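
Optionally, the JobHistory server can be configured in the same file (standard 2.7.x property names; the hadoopa host matches this setup, and 10020/19888 are the default ports):
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>hadoopa:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hadoopa:19888</value>
    </property>
It is started separately with mr-jobhistory-daemon.sh start historyserver.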


#yarn-site.xml
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>




cd /opt/server/hadoop-2.7.3/bin
hdfs namenode -format      # run only once; reformatting generates a new clusterID that existing DataNodes will reject
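
If formatting succeeds, the configured name directory is populated (path from hdfs-site.xml above):
ls /opt/server/hadoop-2.7.3/data/tmp/dfs/name/current
# expect files such as fsimage_* and VERSION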


cd /opt/server/hadoop-2.7.3/sbin
./start-all.sh             # deprecated in 2.x; ./start-dfs.sh followed by ./start-yarn.sh is the preferred equivalent


jps                        # list running Java daemons
netstat -tnlp              # check listening TCP ports
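
On a healthy pseudo-distributed node, jps should list the five Hadoop daemons (PIDs will differ):
jps
# NameNode
# DataNode
# SecondaryNameNode
# ResourceManager
# NodeManager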


#Start daemons individually (hadoop-daemon.sh manages the local node; jobtracker/tasktracker are Hadoop 1.x daemons, replaced by YARN in 2.x)
hadoop-daemon.sh start namenode
hadoop-daemon.sh start datanode
hadoop-daemon.sh start secondarynamenode
yarn-daemon.sh start resourcemanager
yarn-daemon.sh start nodemanager




#Stop daemons individually
yarn-daemon.sh stop nodemanager
yarn-daemon.sh stop resourcemanager
hadoop-daemon.sh stop secondarynamenode
hadoop-daemon.sh stop datanode
hadoop-daemon.sh stop namenode






http://192.168.61.139:50070   #HDFS NameNode web UI
http://192.168.61.139:8088    #YARN ResourceManager web UI
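
A short HDFS smoke test to confirm the cluster is usable (the paths are illustrative):
hdfs dfs -mkdir -p /user/hadoopa        # create a home directory
hdfs dfs -put /etc/hosts /user/hadoopa  # upload a small file
hdfs dfs -ls /user/hadoopa              # list it back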
