Hadoop HA cluster configuration

Only part of the configuration files are listed here, kept for my own reference.

1.core-site.xml

<configuration>
        <property>  
            <name>hadoop.tmp.dir</name>  
            <value>/home/hadoop/tmp</value>  
        </property>  
        <property>  
            <name>fs.defaultFS</name>  
            <value>hdfs://cluster1</value>  
        </property>
        <property>
            <name>ha.zookeeper.quorum</name>
            <value>master.hadoop.com:2181,slave1.hadoop.com:2181,slave2.hadoop.com:2181</value>
        </property>
        <property>
            <name>dfs.namenode.checkpoint.dir</name>
            <value>/home/hadoop/dfs/namenode_ckpt</value>
        </property>   
        <property>  
            <name>io.file.buffer.size</name>  
            <value>4096</value>  
        </property>  
        <property>
            <name>fs.trash.interval</name>
            <value>1440</value>
            <description>Number of minutes between trash checkpoints. If zero, the trash feature is disabled.</description>
        </property>
</configuration>
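
ha.zookeeper.quorum expects a running ZooKeeper ensemble on the three hosts above. A quick sanity check before formatting the failover controller (a sketch, assuming ZooKeeper is installed under $ZOOKEEPER_HOME on each host, which is not shown in this post):

    # run on master.hadoop.com, slave1.hadoop.com and slave2.hadoop.com
    $ZOOKEEPER_HOME/bin/zkServer.sh start
    $ZOOKEEPER_HOME/bin/zkServer.sh status
    # one node should report "Mode: leader", the other two "Mode: follower"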

2.hdfs-site.xml
<configuration>
        <property>  
            <name>dfs.nameservices</name>  
            <value>cluster1</value>  
        </property> 
        <property>
                <name>dfs.ha.namenodes.cluster1</name>
                <value>nn1,nn2</value>
        </property> 
        <property>
                <name>dfs.namenode.rpc-address.cluster1.nn1</name>
                <value>master.hadoop.com:8020</value>
        </property>
 
        <property>
                <name>dfs.namenode.rpc-address.cluster1.nn2</name>
                <value>slave1.hadoop.com:8020</value>
        </property>
 
        <property>
                <name>dfs.namenode.http-address.cluster1.nn1</name>
                <value>master.hadoop.com:50070</value>
        </property>
 
        <property>
                <name>dfs.namenode.http-address.cluster1.nn2</name>
                <value>slave1.hadoop.com:50070</value>
        </property>
        <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://192.168.80.102:8485;192.168.80.103:8485;192.168.80.104:8485/cluster1</value>
        </property>
        <property>
                <name>dfs.client.failover.proxy.provider.cluster1</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value>shell(/bin/bash)</value>
        </property>
        <property>
                <name>dfs.ha.automatic-failover.enabled</name>
                <value>true</value>
        </property>
         <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/home/hadoop/.ssh/id_rsa</value>
        </property>
        <property>  
            <name>dfs.namenode.name.dir</name>  
            <value>/home/hadoop/dfs/name</value>  
        </property>  
        <property>  
            <name>dfs.datanode.data.dir</name>  
            <value>/home/hadoop/dfs/data</value>  
        </property>  
        <property>
                <name>dfs.journalnode.edits.dir</name>
                <value>/home/hadoop/dfs/journal</value>
        </property>
        <property>  
            <name>dfs.replication</name>  
            <value>2</value>  
        </property>  
        <property>  
            <name>dfs.webhdfs.enabled</name>  
            <value>true</value>  
        </property>  
</configuration>
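
With fs.defaultFS pointing at the logical nameservice cluster1 and the ConfiguredFailoverProxyProvider above, clients do not need to know which NameNode is currently active. A minimal check (assuming both NameNodes are up):

    bin/hdfs dfs -ls hdfs://cluster1/
    bin/hdfs dfs -ls /    # same thing, fs.defaultFS already resolves to cluster1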

3.yarn-site.xml
<configuration>

<!-- Site specific YARN configuration properties -->
        <property>
            <name>yarn.nodemanager.aux-services</name>  
            <value>mapreduce_shuffle</value>  
        </property>  
        <property>  
            <name>yarn.resourcemanager.address</name>  
            <value>master.hadoop.com:8032</value>  
        </property>  
        <property>  
            <name>yarn.resourcemanager.scheduler.address</name>  
            <value>master.hadoop.com:8030</value>  
        </property>  
        <property>  
            <name>yarn.resourcemanager.resource-tracker.address</name>  
            <value>master.hadoop.com:8031</value>  
        </property>  
        <property>  
            <name>yarn.resourcemanager.admin.address</name>  
            <value>master.hadoop.com:8033</value>  
        </property>  
        <property>  
            <name>yarn.resourcemanager.webapp.address</name>  
            <value>master.hadoop.com:8088</value>  
        </property>  
</configuration>
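
Note that this yarn-site.xml only configures a single ResourceManager on master.hadoop.com (no RM HA). A quick check after start-yarn.sh, assuming the NodeManagers have registered:

    bin/yarn node -list
    # or open http://master.hadoop.com:8088 in a browser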

4.mapred-site.xml
 
<configuration>
<property>  
        <name>mapreduce.framework.name</name>  
        <value>yarn</value>  
    </property>  
     
    <property>  
        <name>mapreduce.jobhistory.address</name>  
        <value>master.hadoop.com:10020</value>  
    </property>  
    <property>  
        <name>mapreduce.jobhistory.webapp.address</name>  
        <value>master.hadoop.com:19888</value>  
    </property>  
</configuration>
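
The two jobhistory addresses only take effect once the JobHistory Server is actually running; it is not started by start-yarn.sh. On master.hadoop.com (Hadoop 2.x script layout assumed):

    sbin/mr-jobhistory-daemon.sh start historyserver
    # web UI: http://master.hadoop.com:19888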


Startup order

Without DFSZKFailoverController (switch the NameNode manually)

1. Start the JournalNode on each node: sbin/hadoop-daemon.sh start journalnode

2. Format nn1 and start it: bin/hdfs namenode -format, then sbin/hadoop-daemon.sh start namenode

3. On nn2, sync nn1's metadata and start it: bin/hdfs namenode -bootstrapStandby, then sbin/hadoop-daemon.sh start namenode

4. On nn1, switch the NameNode to active: bin/hdfs haadmin -transitionToActive nn1

5. From nn1, start the cluster's DataNodes: sbin/hadoop-daemons.sh start datanode

6. From nn1, start YARN: sbin/start-yarn.sh
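
After step 6, the HA state can be checked explicitly; for example (run from any node with the same configuration):

    bin/hdfs haadmin -getServiceState nn1    # expect: active
    bin/hdfs haadmin -getServiceState nn2    # expect: standby
    bin/hdfs dfsadmin -report                # DataNodes should be listed as live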

With ZooKeeper, automatic NameNode failover (the configuration above supports starting this way)

1. sbin/hadoop-daemons.sh start journalnode

2. Start nn1: sbin/hadoop-daemon.sh start namenode

3. Start nn2: bin/hdfs namenode -bootstrapStandby, then sbin/hadoop-daemon.sh start namenode

4. On nn1: hdfs zkfc -formatZK

5. On nn1: sbin/hadoop-daemons.sh start zkfc

6. On nn1: sbin/hadoop-daemons.sh start datanode
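
With automatic failover enabled, a ZKFC process should be running next to each NameNode. A rough way to verify the failover itself (assuming you can afford to kill the active NameNode on a test cluster):

    jps                                      # on nn1/nn2: NameNode and DFSZKFailoverController
    bin/hdfs haadmin -getServiceState nn1
    bin/hdfs haadmin -getServiceState nn2
    # kill the active NameNode process; within a few seconds the other NameNode should report "active"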



-------------------------

If this is the first startup, formatting is required.

With ZooKeeper, automatic NameNode failover (the configuration above supports starting this way)

1. sbin/hadoop-daemons.sh start journalnode

2. bin/hdfs namenode -initializeSharedEdits

Start the Active NN

3. hdfs namenode -format

4. Start nn1: sbin/hadoop-daemon.sh start namenode

Start the Standby NN

5. Start nn2: bin/hdfs namenode -bootstrapStandby, then sbin/hadoop-daemon.sh start namenode

Start Automatic Failover

6. On nn1: hdfs zkfc -formatZK

7. On nn1: sbin/hadoop-daemons.sh start zkfc

Start the DataNodes

8. On nn1: sbin/hadoop-daemons.sh start datanode
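
After the first full startup, jps can be used to confirm the expected daemons are running (the exact set per host depends on the layout: NameNode hosts should additionally show DFSZKFailoverController, JournalNode hosts show JournalNode, and ZooKeeper hosts show QuorumPeerMain):

    jps
    # typical output on an HA NameNode host:
    #   NameNode
    #   DFSZKFailoverController
    #   JournalNode
    #   Jps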




