hadoop3.x集群安装

Hadoop版本

Hadoop集群节点分配

Hadoop3.x端口变化

1:安装系统

2:确定hostname

[root@bigdata1 jdk]# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=master

 3:设置网络 

4:设置hosts

[root@bigdata1 jdk]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.105 master
192.168.1.206 slave1
192.168.1.106 slave2

5:关闭防火墙 

[root@bigdata2 ~]# service iptables stop
iptables:将链设置为政策 ACCEPT:filter                    [确定]
iptables:清除防火墙规则:                                 [确定]
iptables:正在卸载模块:                                   [确定]
[root@bigdata2 ~]# chkconfig iptables off

6:设置selinux

[root@bigdata2 ~]# vim /etc/selinux/config

7:创建hadoop用户

useradd hadoop
passwd hadoop

8:配置ssh免密钥登录(用hadoop用户设置)

ssh-keygen -t rsa

如果没有该命令:安装 yum -y install openssh-clients
ssh-copy-id hadoop@master
ssh-copy-id hadoop@slave1
ssh-copy-id hadoop@slave2

9:安装Java1.8(hadoop用户)

vim .bash_profile

export JAVA_HOME=/opt/app/jdk/jdk1.8.0_171
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

source .bash_profile

10:安装hadoop(hadoop用户)

1:创建hdfs nameNode
mkdir -p /data/hadoop/hdfs/namenode
2:创建hdfs dataNode
mkdir -p /data/hadoop/hdfs/datanode
3:创建hdfs tmp
mkdir -p /data/hadoop/tmp
4:创建yarn nodemanager
mkdir -p /data/hadoop/yarn/nodemanager
5:创建yarn log
mkdir -p /data/hadoop/yarn/logs
6:mr目录
mkdir -p /data/hadoop/mr

11:解压hadoop-3.2.0.tar.gz包

12:添加hadoop环境变量

export HADOOP_HOME=/opt/app/hadoop/hadoop-3.2.0
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile

13:修改配置文件(core-site.xml)

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
		<description>namenode节点地址与端口</description>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/data/hadoop/tmp</value>
		<description>临时文件存储路径</description>
    </property>
</configuration>

 14:修改hadoop-env.sh文件

export JAVA_HOME=/opt/app/jdk/jdk1.8.0_171

15:修改hdfs-site.xml文件

<configuration>
	<property>
		<name>dfs.replication</name>
		<value>2</value>
	</property>
	<property>
		<name>dfs.namenode.name.dir</name>
		<value>file:/data/hadoop/hdfs/namenode</value>
		<final>true</final>
	</property>
	<property>
		<name>dfs.datanode.data.dir</name>
		<value>file:/data/hadoop/hdfs/datanode</value>
		<final>true</final>
	</property>
	<property>
		<name>dfs.namenode.secondary.http-address</name>
		<value>master:9001</value>
	</property>
	<property>
		<name>dfs.permissions.enabled</name>
		<value>false</value>
	</property>
</configuration>

16:修改mapred-site.xml文件

<configuration>

	<property>
	  <name>mapreduce.framework.name</name>
	  <value>yarn</value>
	</property>
	<property>
	  <name>yarn.app.mapreduce.am.env</name>
	  <value>HADOOP_MAPRED_HOME=/opt/app/hadoop/hadoop-3.2.0</value>
	</property>
	<property>
	  <name>mapreduce.map.env</name>
	  <value>HADOOP_MAPRED_HOME=/opt/app/hadoop/hadoop-3.2.0</value>
	</property>
	<property>
	  <name>mapreduce.reduce.env</name>
	  <value>HADOOP_MAPRED_HOME=/opt/app/hadoop/hadoop-3.2.0</value>
	</property>
	<property> 
		<name>mapreduce.application.classpath</name>
		<value>$HADOOP_HOME/share/hadoop/mapreduce/*,$HADOOP_HOME/share/hadoop/mapreduce/lib/*,$HADOOP_HOME/share/hadoop/common/*,$HADOOP_HOME/share/hadoop/common/lib/*,$HADOOP_HOME/share/hadoop/yarn/*,$HADOOP_HOME/share/hadoop/yarn/lib/*,$HADOOP_HOME/share/hadoop/hdfs/*,$HADOOP_HOME/share/hadoop/hdfs/lib/*</value>
	</property>
	<property>
		<name>mapreduce.map.memory.mb</name>
		<value>512</value>
	</property>	
	<property>
		<name>mapreduce.reduce.memory.mb</name>
		<value>512</value>
	</property>	

</configuration>

17:修改yarn-site.xml文件

<configuration>

	<property>
		<name>yarn.resourcemanager.hostname</name>
		<value>master</value>
	</property>
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
	<property>
		<name>yarn.nodemanager.local-dirs</name>
		<value>/data/hadoop/yarn/nodemanager</value>
	</property>
	<property>
		<name>yarn.nodemanager.log-dirs</name>
		<value>/data/hadoop/yarn/logs</value>
	</property>
	<property>
		<name>yarn.nodemanager.vmem-check-enabled</name>
		<value>false</value>
	</property>

</configuration>

18:添加workers文件内容

slave1
slave2

19:初始化nameNode(master节点)

格式化:hdfs namenode -format

20:启动hdfs和yarn

启动HDFS:start-dfs.sh 启动YARN:start-yarn.sh

21:验证

hdfs dfs -ls /
hdfs dfs -mkdir /user
##将本地文件上传到hdfs文件系统目录下 
hdfs dfs -copyFromLocal <localFile> <hdfs dir>
hdfs dfs -copyFromLocal workers /user
hdfs dfs -cat /user/workers
hadoop jar hadoop-mapreduce-examples-3.2.0.jar pi 5 10

22:访问

hdfs: http://localhost:9870

yarn: http://localhost:8088

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值