pom file
<dependencies>
    <!-- Hadoop client libraries -->
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>
    <!-- unit testing -->
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
    </dependency>
    <!-- logging (SLF4J bound to log4j) -->
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-log4j12</artifactId>
        <version>1.7.30</version>
    </dependency>
</dependencies>
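With these dependencies in place, a small JUnit test against the HDFS API is a quick way to confirm the client can reach the cluster. A minimal sketch, assuming the NameNode address hdfs://Mage1:9000 from core-site.xml below; /client-test is a hypothetical path:

// HdfsClientTest.java — HDFS connectivity smoke test
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;

public class HdfsClientTest {
    @Test
    public void testMkdirs() throws Exception {
        // connect as root, the user the cluster below runs as
        FileSystem fs = FileSystem.get(new URI("hdfs://Mage1:9000"), new Configuration(), "root");
        fs.mkdirs(new Path("/client-test"));   // create a test directory on HDFS
        fs.close();
    }
}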
Configuration code
rpm -qa | grep ssh                  # confirm the OpenSSH packages are installed
systemctl start sshd.service        # start the SSH daemon
ssh-keygen -t rsa                   # generate an RSA key pair (press Enter at each prompt; this creates ~/.ssh and id_rsa.pub)
cd ~/.ssh
cat id_rsa.pub >> authorized_keys   # authorize our own public key
chmod 644 authorized_keys
ssh root@localhost                  # should now log in without a password
mkdir -p /usr/local/java            # directory for the JDK
# planned IP addresses for the three nodes
first   192.168.121.134
second  192.168.121.135
third   192.168.121.136
1. chmod is the Linux command for changing file permissions. Common forms:
(1) sudo chmod u+x
(2) sudo chmod g+x
(3) sudo chmod o+x
Here u, g, and o stand for user, group, and others; "+" grants a permission, and x (executable) is the execute bit.
2. What sudo chmod 777 means: each octal digit is the sum of r=4, w=2, and x=1 for user, group, and others in turn, so 777 grants read, write, and execute to everyone.
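For example (script.sh is a hypothetical file), the numeric and symbolic forms are equivalent:

chmod 777 script.sh                 # rwxrwxrwx
chmod u=rwx,g=rwx,o=rwx script.sh   # the same permissions, written symbolically
ls -l script.sh                     # -rwxrwxrwx ... script.sh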
su root    # switch to the root user
Host network details (from ipconfig on the Windows host; the VM's static IP is chosen from the same subnet):
IPv4 Address . . . . . . . . . . . : 192.168.0.103
Subnet Mask  . . . . . . . . . . . : 255.255.255.0
Default Gateway  . . . . . . . . . : 192.168.0.1
cd /etc/sysconfig/network-scripts/
vi ifcfg-ens33
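The key lines for a static address in ifcfg-ens33 — a sketch assuming the subnet and gateway shown above; set a unique IPADDR on each node:

BOOTPROTO=static      # fixed address instead of DHCP
ONBOOT=yes            # bring the interface up at boot
IPADDR=192.168.0.103  # this node's address
NETMASK=255.255.255.0
GATEWAY=192.168.0.1
DNS1=192.168.0.1      # assumption: the gateway also serves DNS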
Cluster configuration files
These files use Mage1 as the NameNode host; substitute your own master hostname.
core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://Mage1:9000</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/temp</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
</configuration>
hdfs-site.xml
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>Mage1:9001</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/dfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <!-- dfs.permissions is the deprecated name; Hadoop 3 uses dfs.permissions.enabled -->
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.web.ugi</name>
        <value>supergroup</value>
    </property>
</configuration>
mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>Mage1:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>Mage1:19888</value>
    </property>
</configuration>
yarn-site.xml
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <!-- the key must embed the aux-service name: mapreduce_shuffle, with an underscore -->
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>Mage1:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>Mage1:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>Mage1:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>Mage1:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>Mage1:8088</value>
    </property>
</configuration>
Hadoop 3.x additionally needs HADOOP_MAPRED_HOME set in mapred-site.xml, or MapReduce jobs fail with "Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster" (the path here matches the HADOOP_HOME set below):
<property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=/opt/module/hadoop-3.1.3</value>
</property>
<property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=/opt/module/hadoop-3.1.3</value>
</property>
<property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=/opt/module/hadoop-3.1.3</value>
</property>
To run the daemons as root, add to hadoop-env.sh (or to start-dfs.sh / start-yarn.sh):
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
Configuration code 2
Append to /etc/profile:
## JAVA_HOME
export JAVA_HOME=/opt/module/jdk1.8.0_202
export PATH=$PATH:$JAVA_HOME/bin
## HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
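After editing, reload the profile and verify both tools resolve:

source /etc/profile
java -version       # should report 1.8.0_202
hadoop version      # should report Hadoop 3.1.3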
Hostnames of the three machines (the mappings go in /etc/hosts on every node):
192.168.79.132 master
192.168.79.133 node1
192.168.79.134 node2
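A sketch of appending the mappings in one step:

cat >> /etc/hosts <<EOF
192.168.79.132 master
192.168.79.133 node1
192.168.79.134 node2
EOF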
useradd hadoop                     # create a hadoop user
passwd hadoop                      # set its password
vi /etc/hostname                   # edit the hostname, or set it directly:
hostnamectl set-hostname master    # use node1 / node2 on the other machines
# distribute the public key to every node (ssh-copy-id -i ~/.ssh/id_rsa.pub <host> is the long form)
ssh-copy-id master
ssh-copy-id node1
ssh-copy-id node2
# make sure the permissions on the receiving side are strict enough for sshd
chmod 700 ~/.ssh
chmod 644 ~/.ssh/authorized_keys
# stop the firewall so the Hadoop daemons can reach each other
sudo systemctl stop firewalld
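Passwordless login can then be checked from the master:

ssh node1 hostname   # should print node1 without prompting for a password
ssh node2 hostname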
# list the worker nodes (path matches HADOOP_HOME above)
vi /opt/module/hadoop-3.1.3/etc/hadoop/workers
master
node1
node2
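The configuration must be identical on all nodes; assuming the same install path everywhere, it can be pushed from the master with scp:

scp -r /opt/module/hadoop-3.1.3/etc/hadoop root@node1:/opt/module/hadoop-3.1.3/etc/
scp -r /opt/module/hadoop-3.1.3/etc/hadoop root@node2:/opt/module/hadoop-3.1.3/etc/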
## format the NameNode on the master — once only, before the first start
hdfs namenode -format
## start
start-all.sh
## stop
stop-all.sh
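If everything is up, jps lists the daemons and a sample job runs end to end:

jps    # master should show NameNode, SecondaryNameNode, ResourceManager (plus DataNode/NodeManager, since master is also in workers)
# web UIs: HDFS NameNode at http://<namenode-host>:9870, YARN at port 8088
# smoke-test MapReduce with the bundled pi example
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar pi 2 10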