- Prepare the files: JDK 1.8 and the Hadoop 3.2.0 tarball
- cd /home
- mkdir java
- mkdir hadoop
- Put the JDK file into the java folder
- Put the Hadoop file into the hadoop folder
- Download JDK 1.8:
- wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.rpm
- rpm -ivh jdk-8u131-linux-x64.rpm
- Run java -version to check whether the JDK installed successfully; the install path is usually under /usr/java/
- cd /home/hadoop
- tar -xzvf hadoop-3.2.0.tar.gz to extract the tarball
- mv hadoop-3.2.0 hadoop to rename the extracted directory
The NameNode web UI port is 9870.
Check whether the system already has Java:
java -version
List the installed JDK packages:
rpm -qa | grep java
Uninstall the existing JDK, one package at a time:
rpm -e --nodeps <package-name>
For example:
rpm -e --nodeps java-1.8.0-openjdk-1.8.0.181-7.b13.el7.x86_64
Repeat until every Java package is removed, then check whether anything is left over:
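rpm -qa | grep java
No output means the uninstall is complete.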
Go to /home:
cd /home
Download JDK 1.8:
wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.rpm
Check that the JDK rpm now exists under /home:
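ls /home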
Install the rpm package:
rpm -ivh jdk-8u131-linux-x64.rpm
Verify the installation:
java -version
After installation, the JDK lives under /usr/java.
Extract Hadoop
cd /home/hadoop   (the remaining steps happen in this folder)
tar -xzvf hadoop-3.2.0.tar.gz
After extracting, rename the resulting directory (not the tarball):
mv hadoop-3.2.0 hadoop
Edit the configuration files
cd /home/hadoop/hadoop
cd etc/hadoop/
vi core-site.xml
Copy this block inside the <configuration> element:
<property>
  <name>fs.defaultFS</name>
  <!-- The HDFS address; use the machine's IP -->
  <value>hdfs://10.0.0.198:9000</value>
</property>
<property>
  <name>io.file.buffer.size</name>
  <value>131072</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <!-- Local path for temporary files -->
  <value>file:/home/hadoop/tmp</value>
  <description>A base for other temporary directories.</description>
</property>
<property>
  <name>hadoop.proxyuser.hadoop.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.hadoop.groups</name>
  <value>*</value>
</property>
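To confirm Hadoop actually picks this value up, you can query it back later (once the environment is set up as described below); run from /home/hadoop/hadoop:
bin/hdfs getconf -confKey fs.defaultFS
It should print hdfs://10.0.0.198:9000.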
vi hdfs-site.xml
Likewise, add these inside <configuration>:
<property>
  <name>dfs.namenode.name.dir</name>
  <!-- Local path where the NameNode stores its data -->
  <value>file:/home/hadoop/name</value>
  <description>Determines where on the local filesystem the DFS name node should store the name table. If this is a comma-delimited list of directories then the name table is replicated in all of the directories, for redundancy.</description>
  <final>true</final>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <!-- Local path where the DataNode stores its blocks -->
  <value>file:/home/hadoop/data</value>
  <description>Determines where on the local filesystem a DFS data node should store its blocks. If this is a comma-delimited list of directories, then data will be stored in all named directories, typically on different devices. Directories that do not exist are ignored.</description>
  <final>true</final>
</property>
<property>
  <name>dfs.replication</name>
  <value>1</value>
</property>
<property>
  <name>dfs.permissions</name>
  <value>false</value>
</property>
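The three local paths referenced in core-site.xml and hdfs-site.xml should exist before HDFS starts; HDFS can usually create them itself, but creating them up front avoids permission surprises:
mkdir -p /home/hadoop/tmp /home/hadoop/name /home/hadoop/data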
vi mapred-site.xml
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
vi yarn-site.xml
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
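Malformed XML is a common cause of silent startup failures. If xmllint is available (it ships with libxml2 on most distributions), you can validate all four files at once:
xmllint --noout core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml
No output means the files are well-formed.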
After the XML files are done:
vi hadoop-env.sh
Add the JDK path:
export JAVA_HOME=/usr/java/jdk1.8.0_131
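To check that the Hadoop scripts can now find Java, run from /home/hadoop/hadoop:
bin/hadoop version
It should print the Hadoop version; if JAVA_HOME is wrong, the script errors out instead.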
The start/stop scripts live in the sbin directory (cd /home/hadoop/hadoop/sbin). Since everything here runs as root, declare the run-as users at the top of each script.
vi start-dfs.sh
HDFS_DATANODE_USER=root
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
vi stop-dfs.sh
HDFS_DATANODE_USER=root
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
vi start-yarn.sh
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
vi stop-yarn.sh
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
Once everything is configured, add Hadoop to the environment:
vi /etc/profile
export HADOOP_HOME=/home/hadoop/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
source /etc/profile to make it take effect
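A quick sanity check that the environment took effect:
echo $HADOOP_HOME
hadoop version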
Set up passwordless SSH login:
ssh-keygen -t rsa   (just press Enter through every prompt)
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
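If ssh localhost still prompts for a password, the usual culprit is file permissions; tightening them is harmless either way:
chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
ssh localhost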
Initialize Hadoop (format the NameNode):
bin/hdfs namenode -format
Answer Y at the prompts.
After initialization completes:
cd /home/hadoop/hadoop/sbin
./start-all.sh
When startup finishes, run jps; it should list NameNode, DataNode, SecondaryNameNode, ResourceManager, and NodeManager, which means everything started. The web UI port is 9870.
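As a final smoke test, touch HDFS and open the web UI (the /test path below is just an arbitrary example):
hdfs dfs -mkdir /test
hdfs dfs -ls /
Then browse to http://10.0.0.198:9870 (the IP from core-site.xml) for the NameNode status page.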