Setting Up a Hadoop Environment

1. Basic Hadoop Configuration

1. Download Hadoop and the JDK

Installation packages:

hadoop-3.0.3.tar.gz  
jdk-8u181-linux-x64.tar.gz
[root@server1 ~]# ls
hadoop-3.0.3.tar.gz  jdk-8u181-linux-x64.tar.gz

2. Create a User and Set a Password

[root@server1 ~]# useradd -u 1000 hadoop
[root@server1 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop)
[root@server1 ~]# passwd hadoop

3. Unpack the JDK

#1. Move the archives to the hadoop user's home directory
[root@server1 ~]# mv hadoop-3.0.3.tar.gz jdk-8u181-linux-x64.tar.gz /home/hadoop/
#2. Switch to the hadoop user
[root@server1 ~]# su - hadoop
[hadoop@server1 ~]$ pwd
/home/hadoop
[hadoop@server1 ~]$ ls
hadoop-3.0.3.tar.gz  jdk-8u181-linux-x64.tar.gz
#3. Unpack
[hadoop@server1 ~]$ tar zxf jdk-8u181-linux-x64.tar.gz 
[hadoop@server1 ~]$ ls
hadoop-3.0.3.tar.gz  jdk1.8.0_181  jdk-8u181-linux-x64.tar.gz
#4. Create a symbolic link
[hadoop@server1 ~]$ ln -s jdk1.8.0_181/ java
[hadoop@server1 ~]$ ls
hadoop-3.0.3.tar.gz  java  jdk1.8.0_181  jdk-8u181-linux-x64.tar.gz
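
Before wiring the JDK into Hadoop, a quick sanity check that the symlinked JDK actually runs (not part of the original transcript):

# Prints "java version 1.8.0_181" if the link points at a working JDK
~/java/bin/java -version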

4. Unpack Hadoop

#1. Unpack
[hadoop@server1 ~]$ tar zxf hadoop-3.0.3.tar.gz 
[hadoop@server1 ~]$ ls
hadoop-3.0.3         java          jdk-8u181-linux-x64.tar.gz
hadoop-3.0.3.tar.gz  jdk1.8.0_181
#2. Create a symbolic link
[hadoop@server1 ~]$ ln -s  hadoop-3.0.3 hadoop
[hadoop@server1 ~]$ ls
hadoop        hadoop-3.0.3.tar.gz  jdk1.8.0_181
hadoop-3.0.3  java                 jdk-8u181-linux-x64.tar.gz

5. Edit the Configuration Files

[hadoop@server1 ~]$ cd hadoop
[hadoop@server1 hadoop]$ ls
bin  include  libexec      NOTICE.txt  sbin
etc  lib      LICENSE.txt  README.txt  share
[hadoop@server1 hadoop]$ cd etc/
[hadoop@server1 etc]$ ls
hadoop
[hadoop@server1 etc]$ cd hadoop/
[hadoop@server1 hadoop]$ pwd
/home/hadoop/hadoop/etc/hadoop
[hadoop@server1 hadoop]$ ls

[hadoop@server1 hadoop]$ vim hadoop-env.sh 
########################
export JAVA_HOME=/home/hadoop/java
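
A simple way to confirm that hadoop-env.sh now resolves a working JDK (a hedged check, run from the hadoop user's home directory):

# Prints the Hadoop version; fails with a JAVA_HOME error if the path is wrong
~/hadoop/bin/hadoop version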

[hadoop@server1 hadoop]$ cd ..
[hadoop@server1 etc]$ cd ..
[hadoop@server1 hadoop]$ pwd
/home/hadoop/hadoop
[hadoop@server1 hadoop]$ bin/hadoop jar
RunJar jarFile [mainClass] args...
[hadoop@server1 hadoop]$ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-3.0.3.jar 
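
With no further arguments, the examples jar prints the list of bundled programs (grep, wordcount, pi, and others). Any of them can be run the same way; for instance, a quick pi estimation (an illustrative run, not part of the original walkthrough; it executes locally since YARN is not configured):

# Estimate pi with 2 map tasks and 10 samples per map
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-3.0.3.jar pi 2 10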

# Create a directory
[hadoop@server1 hadoop]$ mkdir input
# Copy files
[hadoop@server1 hadoop]$ cp etc/hadoop/*.xml input/
[hadoop@server1 hadoop]$ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-3.0.3.jar grep input output 'dfs[a-z.]+'

# An output directory was generated
[hadoop@server1 hadoop]$ ls
bin  include  lib      LICENSE.txt  output      sbin
etc  input    libexec  NOTICE.txt   README.txt  share
[hadoop@server1 hadoop]$ cd output/
[hadoop@server1 output]$ ls
part-r-00000  _SUCCESS
[hadoop@server1 output]$ cat *
1	dfsadmin
[hadoop@server1 output]$ cd /home/hadoop/hadoop/etc/hadoop
[hadoop@server1 hadoop]$ vim core-site.xml 
########################
<configuration>
    <property>
          <name>fs.defaultFS</name>
          <value>hdfs://localhost:9000</value>
    </property>
</configuration>

[hadoop@server1 hadoop]$ vim hdfs-site.xml 
########################
<configuration>
    <property>
         <name>dfs.replication</name>
         <value>1</value>
    </property>
</configuration>
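
To read these settings back and confirm they are in effect, hdfs getconf can be used (a quick verification sketch, run from /home/hadoop/hadoop):

# Should print hdfs://localhost:9000 and 1 respectively
bin/hdfs getconf -confKey fs.defaultFS
bin/hdfs getconf -confKey dfs.replication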

Passwordless SSH:

[hadoop@server1 hadoop]$ ssh-keygen

[hadoop@server1 hadoop]$ ssh-copy-id localhost

Test passwordless login:

[hadoop@server1 hadoop]$ ssh localhost
[hadoop@server1 ~]$ exit
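
The ssh-keygen run above prompts interactively; an equivalent non-interactive setup could look like this (a sketch using standard OpenSSH flags):

# Create an RSA key with an empty passphrase, no prompts, then install it
ssh-keygen -q -t rsa -N "" -f ~/.ssh/id_rsa
ssh-copy-id localhost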
[hadoop@server1 hadoop]$ cd /home/hadoop/hadoop
# Format the NameNode
[hadoop@server1 hadoop]$ bin/hdfs namenode -format
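
The format step writes the initial NameNode metadata under hadoop.tmp.dir, which defaults to /tmp/hadoop-${user.name} (as the /tmp listing further below also shows). A quick look, assuming the defaults are unchanged:

# The freshly formatted namespace (fsimage, VERSION) lands here
ls /tmp/hadoop-hadoop/dfs/name/current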

[hadoop@server1 hadoop]$ cd /home/hadoop/hadoop/sbin
# Start HDFS
[hadoop@server1 sbin]$ ./start-dfs.sh 

[hadoop@server1 sbin]$ cd 
# Set environment variables
[hadoop@server1 ~]$ vim .bash_profile 
###################
PATH=$PATH:$HOME/.local/bin:$HOME/bin:$HOME/java/bin
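
This only puts the JDK on PATH; if you would rather call the Hadoop commands without the bin/ prefix too, the line could be extended like this (an optional variation, not from the original transcript):

PATH=$PATH:$HOME/.local/bin:$HOME/bin:$HOME/java/bin:$HOME/hadoop/bin:$HOME/hadoop/sbin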

# Reload the profile
[hadoop@server1 ~]$ source .bash_profile 
# Check the Java processes
[hadoop@server1 ~]$ jps

jps should show the NameNode, DataNode, and SecondaryNameNode processes. In a browser, open 172.25.66.1:9870 (the NameNode web UI).

Upload:

[hadoop@server1 ~]$ cd hadoop
[hadoop@server1 hadoop]$ bin/hdfs dfsadmin -report

# List directories/files (this fails because the user's HDFS home does not exist yet)
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls
ls: `.': No such file or directory
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls /
# Create the directory hierarchy recursively (-p)
[hadoop@server1 hadoop]$ bin/hdfs dfs -mkdir -p /user/hadoop 
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls /
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2019-04-06 10:42 /user

# By default, HDFS data is stored under /tmp
[hadoop@server1 hadoop]$ cd /tmp
[hadoop@server1 tmp]$ ls
hadoop
hadoop-hadoop
hadoop-hadoop-datanode.pid
hadoop-hadoop-namenode.pid
hadoop-hadoop-secondarynamenode.pid
hsperfdata_hadoop
jetty-0.0.0.0-9868-secondary-_-any-1195609709585929106.dir
jetty-0.0.0.0-9870-hdfs-_-any-5933567753048864482.dir
jetty-localhost-34474-datanode-_-any-1275347744145995482.dir
[hadoop@server1 tmp]$ cd -
/home/hadoop/hadoop
# Upload a directory
[hadoop@server1 hadoop]$ bin/hdfs dfs -put input
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2019-04-06 10:43 input

[hadoop@server1 hadoop]$ ls
bin  include  lib      LICENSE.txt  NOTICE.txt  README.txt  share
etc  input    libexec  logs         output      sbin
[hadoop@server1 hadoop]$ rm -rf input/ output/

Download:

[hadoop@server1 hadoop]$ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-3.0.3.jar grep input output 'dfs[a-z.]+'

[hadoop@server1 hadoop]$ ls
bin  include  libexec      logs        README.txt  share
etc  lib      LICENSE.txt  NOTICE.txt  sbin
# With no path, -ls lists the user's HDFS home directory (/user/hadoop)
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls 
Found 2 items
drwxr-xr-x   - hadoop supergroup          0 2019-04-06 10:43 input
drwxr-xr-x   - hadoop supergroup          0 2019-04-06 11:22 output
[hadoop@server1 hadoop]$ bin/hdfs dfs -cat output/*
1	dfsadmin
# Download
[hadoop@server1 hadoop]$ bin/hdfs dfs -get output
[hadoop@server1 hadoop]$ ls
bin  include  libexec      logs        output      sbin
etc  lib      LICENSE.txt  NOTICE.txt  README.txt  share
[hadoop@server1 hadoop]$ cd output/
[hadoop@server1 output]$ ls
part-r-00000  _SUCCESS
[hadoop@server1 output]$ cat *
1	dfsadmin
[hadoop@server1 output]$ cd ..
[hadoop@server1 hadoop]$ ls
bin  include  libexec      logs        output      sbin
etc  lib      LICENSE.txt  NOTICE.txt  README.txt  share
[hadoop@server1 hadoop]$ rm -rf output/

Deleting the local input and output directories removes only the local copies; the corresponding files in HDFS are untouched.
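
Note that MapReduce refuses to write into an existing output directory, so before re-running the same job the HDFS-side output must be removed as well (a sketch using the standard dfs commands):

# Delete the job output inside HDFS (the local rm above does not do this)
bin/hdfs dfs -rm -r output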

2. Distributed Mode

Add the server2 and server3 nodes:

1. Use NFS to Synchronize Data Across the Nodes

[root@server1 ~]# rpm -aq rpcbind
[root@server1 ~]# yum install -y nfs-utils
[root@server1 ~]# rpm -aq rpcbind
rpcbind-0.2.0-38.el7.x86_64
[root@server1 ~]# systemctl start rpcbind
[root@server1 ~]# systemctl status rpcbind

[root@server1 ~]# systemctl start nfs
[root@server1 ~]# vim /etc/exports 
[root@server1 ~]# cat /etc/exports 
/home/hadoop   *(rw,anonuid=1000,anongid=1000)
[root@server1 ~]# exportfs -rv
exporting *:/home/hadoop
[root@server1 ~]# showmount -e
Export list for server1:
/home/hadoop *
[root@server2 ~]# useradd -u 1000 hadoop
[root@server2 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop)
[root@server2 ~]# yum install -y nfs-utils
[root@server2 ~]# systemctl start rpcbind
[root@server2 ~]# mount 172.25.66.1:/home/hadoop /home/hadoop
[root@server2 ~]# df

[root@server2 ~]# su - hadoop
[hadoop@server2 ~]$ ls
hadoop        hadoop-3.0.3.tar.gz  jdk1.8.0_181
hadoop-3.0.3  java                 jdk-8u181-linux-x64.tar.gz
[root@server3 ~]# useradd -u 1000 hadoop
[root@server3 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop)
[root@server3 ~]# yum install -y nfs-utils
[root@server3 ~]# systemctl start rpcbind
[root@server3 ~]# mount 172.25.66.1:/home/hadoop /home/hadoop
[root@server3 ~]# df

[root@server3 ~]# su - hadoop
[hadoop@server3 ~]$ ls
hadoop        hadoop-3.0.3.tar.gz  jdk1.8.0_181
hadoop-3.0.3  java                 jdk-8u181-linux-x64.tar.gz
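
These mount commands are one-off; to have server2 and server3 remount the share automatically after a reboot, an /etc/fstab entry along these lines could be added (a sketch, assuming the same export):

# /etc/fstab on server2 and server3: mount the shared hadoop home at boot
172.25.66.1:/home/hadoop  /home/hadoop  nfs  defaults  0  0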

Test passwordless login (since /home/hadoop is shared over NFS, the same key pair is visible on all three nodes):

[root@server1 ~]# su - hadoop
[hadoop@server1 ~]$ ssh server1
[hadoop@server1 ~]$ exit
[hadoop@server1 ~]$ ssh server2
[hadoop@server1 ~]$ exit
[hadoop@server1 ~]$ ssh server3
[hadoop@server1 ~]$ exit
[hadoop@server1 hadoop]$ pwd
/home/hadoop/hadoop
[hadoop@server1 hadoop]$ sbin/stop-dfs.sh 
Stopping namenodes on [server1]
Stopping datanodes
Stopping secondary namenodes [server1]
[hadoop@server1 ~]$ cd hadoop/etc/hadoop/
[hadoop@server1 hadoop]$ vim core-site.xml 
########################
<configuration>
    <property>
          <name>fs.defaultFS</name>
          <value>hdfs://172.25.66.1:9000</value>
    </property>
</configuration>

[hadoop@server1 hadoop]$ vim hdfs-site.xml 
########################
<configuration>
    <property>
         <name>dfs.replication</name>
         <value>2</value>
    </property>
</configuration>

[hadoop@server2 hadoop]$ vim workers 
[hadoop@server1 hadoop]$ cat workers 
172.25.66.2
172.25.66.3
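
The workers file tells the start scripts which hosts should run DataNodes. Since passwordless SSH is already in place, a small loop can confirm each worker is reachable (an optional sanity check):

# Each line should print the remote hostname without a password prompt
for h in 172.25.66.2 172.25.66.3; do ssh $h hostname; done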
# Delete the old HDFS data
[hadoop@server1 ~]$ rm -rf /tmp/*
[hadoop@server1 ~]$ cd hadoop
# Reformat the NameNode
[hadoop@server1 hadoop]$ bin/hdfs namenode -format

[hadoop@server1 hadoop]$ ls /tmp/
hadoop-hadoop   hadoop-hadoop-namenode.pid  hsperfdata_hadoop  
# Start HDFS
[hadoop@server1 hadoop]$ sbin/start-dfs.sh 
Starting namenodes on [server1]
Starting datanodes
Starting secondary namenodes [server1]

Test:

[hadoop@server1 hadoop]$ jps
19692 SecondaryNameNode
19806 Jps
19471 NameNode
[hadoop@server2 ~]$ jps
13620 DataNode
13705 Jps
[hadoop@server3 ~]$ jps
12761 DataNode
12845 Jps

Note: to switch to distributed mode, you must first delete all Hadoop-related files under /tmp (where the original data is stored) on all three nodes.

[hadoop@server1 hadoop]$ pwd
/home/hadoop/hadoop
[hadoop@server1 hadoop]$ bin/hdfs dfs -mkdir -p /user/hadoop
[hadoop@server1 hadoop]$  bin/hdfs dfs -ls /
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2019-04-06 05:20 /user
[hadoop@server1 hadoop]$  bin/hdfs dfs -ls /user
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2019-04-06 05:22 /user/hadoop
[hadoop@server1 hadoop]$ bin/hdfs dfs -mkdir input
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls 
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2019-04-06 05:22 input
# Upload the configuration files
[hadoop@server1 hadoop]$ bin/hdfs dfs -put etc/hadoop/*.xml input
# Verify the upload
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls input
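
To confirm that both DataNodes registered with the NameNode, the report command from earlier can be run again; it should now list two live nodes:

# Look for "Live datanodes (2)" in the output
bin/hdfs dfsadmin -report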

Refresh the NameNode web UI; the uploaded configuration files are now visible under /user/hadoop/input in the file browser.
