Building a Hadoop Platform from Scratch: A Detailed Step-by-Step Guide

Create three virtual machines running CentOS 7.

Basic environment configuration

Note: DHCP is not recommended, because the IP addresses may change.

Configure IP addresses

1.master

[root@master ~]# nmcli connection add ifname ens32 con-name ens32 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.101/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@master ~]# nmcli con up ens32 

2.slave1

[root@slave1 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.102/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave1 ~]# nmcli con up ens33 

3.slave2

[root@slave2 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.103/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave2 ~]# nmcli con up ens33 
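
To confirm that the static addresses took effect, a quick check on each node (interface names as configured above; output omitted):

[root@master ~]# ip addr show ens32 | grep 'inet '
[root@slave1 ~]# ip addr show ens33 | grep 'inet '
[root@slave2 ~]# ip addr show ens33 | grep 'inet '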

Ping baidu.com to verify connectivity

1.master

[root@master ~]# ping baidu.com
PING baidu.com (39.156.66.10) 56(84) bytes of data.
64 bytes from 39.156.66.10 (39.156.66.10): icmp_seq=1 ttl=128 time=28.5 ms
^C
--- baidu.com ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 28.587/28.587/28.587/0.000 ms
[root@master ~]# 

2.slave1

[root@slave1 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=34.5 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=34.9 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 34.512/34.708/34.904/0.196 ms
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=33.0 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=35.2 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 33.035/34.138/35.241/1.103 ms
[root@slave2 ~]# 

Disable the firewall and SELinux

1.master

[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
[root@master ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)
[root@master ~]# 

2.slave1

[root@slave1 ~]# systemctl stop firewalld
[root@slave1 ~]# systemctl disable firewalld
[root@slave1 ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)
[root@slave1 ~]# 
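
Run the same firewalld commands on slave2 as well. The SELinux half of this step is not shown in the transcript; a minimal sketch, run as root on all three nodes, would be:

setenforce 0                                                          # switch SELinux to permissive mode immediately
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # keep it off after the next reboot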

Create the hadoop user
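
The checks below assume the hadoop user already exists on every node and belongs to the wheel group; a minimal sketch of creating it (run as root on master, slave1 and slave2):

useradd -m -G wheel hadoop    # create the user with a home directory and add it to the wheel group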

1.master

[root@master ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@master ~]# 

2.slave1

[root@slave1 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave2 ~]# 

Set the hadoop user's password

1.master

[root@master ~]# echo password|passwd --stdin hadoop 
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@master ~]# 

2.slave1

[root@slave1 ~]# echo 'password' |passwd --stdin  hadoop 
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# echo 'password' |passwd --stdin hadoop 
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave2 ~]# 

Install the JDK

Remove the pre-installed JDK packages

1.master

[root@master ~]# rpm -qa |grep java
java-1.8.0-openjdk-headless-1.8.0.131-11.b12.el7.x86_64
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
java-1.7.0-openjdk-headless-1.7.0.141-2.6.10.5.el7.x86_64
java-1.7.0-openjdk-1.7.0.141-2.6.10.5.el7.x86_64
java-1.8.0-openjdk-1.8.0.131-11.b12.el7.x86_64
python-javapackages-3.4.1-11.el7.noarch
[root@master ~]# rpm -e  --nodeps $(rpm -qa|grep java)
[root@master ~]# rpm -qa |grep java
[root@master ~]# 

2.slave1

[root@slave1 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave1 ~]# rpm -e  --nodeps $(rpm -qa|grep java)
[root@slave1 ~]# rpm -qa |grep java
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave2 ~]# rpm -e  --nodeps $(rpm -qa|grep java)
[root@slave2 ~]# rpm -qa |grep java
[root@slave2 ~]# 

Install the new JDK

Step 1: Extract the installation package
1.master

[root@master software]# tar -zxvf jdk-8u152-linux-x64.tar.gz  -C /usr/local/src/

2.slave1 / 3.slave2

The archive is not extracted on the slaves here; the JDK directory is copied over from master instead, as sketched below.
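
A minimal sketch of that copy (run on master after the rename in Step 2 below; you will be prompted for each slave's root password unless keys have already been exchanged):

[root@master ~]# scp -r /usr/local/src/jdk root@slave1:/usr/local/src/
[root@master ~]# scp -r /usr/local/src/jdk root@slave2:/usr/local/src/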

Step 2: Configure the JDK environment variables
1.master

[root@master src]# ls
jdk1.8.0_152
[root@master src]# mv jdk1.8.0_152/ jdk    // rename it (the versioned name is too long)
[root@master jdk]# vim  /etc/profile
// press G (uppercase) in vim to jump to the end of the file, then append the two lines below
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
[root@master jdk]# source /etc/profile
// reload the environment variables
[root@master jdk]# java -version
java version "1.8.0_152"
Java(TM) SE Runtime Environment (build 1.8.0_152-b16)
Java HotSpot(TM) 64-Bit Server VM (build 25.152-b16, mixed mode)
[root@master jdk]# 

Hadoop

Install the Hadoop package

1.master

[root@master software]# tar -xzf hadoop-2.7.1.tar.gz  -C /usr/local/src/
[root@master software]# cd  /usr/local/src/
[root@master src]# mv hadoop-2.7.1/ hadoop

Update the environment variables

[root@master hadoop]# tail -n 3 /etc/profile
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@master hadoop]# source /etc/profile    // reload the environment variables
[root@master hadoop]# hadoop
Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]
  CLASSNAME            run the class named CLASSNAME
 or
  where COMMAND is one of:
  fs                   run a generic filesystem user client
  version              print the version
  jar <jar>            run a jar file
                       note: please use "yarn jar" to launch
                             YARN applications, not this command.
  checknative [-a|-h]  check native hadoop and compression libraries availability
  distcp <srcurl> <desturl> copy file or directories recursively
  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive
  classpath            prints the class path needed to get the
                       Hadoop jar and the required libraries
  credential           interact with credential providers
  daemonlog            get/set the log level for each daemon
  trace                view and modify Hadoop tracing settings

Most commands print help when invoked w/o parameters.
[root@master hadoop]# 

Give ownership to the hadoop user

[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/
[root@master hadoop]# ll /usr/local/src/
total 0
drwxr-xr-x 9 hadoop hadoop 149 Jun 29  2015 hadoop
drwxr-xr-x 8 hadoop hadoop 255 Sep 14  2017 jdk

Configure hadoop-env.sh

[root@master hadoop]# vi etc/hadoop/hadoop-env.sh 
[root@master hadoop]# cat etc/hadoop/hadoop-env.sh |grep JAVA
# The only required environment variable is JAVA_HOME.  All others are
# set JAVA_HOME in this file, so that it is correctly defined on
export JAVA_HOME=/usr/local/src/jdk

Configure the cluster environment

Hostname resolution

1.master

[root@master hadoop]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master 
192.168.130.102 slave1
192.168.130.103 slave2
[root@master hadoop]# 

2.slave1

[root@slave1 ~]# vim /etc/hosts
[root@slave1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.130.101 master 
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave1 ~]# 

3.slave2

[root@slave2 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.130.101 master 
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave2 ~]# 
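
The prompts throughout this guide assume each machine's hostname has already been set; a minimal sketch of that step (one command per node) is:

hostnamectl set-hostname master    # on the master node
hostnamectl set-hostname slave1    # on slave1
hostnamectl set-hostname slave2    # on slave2
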
Passwordless SSH login

Generate a key pair on each node and append it to that node's own authorized_keys

1.master
[root@master ~]# su - hadoop
Last login: Thu Apr 25 17:45:05 CST 2024 on pts/0
[hadoop@master ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa): 
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:2b+pnJuChp6RkHkx9XYqUgEpyhCf8WHBz3dtE67E9lA hadoop@master
The key's randomart image is:
+---[RSA 2048]----+
|....=+o          |
|...=oo o    E    |
|o.o.+o. o..+ .   |
|.. o +o..=* =    |
|  + o ..S+.= .   |
|   o o .  ...    |
|    o. .    .    |
|    .oo .. o o   |
|   .o.   .*oo    |
+----[SHA256]-----+
[hadoop@master ~]$ 
[hadoop@master ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@master ~]$ ls ~/.ssh/
authorized_keys  id_rsa  id_rsa.pub
[hadoop@master ~]$ chmod 600 ~/.ssh/authorized_keys 
[hadoop@master ~]$ ll ~/.ssh/
total 12
-rw------- 1 hadoop hadoop 3358 Apr 25 18:30 authorized_keys
-rw------- 1 hadoop hadoop 1679 Apr 25 17:45 id_rsa
-rw-r--r-- 1 hadoop hadoop  395 Apr 25 17:45 id_rsa.pub
[root@master .ssh]# cat /etc/ssh/sshd_config  |grep Pub
PubkeyAuthentication yes  // open this file with vim and uncomment this line
[root@master .ssh]# systemctl restart sshd
[root@master .ssh]# 
2.slave1
[root@slave1 ~]# su - hadoop
Last login: Tue Apr  7 15:37:22 CST 2020 on :0
[hadoop@slave1 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa): 
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:H5ouJBZdnawOEbyx1xFU3tKt8NEm5XlmnpzghOtQMcY hadoop@slave1
The key's randomart image is:
+---[RSA 2048]----+
|     ... ++B.   .|
|      + . *E+o =.|
|     . * o ++o=.O|
|    . = o o ++oO+|
|     . +S... .o+.|
|    o . .+o.     |
|   . o  o ..     |
|      ..         |
|       ..        |
+----[SHA256]-----+
[hadoop@slave1 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave1 ~]$ ls ~/.ssh/
authorized_keys  id_rsa  id_rsa.pub
[hadoop@slave1 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave1 ~]# systemctl restart sshd
3.slave2
[root@slave2 ~]# su - hadoop
Last login: Tue Apr  7 15:37:22 CST 2020 on :0
[hadoop@slave2 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa): 
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:Brs8qBwc6izUBbj10eS/AWBZ6Dtxs1EZ8mVc9fM97Yg hadoop@slave2
The key's randomart image is:
+---[RSA 2048]----+
|  .  o*+ .+o.... |
| . o.+o.ooo.    .|
|  o + oo..     ..|
| .   = *o       =|
|  o . = So     .+|
| + o = +  o  . o.|
|o o . =  .  E . .|
|+. o   .         |
|.oo              |
+----[SHA256]-----+
[root@slave2 ~]# su - hadoop
Last login: Thu Apr 25 17:45:29 CST 2024 on pts/0
[hadoop@slave2 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave2 ~]$ ls ~/.ssh/
authorized_keys  id_rsa  id_rsa.pub
[hadoop@slave2 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave2 ~]# vim /etc/ssh/sshd_config 
[root@slave2 ~]# cat /etc/ssh/sshd_config  |grep Pub
PubkeyAuthentication yes
[root@slave2 ~]# systemctl restart sshd
Exchange SSH keys between the nodes
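
The transcript for this step is not included; a minimal sketch using ssh-copy-id (run as the hadoop user, typing the hadoop password once per target, hostnames as defined in /etc/hosts) might look like this:

[hadoop@master ~]$ ssh-copy-id hadoop@slave1
[hadoop@master ~]$ ssh-copy-id hadoop@slave2
[hadoop@slave1 ~]$ ssh-copy-id hadoop@master
[hadoop@slave2 ~]$ ssh-copy-id hadoop@master
[hadoop@master ~]$ ssh slave1 hostname    # should print slave1 without asking for a password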

Hadoop configuration files (all edited on master)

hdfs-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim hdfs-site.xml 


<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/usr/local/src/hadoop/dfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/usr/local/src/hadoop/dfs/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
</configuration>

core-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim core-site.xml 


<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.130.101:9000</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:/usr/local/src/hadoop/tmp</value>
  </property>
</configuration>

mapred-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# cp mapred-site.xml.template  mapred-site.xml
[root@master hadoop]# vim mapred-site.xml

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>master:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>master:19888</value>
  </property>
</configuration>

yarn-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim yarn-site.xml 

<configuration>
<!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>master:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>master:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>master:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>master:8088</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
</configuration>
Other files and directories
[root@master hadoop]# vim masters 
192.168.130.101
[root@master hadoop]# vim slaves 
slave1
slave2
[root@master hadoop]# mkdir /usr/local/src/hadoop/tmp
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/name -p
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/data -p
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/hadoop/
Copy to the other nodes

1.slave1

[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave1:/usr/local/src/
The authenticity of host 'slave1 (192.168.130.102)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * * 

2.slave2

[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave2:/usr/local/src/
The authenticity of host 'slave2 (192.168.130.103)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * *
Configure the environment variables on the other nodes (slave1, slave2)

1.slave1

[root@slave1 .ssh]# tail -n 8 /etc/profile
unset -f pathmunge

# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@slave1 .ssh]# 

2.slave2

[root@slave2 ~]# tail -n 8 /etc/profile

# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

[root@slave2 ~]# 
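
The tails above assume the same jdk and hadoop export lines were appended to /etc/profile on each slave; a minimal sketch of that step (run as root on slave1 and slave2, then reload):

cat >> /etc/profile <<'EOF'
# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
EOF
source /etc/profile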

Start the Hadoop cluster

Format the NameNode (master)
[hadoop@master hadoop]$ bin/hdfs namenode --format
*****
24/04/25 19:41:28 INFO util.ExitUtil: Exiting with status 0
24/04/25 19:41:28 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.130.101
************************************************************/
Start the NameNode (master)
[hadoop@master hadoop]$ hadoop-daemon.sh start namenode
starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
[hadoop@master hadoop]$ jps  // check the running Java processes
4746 NameNode
4782 Jps
[hadoop@master hadoop]$ 
Start the DataNode on slave1
[root@slave1 hadoop]# chown -R  hadoop:hadoop /usr/local/src/
[root@slave1 hadoop]# su - hadoop
[hadoop@slave1 ~]$ source /etc/profile
[hadoop@slave1 src]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
[hadoop@slave1 src]$ jps
4990 Jps
4943 DataNode
[hadoop@slave1 src]$ 

Note:
If you see the following error:

[hadoop@slave1 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
/usr/local/src/hadoop/bin/hdfs: line 304: /usr/local/src/jdk/bin/java: No such file or directory
// then, on master, copy the JDK over with `scp -r jdk/ hadoop@slave1:/usr/local/src/jdk` (run from /usr/local/src)
Start the DataNode on slave2
[hadoop@slave2 hadoop]$ source /etc/profile
[hadoop@slave2 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
[hadoop@slave2 hadoop]$ jps
3598 Jps
3551 DataNode
[hadoop@slave2 hadoop]$ 
Start the SecondaryNameNode (master)
[hadoop@master src]$  hadoop-daemon.sh start secondarynamenode
starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
[hadoop@master src]$ jps
5009 Jps
4746 NameNode
4974 SecondaryNameNode
[hadoop@master src]$ 
Check the HDFS report
[hadoop@master src]$ hdfs dfsadmin -report
Configured Capacity: 94434762752 (87.95 GB)
Present Capacity: 82971066368 (77.27 GB)
DFS Remaining: 82971058176 (77.27 GB)
DFS Used: 8192 (8 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0

-------------------------------------------------
Live datanodes (2):

Name: 192.168.130.103:50010 (slave2)
Hostname: slave2
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5731614720 (5.34 GB)
DFS Remaining: 41485762560 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024


Name: 192.168.130.102:50010 (slave1)
Hostname: slave1
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5732081664 (5.34 GB)
DFS Remaining: 41485295616 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024


[hadoop@master src]$ 
Check node status in a browser
  1. http://master:50070/ (NameNode and DataNode status)
  2. http://master:50090 (SecondaryNameNode status)
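
If no browser is available, or the client machine cannot resolve the hostname master, a quick check from the master node itself (a minimal sketch):

[hadoop@master ~]$ curl -s -o /dev/null -w "%{http_code}\n" http://master:50070/    # 200 means the NameNode web UI is up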
Start all services
[hadoop@master hadoop]$ start-yarn.sh
[hadoop@master hadoop]$ start-dfs.sh
[hadoop@master hadoop]$ jps
34257 NameNode 
34449 SecondaryNameNode 
34494 Jps 
32847 ResourceManager
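
As a final smoke test (not part of the original transcript), the examples jar bundled with Hadoop 2.7.1 can confirm that MapReduce jobs actually run on YARN; the path below assumes the install location used in this guide:

[hadoop@master hadoop]$ hadoop jar /usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar pi 5 10    # should end with "Estimated value of Pi is ..."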
Hive setup

See the follow-up post: 学习hadoop的第三天——hive搭建 (CSDN blog).
