Hadoop集群安装


###修改hostname  all node

hostnamectl set-hostname master
hostnamectl set-hostname slave1
hostnamectl set-hostname slave2
hostnamectl set-hostname slave3

##master 到 slave 做无密码认证

# Generate a key pair on the master, then push the public key to each
# slave. ssh-copy-id expects a single destination per invocation — the
# original brace expansion put four hosts on one command line, so only
# the first destination reliably received the key. Loop instead.
ssh-keygen
for i in 51 57 126 131; do   # TODO(review): confirm slave IPs — the /etc/hosts table below disagrees
  ssh-copy-id -i ~/.ssh/id_rsa.pub "root@192.168.x.$i"
done


##ntp 时间认证服务

# Install ntp on every slave, then enable and start the ntpd service.
for host in 192.168.x.{51,57,126,131}; do
  ssh "$host" "yum install ntp -y"
done
for host in 192.168.x.{51,57,126,131}; do
  ssh "$host" "systemctl enable ntpd && systemctl start ntpd"
done

## /etc/hosts all node
# NOTE(review): these IPs must match the real cluster — the namenode log
# later in this document reports master.sxw.com at 192.168.x.51, which
# conflicts with the 192.168.x.19 entry here; verify before use.

192.168.x.19 master.sxw.com master
192.168.x.51 slave1.sxw.com slave1
192.168.x.57 slave2.sxw.com slave2
192.168.x.126 slave3.sxw.com slave3

###FQDN all node

# /etc/sysconfig/network contents — set on every node, with HOSTNAME
# matching that node's FQDN (master.sxw.com, slave1.sxw.com, ...).
vim /etc/sysconfig/network
# Created by anaconda
NOZEROCONF=yes           # fixed typo: was NOZERCONF, which is silently ignored
HOSTNAME=slave3.sxw.com  # per-node value; this example is for slave3


###disabled firewalld  all node

# Disable firewalld permanently, then stop the running instance, on each slave.
for host in 192.168.x.{51,57,126,131}; do
  ssh "$host" "systemctl disable firewalld"
done
for host in 192.168.x.{51,57,126,131}; do
  ssh "$host" "systemctl stop firewalld"
done

 

###查看selinux 的状态

for i in 51 57 126 131;do ssh 192.168.x.$i "sestatus";done


##设置最大文件数  /etc/security/limits.conf 

 * soft nofile 204800
 * hard nofile 204800
 * soft nproc 204800
 * hard nproc 204800

ansible ambari -m shell -a "ulimit -Hn"


###安装java 版本1.8
https://www.oracle.com/technetwork/java/javase/overview/index.html
jdk1.8.0_111.zip

vim /etc/profile 追加以下变量
export JAVA_HOME=/usr/local/java
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

source /etc/profile 

###设置hadoop 用户 所有主机都设置

# Create the hadoop user and group on every host.
# NOTE: on RHEL/CentOS, adduser already creates a per-user "hadoop" group,
# so the explicit groupadd may report "group exists" — harmless.
adduser hadoop
passwd hadoop            # enter the password interactively
groupadd hadoop
usermod -G hadoop hadoop
# sudo privileges via a dedicated admin group. The second usermod -G
# replaces the whole supplementary-group list, which is why both groups
# are listed together (the first usermod above is effectively redundant).
groupadd admin
usermod -G admin,hadoop hadoop
# Edit sudoers with visudo: it syntax-checks before saving, so a typo
# cannot lock you out of sudo (safer than chmod u+w + vi + chmod u-w).
visudo
# locate the line:
#   root    ALL=(ALL) ALL
# and add below it:
#   %admin    ALL=(ALL) ALL

##下载hadoop 
https://hadoop.apache.org/releases.html

wget http://mirror.bit.edu.cn/apache/hadoop/common/hadoop-2.7.7/hadoop-2.7.7.tar.gz 

##设置环境变量

vim /etc/profile 
# set hadoop path
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin


##所有机器上挂载盘

# Prepare the two data disks on every node. Create the filesystems and
# mount points FIRST, then register the mounts in fstab and mount them —
# the original steps wrote fstab before mkfs and never mounted anything.
mkfs.ext4 /dev/sdb
mkfs.ext4 /dev/sdc

mkdir -p /usr/local/hadoop_data1
mkdir -p /usr/local/hadoop_data2

# Append to /etc/fstab so the mounts survive a reboot:
vim /etc/fstab
/dev/sdb              /usr/local/hadoop_data1                      ext4     defaults        0 0
/dev/sdc              /usr/local/hadoop_data2                      ext4     defaults        0 0

# Mount everything listed in fstab now.
mount -a


##设置 配置 core-site.xml

# Inspect core-site.xml — defines the default filesystem and Hadoop's temp dir.
cat /usr/local/hadoop/etc/hadoop/core-site.xml 
<configuration>
    <!-- Base directory for Hadoop's temporary files on the local disk. -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/hadoop/tmp</value>
        <description>Abase for other temporaty directories.</description>
    </property>
    <!-- NameNode RPC endpoint; clients and datanodes all connect here. -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master.sxw.com:9000</value>
    </property>
</configuration>

###hdfs-site.xml

# hdfs-site.xml — replication and storage directories.
cat /usr/local/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
    <!-- One copy of every block on each of the three datanodes. -->
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <!-- Hadoop 2.x property names (dfs.name.dir / dfs.data.dir are the
         deprecated 1.x aliases), with file:// URIs — this silences the
         "Path ... should be specified as a URI" warnings visible in the
         namenode-format log later in this document. -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///usr/local/hadoop_data1,file:///usr/local/hadoop_data2</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///usr/local/hadoop_data1,file:///usr/local/hadoop_data2</value>
    </property>
    <!-- NOTE(review): pointing namenode metadata and datanode blocks at
         the same directories works but is fragile; prefer separate
         subdirectories (e.g. .../namenode and .../datanode). -->
</configuration>


###设置mapred-site.xml 

# mapred-site.xml is shipped as a template; copy it first, then edit.
cp /usr/local/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/etc/hadoop/mapred-site.xml
vim /usr/local/hadoop/etc/hadoop/mapred-site.xml

<configuration>
    <!-- Run MapReduce jobs on YARN instead of the classic JobTracker. -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- mapred.job.tracker is a Hadoop 1.x setting and is ignored when
         the framework is yarn; kept only for reference. Its value is a
         host:port pair — never an http:// URL as originally written. -->
    <property>
        <name>mapred.job.tracker</name>
        <value>master.sxw.com:9001</value>
    </property>
</configuration>


## 设置 yarn-site.xml 

vim /usr/local/hadoop/etc/hadoop/yarn-site.xml
<configuration>

<!-- Site specific YARN configuration properties -->
    <!-- Required for the MapReduce shuffle between map and reduce tasks. -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master.sxw.com</value>
    </property>
    <!-- Total memory (MB) a NodeManager may hand out to containers. -->
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>5120</value>
    </property>
    <!-- NOTE(review): maximum-allocation-mb (6000) exceeds the per-node
         resource.memory-mb (5120), so no container can actually reach
         this ceiling — consider lowering it to 5120. -->
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>6000</value>
    </property>
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>2048</value>
    </property>
    <!-- Fixed typo: was "mapredule.reduce.memory.mb", which no Hadoop
         component reads. This is a MapReduce setting and properly
         belongs in mapred-site.xml (memory per reduce task, MB). -->
    <property>
        <name>mapreduce.reduce.memory.mb</name>
        <value>2048</value>
    </property>
</configuration>

                                    


###配置master 
修改/usr/local/hadoop/etc/hadoop/masters文件,该文件指定namenode节点所在的服务器机器。删除localhost,添加namenode节点的主机名(本例为 master.sxw.com);不建议使用IP地址,因为IP地址可能会变化,但是主机名一般不会变化。

[root@master ~]# vim /usr/local/hadoop/etc/hadoop/masters
## 内容
master.sxw.com 


###配置slaves 

[root@master ~]# vim /usr/local/hadoop/etc/hadoop/slaves 
slave1.sxw.com
slave2.sxw.com
slave3.sxw.com

###配置slave 节点

# Copy the configured Hadoop tree from the master to every slave.
# The original line ("scp -r ... hadoop-slave1、2、3:...") was pseudo-syntax
# with fullwidth commas and is not a runnable command.
for h in slave1.sxw.com slave2.sxw.com slave3.sxw.com; do
  scp -r /usr/local/hadoop "$h":/usr/local/
done
# The slaves file is only consulted on the master; removing it on the
# slave nodes is optional housekeeping (rm -f suffices for a plain file).
for h in slave1.sxw.com slave2.sxw.com slave3.sxw.com; do
  ssh "$h" "rm -f /usr/local/hadoop/etc/hadoop/slaves"
done


###设置JAVA_HOME 环境变量 所有节点执行

# hadoop-env.sh does not reliably inherit JAVA_HOME from /etc/profile when
# daemons are started over ssh, so set it explicitly here — on every node.
vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/local/java

###启动hadoop 集群  master 设置,格式化硬盘

[root@master ~]# hadoop namenode -format
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

19/02/13 17:20:35 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = master.sxw.com/192.168.x.51
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 2.7.7
STARTUP_MSG:   classpath = /usr/local/hadoop/etc/hadoop:/usr/local/hadoop/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/usr/local/hadoop/share/hadoop/common/lib/activation-1.1.jar:/usr/local/hadoop/share/hadoop/common/lib/hadoop-annotations-2.7.7.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-configuration-1.6.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/usr/local/hadoop/share/hadoop/common/lib/xz-1.0.jar:/usr/local/hadoop/share/hadoop/common/lib/junit-4.11.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-httpclient-3.1.jar:/usr/local/hadoop/share/hadoop/common/lib/stax-api-1.0-2.jar:/usr/local/hadoop/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/usr/local/hadoop/share/hadoop/common/lib/hadoop-auth-2.7.7.jar:/usr/local/hadoop/share/hadoop/common/lib/httpclient-4.2.5.jar:/usr/local/hadoop/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/usr/local/hadoop/share/hadoop/common/lib/mockito-all-1.8.5.jar:/usr/local/hadoop/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-logging-1.1.3.jar:/usr/local/hadoop/share/hadoop/common/lib/curator-recipes-2.7.1.jar:/usr/local/hadoop/share/hadoop/common/lib/jetty-sslengine-6.1.26.jar:/usr/local/hadoop/share/hadoop/common/lib/jersey-json-1.9.jar:/usr/local/hadoop/share/hadoop/common/lib/avro-1.7.4.jar:/usr/local/hadoop/share/hadoop/common/lib/log4j-1.2.17.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-cli-1.2.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-digester-1.8.jar:/usr/local/hadoop/share/hadoop/common/lib/servlet-api-2.5.jar:/usr/local/hadoop/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop/share/hadoop/common/lib/xmlenc-0.52.jar:/usr/local/hadoop/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/usr/local/hadoop/share/hadoop/common/lib/jetty-util-6.1.26.jar:/usr/local/hadoop/share/hadoop/common/lib/guava-11.0.2.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-compress-1.4.1.jar:/usr/local/hadoop/share
/hadoop/common/lib/commons-io-2.4.jar:/usr/local/hadoop/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop/share/hadoop/common/lib/jersey-core-1.9.jar:/usr/local/hadoop/share/hadoop/common/lib/jsp-api-2.1.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-codec-1.4.jar:/usr/local/hadoop/share/hadoop/common/lib/netty-3.6.2.Final.jar:/usr/local/hadoop/share/hadoop/common/lib/jetty-6.1.26.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/usr/local/hadoop/share/hadoop/common/lib/jersey-server-1.9.jar:/usr/local/hadoop/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/usr/local/hadoop/share/hadoop/common/lib/paranamer-2.3.jar:/usr/local/hadoop/share/hadoop/common/lib/zookeeper-3.4.6.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-collections-3.2.2.jar:/usr/local/hadoop/share/hadoop/common/lib/jettison-1.1.jar:/usr/local/hadoop/share/hadoop/common/lib/asm-3.2.jar:/usr/local/hadoop/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/usr/local/hadoop/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/usr/local/hadoop/share/hadoop/common/lib/hamcrest-core-1.3.jar:/usr/local/hadoop/share/hadoop/common/lib/jsch-0.1.54.jar:/usr/local/hadoop/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/usr/local/hadoop/share/hadoop/common/lib/curator-framework-2.7.1.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-net-3.1.jar:/usr/local/hadoop/share/hadoop/common/lib/gson-2.2.4.jar:/usr/local/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar:/usr/local/hadoop/share/hadoop/common/lib/jets3t-0.9.0.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-lang-2.6.jar:/usr/local/hadoop/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/usr/local/hadoop/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-math3-3.1.1.jar:/usr/local/hadoop/share/hadoop/common/lib/httpcore-4.2.5.jar:/usr/local/hadoop/share/hadoop/common/lib/htrace-core-3.1.0-incubating.jar:/usr/local/hadoop/s
hare/hadoop/common/lib/jsr305-3.0.0.jar:/usr/local/hadoop/share/hadoop/common/lib/slf4j-api-1.7.10.jar:/usr/local/hadoop/share/hadoop/common/lib/curator-client-2.7.1.jar:/usr/local/hadoop/share/hadoop/common/hadoop-common-2.7.7-tests.jar:/usr/local/hadoop/share/hadoop/common/hadoop-nfs-2.7.7.jar:/usr/local/hadoop/share/hadoop/common/hadoop-common-2.7.7.jar:/usr/local/hadoop/share/hadoop/hdfs:/usr/local/hadoop/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/guava-11.0.2.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/commons-io-2.4.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/asm-3.2.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/leveldbjni-all-1.8.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/htrace-core-3.1.0-incubating.jar:/usr/local/hadoop/share/hadoop/hdfs/lib/jsr305-3.0.0.jar:/usr/local/hadoop/share/hadoop/hdfs/hadoop-hdfs-2.7.7-tests.jar
:/usr/local/hadoop/share/hadoop/hdfs/hadoop-hdfs-2.7.7.jar:/usr/local/hadoop/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/usr/local/hadoop/share/hadoop/yarn/lib/activation-1.1.jar:/usr/local/hadoop/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/usr/local/hadoop/share/hadoop/yarn/lib/aopalliance-1.0.jar:/usr/local/hadoop/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/usr/local/hadoop/share/hadoop/yarn/lib/xz-1.0.jar:/usr/local/hadoop/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/usr/local/hadoop/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jersey-json-1.9.jar:/usr/local/hadoop/share/hadoop/yarn/lib/log4j-1.2.17.jar:/usr/local/hadoop/share/hadoop/yarn/lib/commons-cli-1.2.jar:/usr/local/hadoop/share/hadoop/yarn/lib/servlet-api-2.5.jar:/usr/local/hadoop/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/usr/local/hadoop/share/hadoop/yarn/lib/guava-11.0.2.jar:/usr/local/hadoop/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/usr/local/hadoop/share/hadoop/yarn/lib/commons-io-2.4.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jersey-core-1.9.jar:/usr/local/hadoop/share/hadoop/yarn/lib/commons-codec-1.4.jar:/usr/local/hadoop/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jetty-6.1.26.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jersey-server-1.9.jar:/usr/local/hadoop/share/hadoop/yarn/lib/guice-3.0.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jersey-client-1.9.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/usr/local/hadoop/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/usr/local/hadoop/share/hadoop/yarn/lib/commons-collec
tions-3.2.2.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jettison-1.1.jar:/usr/local/hadoop/share/hadoop/yarn/lib/asm-3.2.jar:/usr/local/hadoop/share/hadoop/yarn/lib/commons-lang-2.6.jar:/usr/local/hadoop/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop/share/hadoop/yarn/lib/javax.inject-1.jar:/usr/local/hadoop/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-server-common-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-client-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-common-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-api-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-registry-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-server-sharedcachemanager-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.7.jar:/usr/local/hadoop/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/xz-1.0.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/junit-4.11.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/commons-compress
-1.4.1.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/guice-3.0.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/asm-3.2.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop/share/hadoop/mapreduce/lib/javax.inject-1.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.7-tests.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.7.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.7.jar:/usr/local/hadoop/contrib/capacity-scheduler/*.jar:/usr/local/hadoop/contrib/capacity-scheduler/*.jar
STARTUP_MSG:   build = Unknown -r c1aad84bd27cd79c3d1a7dd58202a8c3ee1ed3ac; compiled by 'stevel' on 2018-07-18T22:47Z
STARTUP_MSG:   java = 1.8.0_111
************************************************************/
19/02/13 17:20:35 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
19/02/13 17:20:35 INFO namenode.NameNode: createNameNode [-format]
19/02/13 17:20:36 WARN common.Util: Path /usr/local/hadoop_data1 should be specified as a URI in configuration files. Please update hdfs configuration.
19/02/13 17:20:36 WARN common.Util: Path /usr/local/hadoop_data2 should be specified as a URI in configuration files. Please update hdfs configuration.
19/02/13 17:20:36 WARN common.Util: Path /usr/local/hadoop_data1 should be specified as a URI in configuration files. Please update hdfs configuration.
19/02/13 17:20:36 WARN common.Util: Path /usr/local/hadoop_data2 should be specified as a URI in configuration files. Please update hdfs configuration.
Formatting using clusterid: CID-5be40bb7-215e-4371-8817-106d96f182c7
19/02/13 17:20:36 INFO namenode.FSNamesystem: No KeyProvider found.
19/02/13 17:20:36 INFO namenode.FSNamesystem: fsLock is fair: true
19/02/13 17:20:36 INFO namenode.FSNamesystem: Detailed lock hold time metrics enabled: false
19/02/13 17:20:36 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
19/02/13 17:20:36 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
19/02/13 17:20:36 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
19/02/13 17:20:36 INFO blockmanagement.BlockManager: The block deletion will start around 2019 Feb 13 17:20:36
19/02/13 17:20:36 INFO util.GSet: Computing capacity for map BlocksMap
19/02/13 17:20:36 INFO util.GSet: VM type       = 64-bit
19/02/13 17:20:36 INFO util.GSet: 2.0% max memory 889 MB = 17.8 MB
19/02/13 17:20:36 INFO util.GSet: capacity      = 2^21 = 2097152 entries
19/02/13 17:20:36 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
19/02/13 17:20:36 INFO blockmanagement.BlockManager: defaultReplication         = 3
19/02/13 17:20:36 INFO blockmanagement.BlockManager: maxReplication             = 512
19/02/13 17:20:36 INFO blockmanagement.BlockManager: minReplication             = 1
19/02/13 17:20:36 INFO blockmanagement.BlockManager: maxReplicationStreams      = 2
19/02/13 17:20:36 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
19/02/13 17:20:36 INFO blockmanagement.BlockManager: encryptDataTransfer        = false
19/02/13 17:20:36 INFO blockmanagement.BlockManager: maxNumBlocksToLog          = 1000
19/02/13 17:20:36 INFO namenode.FSNamesystem: fsOwner             = root (auth:SIMPLE)
19/02/13 17:20:36 INFO namenode.FSNamesystem: supergroup          = supergroup
19/02/13 17:20:36 INFO namenode.FSNamesystem: isPermissionEnabled = true
19/02/13 17:20:36 INFO namenode.FSNamesystem: HA Enabled: false
19/02/13 17:20:36 INFO namenode.FSNamesystem: Append Enabled: true
19/02/13 17:20:36 INFO util.GSet: Computing capacity for map INodeMap
19/02/13 17:20:36 INFO util.GSet: VM type       = 64-bit
19/02/13 17:20:36 INFO util.GSet: 1.0% max memory 889 MB = 8.9 MB
19/02/13 17:20:36 INFO util.GSet: capacity      = 2^20 = 1048576 entries
19/02/13 17:20:36 INFO namenode.FSDirectory: ACLs enabled? false
19/02/13 17:20:36 INFO namenode.FSDirectory: XAttrs enabled? true
19/02/13 17:20:36 INFO namenode.FSDirectory: Maximum size of an xattr: 16384
19/02/13 17:20:36 INFO namenode.NameNode: Caching file names occuring more than 10 times
19/02/13 17:20:36 INFO util.GSet: Computing capacity for map cachedBlocks
19/02/13 17:20:36 INFO util.GSet: VM type       = 64-bit
19/02/13 17:20:36 INFO util.GSet: 0.25% max memory 889 MB = 2.2 MB
19/02/13 17:20:36 INFO util.GSet: capacity      = 2^18 = 262144 entries
19/02/13 17:20:36 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
19/02/13 17:20:36 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
19/02/13 17:20:36 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension     = 30000
19/02/13 17:20:36 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
19/02/13 17:20:36 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
19/02/13 17:20:36 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
19/02/13 17:20:36 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
19/02/13 17:20:36 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
19/02/13 17:20:36 INFO util.GSet: Computing capacity for map NameNodeRetryCache
19/02/13 17:20:36 INFO util.GSet: VM type       = 64-bit
19/02/13 17:20:36 INFO util.GSet: 0.029999999329447746% max memory 889 MB = 273.1 KB
19/02/13 17:20:36 INFO util.GSet: capacity      = 2^15 = 32768 entries
Re-format filesystem in Storage Directory /usr/local/hadoop_data1 ? (Y or N) Y
Re-format filesystem in Storage Directory /usr/local/hadoop_data2 ? (Y or N) Y
19/02/13 17:20:52 INFO namenode.FSImage: Allocated new BlockPoolId: BP-1615210404-192.168.97.51-1550049652565
19/02/13 17:20:52 INFO common.Storage: Storage directory /usr/local/hadoop_data1 has been successfully formatted.
19/02/13 17:20:52 INFO common.Storage: Storage directory /usr/local/hadoop_data2 has been successfully formatted.
19/02/13 17:20:52 INFO namenode.FSImageFormatProtobuf: Saving image file /usr/local/hadoop_data2/current/fsimage.ckpt_0000000000000000000 using no compression
19/02/13 17:20:52 INFO namenode.FSImageFormatProtobuf: Saving image file /usr/local/hadoop_data1/current/fsimage.ckpt_0000000000000000000 using no compression
19/02/13 17:20:53 INFO namenode.FSImageFormatProtobuf: Image file /usr/local/hadoop_data2/current/fsimage.ckpt_0000000000000000000 of size 321 bytes saved in 0 seconds.
19/02/13 17:20:53 INFO namenode.FSImageFormatProtobuf: Image file /usr/local/hadoop_data1/current/fsimage.ckpt_0000000000000000000 of size 321 bytes saved in 0 seconds.
19/02/13 17:20:53 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
19/02/13 17:20:53 INFO util.ExitUtil: Exiting with status 0
19/02/13 17:20:53 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master.sxw.com/192.168.x.51
************************************************************/
[root@master ~]# /usr/local/hadoop/sbin/start-all.sh 
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master.sxw.com]
master.sxw.com: namenode running as process 4273. Stop it first.
slave1.sxw.com: datanode running as process 3110. Stop it first.
slave2.sxw.com: datanode running as process 2884. Stop it first.
slave3.sxw.com: datanode running as process 2946. Stop it first.
Starting secondary namenodes [0.0.0.0]
0.0.0.0: secondarynamenode running as process 4477. Stop it first.
starting yarn daemons
resourcemanager running as process 3780. Stop it first.
slave1.sxw.com: nodemanager running as process 3219. Stop it first.
slave2.sxw.com: nodemanager running as process 2993. Stop it first.
slave3.sxw.com: nodemanager running as process 3055. Stop it first.


启动 集群

/usr/local/hadoop/sbin/start-all.sh
[root@master ~]# jps
4273 NameNode
3780 ResourceManager
4477 SecondaryNameNode
5837 Jps
[root@master ~]# ssh slave1
[root@slave1 ~]# jps
3219 NodeManager
3110 DataNode
3431 Jps

##查看hdfs

# Show HDFS space usage. Give an explicit path — with no argument the
# command targets the invoking user's HDFS home directory, which may not
# exist on a fresh cluster; -h prints human-readable sizes.
hadoop fs -du -h /


http://192.168.x.51:8088


http://192.168.x.51:50070

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

石兴稳

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值