添加RPC配置
core-site.xml
[root@node01 hadoop]# cat core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://node01:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/opt/bigdata/hadoop/tmp</value>
</property>
</configuration>
[root@node01 hadoop]# mkdir -p /opt/bigdata/hadoop/tmp
1.5 配置hdfs-site.xml
修改配置文件,添加HDFS相关配置
hdfs-site.xml
[root@node01 hadoop]# cat hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>node02:50090</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/opt/bigdata/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/opt/bigdata/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.checkpoint.dir</name>
<value>file:/opt/bigdata/hadoop/dfs/secondary</value>
</property>
</configuration>
[root@node01 hadoop]# mkdir -p /opt/bigdata/hadoop/dfs/{name,data,secondary}
mapred-site.xml
[root@node01 hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@node01 hadoop]# vim mapred-site.xml
[root@node01 hadoop]# cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
1.8 配置yarn-site.xml
[root@node01 hadoop]# cat yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>node01:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>node01:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>node01:8035</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>node01:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>node01:8088</value>
</property>
</configuration>
HDFS格式化(生成fsimage)
[root@node01 hadoop]# hadoop namenode -format
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.
20/03/11 09:58:18 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = node01/192.168.71.133
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 2.6.5
STARTUP_MSG: classpath = /opt/bigdata/hadoop-2.6.5/etc/hadoop:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/activation-1.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/guava-11.0.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-net-3.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/servlet-api-2.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/httpclient-4.2.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/xz-1.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-cli-1.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/slf4j-api-1.7.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jersey-server-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/stax-api-1.0-2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jersey-json-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/htrace-core-3.0.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/asm-3.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/hadoop-annotations-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-collections-3.2.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/curator-framework-2.6.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-configuration-1.6.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-math3-3.1.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-digester-1.8.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jasper-runtime-5.5.23.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/httpcore-4.2.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/cur
ator-recipes-2.6.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jsr305-1.3.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/junit-4.11.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jettison-1.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jets3t-0.9.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jsp-api-2.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/paranamer-2.3.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-io-2.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/zookeeper-3.4.6.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-el-1.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jersey-core-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jetty-util-6.1.26.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-lang-2.6.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/hadoop-auth-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/avro-1.7.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jsch-0.1.42.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jasper-compiler-5.5.23.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/netty-3.6.2.Final.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/gson-2.2.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-codec-1.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jetty-6.1.26.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-httpclient-3.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-logging-
1.1.3.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/curator-client-2.6.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/xmlenc-0.52.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/hamcrest-core-1.3.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/log4j-1.2.17.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/commons-compress-1.4.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/mockito-all-1.8.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/hadoop-common-2.6.5-tests.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/hadoop-nfs-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/hadoop-common-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/guava-11.0.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/htrace-core-3.0.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/asm-3.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jasper-runtime-5.5.23.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jsr305-1.3.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jsp-api-2.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-io-2.4.jar:/opt/bigdata/hadoop-2.6.5/share/h
adoop/hdfs/lib/commons-daemon-1.0.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-el-1.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-2.6.5-tests.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-nfs-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/activation-1.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/guava-11.0.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/servlet-api-2.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jline-0.9.94.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/javax.inject-1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/xz-1.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/commons-cli-1.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-server-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-json-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop
/yarn/lib/asm-3.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/commons-collections-3.2.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/aopalliance-1.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jsr305-1.3.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jettison-1.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/commons-io-2.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-core-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-client-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/commons-lang-2.6.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/guice-3.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/commons-codec-1.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jetty-6.1.26.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/commons-httpclient-3.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/log4j-1.2.17.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-common-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-applicationhisto
ryservice-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-api-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-tests-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-registry-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-client-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-common-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/javax.inject-1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/xz-1.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/asm-3.2.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/hadoop-annotations-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/junit-4.11.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/guice-3.0.jar:/opt
/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5-tests.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.6.5.jar:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.6.5.jar:/contrib/capacity-scheduler/*.jar:/contrib/capacity-scheduler/*.jar
STARTUP_MSG: build = https://github.com/apache/hadoop.git -r e8c9fe0b4c252caf2ebf1464220599650f119997; compiled by 'sjlee' on 2016-10-02T23:43Z
STARTUP_MSG: java = 1.8.0_112
************************************************************/
20/03/11 09:58:18 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
20/03/11 09:58:18 INFO namenode.NameNode: createNameNode [-format]
Formatting using clusterid: CID-04102693-ae36-486e-a01b-a3df291bc13a
20/03/11 09:58:21 INFO namenode.FSNamesystem: No KeyProvider found.
20/03/11 09:58:21 INFO namenode.FSNamesystem: fsLock is fair:true
20/03/11 09:58:21 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
20/03/11 09:58:21 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
20/03/11 09:58:21 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
20/03/11 09:58:21 INFO blockmanagement.BlockManager: The block deletion will start around 2020 Mar 11 09:58:21
20/03/11 09:58:21 INFO util.GSet: Computing capacity for map BlocksMap
20/03/11 09:58:21 INFO util.GSet: VM type = 64-bit
20/03/11 09:58:21 INFO util.GSet: 2.0% max memory 966.7 MB = 19.3 MB
20/03/11 09:58:21 INFO util.GSet: capacity = 2^21 = 2097152 entries
20/03/11 09:58:21 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
20/03/11 09:58:21 INFO blockmanagement.BlockManager: defaultReplication = 3
20/03/11 09:58:21 INFO blockmanagement.BlockManager: maxReplication = 512
20/03/11 09:58:21 INFO blockmanagement.BlockManager: minReplication = 1
20/03/11 09:58:21 INFO blockmanagement.BlockManager: maxReplicationStreams = 2
20/03/11 09:58:21 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
20/03/11 09:58:21 INFO blockmanagement.BlockManager: encryptDataTransfer = false
20/03/11 09:58:21 INFO blockmanagement.BlockManager: maxNumBlocksToLog = 1000
20/03/11 09:58:21 INFO namenode.FSNamesystem: fsOwner = root (auth:SIMPLE)
20/03/11 09:58:21 INFO namenode.FSNamesystem: supergroup = supergroup
20/03/11 09:58:21 INFO namenode.FSNamesystem: isPermissionEnabled = true
20/03/11 09:58:21 INFO namenode.FSNamesystem: HA Enabled: false
20/03/11 09:58:21 INFO namenode.FSNamesystem: Append Enabled: true
20/03/11 09:58:22 INFO util.GSet: Computing capacity for map INodeMap
20/03/11 09:58:22 INFO util.GSet: VM type = 64-bit
20/03/11 09:58:22 INFO util.GSet: 1.0% max memory 966.7 MB = 9.7 MB
20/03/11 09:58:22 INFO util.GSet: capacity = 2^20 = 1048576 entries
20/03/11 09:58:22 INFO namenode.NameNode: Caching file names occuring more than 10 times
20/03/11 09:58:22 INFO util.GSet: Computing capacity for map cachedBlocks
20/03/11 09:58:22 INFO util.GSet: VM type = 64-bit
20/03/11 09:58:22 INFO util.GSet: 0.25% max memory 966.7 MB = 2.4 MB
20/03/11 09:58:22 INFO util.GSet: capacity = 2^18 = 262144 entries
20/03/11 09:58:22 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
20/03/11 09:58:22 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
20/03/11 09:58:22 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000
20/03/11 09:58:22 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
20/03/11 09:58:22 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
20/03/11 09:58:22 INFO util.GSet: Computing capacity for map NameNodeRetryCache
20/03/11 09:58:22 INFO util.GSet: VM type = 64-bit
20/03/11 09:58:22 INFO util.GSet: 0.029999999329447746% max memory 966.7 MB = 297.0 KB
20/03/11 09:58:22 INFO util.GSet: capacity = 2^15 = 32768 entries
20/03/11 09:58:22 INFO namenode.NNConf: ACLs enabled? false
20/03/11 09:58:22 INFO namenode.NNConf: XAttrs enabled? true
20/03/11 09:58:22 INFO namenode.NNConf: Maximum size of an xattr: 16384
20/03/11 09:58:22 INFO namenode.FSImage: Allocated new BlockPoolId: BP-218129651-192.168.71.133-1583935102537
20/03/11 09:58:22 INFO common.Storage: Storage directory /opt/bigdata/hadoop/dfs/name has been successfully formatted.
20/03/11 09:58:22 INFO namenode.FSImageFormatProtobuf: Saving image file /opt/bigdata/hadoop/dfs/name/current/fsimage.ckpt_0000000000000000000 using no compression
20/03/11 09:58:23 INFO namenode.FSImageFormatProtobuf: Image file /opt/bigdata/hadoop/dfs/name/current/fsimage.ckpt_0000000000000000000 of size 321 bytes saved in 0 seconds.
20/03/11 09:58:23 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
20/03/11 09:58:23 INFO util.ExitUtil: Exiting with status 0
20/03/11 09:58:23 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at node01/192.168.71.133
************************************************************/
出现下列提示即表示格式化成功完成
20/03/11 09:58:22 INFO common.Storage: Storage directory /opt/bigdata/hadoop/dfs/name has been successfully formatted.
[root@node01 ~]# cd /opt/bigdata/hadoop/dfs/name/current/
[root@node01 current]# ls
fsimage_0000000000000000000 fsimage_0000000000000000000.md5 seen_txid VERSION
注意
hadoop namenode -format 做了什么:
1. 创建NameNode元数据目录,并初始化一个空的fsimage
2. 生成VERSION文件,其中记录了集群ID(CID,即上面日志中的clusterid)等信息
2. 启动集群
2.1 启动集群
[root@node01 hadoop]# start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [node01]
node01: starting namenode, logging to /opt/bigdata/hadoop-2.6.5/logs/hadoop-root-namenode-node01.out
node03: starting datanode, logging to /opt/bigdata/hadoop-2.6.5/logs/hadoop-root-datanode-node03.out
node02: starting datanode, logging to /opt/bigdata/hadoop-2.6.5/logs/hadoop-root-datanode-node02.out
node04: starting datanode, logging to /opt/bigdata/hadoop-2.6.5/logs/hadoop-root-datanode-node04.out
node01: starting datanode, logging to /opt/bigdata/hadoop-2.6.5/logs/hadoop-root-datanode-node01.out
Starting secondary namenodes [node02]
node02: starting secondarynamenode, logging to /opt/bigdata/hadoop-2.6.5/logs/hadoop-root-secondarynamenode-node02.out
starting yarn daemons
starting resourcemanager, logging to /opt/bigdata/hadoop-2.6.5/logs/yarn-root-resourcemanager-node01.out
node02: starting nodemanager, logging to /opt/bigdata/hadoop-2.6.5/logs/yarn-root-nodemanager-node02.out
node03: starting nodemanager, logging to /opt/bigdata/hadoop-2.6.5/logs/yarn-root-nodemanager-node03.out
node04: starting nodemanager, logging to /opt/bigdata/hadoop-2.6.5/logs/yarn-root-nodemanager-node04.out
node01: starting nodemanager, logging to /opt/bigdata/hadoop-2.6.5/logs/yarn-root-nodemanager-node01.out
第一次执行start-all.sh时,datanode和secondarynamenode角色会初始化并创建各自的数据目录