1. Environment

192.168.3.66    namenode    NameNode, ResourceManager

192.168.3.67    datanode1    DataNode, NodeManager

192.168.3.68    datanode2    DataNode, NodeManager

2. Add the user, configure local hosts resolution, and set up passwordless SSH

#Create the hadoop user on all three machines
[root@namenode ~]# useradd hadoop
[root@namenode ~]# echo 'hadoop' |passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.

#Add local hosts entries on all three machines
[root@namenode ~]# tail -3 /etc/hosts
192.168.3.66	namenode
192.168.3.67	datanode1
192.168.3.68	datanode2
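
#Optional sanity check (not part of the original run): confirm that each hostname resolves on every machine
[root@namenode ~]# getent hosts namenode datanode1 datanode2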

#On the NameNode, set up the hadoop user for passwordless SSH to the other nodes; the following commands only need to be run on the NameNode
[root@namenode ~]# su - hadoop
[hadoop@namenode ~]$ ssh-keygen -t rsa -P ''
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa): 
Created directory '/home/hadoop/.ssh'.
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
ef:a8:f2:f7:dc:80:64:5e:2f:ca:5c:f7:d0:ba:7f:68 hadoop@namenode
The key's randomart image is:
+--[ RSA 2048]----+
|                 |
|                 |
|                 |
|                 |
|        S .      |
|       + + . .   |
|        o = + .. |
|    .  o.* = +E .|
|     ooo=.+ ++o. |
+-----------------+
[hadoop@namenode ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@datanode1
The authenticity of host 'datanode1 (192.168.3.67)' can't be established.
RSA key fingerprint is 63:98:1b:10:7c:81:b9:5f:a3:9b:5a:b0:a9:a8:d5:0f.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'datanode1,192.168.3.67' (RSA) to the list of known hosts.
hadoop@datanode1's password: 
Now try logging into the machine, with "ssh 'hadoop@datanode1'", and check in:

  .ssh/authorized_keys

to make sure we haven't added extra keys that you weren't expecting.

[hadoop@namenode ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@datanode2
The authenticity of host 'datanode2 (192.168.3.68)' can't be established.
RSA key fingerprint is fd:99:57:33:5e:07:54:f1:7f:9c:0a:22:40:54:4b:0f.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'datanode2,192.168.3.68' (RSA) to the list of known hosts.
hadoop@datanode2's password: 
Now try logging into the machine, with "ssh 'hadoop@datanode2'", and check in:

  .ssh/authorized_keys

to make sure we haven't added extra keys that you weren't expecting.
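
#Optional check: with the keys in place, the following should log in and run without prompting for a password
[hadoop@namenode ~]$ ssh datanode1 'hostname'
[hadoop@namenode ~]$ ssh datanode2 'hostname'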

3. Install and configure the JDK on all three machines

#Download the JDK
[root@namenode ~]# wget http://download.oracle.com/otn-pub/java/jdk/8u73-b02/jdk-8u73-linux-x64.tar.gz?AuthParam=1459842113_778c86699265fc978efa1e1c999a7c3a
[root@namenode ~]# tar xf jdk-8u73-linux-x64.tar.gz -C /usr/local/

#Create the environment configuration file
[root@namenode ~]# cat /etc/profile.d/java.sh
JAVA_HOME=/usr/local/jdk1.8.0_73
JAVA_BIN=/usr/local/jdk1.8.0_73/bin
JRE_HOME=/usr/local/jdk1.8.0_73/jre
PATH=$PATH:/usr/local/jdk1.8.0_73/bin:/usr/local/jdk1.8.0_73/jre/bin
CLASSPATH=/usr/local/jdk1.8.0_73/jre/lib:/usr/local/jdk1.8.0_73/lib:/usr/local/jdk1.8.0_73/jre/lib/charsets.jar
export JAVA_HOME JAVA_BIN JRE_HOME PATH CLASSPATH

#Load the environment configuration file
[root@namenode ~]# source /etc/profile.d/java.sh

#Verify the result
[root@namenode ~]# which java
/usr/local/jdk1.8.0_73/bin/java
[root@namenode ~]# java -version
java version "1.8.0_73"
Java(TM) SE Runtime Environment (build 1.8.0_73-b02)
Java HotSpot(TM) 64-Bit Server VM (build 25.73-b02, mixed mode)

4. Install and configure Hadoop

#Download Hadoop
[root@namenode ~]# wget http://apache.fayea.com/hadoop/common/hadoop-2.6.4/hadoop-2.6.4.tar.gz

#Extract the archive and set its owner and group
[root@namenode ~]# tar xf hadoop-2.6.4.tar.gz -C /usr/local/
[root@namenode ~]# chown -R hadoop.hadoop /usr/local/hadoop-2.6.4/
[root@namenode ~]# ln -sv /usr/local/hadoop-2.6.4/ /usr/local/hadoop
`/usr/local/hadoop' -> `/usr/local/hadoop-2.6.4/'

#Create the Hadoop environment variable configuration file
[root@namenode ~]# cat /etc/profile.d/hadoop.sh
HADOOP_HOME=/usr/local/hadoop
PATH=$HADOOP_HOME/bin:$PATH
export HADOOP_HOME PATH

#Switch to the hadoop user and check that hadoop is working
[root@namenode ~]# su - hadoop
[hadoop@namenode ~]$ hadoop version
Hadoop 2.6.4
Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git -r 5082c73637530b0b7e115f9625ed7fac69f937e6
Compiled by jenkins on 2016-02-12T09:45Z
Compiled with protoc 2.5.0
From source with checksum 8dee2286ecdbbbc930a6c87b65cbc010
This command was run using /usr/local/hadoop-2.6.4/share/hadoop/common/hadoop-common-2.6.4.jar

#Edit the core-site.xml configuration file
[root@namenode ~]# vim /usr/local/hadoop/etc/hadoop/core-site.xml
<configuration>
        <property>
                <name>fs.default.name</name>
                <value>hdfs://192.168.3.66:8020</value>
                <final>true</final>
        </property>
</configuration>
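
#Note: fs.default.name still works in Hadoop 2.6.4 but is deprecated; the current equivalent property is fs.defaultFS, e.g.:
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://192.168.3.66:8020</value>
        </property>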

#Edit the mapred-site.xml configuration file (created from the shipped template)
[root@namenode ~]# cp /usr/local/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/etc/hadoop/mapred-site.xml
[root@namenode ~]# vim /usr/local/hadoop/etc/hadoop/mapred-site.xml
<configuration>
        <property>
                <name>mapred.job.tracker</name>
                <value>namenode:8021</value>
                <final>true</final>
        </property>
</configuration>
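
#Note: mapred.job.tracker is a Hadoop 1.x (JobTracker) property; on Hadoop 2.x, MapReduce jobs only run on YARN if mapred-site.xml also sets, for example:
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>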

#Edit the hdfs-site.xml configuration file
[root@namenode ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
        <property>
                <name>dfs.replication</name>
                <value>1</value>
        </property>
        <property>
                <name>dfs.data.dir</name>
                <value>/hadoop/data</value>
        </property>
        <property>
                <name>dfs.name.dir</name>
                <value>/hadoop/name</value>        <!-- the /hadoop directory must exist beforehand; it is created below -->
        </property>
</configuration>
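
#Optional check: hdfs getconf reads the effective configuration, so the values above can be confirmed with, e.g.:
[hadoop@namenode ~]$ hdfs getconf -confKey dfs.replication
[hadoop@namenode ~]$ hdfs getconf -confKey dfs.name.dir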

#Edit the slaves configuration file
[root@namenode ~]# vim /usr/local/hadoop/etc/hadoop/slaves
datanode1        #use datanode1 (192.168.3.67) as a data node
datanode2        #use datanode2 (192.168.3.68) as a data node

#Create the /hadoop directory and set the appropriate ownership
[root@namenode ~]# mkdir /hadoop
[root@namenode ~]# chown -R hadoop.hadoop /hadoop

5. Copy the Hadoop directory and its environment variable file to the other nodes

[root@namenode ~]# scp -r /usr/local/hadoop-2.6.4 datanode1:/usr/local/
[root@namenode ~]# scp -r /usr/local/hadoop-2.6.4 datanode2:/usr/local/
[root@namenode ~]# scp /etc/profile.d/hadoop.sh datanode1:/etc/profile.d/
[root@namenode ~]# scp /etc/profile.d/hadoop.sh datanode2:/etc/profile.d/
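
#A minimal scripted alternative to the four copies above (run from namenode, assuming the same hostnames):
for node in datanode1 datanode2; do
    scp -r /usr/local/hadoop-2.6.4 ${node}:/usr/local/
    scp /etc/profile.d/hadoop.sh ${node}:/etc/profile.d/
done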

#On datanode1 and datanode2, create the hadoop symlink and fix ownership
[root@datanode1 local]# ln -sv /usr/local/hadoop-2.6.4/ /usr/local/hadoop
`/usr/local/hadoop' -> `/usr/local/hadoop-2.6.4/'
[root@datanode1 local]# chown -R hadoop.hadoop /usr/local/hadoop/
[root@datanode1 local]# ll /usr/local/ |grep hadoop
lrwxrwxrwx  1 hadoop hadoop   24 Apr  6 10:25 hadoop -> /usr/local/hadoop-2.6.4/
drwxr-xr-x  9 hadoop hadoop 4096 Apr  6 10:16 hadoop-2.6.4

#Verify the result
[root@datanode1 ~]# su - hadoop
[hadoop@datanode1 ~]$ which java
/usr/local/jdk1.8.0_73/bin/java
[hadoop@datanode1 ~]$ hadoop version
Hadoop 2.6.4
Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git -r 5082c73637530b0b7e115f9625ed7fac69f937e6
Compiled by jenkins on 2016-02-12T09:45Z
Compiled with protoc 2.5.0
From source with checksum 8dee2286ecdbbbc930a6c87b65cbc010
This command was run using /usr/local/hadoop-2.6.4/share/hadoop/common/hadoop-common-2.6.4.jar

#Perform the same steps on datanode2
[root@datanode2 ~]# ln -sv /usr/local/hadoop-2.6.4/ /usr/local/hadoop
`/usr/local/hadoop' -> `/usr/local/hadoop-2.6.4/'
[root@datanode2 ~]# chown -R hadoop.hadoop /usr/local/hadoop/
[root@datanode2 ~]# ll /usr/local/ |grep hadoop
lrwxrwxrwx  1 root   root     24 Apr  6 10:28 hadoop -> /usr/local/hadoop-2.6.4/
drwxr-xr-x  9 hadoop hadoop 4096 Apr  6 10:22 hadoop-2.6.4

#Verify the result
[root@datanode2 ~]# su - hadoop
[hadoop@datanode2 ~]$ hadoop version
Hadoop 2.6.4
Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git -r 5082c73637530b0b7e115f9625ed7fac69f937e6
Compiled by jenkins on 2016-02-12T09:45Z
Compiled with protoc 2.5.0
From source with checksum 8dee2286ecdbbbc930a6c87b65cbc010
This command was run using /usr/local/hadoop-2.6.4/share/hadoop/common/hadoop-common-2.6.4.jar

6. Start the cluster

#Format the NameNode to initialize the HDFS metadata (hdfs namenode -format is the preferred form in Hadoop 2.x; the older command below still works)
[root@namenode ~]# su - hadoop
[hadoop@namenode ~]$ hadoop namenode -format

#Run start-all.sh on the namenode to start the cluster
[hadoop@namenode ~]$ /usr/local/hadoop/sbin/start-all.sh

#Check the result
[hadoop@namenode ~]$ jps
7826 ResourceManager
7683 SecondaryNameNode
8136 Jps
7502 NameNode
[hadoop@namenode ~]$ ssh datanode1 'jps'
2899 DataNode
3173 Jps
3001 NodeManager
[hadoop@namenode ~]$ ssh datanode2 'jps'
1457 NodeManager
1362 DataNode
1614 Jps

#Run the hadoop command to view the cluster report
[hadoop@namenode ~]$ hadoop dfsadmin -report
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

16/04/06 12:08:59 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Configured Capacity: 206834581504 (192.63 GB)
Present Capacity: 193137291264 (179.87 GB)
DFS Remaining: 193137238016 (179.87 GB)
DFS Used: 53248 (52 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0

-------------------------------------------------
Live datanodes (2):

Name: 192.168.3.67:50010 (datanode1)
Hostname: datanode1
Decommission Status : Normal
Configured Capacity: 103417290752 (96.31 GB)
DFS Used: 28672 (28 KB)
Non DFS Used: 6848925696 (6.38 GB)
DFS Remaining: 96568336384 (89.94 GB)
DFS Used%: 0.00%
DFS Remaining%: 93.38%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Wed Apr 06 12:08:58 CST 2016


Name: 192.168.3.68:50010 (datanode2)
Hostname: datanode2
Decommission Status : Normal
Configured Capacity: 103417290752 (96.31 GB)
DFS Used: 24576 (24 KB)
Non DFS Used: 6848364544 (6.38 GB)
DFS Remaining: 96568901632 (89.94 GB)
DFS Used%: 0.00%
DFS Remaining%: 93.38%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Wed Apr 06 12:08:58 CST 2016

#Create a directory in HDFS (a relative path like this lands under /user/hadoop)
[hadoop@namenode ~]$ hadoop fs -mkdir -p test
[hadoop@namenode ~]$ hadoop fs -ls
drwxr-xr-x   - hadoop supergroup          0 2016-04-06 12:13 test

#Upload a file to it
[hadoop@namenode ~]$ cat /home/hadoop/test.php 
<?php
	echo 'hello hadoop!!!!!';
?>
[hadoop@namenode ~]$ hadoop fs -put /home/hadoop/test.php test
[hadoop@namenode ~]$ hadoop fs -ls test
-rw-r--r--   1 hadoop supergroup         36 2016-04-06 12:17 test/test.php
[hadoop@namenode ~]$ hadoop fs -cat test/test.php
<?php
	echo 'hello hadoop!!!!!';
?>
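
#Optional round-trip check: fetch the file back from HDFS and compare it with the original
[hadoop@namenode ~]$ hadoop fs -get test/test.php /tmp/test.php.copy
[hadoop@namenode ~]$ diff /home/hadoop/test.php /tmp/test.php.copy    #no output means the two copies match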

7. Install ZooKeeper

[root@namenode ~]# wget http://apache.fayea.com/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz

#Extract under /usr/local and fix ownership
[root@namenode ~]# tar xf zookeeper-3.4.6.tar.gz -C /usr/local/
[root@namenode ~]# chown -R hadoop.hadoop /usr/local/zookeeper-3.4.6/

#Edit the zookeeper configuration file (in server.N=host:28888:38888, the first port is for followers connecting to the leader, the second for leader election)
[root@namenode ~]# cp /usr/local/zookeeper-3.4.6/conf/zoo_sample.cfg /usr/local/zookeeper-3.4.6/conf/zoo.cfg
[root@namenode ~]# egrep -v '^#|^$' /usr/local/zookeeper-3.4.6/conf/zoo.cfg 
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.4.6/tmp
clientPort=2181
maxClientCnxns=60
server.1=namenode:28888:38888
server.2=datanode1:28888:38888
server.3=datanode2:28888:38888

#Create the myid file and add the zookeeper environment variable file
[root@namenode ~]# mkdir /usr/local/zookeeper-3.4.6/tmp
[root@namenode ~]# echo 1 > /usr/local/zookeeper-3.4.6/tmp/myid
[root@namenode ~]# cat /usr/local/zookeeper-3.4.6/tmp/myid
1
[root@namenode ~]# cat /etc/profile.d/zookeeper.sh 
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.6
export PATH=$PATH:$ZOOKEEPER_HOME/bin

#Copy the zookeeper directory and environment variable file to datanode1
[root@namenode ~]# scp -r /usr/local/zookeeper-3.4.6/ datanode1:/usr/local/
[root@namenode ~]# scp /etc/profile.d/zookeeper.sh datanode1:/etc/profile.d/

#Set datanode1's myid to 2
[root@datanode1 ~]# echo 2 >/usr/local/zookeeper-3.4.6/tmp/myid 
[root@datanode1 ~]# cat /usr/local/zookeeper-3.4.6/tmp/myid 
2
[root@datanode1 ~]# chown -R hadoop.hadoop /usr/local/zookeeper-3.4.6/
[root@datanode1 ~]# source /etc/profile.d/zookeeper.sh 

#Copy the zookeeper directory and environment variable file to datanode2
[root@namenode ~]# scp -r /usr/local/zookeeper-3.4.6/ datanode2:/usr/local/
[root@namenode ~]# scp /etc/profile.d/zookeeper.sh datanode2:/etc/profile.d/

#Set datanode2's myid to 3
[root@datanode2 ~]# echo 3 >/usr/local/zookeeper-3.4.6/tmp/myid 
[root@datanode2 ~]# cat /usr/local/zookeeper-3.4.6/tmp/myid 
3
[root@datanode2 ~]# chown -R hadoop.hadoop /usr/local/zookeeper-3.4.6/
[root@datanode2 ~]# source /etc/profile.d/zookeeper.sh
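
#A minimal scripted alternative to the per-node steps above (run as root on namenode, assuming myid 2 and 3 for the two data nodes):
id=2
for node in datanode1 datanode2; do
    scp -r /usr/local/zookeeper-3.4.6/ ${node}:/usr/local/
    scp /etc/profile.d/zookeeper.sh ${node}:/etc/profile.d/
    ssh ${node} "echo ${id} > /usr/local/zookeeper-3.4.6/tmp/myid && chown -R hadoop.hadoop /usr/local/zookeeper-3.4.6/"
    id=$((id+1))
done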

#Start zookeeper on all three machines
[root@namenode ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh start    #start on namenode
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@datanode1 ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh start    #start on datanode1
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@datanode2 ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh start    #start on datanode2
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED

#Check the status on namenode after startup
[root@namenode ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh status
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower

#Check the status on datanode1
[root@datanode1 ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh status
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: leader

#Check the status on datanode2
[root@datanode2 ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh status
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower
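
#Optional liveness check: ZooKeeper answers four-letter commands on the client port; ruok should return imok (assumes nc/netcat is installed)
[root@namenode ~]# echo ruok | nc namenode 2181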

#Stop zookeeper on all three nodes
[root@namenode ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh stop
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Stopping zookeeper ... STOPPED
[root@datanode1 ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh stop
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Stopping zookeeper ... STOPPED
[root@datanode2 ~]# /usr/local/zookeeper-3.4.6/bin/zkServer.sh stop
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Stopping zookeeper ... STOPPED

8. Install HBase

[root@namenode ~]# wget http://apache.fayea.com/hbase/1.2.0/hbase-1.2.0-bin.tar.gz
[root@namenode ~]# tar xf hbase-1.2.0-bin.tar.gz -C /usr/local/
[root@namenode ~]# chown -R hadoop.hadoop /usr/local/hbase-1.2.0/

#Edit HBase's environment file hbase-env.sh (note: JDK 8 removed PermGen, so the PermSize/MaxPermSize options below are ignored with a warning)
[root@namenode ~]# egrep -v '^$|^#' /usr/local/hbase-1.2.0/conf/hbase-env.sh
export HBASE_CLASSPATH=/usr/local/hadoop/etc/hadoop
export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
export HBASE_MANAGES_ZK=false
export JAVA_HOME=/usr/local/jdk1.8.0_73

#Edit the hbase configuration file (hbase.rootdir must use the same host:port as fs.default.name in core-site.xml)
[root@namenode ~]# vim /usr/local/hbase-1.2.0/conf/hbase-site.xml
<configuration>
	<property>
		<name>hbase.rootdir</name>
		<value>hdfs://namenode:8020/hbase</value>
	</property>
	<property>
		<name>hbase.master</name>
		<value>namenode</value>
	</property>
	<property>
		<name>hbase.cluster.distributed</name>
		<value>true</value>
	</property>
	<property>
		<name>hbase.zookeeper.quorum</name>
		<value>namenode,datanode1,datanode2</value>
	</property>
	<property>
		<name>hbase.session.timeout</name>
		<value>60000000</value>
	</property>
	<property>
		<name>dfs.support.append</name>
		<value>true</value>
	</property>
</configuration>

#Add the slave (region server) list to the regionservers file
[root@namenode ~]# cat /usr/local/hbase-1.2.0/conf/regionservers
datanode1
datanode2

#Copy the hbase directory to datanode1 and datanode2
[root@namenode ~]# scp -r /usr/local/hbase-1.2.0/ datanode1:/usr/local/
[root@namenode ~]# scp -r /usr/local/hbase-1.2.0/ datanode2:/usr/local/

#Fix ownership on datanode1
[root@datanode1 ~]# chown -R hadoop.hadoop /usr/local/hbase-1.2.0/

#Fix ownership on datanode2
[root@datanode2 ~]# chown -R hadoop.hadoop /usr/local/hbase-1.2.0/

9. Start the full stack (ZooKeeper, Hadoop, HBase)

#Start the zookeeper cluster
[hadoop@namenode ~]$ /usr/local/zookeeper-3.4.6/bin/zkServer.sh start
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@datanode1 ~]$ /usr/local/zookeeper-3.4.6/bin/zkServer.sh start
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@datanode2 ~]$ /usr/local/zookeeper-3.4.6/bin/zkServer.sh start
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED

#Check the zookeeper cluster status
[hadoop@namenode ~]$ /usr/local/zookeeper-3.4.6/bin/zkServer.sh status
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower
[hadoop@namenode ~]$ ssh datanode1 '/usr/local/zookeeper-3.4.6/bin/zkServer.sh status'
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: leader
[hadoop@namenode ~]$ ssh datanode2 '/usr/local/zookeeper-3.4.6/bin/zkServer.sh status'
JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower

#Start the hadoop cluster (the "Stop it first" message below appears because the DataNode on datanode1 was still running from section 6)
[hadoop@namenode ~]$ /usr/local/hadoop/sbin/start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
16/04/06 16:05:53 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [namenode]
namenode: starting namenode, logging to /usr/local/hadoop-2.6.4/logs/hadoop-hadoop-namenode-namenode.out
datanode1: datanode running as process 2899. Stop it first.
datanode2: starting datanode, logging to /usr/local/hadoop-2.6.4/logs/hadoop-hadoop-datanode-datanode2.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /usr/local/hadoop-2.6.4/logs/hadoop-hadoop-secondarynamenode-namenode.out
16/04/06 16:06:11 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
starting yarn daemons
starting resourcemanager, logging to /usr/local/hadoop-2.6.4/logs/yarn-hadoop-resourcemanager-namenode.out
datanode1: starting nodemanager, logging to /usr/local/hadoop-2.6.4/logs/yarn-hadoop-nodemanager-datanode1.out
datanode2: starting nodemanager, logging to /usr/local/hadoop-2.6.4/logs/yarn-hadoop-nodemanager-datanode2.out

#Check the hadoop cluster status
[hadoop@namenode ~]$ jps
1187 QuorumPeerMain
1397 NameNode
1978 Jps
1580 SecondaryNameNode
1724 ResourceManager
[hadoop@namenode ~]$ ssh datanode1 'jps'
2899 DataNode
3699 Jps
3444 QuorumPeerMain
3576 NodeManager
[hadoop@namenode ~]$ ssh datanode2 'jps'
1168 QuorumPeerMain
1481 Jps
1257 DataNode
1358 NodeManager

[hadoop@namenode ~]$ hadoop dfsadmin -report
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

16/04/06 16:07:36 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Configured Capacity: 206834581504 (192.63 GB)
Present Capacity: 192312418304 (179.10 GB)
DFS Remaining: 192312344576 (179.10 GB)
DFS Used: 73728 (72 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0

-------------------------------------------------
Live datanodes (2):

Name: 192.168.3.67:50010 (datanode1)
Hostname: datanode1
Decommission Status : Normal
Configured Capacity: 103417290752 (96.31 GB)
DFS Used: 28672 (28 KB)
Non DFS Used: 7262519296 (6.76 GB)
DFS Remaining: 96154742784 (89.55 GB)
DFS Used%: 0.00%
DFS Remaining%: 92.98%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Wed Apr 06 16:07:35 CST 2016


Name: 192.168.3.68:50010 (datanode2)
Hostname: datanode2
Decommission Status : Normal
Configured Capacity: 103417290752 (96.31 GB)
DFS Used: 45056 (44 KB)
Non DFS Used: 7259643904 (6.76 GB)
DFS Remaining: 96157601792 (89.55 GB)
DFS Used%: 0.00%
DFS Remaining%: 92.98%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Wed Apr 06 16:07:35 CST 2016

#Start hbase
[hadoop@namenode ~]$ /usr/local/hbase-1.2.0/bin/start-hbase.sh

#Check the status on the namenode
[hadoop@namenode ~]$ jps
1187 QuorumPeerMain        #zookeeper process
1397 NameNode              #hadoop master process
2154 HMaster               #hbase master process
1580 SecondaryNameNode     #hadoop process
1724 ResourceManager       #hadoop process
2415 Jps

#Check datanode1's status
[hadoop@namenode ~]$ ssh datanode1 'jps'
2899 DataNode              #hadoop data node process
3444 QuorumPeerMain        #zookeeper process
5018 Jps
4875 HRegionServer         #hbase region server process

#Check datanode2's status
[hadoop@namenode ~]$ ssh datanode2 'jps'
1168 QuorumPeerMain
3074 Jps
2872 HRegionServer
1257 DataNode
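
#Optional check: once HMaster is up it creates its root directory in HDFS, so /hbase should now exist
[hadoop@namenode ~]$ hadoop fs -ls /hbase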

#Enter the hbase shell to verify
[hadoop@namenode ~]$ /usr/local/hbase-1.2.0/bin/hbase shell
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.2.0, r25b281972df2f5b15c426c8963cbf77dd853a5ad, Thu Feb 18 23:01:49 CST 2016

hbase(main):001:0> list
TABLE
0 row(s) in 0.3220 seconds

=> []
hbase(main):002:0> status
1 active master, 2 backup masters, 1 servers, 0 dead, 2.0000 average load

hbase(main):003:0> quit
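
#Optional smoke test from the same shell (test_table and cf are arbitrary example names): create a table, write one cell, read it back, then clean up
create 'test_table', 'cf'
put 'test_table', 'row1', 'cf:msg', 'hello hbase'
scan 'test_table'
disable 'test_table'
drop 'test_table'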