-JDK
rpm -ivh jdk-8u191-linux-x64.rpm
vi /etc/profile
export JAVA_HOME=/usr/java/default
export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
export PATH=$JAVA_HOME/bin:$PATH
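After saving /etc/profile, reload it and confirm the JDK is picked up:
source /etc/profile
java -version    # should report java version "1.8.0_191"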
-SSH passwordless login
ssh-keygen
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
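The key files need restrictive permissions, and the login should be tested (assuming master resolves via /etc/hosts):
chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
ssh master    # should log in without a password prompt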
Upload hadoop-2.6.5.tar.gz
scp -rp ./hadoop-2.6.5.tar.gz master:/usr/local/src
tar -xzvf hadoop-2.6.5.tar.gz
vi /etc/profile
export HADOOP_HOME=/usr/local/src/hadoop-2.6.5
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
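Reload the profile and check that the hadoop command resolves:
source /etc/profile
hadoop version    # should print Hadoop 2.6.5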
-- (Version 3.0 also requires the daemon user roles to be configured in hadoop-env.sh)
vi /usr/local/src/hadoop-2.6.5/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/java/default
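For the 3.0 note above, a minimal sketch of the daemon user variables, assuming the daemons are started as root (adjust to the actual user):
export HDFS_NAMENODE_USER=root            # assumption: HDFS daemons run as root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root     # only needed if YARN is started as well
export YARN_NODEMANAGER_USER=root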
core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/src/hadoop-2.6.5/tmp</value>
</property>
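Both <property> blocks go inside the <configuration> element of core-site.xml; the value can be double-checked afterwards:
hdfs getconf -confKey fs.defaultFS    # expect hdfs://master:9000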
hdfs-site.xml
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- Note: the default port numbers differ in 3.0 (SecondaryNameNode HTTP moves from 50090 to 9868) -->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>slave1:50090</value>
</property>
Edit the slaves file
-- In version 3.0 the file is named workers
vim /usr/local/src/hadoop-2.6.5/etc/hadoop/slaves
# add the 3 datanode hostnames here
slave1
slave2
slave3
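All hostnames used here are assumed to resolve on every node; a hypothetical /etc/hosts sketch (the IPs are placeholders):
# /etc/hosts on every node -- IP addresses are illustrative only
192.168.1.10  master
192.168.1.11  slave1
192.168.1.12  slave2
192.168.1.13  slave3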
Cluster setup
scp -r ~/.ssh/authorized_keys slave1:~/.ssh/authorized_keys
scp -r ~/.ssh/authorized_keys slave2:~/.ssh/authorized_keys
scp -r ~/.ssh/authorized_keys slave3:~/.ssh/authorized_keys
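Passwordless login to each slave can be verified before copying anything else over:
ssh slave1 hostname
ssh slave2 hostname
ssh slave3 hostname    # each should return the hostname without a password prompt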
scp -rp hadoop-2.6.5 slave1:/usr/local/src/
scp -rp hadoop-2.6.5 slave2:/usr/local/src/
scp -rp hadoop-2.6.5 slave3:/usr/local/src/
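The slaves also need the JDK and the same environment variables before their DataNodes can start; a sketch for slave1, repeated for slave2 and slave3 (assumes the same /etc/profile is safe to reuse):
scp jdk-8u191-linux-x64.rpm slave1:/usr/local/src/
ssh slave1 "rpm -ivh /usr/local/src/jdk-8u191-linux-x64.rpm"
scp /etc/profile slave1:/etc/profile    # assumption: identical profile on all nodes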
Start HDFS
hdfs namenode -format    -- every run generates a new clusterId, so format only once; reformatting leaves the DataNodes with a mismatched clusterId
start-dfs.sh
jps
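With the configuration above, jps should show roughly the following (master is not listed in slaves, so it runs only the NameNode):
# master:          NameNode
# slave1:          DataNode, SecondaryNameNode
# slave2 / slave3: DataNode
# NameNode web UI (2.x): http://master:50070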
hdfs dfs -mkdir /data
hadoop fs -mkdir /data    -- equivalent to the previous command
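A quick smoke test of the new directory:
hdfs dfs -put /etc/hosts /data    # upload a small test file
hdfs dfs -ls /data                # the file should be listed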