Software environment
Hadoop 2.6.3
JDK 1.8.0_91 (OpenJDK)
Development Tools (via yum groupinstall "Development Tools")
Software installation
#3 machines
#192.168.60.71 Master
#192.168.60.72 Node1
#192.168.60.73 Node2
#Install Hadoop and create the required directories
#These paths must match hadoop.tmp.dir, dfs.namenode.name.dir and dfs.datanode.data.dir in the configs below
mkdir -p /home/hadoop
mkdir -p /home/hadoop/tmp
mkdir -p /home/hadoop/dfs/name
mkdir -p /home/hadoop/dfs/data
cp hadoop-2.6.3.tar.gz /home/hadoop
cp jdk1.8.0_91.tar.gz /home
#Extract the archives
tar -zxvf /home/jdk1.8.0_91.tar.gz -C /home
tar -zxvf /home/hadoop/hadoop-2.6.3.tar.gz -C /home/hadoop
#Set the environment variables
vi /etc/profile
export JAVA_HOME=/home/jdk1.8.0_91
export HADOOP_HOME=/home/hadoop/hadoop-2.6.3
export PATH=$PATH:${HADOOP_HOME}/bin
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
#Apply the changes
source /etc/profile
#Check the Java version
java -version
openjdk version "1.8.0_91"
OpenJDK Runtime Environment (build 1.8.0_91-b14)
OpenJDK 64-Bit Server VM (build 25.91-b14, mixed mode)
#The version prints correctly, so the JDK is installed
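#Optional sanity check (assuming the PATH set above is active): Hadoop should report its version too
hadoop version
#The first line should read: Hadoop 2.6.3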
#Configure the hostname (do this on every machine)
vi /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=master   #use node1 and node2 on the respective machines
:wq
#Edit the hosts file and add the following entries
vi /etc/hosts
192.168.60.71 master
192.168.60.72 node1
192.168.60.73 node2
:wq
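#A quick check (assuming the entries above were saved): every name should resolve and respond
ping -c 1 master
ping -c 1 node1
ping -c 1 node2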
#Set up passwordless SSH login
vi /etc/ssh/sshd_config
RSAAuthentication yes
PubkeyAuthentication yes
:wq
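#Restart sshd so the change takes effect (CentOS 7 syntax)
systemctl restart sshd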
#Generate a key pair; press Enter at every prompt (no passphrase). This creates the .ssh directory under /root. Do this on every server.
ssh-keygen -t rsa
#Merge the public keys into authorized_keys: on the Master server, enter /root/.ssh and pull in each node's key over ssh
cd /root/.ssh
cat id_rsa.pub >> authorized_keys
ssh root@192.168.60.72 cat ~/.ssh/id_rsa.pub >> authorized_keys
ssh root@192.168.60.73 cat ~/.ssh/id_rsa.pub >> authorized_keys
#Copy id_rsa.pub and authorized_keys to the same directory on the other machines
cd ~/.ssh
scp id_rsa.pub authorized_keys root@192.168.60.72:/root/.ssh
scp id_rsa.pub authorized_keys root@192.168.60.73:/root/.ssh
#If passwordless login is not yet in effect, you will be prompted for each machine's password here
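#Verify passwordless login from the Master (the first connection may still ask you to confirm the host key)
ssh node1 hostname   #should print node1 without asking for a password
ssh node2 hostname   #should print node2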
#Set JAVA_HOME in hadoop-env.sh (on every server)
vi /home/hadoop/hadoop-2.6.3/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/home/jdk1.8.0_91
#Configure core-site.xml under /home/hadoop/hadoop-2.6.3/etc/hadoop
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.60.71:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/home/hadoop/tmp</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
</configuration>
#Configure hdfs-site.xml under /home/hadoop/hadoop-2.6.3/etc/hadoop
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>192.168.60.71:9001</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>
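#Note: a stock Hadoop 2.6.x distribution ships only mapred-site.xml.template; if mapred-site.xml is missing, create it from the template first
cd /home/hadoop/hadoop-2.6.3/etc/hadoop
cp mapred-site.xml.template mapred-site.xml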
#Configure mapred-site.xml under /home/hadoop/hadoop-2.6.3/etc/hadoop
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>192.168.60.71:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>192.168.60.71:19888</value>
</property>
</configuration>
#Configure yarn-site.xml under /home/hadoop/hadoop-2.6.3/etc/hadoop
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>192.168.60.71:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>192.168.60.71:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>192.168.60.71:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>192.168.60.71:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>192.168.60.71:8088</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>768</value>
</property>
</configuration>
Set JAVA_HOME in hadoop-env.sh and yarn-env.sh under /home/hadoop/hadoop-2.6.3/etc/hadoop; the daemons will not start without it:
export JAVA_HOME=/home/jdk1.8.0_91
Configure the slaves file under /home/hadoop/hadoop-2.6.3/etc/hadoop: delete the default localhost and add the two worker nodes:
192.168.60.72
192.168.60.73
Copy the configured Hadoop directory to the corresponding location on each node via scp:
scp -r /home/hadoop root@192.168.60.72:/home/
scp -r /home/hadoop root@192.168.60.73:/home/
Start Hadoop on the Master server; the worker nodes will be started automatically. Enter the /home/hadoop/hadoop-2.6.3 directory.
Initialize HDFS first with: bin/hdfs namenode -format
Start everything with sbin/start-all.sh, or separately with sbin/start-dfs.sh and sbin/start-yarn.sh
To stop, run sbin/stop-all.sh
Run jps to list the running daemons
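Typical jps output with this layout (a sketch; process IDs will differ):
#Master: NameNode, SecondaryNameNode, ResourceManager, Jps
#node1/node2: DataNode, NodeManager, Jps
If a daemon is missing, check its log under /home/hadoop/hadoop-2.6.3/logs.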
For web access, open the required ports first or simply disable the firewall:
systemctl stop firewalld.service
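To keep the firewall running instead, open only the ports used above (a sketch for firewalld on CentOS 7; add any other ports from the configs as needed):
firewall-cmd --permanent --add-port=9000/tcp
firewall-cmd --permanent --add-port=50070/tcp
firewall-cmd --permanent --add-port=8088/tcp
firewall-cmd --reload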
Open http://192.168.60.71:8088/ in a browser (YARN ResourceManager)
Open http://192.168.60.71:50070/ in a browser (HDFS NameNode)
Installation is complete. This is only the start of a big-data deployment; the next step is to write programs against Hadoop's APIs for your own use case and put HDFS and MapReduce to work.
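As a first smoke test, the examples jar that ships with Hadoop 2.6.3 can run a wordcount job over HDFS (a sketch; /input and /output are arbitrary example paths):
cd /home/hadoop/hadoop-2.6.3
bin/hdfs dfs -mkdir -p /input
bin/hdfs dfs -put etc/hadoop/core-site.xml /input
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.3.jar wordcount /input /output
bin/hdfs dfs -cat /output/part-r-00000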