hadoop集群安装
使用root用户
- 查看用户
ls -l /home
- 给hadoop用户赋权
vi /etc/sudoers
hadoop ALL=(ALL) ALL # 修改
- 更改主机名
vim /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=slave2
- IP映射
vim /etc/hosts
192.168.233.128 master
192.168.233.129 slave1
192.168.233.130 slave2
- ssh免密
ssh-keygen -t rsa
cd ~/.ssh
ls
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys //追加本机公钥
chmod 600 ~/.ssh/authorized_keys //给600权限
cat authorized_keys //查看slave1、slave2的公钥,并将slave1、slave2的公钥追加到master的authorized_keys
scp ~/.ssh/authorized_keys hadoop@slave1:~/.ssh/
scp ~/.ssh/authorized_keys hadoop@slave2:~/.ssh/
//将汇总后的公钥文件从master复制到slave1、slave2
- 关闭防火墙
service iptables stop // 关闭防火墙服务
chkconfig iptables off // 禁止防火墙开机自启,就不用手动关闭了
- 安装java
----yum安装
sudo yum install java-1.7.0-openjdk java-1.7.0-openjdk-devel
打开文件添加
vim ~/.bashrc
export JAVA_HOME=/usr/lib/jvm/java-1.7.0-openjdk-1.7.0.231.x86_64
文件生效
source ~/.bashrc
检测Java安装是否成功
java -version
----安装包安装
tar -zxvf jdk-7u80-linux-x64.tar.gz //解压
mv jdk1.7.0_80 /usr/java/jdk1.7 //移动并重命名,与下面 JAVA_HOME 保持一致
配置环境变量
vim ~/.bashrc
export JAVA_HOME=/usr/java/jdk1.7
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export JRE_HOME=$JAVA_HOME/jre
source ~/.bashrc
检测Java安装是否成功
java -version
- 安装hadoop
tar -zxvf hadoop-2.7.3.tar.gz //解压
mv hadoop-2.7.3 /usr/local/hadoop //移动并重命名,与下面 HADOOP_HOME 及后续命令路径保持一致
vim ~/.bashrc //环境变量
export HADOOP_HOME=/usr/local/hadoop
export PATH=$HADOOP_HOME/bin:$PATH
source ~/.bashrc
- 在主节点创建文件夹
cd /usr/local/hadoop/
mkdir -p dfs/name
mkdir -p dfs/data
mkdir temp
- 配置hadoop文件
《一》 slaves文件
vim slaves
slave1
slave2
《二》 core-site.xml 文件
vim core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/usr/local/hadoop/temp</value>
<description>Abase for other temporary directories.</description>
</property>
</configuration>
《三》 hdfs-site.xml 文件
vim hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>master:50090</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/local/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/local/hadoop/dfs/data</value>
</property>
</configuration>
《四》mapred-site.xml 文件
vim mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
</configuration>
《五》yarn-site.xml 文件
vim yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
将master节点Hadoop文件夹传输到slave1 slave2
scp -r /usr/local/hadoop slave1:/usr/local/
scp -r /usr/local/hadoop slave2:/usr/local/
初始化(仅在master节点执行一次;hadoop 2.x 中 hadoop namenode 已废弃,改用 hdfs)
hdfs namenode -format