一、hadoop-2.6.0-CDH5.10.0安装
1.1安装JDK
#tar -zxvf jdk-8u144-linux-x64.tar.gz -C /home/hadoop1/Desktop/jdk
#vim /etc/profile
export JAVA_HOME=/home/hadoop1/Desktop/jdk
export JRE_HOME=/home/hadoop1/Desktop/jdk/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin
#source /etc/profile
验证java环境是否配好
#java -version
1.2角色分配
主机名 |
Ip |
角色 |
master |
192.168.100.110 |
NameNode、ResourceManager、SecondaryNameNode |
slave |
192.168.100.111 |
DataNode、NodeManager |
配置master
修改 /etc/hostname 内容为master (在slave机器上修改为slave)
/etc/hosts 添加内容行
192.168.100.110 master
192.168.100.111 slave
1.3 master上配置免密码登录(若不配置此项在启动hadoop集群时会不断提示输入slave密码)
#cd ~/.ssh/ # 若没有该目录,先执行一次 ssh localhost
#rm ./id_rsa* # 删除之前生成的公钥(如果有)
#ssh-keygen -t rsa # 会有提示,都按回车就可以
#cat ./id_rsa.pub >> ./authorized_keys # 加入授权
让 master 节点需能无密码 SSH 本机,在 master 节点上执行:
#cat ./id_rsa.pub >> ./authorized_keys
接着在 master 节点将上述公钥传输到 slave 节点:
#scp ~/.ssh/id_rsa.pub hadoop1@slave:/home/hadoop1/
接着在 slave 节点上,将 ssh 公钥加入授权:
#mkdir ~/.ssh # 如果不存在该文件夹需先创建,若已存在则忽略
#cat ~/id_rsa.pub >> ~/.ssh/authorized_keys
#rm ~/id_rsa.pub
1.4安装hadoop-cdh
#cd /home/hadoop1/Desktop
# tar -zxvf hadoop-2.6.0-cdh5.10.0.tar.gz -C /home/hadoop1/Desktop
#mv hadoop-2.6.0-cdh5.10.0 hadoop
#vim /etc/profile
export HADOOP_HOME=/home/hadoop1/Desktop/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
#source /etc/profile
cd hadoop/etc/hadoop
# vim hadoop-env.sh
修改JAVA_HOME的值
export JAVA_HOME=/home/hadoop1/Desktop/jdk
# vim slaves
slave
#vim masters
master
1.5.xml文件配置
1.5.1 core-site.xml
<configuration>
<!-- file system properties -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
</configuration>
1.5.2 hdfs-site.xml
/hdfs/namenode、/hdfs/datanode 目录需要自己新建
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/hadoop1/Desktop/hadoop/hdfs/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/hadoop1/Desktop/hadoop/hdfs/datanode</value>
</property>
</configuration>
1.5.3 yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:8025</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:8035</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>master:8050</value>
</property>
</configuration>
1.5.4 mapred-site.xml
<configuration>
<property>
<name>mapreduce.job.tracker</name>
<value>master:5431</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
基本配置项完毕
1.6复制hadoop文件到slave节点
#scp -r hadoop hadoop1@slave:/home/hadoop1/Desktop/
二、 mysql安装
2.1解压
#tar -xvf mysql-5.7.18-1.el7.x86_64.rpm-bundle.tar -C mysql
解压后有如下rpm:
-rw-r--r