0、下载 CDH 组件环境
http://archive.cloudera.com/cdh5/cdh/5/jdk-7u79-linux-x64.tar.gz
http://archive.cloudera.com/cdh5/cdh/5/hadoop-2.5.0-cdh5.3.6.tar.gz
http://archive.cloudera.com/cdh5/cdh/5/hive-0.13.1-cdh5.3.6.tar.gz
http://archive.cloudera.com/cdh5/cdh/5/zookeeper-3.4.5-cdh5.3.6.tar.gz
http://archive.cloudera.com/cdh5/cdh/5/hbase-0.98.6-cdh5.3.6.tar.gz
http://archive.cloudera.com/cdh5/cdh/5/sqoop-1.4.5-cdh5.3.6.tar.gz
http://archive.cloudera.com/cdh5/cdh/5/flume-ng-1.5.0-cdh5.3.6.tar.gz
https://mvnrepository.com/artifact/mysql/mysql-connector-java/5.1.27
1、/etc/selinux/config -> SELINUX=disabled
/etc/hosts
192.168.198.131 lining05
192.168.198.132 lining06
192.168.198.133 lining07
/etc/sysconfig/network
HOSTNAME=lining05
/etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE="eth0"
BOOTPROTO="static"
NM_CONTROLLED="yes"
ONBOOT="yes"
TYPE="Ethernet"
UUID="2dc126cb-ef2a-412e-a373-45fbe1829354"
IPADDR=192.168.198.131
GATEWAY=192.168.198.2
NETMASK=255.255.255.0
DNS1=192.168.198.2
DNS2=114.114.114.114
DNS3=8.8.8.8
2、usr下建java文件夹 并解压Java1.7
tar -zxvf jdk-7u79-linux-x64.tar.gz
opt下建/modules/cdh 并解压hadoop
tar -zxvf hadoop-2.5.0-cdh5.3.6.tar.gz
将网上下载的下载hadoop-native-64-2.5.0,上传至
/opt/modules/cdh/hadoop-2.5.0-cdh5.3.6/lib
3、/etc/profile
export JAVA_HOME=/usr/java/jdk1.7.0_79
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin
export CLASSPATH=.:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
export HADOOP_HOME=/opt/modules/cdh/hadoop-2.5.0-cdh5.3.6
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
4、在/opt/modules/cdh/hadoop-2.5.0-cdh5.3.6下新建
tmp var hdfs hdfs/data hdfs/name 几个文件夹
5、core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://lining05:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:///opt/modules/cdh/hadoop-2.5.0-cdh5.3.6/tmp</value>
</property>
</configuration>
6、hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///opt/modules/cdh/hadoop-2.5.0-cdh5.3.6/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///opt/modules/cdh/hadoop-2.5.0-cdh5.3.6/hdfs/data</value>
</property>
<property>
<name>dfs.namenode.http-address</name>
<value>192.168.198.131:50070</value>
</property>
</configuration>
7、mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>lining05:10020</value>
</property>
</configuration>
8、yarn-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>lining05</value>
</property>
</configuration>
9、hadoop-env.sh yarn-env.sh
export JAVA_HOME=/usr/java/jdk1.7.0_79
mapred-env.sh
export JAVA_HOME=/usr/java/jdk1.7.0_79
export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=2000
export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
10、slaves
lining05
lining06
lining07
11、配置SSH /etc/ssh/sshd_config
RSAAuthentication yes
PubkeyAuthentication yes
AuthorizedKeysFile .ssh/authorized_keys
12、生成本机密钥 ssh-keygen -t rsa -P ''
将 id_rsa.pub -> 重命名为 authorized_keys
13、关闭防火墙
systemctl disable firewalld.service
systemctl stop firewalld
service iptables stop
14、克隆lining06、lining07两台slaver 按上述步骤修改必要的配置
15、把master的pub 追加到 每个slaver 的 /root/.ssh/authorized_keys
把每个slaver的pub 追加到 master的 /root/.ssh/authorized_keys
测试 ssh lining06
16、hadoop namenode -format
17、/opt/modules/cdh/hadoop-2.5.0-cdh5.3.6/sbin/start-all.sh
18、hdfs dfs -mkdir /test
hdfs dfs -put /opt/modules/cdh/
19、cd /opt/modules/cdh
tar -zxvf hive-0.13.1-cdh5.3.6.tar.gz
在/opt/modules/cdh/hive-0.13.1-cdh5.3.6/lib下
上传mysql-connector-java-5.1.27.jar文件
20、hdfs dfs -mkdir -p /user/hive/warehouse
hdfs dfs -chmod g+w /user/hive/warehouse
hdfs dfs -mkdir -p /tmp
hdfs dfs -chmod g+w /tmp
mkdir /opt/modules/cdh/hive-0.13.1-cdh5.3.6/logs
21、hive-env.sh (去掉.template)
HADOOP_HOME=/opt/modules/cdh/hadoop-2.5.0-cdh5.3.6
export HIVE_CONF_DIR=/opt/modules/cdh/hive-0.13.1-cdh5.3.6/conf
22、hive-log4j.properties (去掉.template)
hive.log.dir=/opt/modules/cdh/hive-0.13.1-cdh5.3.6/logs
23、hive-site.xml
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://bj-cdb-2bbtu1h5.sql.tencentcdb.com:63324/hive?createDatabaseIfNotExist=true&amp;useUnicode=true&amp;characterEncoding=UTF-8&amp;useSSL=false</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>001vanni</value>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/opt/modules/cdh/hive-0.13.1-cdh5.3.6/warehouse</value>
</property>
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
<property>
<name>system:java.io.tmpdir</name>
<value>/opt/modules/cdh/hive-0.13.1-cdh5.3.6/data/tmp/</value>
</property>
<property>
<name>system:user.name</name>
<value>root</value>
</property>
</configuration>
24、/etc/profile ->
export HIVE_HOME=/opt/modules/cdh/hive-0.13.1-cdh5.3.6
export PATH=$PATH:$HIVE_HOME/bin:$HIVE_HOME/lib
25、测试:hive -> show databases;
26、create database if not exists lining_test;
27、service iptables stop 关闭防火墙
打开http://192.168.198.131:50070
28、use lining_test;(转HIVE实验笔记)