Mysql安装
1. 配置 yum 源
# 挂载光盘 mount /dev/cdrom /media
# 查看挂载点 df
2. 配置 yum 仓库
cd /etc/yum.repos.d
rm -rf CentOS-Base.repo
vi CentOS-Media.repo
[c5-media]
name=CentOS-$releasever - Media
baseurl=file:///media
gpgcheck=0
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-5
3. 安装 Mysql
yum -y install mysql-server*
4. 配置Mysql
# 修改root密码
service mysqld start
/usr/bin/mysqladmin -u root password '123456'
/usr/bin/mysqladmin -u root -h yq01-caipei01.epc.baidu.com password '123456'
# 登陆
mysql -uroot -p123456
# 授权
use mysql
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '123456' WITH GRANT OPTION;
flush privileges;
HiveServer2 HA 安装
1. 解压安装
# 安装
tar -zxvf hive-1.1.0-cdh5.7.0.tar.gz
# 建立软连接
ln -s hive-1.1.0-cdh5.7.0 hive
# 配置环境变量
vim ~/.bash_profile
export HIVE_HOME=/home/caipei01/app/hive
export PATH=$HIVE_HOME/bin:$PATH
source ~/.bash_profile
2. 修改配置
# 修改 hive-env.sh
HADOOP_HOME=/home/caipei01/work/hadoop
# 修改 hive-site.xml
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://xx.xx.xx.xx:3306/hive?createDatabaseIfNotExist=true</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>123456</value>
</property>
<property>
<name>hive.server2.support.dynamic.service.discovery</name>
<value>true</value>
</property>
<property>
<name>hive.server2.zookeeper.namespace</name>
<value>hiveserver2_zk</value>
</property>
<property>
<name>hive.zookeeper.quorum</name>
<value>zkNode1:2181,zkNode2:2181,zkNode3:2181</value>
</property>
<property>
<name>hive.zookeeper.client.port</name>
<value>2181</value>
</property>
<property>
<name>hive.server2.thrift.bind.host</name>
<value>0.0.0.0</value>
</property>
<property>
<name>hive.server2.thrift.port</name>
<value>10001</value> <!-- 两个HiveServer2实例的端口号要一致 -->
</property>
</configuration>
3. 启动测试
# 在所有hive 节点上启动服务
cd $HIVE_HOME/bin
./hiveserver2
# 查看zk 是否注册上
./zkCli.sh -server localhost:2181
ls /hiveserver2_zk
# 可以看到两个实例都已经注册上去了
4. JDBC 连接
# JDBC连接的URL格式为:
jdbc:hive2://<zookeeper quorum>/<dbName>;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
# 其中:
<zookeeper quorum> 为Zookeeper的集群链接串,如zkNode1:2181,zkNode2:2181,zkNode3:2181
<dbName> 为Hive数据库,默认为default
serviceDiscoveryMode=zooKeeper 指定模式为zooKeeper
zooKeeperNamespace=hiveserver2 指定ZK中的nameSpace,即参数hive.server2.zookeeper.namespace所定义,我定义为hiveserver2_zk
# 启动
cd $HIVE_HOME/bin
./beeline
!connect jdbc:hive2://zkNode1:2181,zkNode2:2181,zkNode3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2_zk user ""
5. Hadoop用户权限认证
# 在启动beeline链接时候报错
Error: Failed to open new session:
java.lang.RuntimeException: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.authorize.AuthorizationException):
User: user is not allowed to impersonate user (state=,code=0)
# 这是由于Hadoop2中的用户权限认证导致的
# 在hadoop core-site.xml 中增加配置, 注意user 为启动HiveServer2的用户
<property>
<name>hadoop.proxyuser.user.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.user.hosts</name>
<value>*</value>
</property>
# 使用超级用户hadoop刷新配置:
yarn rmadmin -refreshSuperUserGroupsConfiguration
hdfs dfsadmin -refreshSuperUserGroupsConfiguration
# 如果是对namenode做过HA,则需要在主备namenode上执行:
hdfs dfsadmin -fs hdfs://cdh5 -refreshSuperUserGroupsConfiguration
6. HiveServer2 权限认证
HiveServer2提供了JDBC链接操作Hive的功能,非常实用,但如果在使用HiveServer2时候,不注意安全控制,将非常危险,因为任何人都可以作为超级用户来操作Hive及HDFS数据。
因此,如果使用HiveServer2来提供给用户来链接Hive,必须启用安全认证,也就是hive.server2.authentication的配置。
目前HiveServer2支持多种用户安全认证方式:NONE,NOSASL, KERBEROS, LDAP, PAM ,CUSTOM等等。
本文介绍使用自定义的用户认证方式,即CUSTOM;
如果将hive.server2.authentication设置成CUSTOM,则需要设置
hive.server2.custom.authentication.class来指定用于权限认证的类,这个类需要实现
org.apache.hive.service.auth.PasswdAuthenticationProvider接口。
我们将使用HiveServer2的用户名和密码保存起来,其中,密码以32位小写md5加密来保存,这个数据既可以保存在Hive元数据库中,也可以保存在一个配置文件中。为了方便起见,这里使用配置文件来保存。
1.编写用户权限验证的类
package com.hive.auth;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.security.sasl.AuthenticationException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.service.auth.PasswdAuthenticationProvider;
/**
 * Custom HiveServer2 password authenticator (hive.server2.authentication=CUSTOM).
 *
 * <p>Checks credentials against a flat file named by the Hive configuration
 * property {@code hive.server2.custom.authentication.file}. Each line of that
 * file is {@code username,md5hex(password)} with the digest in 32-char
 * lowercase hex; malformed lines are skipped.
 */
public class CustomHiveServer2Auth implements PasswdAuthenticationProvider {

    /**
     * Authenticates {@code username}/{@code password} against the credential file.
     *
     * <p>Note: the capitalized method name is mandated by the
     * {@code PasswdAuthenticationProvider} interface.
     *
     * @throws AuthenticationException if the credential file is not configured,
     *         cannot be read, or contains no matching user/password entry
     */
    @Override
    public void Authenticate(String username, String password)
            throws AuthenticationException {
        // Hash the supplied password the same way the file stores it.
        String passMd5 = new MD5().md5(password);
        HiveConf hiveConf = new HiveConf();
        Configuration conf = new Configuration(hiveConf);
        String filePath = conf.get("hive.server2.custom.authentication.file");
        System.out.println("hive.server2.custom.authentication.file [" + filePath + "] ..");
        // Fail with a clear auth error instead of an NPE when the property is unset.
        if (filePath == null || filePath.trim().isEmpty()) {
            throw new AuthenticationException(
                    "hive.server2.custom.authentication.file is not set");
        }
        boolean ok = false;
        // try-with-resources closes the reader exactly once even on error
        // (the original closed it twice and printed the stack trace before rethrowing).
        // Explicit UTF-8 keeps parsing identical on every platform.
        try (BufferedReader reader =
                Files.newBufferedReader(Paths.get(filePath), StandardCharsets.UTF_8)) {
            String line;
            while ((line = reader.readLine()) != null) {
                String[] fields = line.split(",", -1);
                // Skip malformed lines rather than failing the whole login.
                if (fields.length != 2) {
                    continue;
                }
                if (fields[0].equals(username) && fields[1].equals(passMd5)) {
                    ok = true;
                    break;
                }
            }
        } catch (IOException e) {
            // Preserve the cause so the real I/O problem appears in the server log.
            throw new AuthenticationException(
                    "read auth config file error, [" + filePath + "] ..", e);
        }
        if (ok) {
            System.out.println("user [" + username + "] auth check ok .. ");
        } else {
            System.out.println("user [" + username + "] auth check fail .. ");
            throw new AuthenticationException("user [" + username + "] auth check fail .. ");
        }
    }

    // MD5 helper. Static nested class: it needs no reference to the enclosing
    // authenticator instance, so don't carry the hidden inner-class pointer.
    static class MD5 {
        private static final char[] HEX_DIGITS =
                {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
        private final MessageDigest digest;

        MD5() {
            try {
                digest = MessageDigest.getInstance("MD5");
            } catch (NoSuchAlgorithmException e) {
                // Every compliant JDK ships MD5; this is effectively unreachable.
                throw new RuntimeException(e);
            }
        }

        /**
         * Returns the 32-char lowercase hex MD5 of {@code str}.
         *
         * <p>Encodes via UTF-8 so the hash is identical on every platform (the
         * original used the platform default charset; identical for ASCII input).
         */
        public String md5(String str) {
            byte[] input = str.getBytes(StandardCharsets.UTF_8);
            digest.reset();
            byte[] md = digest.digest(input);
            // Render each byte as two hex nibbles.
            char[] out = new char[md.length * 2];
            int k = 0;
            for (byte b : md) {
                out[k++] = HEX_DIGITS[b >>> 4 & 0xf];
                out[k++] = HEX_DIGITS[b & 0xf];
            }
            return new String(out);
        }
    }
}
2. 将上面的程序打包成HiveServer2Auth.jar,放到$HIVE_HOME/lib下。
3. 在hive-site.xml中设置以下参数
<property>
<name>hive.server2.authentication</name>
<value>CUSTOM</value>
</property>
<property>
<name>hive.server2.custom.authentication.class</name>
<value>com.hive.auth.CustomHiveServer2Auth</value>
</property>
<property>
<name>hive.server2.custom.authentication.file</name>
<value>/home/caipei01/app/hive/conf/hive.server2.users.conf</value>
</property>
4. 在$HIVE_HOME/conf下新建文件hive.server2.users.conf,里面写入内容:
cat hive.server2.users.conf
user1,48d9a656690e1b1bf5ddee4c12d1bbd7
user,5f4dcc3b5aa765d61d8327deb882cf99
# 其中,48d9a656690e1b1bf5ddee4c12d1bbd7为user1_password的md5加密.
5. 用户名密码启动验证
cd $HIVE_HOME/bin
./beeline
!connect jdbc:hive2://zkNode1:2181,zkNode2:2181,zkNode3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2_zk user password
至此,HiveServer2的多实例高可用-Ha配置完成,的确能解决生产中的很多问题,比如:并发、负载均衡、单点故障、安全等等。