#--------------------Hive安装配置----------------------
# 上传压缩包到/export/software目录里,并解压安装包
cd /export/software/
tar -zxvf apache-hive-3.1.2-bin.tar.gz -C /export/server
#将解压后的文件夹进行重命名
cd /export/server
mv apache-hive-3.1.2-bin hive-3.1.2
#解决hadoop、hive之间guava版本差异
cd /export/server/hive-3.1.2
rm -rf lib/guava-19.0.jar
cp /export/server/hadoop-3.3.0/share/hadoop/common/lib/guava-27.0-jre.jar ./lib/
#添加mysql的驱动包mysql-connector-java-5.1.47-bin.jar到hive安装包lib/文件下
cd /export/server/hive-3.1.2/lib
# 通过 rz / sftp 等方式上传 mysql-connector-java-5.1.47-bin.jar 到当前 lib 目录
#修改hive环境变量文件 添加Hadoop_HOME
cd /export/server/hive-3.1.2/conf/
mv hive-env.sh.template hive-env.sh
vim hive-env.sh
HADOOP_HOME=/export/server/hadoop-3.3.0
export HIVE_CONF_DIR=/export/server/hive-3.1.2/conf
export HIVE_AUX_JARS_PATH=/export/server/hive-3.1.2/lib
export HADOOP_HEAPSIZE=4096
#在/export/server/hive-3.1.2/conf目录下新增hive-site.xml 配置mysql等相关信息
vim hive-site.xml
<configuration>
<!-- 存储元数据mysql相关配置 -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://node1:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false&amp;useUnicode=true&amp;characterEncoding=UTF-8</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>123456</value>
</property>
<!-- H2S运行绑定host -->
<property>
<name>hive.server2.thrift.bind.host</name>
<value>node1</value>
</property>
<!-- 远程模式部署metastore 服务地址 -->
<property>
<name>hive.metastore.uris</name>
<value>thrift://node1:9083</value>
</property>
<!-- 关闭元数据存储授权 -->
<property>
<name>hive.metastore.event.db.notification.api.auth</name>
<value>false</value>
</property>
<!-- 关闭元数据存储版本的验证 -->
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
</configuration>
#添加环境变量
vim /etc/profile
export HIVE_HOME=/export/server/hive-3.1.2
export PATH=$HIVE_HOME/bin:$PATH
#让环境变量生效
source /etc/profile
#初始化metadata(只需要在配置完Hive之后进行一次操作)
cd /export/server/hive-3.1.2
schematool -initSchema -dbType mysql -verbose
#初始化成功会在mysql中创建74张表
#-----------------Metastore 和 Hiveserver2启动----
nohup /export/server/hive-3.1.2/bin/hive --service metastore 2>&1 &
nohup /export/server/hive-3.1.2/bin/hive --service hiveserver2 2>&1 &
#验证是否安装成功!
在Linux中输入hive命令,直接回车,出现一个终端,在该终端中可以输入sql命令:
show databases;
#如果hive没有安装成功,则需要做以下操作:
1、 检查参数是否有问题
2、 杀死两个 RunJar 进程(即 Metastore 与 Hiveserver2 对应的 Java 进程)
3、 进入mysql删除hive数据库:drop database hive
4、 重新对hive初始化:schematool -initSchema -dbType mysql -verbose
#-----------------Hive自动启动脚本-----------------
#!/bin/bash
# Hive service control script.
#   1 -> start Hadoop, wait for HDFS to leave safe mode, then start
#        Metastore and Hiveserver2
#   2 -> stop Hiveserver2, Metastore and Hadoop
#   3 -> show running Java processes (jps)
echo "1:启动"
echo "2:停止"
echo "3:状态"
# -r: keep backslashes literal in the user's input
read -r -p "请输入你的选择:" input_option
case "$input_option" in
1)
echo "---------启动Hadoop.....--------------."
/export/server/hadoop-3.3.0/sbin/start-all.sh
# Poll until HDFS reports safe mode is OFF; Hive cannot start before that.
while :
do
sleep 1
flag=$(hdfs dfsadmin -safemode get)
if [[ "$flag" == *"OFF"* ]];then
echo "HDFS退出安全模式......."
echo "---------启动Metastore服务.....---------"
nohup /export/server/hive-3.1.2/bin/hive --service metastore 2>&1 &
echo "---------启动Hiveserver2服务.......---------"
nohup /export/server/hive-3.1.2/bin/hive --service hiveserver2 2>&1 &
break
else
echo "HDFS正在安全模式,请稍后...."
fi
done
sleep 1
clear
echo "--------------启动成功!---------------------"
;;
2)
# Find the service PIDs by command-line keyword; may be empty or hold
# several PIDs, so $pid_* is deliberately left unquoted in kill below.
pid_hiveserver2=$(ps -ef | grep hiveserver2 | grep -v grep | awk '{print $2}')
pid_metastore=$(ps -ef | grep metastore | grep -v grep | awk '{print $2}')
if [ "$pid_hiveserver2" != "" ];then
echo "正在关闭Hiveserver2服务......"
kill -9 $pid_hiveserver2
else
echo "Hiveserver2服务未启动,无需关闭......"
fi
if [ "$pid_metastore" != "" ];then
echo "正在关闭Metastore服务......"
kill -9 $pid_metastore
else
echo "Metastore服务未启动,无需关闭......"
fi
echo "正在关闭Hadoop......"
/export/server/hadoop-3.3.0/sbin/stop-all.sh
echo "---------关闭成功!-----------------"
;;
3) jps
;;
*) echo '你没有输入 1 到 3 之间的数字' # input was not 1-3: report and exit
exit
;;
esac
#ps -ef | grep nginx | grep -v grep | awk '{print $2}' | xargs kill -9