1.安装mysql 作为hive的meta store
2.上传hive的安装包
sudo mkdir /usr/local/src/hive
sudo chown -R ucmed:ucmed /usr/local/src/hive
cp /tmp/apache-hive-3.1.2-bin.tar.gz /usr/local/src/hive
cd /usr/local/src/hive
tar -xzvf apache-hive-3.1.2-bin.tar.gz
3.修改配置文件
cd /usr/local/src/hive/apache-hive-3.1.2-bin/conf
cp hive-exec-log4j2.properties.template hive-exec-log4j2.properties
cp hive-log4j2.properties.template hive-log4j2.properties
cp hive-default.xml.template hive-default.xml
cp hive-env.sh.template hive-env.sh
vim hive-env.sh
export HADOOP_HOME=/usr/local/src/hadoop/hadoop-3.3.1/
export HIVE_CONF_DIR=/usr/local/src/hive/apache-hive-3.1.2-bin/conf/
vim hive-site.xml
<configuration>
<!-- 记录HIve中的元数据信息 记录在mysql中 -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<!-- jdbc mysql驱动 -->
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<!-- mysql的用户名和密码 -->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<!-- NOTE: 不要在文档/仓库中保存真实密码, 建议此处使用占位符, 部署时再替换为实际密码 -->
<value>WY@KW9OQpj5Spztd</value>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>/user/hive/tmp</value>
</property>
<!-- 日志目录 -->
<property>
<name>hive.querylog.location</name>
<value>/user/hive/log</value>
</property>
<!-- 设置metastore的节点信息 -->
<property>
<name>hive.metastore.uris</name>
<value>thrift://master:9083</value>
</property>
<!-- 客户端远程连接的端口 -->
<property>
<name>hive.server2.thrift.port</name>
<value>10000</value>
</property>
<property>
<name>hive.server2.thrift.bind.host</name>
<value>0.0.0.0</value>
</property>
<property>
<name>hive.server2.webui.host</name>
<value>0.0.0.0</value>
</property>
<!-- hive服务的页面的端口 -->
<property>
<name>hive.server2.webui.port</name>
<value>10002</value>
</property>
<property>
<name>hive.server2.long.polling.timeout</name>
<value>5000</value>
</property>
<!-- NOTE: hive.server2.enable.doAs 在下文又被设置为 false; Hadoop 配置以最后一次出现的值为准, 此处的 true 会被覆盖, 建议只保留一处声明 -->
<property>
<name>hive.server2.enable.doAs</name>
<value>true</value>
</property>
<property>
<name>datanucleus.autoCreateSchema</name>
<value>false</value>
</property>
<property>
<name>datanucleus.fixedDatastore</name>
<value>true</value>
</property>
<property>
<name>hive.metastore.event.db.notification.api.auth</name>
<value>false</value>
</property>
<property>
<name>hive.metastore.sasl.enabled</name>
<value>false</value>
<description>If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos.</description>
</property>
<property>
<name>hive.server2.enable.doAs</name>
<value>false</value>
</property>
<property>
<name>hive.server2.authentication</name>
<value>NONE</value>
</property>
<property>
<name>hive.execution.engine</name>
<value>mr</value>
</property>
<property>
<name>hive.stats.autogather</name>
<value>false</value>
</property>
</configuration>
编辑Hadoop的core-site.xml
vim /usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml
#追加完下方配置后, 再同步配置文件到slave节点
scp /usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml slave1:/usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml
scp /usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml slave2:/usr/local/src/hadoop/hadoop-3.3.1/etc/hadoop/core-site.xml
追加
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>hadoop.proxyuser.ucmed.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.ucmed.groups</name>
<value>*</value>
</property>
4.拷贝hive所需的jdbc驱动
拷贝到/usr/local/src/hive/apache-hive-3.1.2-bin/lib
cp /tmp/mysql-connector-java-5.1.39.jar /usr/local/src/hive/apache-hive-3.1.2-bin/lib
5.配置hive环境变量
sudo vim /etc/profile
export HIVE_HOME=/usr/local/src/hive/apache-hive-3.1.2-bin
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin
source /etc/profile
6.重启hdfs和yarn
slave1上
/usr/local/src/hadoop/hadoop-3.3.1/sbin/stop-yarn.sh
/usr/local/src/hadoop/hadoop-3.3.1/sbin/start-yarn.sh
master 上
/usr/local/src/hadoop/hadoop-3.3.1/sbin/stop-dfs.sh
/usr/local/src/hadoop/hadoop-3.3.1/sbin/start-dfs.sh
7.初始化hive的元数据库
schematool -initSchema -dbType mysql
(注: Hive 3.1.2 自带的 guava-19.0.jar 与 Hadoop 3.3.1 不兼容, 如报 NoSuchMethodError, 需先用 $HADOOP_HOME/share/hadoop/common/lib 下的 guava jar 替换 $HIVE_HOME/lib 中的旧版本)
会在目标数据库生成对应表
8.启动测试hive
#启动元数据服务
sudo mkdir -p /opt/logs/hive/
sudo chown -R ucmed:ucmed /opt/logs/hive/
nohup hive --service metastore >> /opt/logs/hive/metastore.log 2>&1 &
nohup hive --service hiveserver2 >> /opt/logs/hive/hiveserver2.log 2>&1 &
hive
输入以下sql语句,进行功能验证
show databases ;
create database db_doit ;
use db_doit ;
create table if not exists tb_user (id int , name string) ;
show tables ;
通过第三方工具连接hive
DBeaver
连接地址 jdbc:hive2://192.168.3.184:10000/default
hive web ui