hive3.1 安装配置

#一、解压缩

tar -xzvf apache-hive-3.1.1-bin.tar.gz

#二、环境变量

vi /etc/profile

HIVE_HOME=/opt/bigdata/apache-hive-3.1.1-bin

PATH=$HIVE_HOME/bin:$PATH

export   PATH   HIVE_HOME

source /etc/profile

#三、hive 配置

##1.hive-default.xml

cp hive-default.xml.template hive-site.xml

vi  hive-site.xml

#删除全部属性

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<configuration>
<!-- Local/HDFS scratch space: ${system:java.io.tmpdir} and ${system:user.name}
     are referenced by other default properties; defining them here avoids the
     well-known "java.net.URISyntaxException" startup error in Hive 3.x. -->
<property>
<name>system:java.io.tmpdir</name>
<value>/tmp/hive/java</value>
</property>
<property>
<name>system:user.name</name>
    <value>spark</value>
</property>
<!-- HDFS directory for intermediate (scratch) query data -->
<property>
<name>hive.exec.scratchdir</name>
<value>/user/hive-exe</value>
</property>
<property>
<name>hive.scratch.dir.permission</name>
<value>777</value>
</property>
<!-- Metastore backed by MySQL. username/passwd/ip:port/db below are
     placeholders — replace with the real connection settings. -->
<property>
<name>hive.metastore.db.type</name>
    <value>mysql</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
    <value>username</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
    <value>passwd</value>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://ip:port/db</value>
</property>
<!-- Druid metadata storage (placeholders; only relevant if the Hive/Druid
     integration is actually used) -->
<property>
<name>hive.druid.metadata.username</name>
<value>user</value>
</property>
<property>
<name>hive.druid.metadata.password</name>
<value>passwd</value>
</property>
<property>
<name>hive.druid.metadata.uri</name>
<value>jdbc:mysql://ip:port/db</value>
</property>
<!-- Disable metastore schema verification to work around the Spark 2.4 /
     Hive 3.1 integration version mismatch -->
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
<!-- Use Spark as the execution engine (Hive-on-Spark) -->
<property>
<name>hive.execution.engine</name>
<value>spark</value>
</property>
<property>
<name>hive.merge.sparkfiles</name>
<value>true</value>
</property>
<property>
<name>hive.exec.reducers.max</name>
<value>1009</value>
</property>
<property>
<name>hive.exec.reducers.bytes.per.reducer</name>
<value>256000000</value> <!-- 256M, if the input size is 1 GB then 4 reducers will be used-->
</property>
<property>
<name>hive.mapjoin.optimized.hashtable</name>
<value>true</value>
</property>
<property>
<name>hive.mapjoin.optimized.hashtable.wbsize</name>
<value>10485760</value> <!--10 * 1024 * 1024 -->
</property>
<!-- Dynamic partition pruning for Spark queries -->
<property>
<name>hive.spark.dynamic.partition.pruning</name>
<value>true</value>
</property>
<property>
<name>hive.spark.dynamic.partition.pruning.map.join.only</name>
<value>true</value>
</property>
<property>
<name>hive.spark.dynamic.partition.pruning.max.data.size</name>
<value>104857600</value> <!-- 100MB,If reaches this limit, the optimization will be turned off.-->
</property>
<property>
<name>mapreduce.job.reduces</name>
<value>-1</value> <!-- not limit -->
</property>
<!-- Parallel execution of independent query stages -->
<property>
<name>hive.exec.parallel</name>
<value>true</value>
</property>
<property>
<name>hive.exec.parallel.thread.number</name>
<value>8</value>
</property>
</configuration>

##2.hive-env.sh

cp hive-env.sh.template  hive-env.sh
vi  hive-env.sh

export HADOOP_HEAPSIZE=1024

export HADOOP_HOME=/opt/bigdata/hadoop-2.7.7

export HIVE_CONF_DIR=/opt/bigdata/apache-hive-3.1.1-bin/conf

export HIVE_AUX_JARS_PATH=/opt/bigdata/apache-hive-3.1.1-bin/lib

##3.hive-log4j2.properties

cp hive-log4j2.properties.template hive-log4j2.properties

vi hive-log4j2.properties

property.hive.log.dir=/opt/bigdata/apache-hive-3.1.1-bin/logs

#4. mysql jar包

cp mysql-connector-java-5.1.12.jar   /opt/bigdata/apache-hive-3.1.1-bin/lib

#5.初始化数据库

bin/schematool -dbType mysql -initSchema

#6.启动hive

nohup bin/hive --service metastore > metastore.log 2>&1 &

#7.hdfs 权限

bin/hdfs dfs -chmod -R 777 /user/hive/warehouse

#99.测试

bin/hive

-- Sanity check: list the databases visible through the new metastore
show databases;

-- Create a scratch database and a comma-delimited text table for the smoke test
create database xx;
use xx;
create table userinfo(id int,name string,age int,tel string)
row format delimited
fields terminated by ','
stored as textfile;

vi userinfo.txt
1,zpp,18,18788888888
2,myt,18,18788888888

load data local  inpath '/opt/bigdata/data/userinfo.txt' into table userinfo;

 

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

吃火锅的胖纸

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值