刷新Hive分区表
-- Drop and rebuild the external table, then re-sync its partitions.
-- NOTE: HiveQL line comments use --, not # (the # form is a MySQL-ism
-- and is a syntax error in Hive).
DROP TABLE IF EXISTS asmp.tt_tmp;
create external table if not exists asmp.tt_tmp(
rssc_name string,
rssc_code string,
sst_name string,
update_date string
)
STORED AS parquet
LOCATION '/user/asmp/hive/asmp/tt_tmp';
-- Repair: register with the metastore every partition directory
-- already present under LOCATION.
MSCK REPAIR TABLE asmp.tt_tmp;
快速删除Hive表空分区
-- Drop the null-key partition (__HIVE_DEFAULT_PARTITION__) across all dates.
ALTER TABLE wd_tt_repair_part DROP IF EXISTS
    PARTITION (partition_rssc='__HIVE_DEFAULT_PARTITION__');
-- Scope the drop to a single month so historical data cannot be removed by mistake.
ALTER TABLE wd_tt_repair_part DROP IF EXISTS
    PARTITION (partition_date=201906, partition_rssc='__HIVE_DEFAULT_PARTITION__');
增加hive执行内存
-- Raise container memory (MB) and JVM heap for map/reduce tasks.
-- NOTE: Hive's SET command does not strip quotes — a quoted value like
-- '-Xmx6552M' is handed to the JVM verbatim, quotes included, and the
-- task fails to launch. Values must be written unquoted.
SET mapreduce.map.memory.mb=8192;
SET mapreduce.map.java.opts=-Xmx6552M;
SET mapreduce.reduce.memory.mb=8192;
SET mapreduce.reduce.java.opts=-Xmx6552M;
-- Deprecated catch-all heap option, kept as a fallback for old MR configs;
-- it is overridden by the per-phase *.java.opts above.
-- (Removed "mapreduce.child.map.java.opts" — not a recognized Hadoop key.)
SET mapred.child.java.opts=-Xmx4096M;
使用动态分区
-- Enable dynamic partitioning (default is false).
-- NOTE: the SET keyword is required — a bare "hive.exec.dynamic.partition=true;"
-- is not a valid statement.
set hive.exec.dynamic.partition=true;
-- Partition mode; default "strict" demands at least one static partition key.
set hive.exec.dynamic.partition.mode=nonstrict;
-- Raise the per-query and per-node dynamic partition limits.
set hive.exec.max.dynamic.partitions=2000;
set hive.exec.max.dynamic.partitions.pernode=2000;
hive-site.xml中添加动态分区(sparksql使用生效)
<!-- Dynamic-partition settings for hive-site.xml (also picked up by SparkSQL). -->
<property>
<name>hive.exec.dynamic.partition</name>
<value>true</value>
</property>
<property>
<name>hive.exec.dynamic.partition.mode</name>
<value>nonstrict</value>
</property>
<!-- Upper bounds on dynamic partitions created per query / per node. -->
<property>
<name>hive.exec.max.dynamic.partitions</name>
<value>100000</value>
</property>
<property>
<name>hive.exec.max.dynamic.partitions.pernode</name>
<value>100000</value>
</property>
<!-- Cap on the total number of files a job may create. -->
<property>
<name>hive.exec.max.created.files</name>
<value>500000</value>
</property>
hive执行并发度
-- Run independent stages of a query concurrently.
set hive.exec.parallel=true;
-- Maximum number of stages executed in parallel.
set hive.exec.parallel.thread.number=8;
hive中不选择某列数据
-- Treat backtick-quoted identifiers as regexes (exclude-column trick).
set hive.support.quoted.identifiers=none;
-- Select every column EXCEPT oil_code: the regex matches all columns
-- whose names are not "oil_code".
select `(oil_code)?+.+` from table_oil;
实现字符串转map
-- Build a map from grouped rows: collect_set gathers "key:value" strings,
-- the outer concat_ws joins them with commas, and str_to_map parses the
-- result back into a map<string,string>.
str_to_map(concat_ws(',',collect_set(concat_ws(':',key, cast(value as string)))))
外部表修改为内部表
-- Convert an external table to a managed (internal) table.
-- The property value is case-sensitive: it must be 'FALSE'/'TRUE' in caps.
-- (Added the missing statement terminator for consistency with the file.)
alter table tbl_name set TBLPROPERTIES('EXTERNAL'='FALSE');
mr读取递归目录设置
-- Let MapReduce input formats descend into nested subdirectories.
set mapreduce.input.fileinputformat.input.dir.recursive=true;
-- Let Hive read tables whose data lives in subdirectories.
set hive.mapred.supports.subdirectories=true;
显示当前数据库名称
-- Show the current database name in the Hive CLI prompt.
set hive.cli.print.current.db=true;
-- Print column headers in query result output.
set hive.cli.print.header=true;
map和reducer个数控制
--- Map side: split sizes (bytes) control how many map tasks are launched.
set mapred.max.split.size=128000000;
set mapred.min.split.size.per.node=8000000;
set mapred.min.split.size.per.rack=8000000;
--- Reduce side: bytes-per-reducer drives the reducer count, capped by max.
--- (Spaces around '=' removed — inconsistent with every other SET in this
--- file and not accepted by all Hive versions.)
set hive.exec.reducers.bytes.per.reducer=128000000;
set hive.exec.reducers.max=999;