-- 1. Partitioned tables
-- Source table holding the raw, unpartitioned sample data.
create table sample_data(id int, name string, gender string, x int, y int, z int)
row format delimited fields terminated by ',';  -- fix: added missing ';' and space after 'by'

-- Load the raw file from HDFS (the file is MOVED into the table's directory).
load data inpath '/student/sampleData.txt' into table sample_data;
-- Target table partitioned on gender; the partition column is NOT part of
-- the regular column list — it lives in the PARTITIONED BY clause.
create table partition_t1(
    id   int,
    name string,
    x    int,
    y    int,
    z    int
)
partitioned by(gender string)
row format delimited fields terminated by ',';

-- Static-partition insert: the partition value 'M' is fixed in the statement,
-- so the SELECT list must NOT include the gender column itself.
insert into table partition_t1 partition(gender='M')
select
    id,
    name,
    x,
    y,
    z
from sample_data
where gender='M';
-- List the partitions of a table.
show partitions employees1;

-- Add partitions (IF NOT EXISTS makes this idempotent).
alter table employees1 add if not exists partition(dt='20180830',type='test');
alter table employees1 add if not exists partition(dt='20180831',type='test');

-- Drop a partition (IF EXISTS avoids an error when it is already gone).
alter table employees1 drop if exists partition(dt='20180830');

-- Inspect the query execution plan.
explain select * from sample_data;
-- 2. Hive dynamic partitions
--    1. No separate INSERT statement is needed per partition value.
--    2. Partition values are not known up front; they come from the data itself.
create table if not exists g1(id int,name string,age int) row format delimited fields terminated by ',' stored as textfile;

-- Sample contents of /home/lz/g1.txt:
--   1,tom,24
--   2,jack,25
--   3,lc,27
--   4,ljc,28
load data local inpath '/home/lz/g1.txt' overwrite into table g1;

-- Target table: id and age are partition columns, only name is a data column.
create table if not exists g2(name string)
partitioned by(id int,age int)
row format delimited fields terminated by ','
stored as textfile;

-- Dynamic-partition insert: no values given in PARTITION(id,age); the partition
-- columns must come LAST in the SELECT list, in partition-spec order.
insert overwrite table g2 partition(id,age) select name,id,age from g1;
-- Dynamic partition configuration parameters.
set hive.exec.dynamic.partition=true;                -- enable dynamic partitioning
set hive.exec.dynamic.partition.mode=nonstrict;      -- nonstrict = no restriction; in strict mode at least one
                                                     -- static partition is required and must come first
set hive.exec.max.dynamic.partitions.pernode=10000;  -- max dynamic partitions created per node
set hive.exec.max.dynamic.partitions=10000;          -- max dynamic partitions created overall
set hive.exec.max.created.files=1500000;             -- max number of files one job may create
set dfs.datanode.max.xcievers=8192;                  -- max files open at once per datanode
                                                     -- (NOTE(review): not found in the 2.1.1 config files)
-- 3. External tables: Hive only references the data at LOCATION; dropping the
--    table removes the metadata but leaves the files in place.
create external table external_student(id int,name string,gender string,x int,y int,z int)
row format delimited fields terminated by ','
location '/student/';
-- 4. Bucketed tables: for each table or partition, Hive can further organize
--    the data into buckets — a finer-grained division of the data range.
--    Hive buckets on a chosen column: hash(column value) mod bucket count
--    decides which bucket a row is stored in.
--    Benefits:
--      * more efficient query processing (e.g. bucketed map-side joins)
--      * more efficient sampling
--    TODO(review): original note said "hot blocks????" — unresolved question.
create table bucket_table
(id int,name string,gender string,x int,y int,z int)
clustered by(id) sorted by(name) into 5 buckets;
-- Clustered by id, and rows within each bucket sorted by name.

set hive.enforce.bucketing=true;  -- enforce bucketing on insert; not needed in
                                  -- 2.1.1, where it is enabled automatically

-- A bucket is simply a file under the partition directory, split by the
-- hash of the bucketing column.
-- 5. Views (simplify queries): a saved SELECT that can be queried like a table.
create view view_student
as
select id,name,x from external_student;

-- Drop a view.
drop view xxx;