SELECT
select用于映射符合指定查询条件的行
-- Constant select: no table required
SELECT 1;

-- Full table scan
SELECT * FROM employee;

-- Filter plus row cap; <> and != are equivalent inequality operators in HiveQL
SELECT * FROM employee WHERE name <> 'Lucy' LIMIT 2;
-- 起别名
select ename as name, deptno dn from emp;
CTE和嵌套查询
-- CTE语法示例
-- CTE example: each WITH clause names one intermediate result set
WITH emp_all AS (
    SELECT * FROM employee
),
emp_copy AS (
    -- a CTE may reference an earlier CTE in the same WITH list
    SELECT * FROM emp_all
)
SELECT * FROM emp_copy WHERE name = 'Will';
-- 嵌套查询语法示例
select * from (select * from employee) a where name='Will';
Hive JOIN - 关联查询
join用于将两个或多个表中的行组合在一起查询
类似于sql join,但是Hive仅支持等值连接
内连接:inner join
外连接:outer join
right join, left join, full outer join
交叉连接cross join
隐式连接:Implicit join
join关联发生在where子句之前
*mapjoin支持不等值连接(小表关联大表)
开启mapjoin:set hive.auto.convert.join=true
Hive集合操作(UNION)
用处:把两个子集数据合并
所有子集数据必须具有相同的名称和类型
UNION ALL:合并后保留重复项
UNION:合并后删除重复项(v1.2之后)
装载数据INSERT
插入表数据
--使用INSERT语句将数据插入表/分区
INSERT支持OVERWRITE(覆盖)和INTO(追加)
INSERT OVERWRITE/INTO TABLE tablename1
[PARTITION (partcol1=val1, partcol2=val2 ...)]
select fields,... from tb_other;
Hive支持从同一个表进行多次插入
INSERT INTO中TABLE关键字是可选的
--INSERT INTO可以指定插入到哪些字段中
INSERT INTO t(x,y,z) VALUES (...);
INSERT INTO table_name VALUES,支持插入值列表
数据插入必须与指定列数相同
-- 通过查询语句插入
insert into employee select * from ctas_employee;
-- 多插入(高性能:只需扫描一次输入数据)
-- Multi-insert: the source is scanned once and feeds every INSERT clause
FROM ctas_employee
INSERT OVERWRITE TABLE employee
    SELECT *
INSERT OVERWRITE TABLE employee_external
    SELECT *;
-- 插入到分区(需要动态分区)
-- Dynamic-partition insert: the partition columns (year, month) take their
-- values from the LAST expressions of the SELECT list, in order.
-- NOTE(review): requires hive.exec.dynamic.partition=true (and nonstrict mode
-- when no static partition value is given) — confirm session settings.
FROM ctas_employee
INSERT OVERWRITE TABLE employee_partitioned PARTITION (year, month)
    -- fixed: leading-zero literal 09 is not a portable integer literal; use 9
    SELECT *, 2018, 9;
-- 通过指定列插入(Hive1.2版本以后支持)
-- Insert into a named column list via a query (supported since Hive 1.2)
INSERT INTO employee (name)
SELECT 'John' FROM test LIMIT 1;
-- Insert literal rows via VALUES (supported since Hive 1.2)
INSERT INTO employee (name) VALUES ('Judy'), ('John');
插入文件
使用insert语句将数据插入/导出到文件
-文件插入只支持OVERWRITE
-支持来自同一个数据源/表的多次插入
-LOCAL:写入本地文件系统
-默认数据以TEXT格式写入,列由^A分隔
-支持自定义分隔符导出文件为不同格式,CSV,JSON等
-- 从同一数据源插入本地文件,hdfs文件,表
-- One source scan feeding three targets: local FS, HDFS, and a table
FROM ctas_employee
INSERT OVERWRITE LOCAL DIRECTORY '/tmp/out1'
    SELECT *
INSERT OVERWRITE DIRECTORY 'hdfs://hadoop101:9000/tmp/out1'
    SELECT *
INSERT OVERWRITE TABLE employee_external
    SELECT *;
-- 以指定格式插入数据
-- Export with a custom delimiter instead of the default ^A separator
INSERT OVERWRITE DIRECTORY '/tmp/out3'
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
SELECT * FROM ctas_employee;
-- 其他方式从表获取文件
hdfs dfs -getmerge <table_file_path>
数据交换IMPORT/EXPORT
IMPORT和EXPORT用于数据导入和导出
-常用于数据迁移场景
-除数据库,可导入导出所有数据和元数据
--使用EXPORT导出数据
-- Export data plus metadata to an HDFS path (whole table, then one partition)
EXPORT TABLE employee TO '/tmp/output3';
-- fixed: added the missing statement terminator ';'
EXPORT TABLE employee_partitioned PARTITION (year=2018, month=9) TO '/tmp/output5';
-- Import the exported snapshots into new tables
IMPORT TABLE employee1 FROM '/tmp/output3';
IMPORT TABLE employee_partitioned1 PARTITION (year=2018, month=9) FROM '/tmp/output5';
数据分组排序
*GROUP BY用于分组
-Hive基本内置聚合函数与GROUP BY一起使用
-如果没有指定GROUP BY子句,则默认聚合整个表
-除聚合函数外,所选的其他列也必须包含在GROUP BY中
-GROUP BY支持使用CASE WHEN或表达式
*ORDER BY (ASC|DESC)类似于标准SQL
-只使用一个Reducer执行全局数据排序
-速度慢,应提前做好数据过滤
-支持使用CASE WHEN或表达式
-支持按位置编号排序
-set hive.groupby.orderby.position.alias=true;
*DISTRIBUTE BY类似于标准SQL中的GROUP BY
-根据相应列以及对应reduce的个数进行分发
-默认是采用hash算法
-根据分区字段的hash码与reduce的个数进行模除
-通常使用在SORT BY语句之前
SORT BY对每个Reducer中的数据进行排序
-当Reducer数量设置为1时,等于ORDER BY
-排序列必须出现在SELECT column列表中
* CLUSTER BY = DISTRIBUTE BY + SORT BY
-不支持ASC|DESC
-排序列必须出现在SELECT column列表中
-为了充分利用所有的Reducer来执行全局排序,可以先使用CLUSTER BY,然后使用ORDER BY
聚合运算 - 基础聚合
基础聚合函数
max, min, count, sum, avg
max(distinct col1)、avg(col2)等
collect_set, collect_list:返回每个组列中的对象集/列表
注意事项:
- 一般与GROUP BY一起使用
- 可应用于列或表达式
- 对NULL的count聚合为0
- select count(null) = 0
Hive函数
表生成函数
侧视图: select ... from TABLE_NAME lateral view func(...) v_alias as f1,...
T/row explode(array<T>/Map<k,V>)
select explode(array('aa','bb','cc'));
-- LATERAL VIEW joins each source row to the rows its UDTF generates
SELECT name, city
FROM emp LATERAL VIEW explode(cities) ct AS city;
-- Exploding a map yields two generated columns: key and value
SELECT name, pos, score
FROM emp LATERAL VIEW explode(scores) st AS pos, score;
row posexplode (array<T>)
select posexplode(array('aa','bb','cc'));
row stack(n, v1, ..., vm) -- n为整数,表示输出的行数
select stack(2,'aa','bb','cc','dd');
tuple json_tuple(string json, string ...keys) -- 返回(key, value),...
tuple parse_url_tuple(string url, string ...parts)
-- json_tuple extracts several top-level JSON keys in a single pass
-- (more efficient than repeated get_json_object calls)
select name,hobbies,age from jsontuple lateral view json_tuple(line,'name','hobbies','age') jt as name,hobbies,age;
-- Chained lateral views: the second json_tuple parses the nested 'name' object,
-- and explode splits the hobbies value into one row per hobby.
-- NOTE(review): regexp_replace strips [ ] and " before split — assumes 'hobbies'
-- holds a JSON string array; confirm against the jsontuple table's data.
select printf('%s %s',fn,ln) name,hobby,age from jsontuple
lateral view json_tuple(line,'name','hobbies','age') jt as name,hobbies,age
lateral view json_tuple(name,'first','last') jt1 as fn,ln
lateral view explode(split(regexp_replace(hobbies,'\\[|\\]|"',''),',')) hs as hobby;
row inline (array<struct<...>>)
窗口函数
窗口函数是一组特殊函数
扫描多个输入行来计算每个输出值,为每行数据生成一行结果
可以通过窗口函数来实现复杂的计算和聚合
语法:
Function (arg1,..., arg n) OVER ([PARTITION BY <...>] [ORDER BY <....>] [<window_clause>])
PARTITION BY类似于GROUP BY,未指定则按整个结果集
只有指定ORDER BY子句之后才能进行窗口定义
可同时使用多个窗口函数
过滤窗口函数计算结果必须在外面一层
窗口函数按功能划分:
-
序列
ROW_NUMBER() 对所有数值输出不同的序号,序号唯一连续 RANK() 对相同数值,输出相同的序号,下一个序号跳过(1,1,3) DENSE_RANK() 对相同数值,输出相同的序号,下一个序号连续(1,1,2) NTILE(n) 将有序的数据集合平均分配到n个桶中,将桶号分配给每一行,根据桶号,选取前或后 n分之几的数据 PERCENT_RANK() (当前排名-1)/(总行数-1),值相对于一组值的百分比排名
-
聚合
COUNT() 计数,可以和DISTINCT一起用 SUM():求和 AVG():平均值 MAX()/MIN(): 最大/小值 从Hive 2.1.0开始在OVER子句中支持聚合函数 select rank() over (order by sum(b)) from T group by a;
-
分析
CUME_DIST() 小于等于当前值的行数/分组内总行数 LEAD/LAG(col,n) 某一列进行往前/后第n行值(n可选,默认为1) FIRST_VALUE() 对该列到目前为止的首个值 LAST_VALUE() 到目前行为止的最后一个值
-
窗口子句
两类窗口子句 行类型窗口 范围类型窗口 RANK、NTILE、DENSE_RANK、CUME_DIST、PERCENT_RANK、LEAD、LAG和ROW_NUMBER 函数不支持与窗口子句一起使用
--行窗口:根据当前行之前或之后的行号确定的窗口
-- ROWS-type window frames: each winN column applies MAX(salary) over a
-- different physical-row frame within the dept_num partition, ordered by name.
SELECT
name, dept_num AS dept, salary AS sal,
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) win1,    -- 2 rows back .. current row
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING) win2,    -- 2 rows back .. partition end
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN 1 PRECEDING AND 2 FOLLOWING) win3,    -- 1 back .. 2 ahead
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING) win4,    -- 2 back .. 1 back (excludes current row)
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) win5,    -- 1 ahead .. 2 ahead (excludes current row)
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN CURRENT ROW AND CURRENT ROW) win6,    -- current row only
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) win7,    -- current .. 1 ahead
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) win8,    -- current .. partition end
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) win9,    -- partition start .. current (running max)
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) win10,    -- partition start .. 1 ahead
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) win11,    -- entire partition
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS 2 PRECEDING) win12    -- shorthand for BETWEEN 2 PRECEDING AND CURRENT ROW (same as win1)
FROM employee_contract ORDER BY dept, name;
--范围窗口是取分组内的值在指定范围区间内的行
--该范围值/区间必须是数字或日期类型
--目前只支持一个ORDER BY列
-- RANGE-type frame: the window is defined by a VALUE distance on the ORDER BY
-- column, so that column must be numeric or date (per the note above).
SELECT name, dept_num AS dept, salary AS sal,
MAX(salary) OVER (PARTITION BY dept_num ORDER BY name ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) win1,    -- ROWS frame shown for comparison
salary - 1000 AS sal_r_start, salary AS sal_r_end,    -- the value interval win13 covers for each row
-- fixed: the RANGE frame must order by the numeric column (salary), not name;
-- win13 takes MAX over rows whose salary lies in [salary-1000, salary]
MAX(salary) OVER (PARTITION BY dept_num ORDER BY salary RANGE BETWEEN 1000 PRECEDING AND CURRENT ROW) win13
FROM employee_contract ORDER BY dept, name;