Background: on an outsourcing project I recently worked with some education data. Last week the company needed to build an education data warehouse, and one of the metrics concerned teachers' usage of an online teaching tool: for each hour of the day, how many teachers logged in, prepared lessons, and taught lessons.
Each record only carries a start time and an end time, so all we have is a time range; the activity cannot be directly attributed to individual hours.
The solution (four steps):
Step 1: concatenate the start time and the end time into a single time-range string.
Step 2: write a Hive UDTF that splits a date into year, month, and day and fixes boundary overflow (a day rolled past the end of its month or year).
Step 3: write a Hive UDF that expands the time range into the list of hours it covers.
Step 4: explode that list so one record becomes one record per hour; a minimal sketch of this step follows.
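Toy sketch of step 4, assuming a hard-coded pipe-separated hour list (the real query below applies the same lateral view explode to the TimeToHour output):
select staff_code, hh
from (select 'T001' as staff_code, '10|11|12' as hour_list) t
lateral view explode(split(hour_list, '\\|')) lv as hh;
-- three rows come back: (T001, 10), (T001, 11), (T001, 12)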
The raw HQL:
drop table if exists `cdm.ads_xkt_teacher_use_1`;
create table if not exists `cdm.ads_xkt_teacher_use_1`
(
Yea string comment "year",
Mon string comment "month",
Da string comment "day",
HH string comment "hour",
login_num string comment "number of teachers who logged in",
prepare_num string comment "number of teachers who prepared lessons",
lesson_num string comment "number of teachers who taught lessons"
);
-- add jar hdfs:///user/xxx/hivefunction.jar;
add jar ./hivefunction.jar;
create temporary function TimeToHour as "com.TimeToHour";
-- add jar hdfs:///user/xxx/YMD.jar;
add jar ./YMD.jar;
create temporary function TimeStandard as "com.TimeStandard";
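As a quick sanity check that both functions registered, Hive's standard describe command works here:
describe function TimeToHour;
describe function TimeStandard;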
with staff_login as
(
select
concat_ws("-",cast(yyyy as string),cast(MM as string),cast(dd as string)) YeaMonDa,
HH,
staff_code
from
(
select
staff_code,
year(login_time) as yyyy,
month(login_time) as MM,
day(login_time) as dd,
hour(login_time) as HH
from
cdm.DWD_XKT_STAFF_LOGIN_1
) t
),staff_prepare_HourList as
(
select
id,
times,
year(start_prepare) as yyyy,
month(start_prepare) as MM,
day(start_prepare) as dd,
TimeToHour(times) as ReHH,
start_prepare,
staff_code
from
(
select
id,
start_prepare,
end_prepare,
concat(start_prepare,'_',end_prepare) as times,
staff_code
from
cdm.DWD_XKT_PREPARE_LESSONS_1
) t
),staff_prepare as
(
select
HH,
YeaMonDa,
staff_code
from
(
select
if(HH >= 24,cast(HH-24 as int),cast(HH as int)) HH,
if(HH >= 24,cast(dd+1 as string),cast(dd as string)) dd,
cast(MM as string) MM,
cast(yyyy as string) yyyy,
times,
staff_code,
start_prepare
from
(
select
yyyy,
MM,
dd,
start_prepare,
times,
staff_code,
HH
from
staff_prepare_HourList a
lateral view explode(split(ReHH,'\\|')) lv as HH
) t
) temp
lateral view TimeStandard(concat_ws("-",yyyy,MM,dd),"-") lv as YeaMonDa
),staff_lesson_HourList as
(
select
id,
staff_code,
create_time,
concat(create_time,'_',update_time) times,
TimeToHour(concat(create_time,'_',update_time)) as reHH
from
cdm.DWD_XKT_TEACH_LESSONS_1
),staff_lesson as
(
select
HH,
YeaMonDa,
staff_code
from
(
select
if(HH >= 24,cast(HH-24 as int),cast(HH as int)) HH,
if(HH >= 24,cast(dd+1 as string),cast(dd as string)) dd,
cast(MM as string) MM,
cast(yyyy as string) yyyy,
create_time,
times,
staff_code
from
(
select
year(create_time) as yyyy,
month(create_time) as MM,
day(create_time) as dd,
create_time,
staff_code,
times,
HH
from
staff_lesson_HourList a
lateral view explode(split(reHH,'\\|')) lv as HH
) t
) temp
lateral view TimeStandard(concat_ws("-",yyyy,MM,dd),"-") lv as YeaMonDa
)
insert overwrite table `cdm.ads_xkt_teacher_use_1`
select
split(YeaMonDa,"-")[0] Yea,
split(YeaMonDa,"-")[1] Mon,
split(YeaMonDa,"-")[2] Da,
HH,
sum(login_num) over(partition by YeaMonDa,HH),
sum(prepare_num) over(partition by YeaMonDa,HH),
sum(lesson_num) over(partition by YeaMonDa,HH)
from
(
select
nvl(tempa.YeaMonDa,tempb.YeaMonDa) YeaMonDa,
nvl(tempa.HH,tempb.HH) HH,
nvl(tempa.login_num,0) login_num,
nvl(tempa.prepare_num,0) prepare_num,
nvl(tempb.lesson_num,0) lesson_num
from
(
select
nvl(a.YeaMonDa,b.YeaMonDa) YeaMonDa,
nvl(a.HH,b.HH) HH,
nvl(a.login_num,0) login_num,
nvl(b.prepare_num,0) prepare_num
from
(
select
YeaMonDa,
HH,
count(distinct staff_code) login_num
from
staff_login
group by
YeaMonDa,
HH
) a
full join
(
select
YeaMonDa,
HH,
count(distinct staff_code) prepare_num
from
staff_prepare
group by
YeaMonDa,
HH
) b
on
a.HH = b.HH
and
a.YeaMonDa = b.YeaMonDa
) tempa
full join
(
select
YeaMonDa,
HH,
count(distinct staff_code) lesson_num
from
staff_lesson
group by
YeaMonDa,
HH
) tempb
on
tempa.HH = tempb.HH
and
tempa.YeaMonDa = tempb.YeaMonDa
) t;
The Hive UDTF (TimeStandard)
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import java.util.ArrayList;
import java.util.List;
public class TimeStandard extends GenericUDTF {
private final ArrayList<String> outList = new ArrayList<>();
@Override
public StructObjectInspector initialize(StructObjectInspector argOIs) throws UDFArgumentException {
// 1. Define the names and types of the output columns.
List<String> fieldNames = new ArrayList<>();
ArrayList<ObjectInspector> fieldOIs = new ArrayList<>();
// 2. Register the single output column; callers alias it in the lateral view ("... as YeaMonDa").
fieldNames.add("yea_mon_da");
fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs);
}
@Override
public void process(Object[] args) throws HiveException {
// 1. The raw input: a "yyyy-M-d"-style date string.
String arg = args[0].toString();
// 2. The second argument is the separator.
String splitKey = args[1].toString();
// 3. Split the date on the separator.
String[] fields = arg.split(splitKey);
int Yea = Integer.parseInt(fields[0]);
int Mon = Integer.parseInt(fields[1]);
int Da = Integer.parseInt(fields[2]);
String result = null;
if (Mon == 1 || Mon == 3 || Mon == 5 || Mon == 7 || Mon == 8 || Mon == 10 || Mon == 12){
if (Da > 31){
Mon += 1;
Da -= 31;
}
}
if (Mon == 2){
if ( Yea % 4 == 0 && Yea % 100 != 0 || Yea % 400 == 0 ){
if ( Da > 29){
Mon += 1;
Da -= 29;
}
}else{
if ( Da > 28){
Mon += 1;
Da -= 28;
}
}
}
if ( Mon == 4 || Mon == 6 || Mon == 9 || Mon == 11 ){
if (Da > 30){
Mon += 1;
Da -= 30;
}
}
if (Mon > 12){
Yea += 1;
Mon -= 12;
}
result = Yea + "-" + Mon + "-" + Da;
// The output list is reused across calls, so clear it first.
outList.clear();
// Add the normalized date.
outList.add(result);
// Emit the row.
forward(outList);
}
public void close() throws HiveException {
}
}
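Assuming the registration shown earlier, the UDTF can be exercised on its own; 2021 is not a leap year, so an overflowed February date rolls into March:
select t.YeaMonDa
from (select 1 as x) dummy
lateral view TimeStandard('2021-2-29', '-') t as YeaMonDa;
-- returns 2021-3-1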
The Hive UDF (TimeToHour)
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
import org.apache.hadoop.io.Text;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
public class TimeToHour extends GenericUDF {
private transient PrimitiveObjectInspector inputOI;
public ObjectInspector initialize(ObjectInspector[] objectInspectors) throws UDFArgumentException {
// Check the number of arguments.
if (objectInspectors.length != 1) {
throw new UDFArgumentException("TimeToHour() takes only one argument");
}
// Check the argument type: a single string is required.
if (objectInspectors[0].getCategory() != ObjectInspector.Category.PRIMITIVE
|| ((PrimitiveObjectInspector) objectInspectors[0]).getPrimitiveCategory() != PrimitiveObjectInspector.PrimitiveCategory.STRING) {
throw new UDFArgumentTypeException(0, "Only a string argument is accepted, but "
+ objectInspectors[0].getTypeName() + " was passed as parameter 1");
}
// Keep the input ObjectInspector for evaluate().
inputOI = (PrimitiveObjectInspector) objectInspectors[0];
// The output is a writable string (Text).
return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
}
public Object evaluate(DeferredObject[] deferredObjects) throws HiveException {
if (deferredObjects == null || deferredObjects[0] == null) {
return new Text("");
}
// Extract the input string.
String component = PrimitiveObjectInspectorUtils.getString(deferredObjects[0].get(), inputOI);
if (component == null || component.length() <= 0) {
return new Text("");
}
String result = null;
// Expand the time range into the hours it covers.
// e.g. input:  2021-02-02 10:12:21_2021-02-02 14:23:33
//      output: 10|11|12|13|14
// HH is the 24-hour clock pattern; hh (12-hour) would fold afternoon hours onto the morning.
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
Date start_time = null;
Date end_time = null;
String[] split = component.split("_");
try {
start_time = sdf.parse(split[0]);
end_time = sdf.parse(split[1]);
int start_timeHours = start_time.getHours();
int end_timeHours = end_time.getHours();
// If the range crosses midnight, push the end hour past 24 so the SQL's
// if(HH >= 24, ...) branches can roll those hours onto the next day.
if (end_timeHours < start_timeHours) {
end_timeHours += 24;
}
// Build the pipe-separated hour list, e.g. 10|11|12|13|14.
StringBuilder hours = new StringBuilder();
for (int h = start_timeHours; h <= end_timeHours; h++) {
if (hours.length() > 0) {
hours.append('|');
}
hours.append(h);
}
result = hours.toString();
} catch (ParseException e) {
e.printStackTrace();
return new Text("");
}
return new Text(result);
}
@Override
public String getDisplayString(String[] children) {
return "TimeToHour(" + String.join(",", children) + ")";
}
}
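With the function registered as above, a quick check from the Hive CLI reproduces the example in evaluate():
select TimeToHour('2021-02-02 10:12:21_2021-02-02 14:23:33');
-- returns 10|11|12|13|14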
CREATE [EXTERNAL] TABLE [IF NOT EXISTS] table_name
[(col_name data_type [COMMENT col_comment], ...)]
[COMMENT table_comment]
[PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)]
[CLUSTERED BY (col_name, col_name, ...)
[SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS]
[ROW FORMAT row_format]
[STORED AS file_format]
[LOCATION hdfs_path]
[TBLPROPERTIES (property_name=property_value, ...)]
[AS select_statement]
(1) CREATE TABLE creates a table with the given name. If a table with the same name already exists an exception is thrown; the IF NOT EXISTS option suppresses it.
(2) The EXTERNAL keyword creates an external table, and a path to the actual data can be given at creation time (LOCATION). When a table is dropped, a managed (internal) table loses both its metadata and its data, while an external table loses only the metadata; the data is kept.
(3) COMMENT: adds comments to the table and its columns.
(4) PARTITIONED BY creates a partitioned table.
(5) CLUSTERED BY creates a bucketed table.
(6) SORTED BY (rarely used) additionally sorts one or more columns within each bucket.
(7) ROW FORMAT
DELIMITED [FIELDS TERMINATED BY char] [COLLECTION ITEMS TERMINATED BY char]
[MAP KEYS TERMINATED BY char] [LINES TERMINATED BY char]
| SERDE serde_name [WITH SERDEPROPERTIES (property_name=property_value, property_name=property_value, ...)]
You can define a custom SerDe or use a built-in one when creating a table. If neither ROW FORMAT nor ROW FORMAT DELIMITED is specified, the built-in SerDe is used. Declaring a custom SerDe alongside the columns tells Hive how to map the stored bytes onto those columns.
SerDe is short for Serializer/Deserializer; Hive uses a SerDe to serialize and deserialize row objects.
(8) STORED AS specifies the storage file format.
Common formats: SEQUENCEFILE (binary sequence files), TEXTFILE (plain text), RCFILE (columnar storage).
If the data is plain text, use STORED AS TEXTFILE; if it needs compression, use STORED AS SEQUENCEFILE.
(9) LOCATION: the table's storage path on HDFS.
(10) AS: followed by a query; creates the table from the query's result.
(11) LIKE copies an existing table's structure without copying its data.
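A minimal sketch pulling the common clauses together (database, table, columns, and HDFS path are all illustrative):
create external table if not exists db_hive.student(
id int comment 'student id',
name string comment 'student name'
)
comment 'student table'
partitioned by (dt string comment 'date partition')
row format delimited fields terminated by '\t'
stored as textfile
location '/warehouse/db_hive/student';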
Internal vs. external tables
The distinction is about metadata vs. the underlying data.
1) On DROP TABLE:
Internal (managed) table: both metadata and data are deleted.
External table: only the metadata is deleted; the data remains on HDFS.
2) In production, when do you create an internal table and when an external one?
The vast majority of production tables are external.
Internal tables are created only for private, temporary use. A table can also be switched between the two kinds after creation, as sketched below.
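Hive flips a table between managed and external through table properties; the values must be the upper-case strings 'TRUE'/'FALSE' (table name reused from the sketch above):
alter table db_hive.student set tblproperties('EXTERNAL'='TRUE');  -- managed -> external
alter table db_hive.student set tblproperties('EXTERNAL'='FALSE'); -- external -> managed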
The four BYs
1) Order By: global ordering; only one reducer.
2) Sort By: ordering within each reducer.
3) Distribute By: like the Partitioner in MapReduce, it routes rows to reducers; usually combined with Sort By.
4) Cluster By: when the Distribute By and Sort By columns are the same, Cluster By can replace both. It distributes and sorts on that column, but only ascending; ASC/DESC cannot be specified.
In production Order By is rarely used: with a single reducer it easily causes OOM.
In production Sort By + Distribute By is the common combination, as sketched below.
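A minimal sketch assuming an illustrative emp(deptno, sal) table: route rows to reducers by department, then order each reducer's rows by salary:
set mapreduce.job.reduces=3;
select * from emp distribute by deptno sort by sal desc;
-- Cluster By is the shorthand when both columns match and ascending order suffices:
select * from emp cluster by deptno;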