1. Time Attributes
1. Processing Time
-- PROCTIME(): built-in function that generates the processing time
CREATE TABLE words (
word STRING,
proctime AS PROCTIME() -- declare an extra computed column as the processing-time attribute
) WITH (
'connector' = 'kafka',
'topic' = 'words',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'earliest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
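For testing, rows can be pushed into the words topic with a console producer. The command below is a sketch: the broker list is taken from the table definition above, and the word lines are only illustrative sample input.
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic words
java
spark
java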
-- Count in real time, for each word, the number of occurrences in the last 5 seconds
select
word,
TUMBLE_START(proctime,INTERVAL '5' SECOND) win_start,
TUMBLE_END(proctime,INTERVAL '5' SECOND) win_end,
count(1) as num
from
words
group by
word,
TUMBLE(proctime,INTERVAL '5' SECOND);
2. Event Time
Converting an epoch timestamp to a date/time (used when the source field is a BIGINT epoch, as in the road-traffic case at the end):
event_time AS TO_TIMESTAMP(FROM_UNIXTIME(`time`))
Sample data for the words_event_time topic (word,event_time):
java,2024-06-04 09:21:10
java,2024-06-04 09:21:11
java,2024-06-04 09:21:12
java,2024-06-04 09:21:13
java,2024-06-04 09:21:16
java,2024-06-04 09:21:20
CREATE TABLE words_event_time (
word STRING,
`event_time` TIMESTAMP(3), -- the time field
-- specify the time field and the watermark generation strategy
WATERMARK FOR event_time AS event_time - INTERVAL '5' SECOND
) WITH (
'connector' = 'kafka',
'topic' = 'words_event_time',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'earliest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
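The sample rows above can be fed into the topic with a console producer (same broker list as in the table definition):
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic words_event_time
-- Count, per word, the number of occurrences in each 5-second event-time tumbling window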
select
word,
TUMBLE_START(event_time,INTERVAL '5' SECOND) win_start,
TUMBLE_END(event_time,INTERVAL '5' SECOND) win_end,
count(1) as num
from
words_event_time
group by
word,
TUMBLE(event_time,INTERVAL '5' SECOND);
2. SQL Syntax
1. Hints (/*+ OPTIONS( ) */)
Dynamic table options: a table's configuration parameters can be overridden at query time.
CREATE TABLE students (
id STRING,
name STRING,
age INT,
sex STRING,
clazz STRING
) WITH (
'connector' = 'kafka',
'topic' = 'students', -- topic to read
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092', -- Kafka broker list
'properties.group.id' = 'testGroup', -- consumer group
'scan.startup.mode' = 'earliest-offset', -- startup position for reading
'format' = 'csv' -- data format
);
select * from students /*+ OPTIONS('csv.ignore-parse-errors' ='true') */;
-- latest-offset: only read data produced after the job starts
select * from students /*+ OPTIONS('csv.ignore-parse-errors' ='true','scan.startup.mode' = 'latest-offset') */;
CREATE TABLE students_hdfs_stream (
id INT,
name STRING,
age INT,
gender STRING,
clazz STRING
) WITH (
'connector' = 'filesystem', -- required: connector type
'path' = 'hdfs://master:9000/data/students', -- required: path
'format' = 'csv' -- required: the filesystem connector must specify a format
);
-- source.monitor-interval makes the filesystem source keep scanning the directory for new files (here every 5000 ms)
select * from students_hdfs_stream /*+ OPTIONS('source.monitor-interval' = '5000') */;
2. WITH
When a piece of SQL logic is reused, it can be defined once in a WITH clause to reduce duplication.
with tmp as (
select
id,name,age,clazz
from
students_hdfs_stream
where age > 22
)
select * from tmp
union all
select * from tmp;
3. SELECT WHERE
select * from students_hdfs_stream
where
age > 21
and gender in ('男','女');
4. SELECT DISTINCT
Issues in streaming mode:
1. Flink keeps the rows it has already seen in state in order to decide whether a new row is a duplicate.
2. If the table is large, this state keeps growing over time; state is first held in TaskManager memory, so over a long run it can cause problems (a state-TTL sketch follows the query below).
select distinct * from students /*+ OPTIONS('csv.ignore-parse-errors' ='true','scan.startup.mode' = 'latest-offset') */;
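One common mitigation for the growing-state problem, sketched here under the assumption that the query is submitted through the SQL client: set a state TTL so that keys which have not been updated for a while are dropped from state (at the cost of possibly treating a very late duplicate as new). table.exec.state.ttl is a standard Flink table option.
-- assumption: SQL client session; per-key state idle for more than 1 hour may be cleared
SET 'table.exec.state.ttl' = '1 h';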
5. Window Functions (TVFs)
1. Tumbling Window Function
CREATE TABLE bid (
item STRING,
price DECIMAL(10, 2),
bidtime TIMESTAMP(3),
WATERMARK FOR bidtime AS bidtime - INTERVAL '5' SECOND
) WITH (
'connector' = 'kafka',
'topic' = 'bid',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'earliest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic bid
C,4.00,2020-04-15 08:05:01
A,2.00,2020-04-15 08:07:01
D,5.00,2020-04-15 08:09:01
B,3.00,2020-04-15 08:11:01
E,1.00,2020-04-15 08:13:01
F,6.00,2020-04-15 08:17:01
-- TUMBLE: the tumbling window function; it extends the original table with window_start, window_end and window_time columns
SELECT item,price,bidtime,window_start,window_end,window_time FROM TABLE(
TUMBLE(TABLE bid, DESCRIPTOR(bidtime), INTERVAL '10' MINUTES)
);
-- Window aggregation
-- Compute in real time the average price of all items over each 10-minute window
SELECT
window_start,
window_end,
avg(price) as avg_price
FROM
TABLE(
TUMBLE(TABLE bid, DESCRIPTOR(bidtime), INTERVAL '10' MINUTES)
)
group by
window_start,
window_end;
2. Sliding (Hop) Window Function
CREATE TABLE bid_proctime (
item STRING,
price DECIMAL(10, 2),
proctime AS PROCTIME()
) WITH (
'connector' = 'kafka',
'topic' = 'bid_proctime',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'earliest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic bid_proctime
C,4.00
A,2.00
D,5.00
B,3.00
E,1.00
F,6.00
-- HOP: the sliding window function (here: slide 5 seconds, window size 10 seconds)
SELECT item,price,proctime,window_start,window_end,window_time FROM TABLE(
HOP(TABLE bid_proctime, DESCRIPTOR(proctime), INTERVAL '5' SECOND, INTERVAL '10' SECOND)
);
-- Window aggregation
SELECT
window_start,
window_end,
avg(price) as avg_price
FROM
TABLE(
HOP(TABLE bid_proctime, DESCRIPTOR(proctime), INTERVAL '5' SECOND, INTERVAL '10' SECOND)
)
group by
window_start,
window_end;
3. CUMULATE (Cumulative Window)
CUMULATE windows share a fixed start and grow by the step size until they reach the maximum size; here results cover 2, 4, ..., up to 10 minutes of data.
CREATE TABLE bid (
item STRING,
price DECIMAL(10, 2),
bidtime TIMESTAMP(3),
WATERMARK FOR bidtime AS bidtime - INTERVAL '5' SECOND
) WITH (
'connector' = 'kafka',
'topic' = 'bid',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'earliest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic bid
C,4.00,2020-04-15 08:05:01
A,2.00,2020-04-15 08:07:01
D,5.00,2020-04-15 08:09:01
B,3.00,2020-04-15 08:11:01
E,1.00,2020-04-15 08:13:01
F,6.00,2020-04-15 08:17:01
SELECT * FROM TABLE(
CUMULATE(TABLE bid, DESCRIPTOR(bidtime), INTERVAL '2' MINUTES, INTERVAL '10' MINUTES)
);
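As with TUMBLE and HOP, the CUMULATE windows can be aggregated; the sketch below follows the same pattern as the earlier window aggregations.
-- cumulative average price: a result is emitted every 2 minutes, always covering the data since the window start
SELECT
window_start,
window_end,
avg(price) as avg_price
FROM
TABLE(
CUMULATE(TABLE bid, DESCRIPTOR(bidtime), INTERVAL '2' MINUTES, INTERVAL '10' MINUTES)
)
group by
window_start,
window_end;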
4. Session Window
CREATE TABLE bid_proctime (
item STRING,
price DECIMAL(10, 2),
proctime AS PROCTIME()
) WITH (
'connector' = 'kafka',
'topic' = 'bid_proctime',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'earliest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic bid_proctime
C,4.00
C,2.00
C,5.00
C,3.00
C,1.00
C,6.00
-- Compute the total amount per item in real time; a session window closes after a 5-second gap with no new data
select
item,
SESSION_START(proctime,INTERVAL '5' SECOND) as session_start,
SESSION_END(proctime,INTERVAL '5' SECOND) as session_end,
sum(price) as sum_price
from
bid_proctime
group by
item,
SESSION(proctime,INTERVAL '5' SECOND);
6. GROUP BY
-- Create a datagen source table
CREATE TABLE words_datagen (
word STRING
) WITH (
'connector' = 'datagen',
'rows-per-second'='50000', -- number of rows generated per second
'fields.word.length'='5'
);
CREATE TABLE blackhole_table (
word STRING,
num BIGINT
) WITH (
'connector' = 'blackhole'
);
-- Group aggregation has to keep its previous results in state.
-- If the state grows without bound, checkpoints take longer and longer; if a checkpoint times out and fails, the job can fail as well (a mini-batch sketch follows the query below).
insert into blackhole_table
select
word,
count(1) as num
from
words_datagen /*+ OPTIONS('fields.word.length'='7') */
group by
word;
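Besides the state TTL shown in the SELECT DISTINCT section, mini-batch aggregation can reduce the per-record state access cost. The sketch below assumes configuration through the SQL client; the option keys are standard Flink table options, not something specific to these notes.
SET 'table.exec.mini-batch.enabled' = 'true'; -- buffer incoming records and update state in small batches
SET 'table.exec.mini-batch.allow-latency' = '5 s'; -- flush a batch at least every 5 seconds
SET 'table.exec.mini-batch.size' = '5000'; -- or as soon as 5000 records have been buffered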
7. OVER
1. sum / max / min / avg / count
CREATE TABLE `order` (
order_id STRING,
amount DECIMAL(10, 2),
product STRING,
order_time TIMESTAMP(3),
WATERMARK FOR order_time AS order_time - INTERVAL '5' SECOND
) WITH (
'connector' = 'kafka',
'topic' = 'order',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'earliest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic order
1,4.00,001,2020-04-15 08:05:01
2,2.00,001,2020-04-15 08:07:01
3,5.00,001,2020-04-15 08:09:01
4,3.00,001,2020-04-15 08:11:01
5,1.00,001,2020-04-15 08:13:01
6,6.00,001,2020-04-15 08:17:01
6,6.00,001,2020-04-15 08:20:01
6,6.00,001,2020-04-15 08:21:01
6,10.00,001,2020-04-15 08:21:02
6,11.00,001,2020-04-15 08:21:03
6,12.00,001,2020-04-15 08:21:04
-- 1. Compute in real time the running total amount per product and append it to every row.
-- Issues in streaming mode:
-- a. sum over must be ordered by time ascending: rows arrive one by one, so only a running (cumulative) sum is possible, not a global sum.
-- b. Only ascending time ordering is supported; ordering by any other field would require re-sorting on every incoming row, which is too expensive.
select
order_id,
amount,
product,
order_time,
sum(amount) over(
partition by product
order by order_time
)
from
`order`
;
-- 2. The same running total per product, but only over the last 10 minutes of data
select
order_id,
amount,
product,
order_time,
sum(amount) over(
partition by product
order by order_time
-- from 10 minutes before the current row up to the current row
RANGE BETWEEN INTERVAL '10' MINUTES PRECEDING AND CURRENT ROW
)
from
`order`
;
-- 3. The same running total per product, but only over the last 5 rows
select
order_id,
amount,
product,
order_time,
sum(amount) over(
partition by product
order by order_time
-- from the 4 preceding rows up to the current row
ROWS BETWEEN 4 PRECEDING AND CURRENT ROW
)
from
`order`
;
-- 4. Compute in real time, per product, the maximum amount over the last 5 rows and append it to every row
select
order_id,
amount,
product,
order_time,
max(amount) over(
partition by product
order by order_time
-- from the 4 preceding rows up to the current row
ROWS BETWEEN 4 PRECEDING AND CURRENT ROW
)
from
`order`
;
2. row_number
-- If the rank column is added without an outer filter, the ORDER BY may only be a time attribute in ascending order
select
order_id,
amount,
product,
order_time,
row_number() over(partition by product order by order_time) as r
from
`order`
;
-- Top-N in real time: the two highest-amount orders per product
-- After applying the Top-N filter, far fewer rows per key need to be kept and re-ranked, so the computation cost drops
select *
from (
select
order_id,
amount,
product,
order_time,
row_number() over(partition by product order by amount desc) as r
from
`order`
)
where r <= 2;
8. ORDER BY
-- In streaming mode, ORDER BY has to sort by a time attribute in ascending order
select * from
`order`
order by
order_time, amount;
-- With a LIMIT the cost is no longer high, so ordering by an ordinary (non-time) field is allowed
select * from
`order`
order by
amount
limit 2;
9. Pattern Detection (CEP)
1. Example 1
First version of the alerting program: for an account, if a transaction of less than $1 is immediately followed by a transaction of more than $500, emit an alert.
CREATE TABLE tran (
id STRING,
amount DECIMAL(10, 2),
proctime as PROCTIME()
) WITH (
'connector' = 'kafka',
'topic' = 'tran',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'latest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic tran
1,4.00
1,2.00
1,5.00
1,0.90
1,600.00
1,4.00
1,2.00
1,0.10
1,200.00
1,700.00
-- MATCH_RECOGNIZE (pattern detection)
-- Matches rows in the stream against a pattern; once the data satisfies the defined rules, the matched result is returned.
-- Version 1: for an account, if a transaction of less than $1 is immediately followed by a transaction of more than $500, emit an alert.
SELECT *
FROM tran
MATCH_RECOGNIZE (
PARTITION BY id -- partition key
ORDER BY proctime -- sort field; must be a time attribute in ascending order
MEASURES -- similar to a SELECT list
A.amount as min_amount,
A.proctime as min_proctime,
B.amount as max_amount,
B.proctime as max_proctime
PATTERN (A B) -- define the pattern
DEFINE -- define the condition for each pattern variable
A as amount < 1,
B as amount > 500
) AS T;
-- Version 2: same alert as above, but the two transactions must occur within a bounded amount of time (here 5 seconds, enforced with WITHIN).
SELECT *
FROM tran
MATCH_RECOGNIZE (
PARTITION BY id -- partition key
ORDER BY proctime -- sort field; must be a time attribute in ascending order
MEASURES -- similar to a SELECT list
A.amount as min_amount,
A.proctime as min_proctime,
B.amount as max_amount,
B.proctime as max_proctime
PATTERN (A B) WITHIN INTERVAL '5' SECOND -- define the pattern, adding a time constraint: the match must complete within 5 seconds
DEFINE -- define the condition for each pattern variable
A as amount < 1,
B as amount > 500
) AS T;
-- Version 3: for an account, if three consecutive transactions of less than $1 are followed by one transaction of more than $500, emit an alert.
SELECT *
FROM tran
MATCH_RECOGNIZE (
PARTITION BY id -- partition key
ORDER BY proctime -- sort field; must be a time attribute in ascending order
MEASURES -- similar to a SELECT list
A.amount as a_amount, -- the last matched A row (default)
min(A.amount) as min_a_amount, -- minimum over the A rows
max(A.amount) as max_a_amount, -- maximum over the A rows
sum(A.amount) as sum_a_amount, -- sum over the A rows
avg(A.amount) as avg_a_amount, -- average over the A rows
FIRST(A.amount) AS first_a_amount, -- the first A row
LAST(A.amount) AS LAST_a_amount, -- the last A row
B.amount as b_amount
PATTERN (A{3} B) -- define the pattern: exactly three A events followed by one B
DEFINE -- define the condition for each pattern variable
A as amount < 1,
B as amount > 500
) AS T;
-- Sample input that matches the pattern above:
1,0.90
1,0.10
1,0.20
1,600.00
2. Example 2
Find periods during which the price of a single stock keeps falling.
CREATE TABLE ticker (
symbol STRING,
rowtime TIMESTAMP(3), -- the time field
price DECIMAL(10, 2) ,
tax DECIMAL(10, 2),
-- specify the time field and the watermark generation strategy
WATERMARK FOR rowtime AS rowtime
) WITH (
'connector' = 'kafka',
'topic' = 'ticker',
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
'properties.group.id' = 'testGroup',
'scan.startup.mode' = 'latest-offset',
'format' = 'csv',
'csv.ignore-parse-errors' = 'true' -- skip the current row when it cannot be parsed (dirty data)
);
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic ticker
ACME,2024-06-04 10:00:00,12,1
ACME,2024-06-04 10:00:01,17,2
ACME,2024-06-04 10:00:02,19,1
ACME,2024-06-04 10:00:03,21,3
ACME,2024-06-04 10:00:04,25,2
ACME,2024-06-04 10:00:05,18,1
ACME,2024-06-04 10:00:06,15,1
ACME,2024-06-04 10:00:07,14,2
ACME,2024-06-04 10:00:08,24,2
ACME,2024-06-04 10:00:09,25,2
ACME,2024-06-04 10:00:10,19,1
-- Find periods during which the price of a single stock keeps falling
select * from
ticker
MATCH_RECOGNIZE (
PARTITION BY symbol -- partition key
ORDER BY rowtime -- sort field; must be a time attribute in ascending order
MEASURES -- similar to a SELECT list
A.price as a_price,
FIRST(B.price) as FIRST_b_price,
LAST(B.price) as last_b_price,
C.price as c_price
AFTER MATCH SKIP PAST LAST ROW -- resume matching at the row after the last row of the current match
PATTERN (A B+ C) -- define the pattern: one A, one or more B, then one C
DEFINE -- define the conditions
-- for the first B compare against A; for each subsequent B compare against the previous B
B as (LAST(B.price,1)is null and B.price < A.price) or B.price < LAST(B.price,1),
C as C.price > LAST(B.price)
) AS T;
Case Study
1. Real-Time Road Congestion Statistics
Every 1 minute, compute the traffic flow and average speed of each road over the last 15 minutes (sliding event-time window).
CREATE TABLE cars (
car STRING,
city_code STRING,
county_code STRING,
card BIGINT,
camera_id STRING,
orientation STRING,
road_id BIGINT,
`time` BIGINT,
speed DOUBLE,
event_time as TO_TIMESTAMP(FROM_UNIXTIME(`time`)), -- computed column derived from the epoch timestamp
-- specify the event time and the watermark
WATERMARK FOR event_time AS event_time - INTERVAL '5' SECOND
) WITH (
'connector' = 'kafka',
'topic' = 'cars', -- topic to read
'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092', -- Kafka broker list
'properties.group.id' = 'testGroup', -- consumer group
'scan.startup.mode' = 'earliest-offset', -- startup position for reading
'format' = 'json', -- data format
'json.ignore-parse-errors' ='true'
);
-- Every 1 minute, compute the traffic flow and average speed of each road over the last 15 minutes (sliding event-time window)
select
road_id,
HOP_start(event_time,INTERVAL '1' MINUTES, INTERVAL '15' MINUTES) as win_start,
HOP_end(event_time,INTERVAL '1' MINUTES, INTERVAL '15' MINUTES) as win_end,
count(distinct car) as flow,
avg(speed) as avg_speed
from
cars
group by
road_id,
HOP(event_time,INTERVAL '1' MINUTES, INTERVAL '15' MINUTES);
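The same statistics can also be written with the HOP table-valued function from section 5; this sketch is equivalent to the group-window version above.
select
road_id,
window_start,
window_end,
count(distinct car) as flow,
avg(speed) as avg_speed
from
TABLE(
HOP(TABLE cars, DESCRIPTOR(event_time), INTERVAL '1' MINUTES, INTERVAL '15' MINUTES)
)
group by
road_id,
window_start,
window_end;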