flink08 FlinkSQL(上)

1、sql命令行

# 启动flink集群
yarn-session.sh -d

# 进入sql命令行
sql-client.sh

# 1、创建表,数据源是kafka
-- Kafka-backed source table: one row per CSV record on the `students` topic.
CREATE TABLE students (
    id    STRING,
    name  STRING,
    age   INT,
    sex   STRING,
    clazz STRING
) WITH (
    'connector' = 'kafka',
    'topic' = 'students',                                                 -- source topic
    'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092', -- broker list
    'properties.group.id' = 'testGroup',                                  -- consumer group id
    'scan.startup.mode' = 'earliest-offset',                              -- start reading from the beginning
    'format' = 'csv'                                                      -- payload format
);

# 2、编写sql进行连续查询
-- Continuous query: running count of students per class,
-- updated as new records arrive on the topic.
SELECT
    clazz,
    COUNT(1) AS num
FROM students
GROUP BY clazz;


# 3、生产数据
kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic students

2、sql命令行打印结果模式

1、表格模式(table mode)默认

在内存中实体化结果,并将结果用规则的分页表格可视化展示出来。执行如下命令启用:

SET 'sql-client.execution.result-mode' = 'table';

2、变更日志模式(changelog mode)

不会实体化和可视化结果,而是由插入(+)和撤销(-)组成的持续查询产生结果流。

SET 'sql-client.execution.result-mode' = 'changelog';

3、Tableau模式(tableau mode)

更接近传统的数据库,会将执行的结果以制表的形式直接打在屏幕之上。具体显示的内容会取决于作业 执行模式的不同(execution.type):

SET 'sql-client.execution.result-mode' = 'tableau';

3、处理模式

1、流处理模式

1、可以用于处理有界流和无界流

2、流处理模式输出连续结果

3、流处理模式底层是持续流模型

SET 'execution.runtime-mode' = 'streaming'; 

2、批处理模式

1、批处理模式只能用于处理有界流

2、输出最终结果

3、底层是MapReduce模型

SET 'execution.runtime-mode' = 'batch'; 

4、连接器

1、kafka

kafka source

-- Create the Kafka source table --- unbounded stream.
-- TIMESTAMP(3): the time type Flink uses for event-time columns.
CREATE TABLE students_kafka (
    id STRING,
    name STRING,
    age INT,
    sex STRING,
    clazz STRING,
    `event_time` TIMESTAMP(3) METADATA FROM 'timestamp',-- Kafka record timestamp
    `partition` BIGINT METADATA VIRTUAL, -- Kafka partition the record came from
    `offset` BIGINT METADATA VIRTUAL,-- Kafka offset of the record
    -- Declare the event-time column and the watermark strategy
    -- (watermark trails the max event time by 5 seconds).
    WATERMARK FOR event_time AS event_time - INTERVAL '5' SECOND
) WITH (
    'connector' = 'kafka',
    'topic' = 'students',
    'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
    'properties.group.id' = 'testGroup',
    'scan.startup.mode' = 'earliest-offset',
    'format' = 'csv',
    'csv.ignore-parse-errors' ='true' -- skip rows that fail CSV parsing instead of erroring
);

-- Inspect the stream together with the Kafka metadata columns.
select id,name,event_time,`partition`,`offset` from students_kafka;

-- Count students per class every 5 seconds (tumbling event-time window).
select 
clazz,
TUMBLE_START(event_time,INTERVAL '5' SECOND) as win_start,
TUMBLE_END(event_time,INTERVAL '5' SECOND) as win_end,
count(id) as num
from 
students_kafka
group by
clazz,
-- tumbling event-time window of 5 seconds
TUMBLE(event_time,INTERVAL '5' SECOND);

kafka sink

-- Create the Kafka sink table.
-- NOTE(review): group.id and scan.startup.mode only take effect when this
-- table is read back (as in the SELECT below), not when writing to it.
CREATE TABLE students_kafka_sink (
    id STRING,
    name STRING
) WITH (
  'connector' = 'kafka',
  'topic' = 'id_name',
  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
  'properties.group.id' = 'testGroup',
  'scan.startup.mode' = 'earliest-offset',
  'format' = 'csv'
);

-- 1. Write an insert-only (append) result into the sink table.
insert into students_kafka_sink
select id,name from 
students_kafka;

-- Check the result (console consumer, or query the table back from Flink SQL).
kafka-console-consumer.sh --bootstrap-server  master:9092,node1:9092,node2:9092 --from-beginning --topic id_name
select * from students_kafka_sink;


-- 2. Write an updating (changelog) query result to Kafka.
-- An updating stream must be written with the canal-json format;
-- canal-json records carry the type of each change operation, e.g.:
-- {"data":[{"clazz":"理科六班","num":377}],"type":"INSERT"}

CREATE TABLE clazz_num (
    clazz STRING,
    num BIGINT
) WITH (
  'connector' = 'kafka',
  'topic' = 'clazz_num',
  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
  'properties.group.id' = 'testGroup',
  'scan.startup.mode' = 'earliest-offset',
  'format' = 'canal-json'
);


-- Continuous per-class count; each change is emitted as a canal-json record.
insert into clazz_num
select 
clazz,
count(1) as num
from 
students_kafka
group by 
clazz;


-- Check the result (console consumer, or query the table back from Flink SQL).
kafka-console-consumer.sh --bootstrap-server  master:9092,node1:9092,node2:9092 --from-beginning --topic clazz_num
select * from clazz_num;

2、JDBC

整合

# 将依赖包上传到flink的lib目录下
flink-connector-jdbc-1.15.2.jar
mysql-connector-java-5.1.47.jar

# 依赖更新后需要重启集群才会生效
yarn application -list
yarn application -kill [appid]
yarn-session.sh -d

sql-client.sh

mysql source

-- Create the JDBC source table --- bounded stream.
-- Column names and types must match the MySQL table exactly.
CREATE TABLE students_mysql (
    id int,
    name STRING,
    age INT,
    gender STRING,
    clazz STRING
) WITH (
    'connector' = 'jdbc',
    'url' = 'jdbc:mysql://master:3306/bigdata29',
    'table-name' = 'student',
    'username' ='root',
    'password' = '123456'
);

mysql sink

-- Create the MySQL sink table. A primary key constraint is required:
-- Flink uses it to upsert rows by key when the result stream contains updates.
CREATE TABLE clazz_num_mysql (
    clazz STRING,
    num BIGINT,
    PRIMARY KEY (clazz) NOT ENFORCED
) WITH (
    'connector' = 'jdbc',
    'url' = 'jdbc:mysql://master:3306/bigdata29?useUnicode=true&characterEncoding=UTF-8',
    'table-name' = 'clazz_num_mysql', -- must be created in MySQL beforehand
    'username' ='root',
    'password' = '123456'
);

-- Continuously upsert the per-class count into MySQL.
insert into clazz_num_mysql
select 
clazz,
count(1) as num
from 
students_kafka
group by 
clazz;

3、HDFS

hdfs source

-- Create an HDFS source table -- bounded stream.
CREATE TABLE students_hdfs (
    id int,
    name STRING,
    age INT,
    gender STRING,
    clazz STRING
) WITH (
    'connector' = 'filesystem',           -- required: connector type
    'path' = 'hdfs://master:9000/bigdata29/data/students.csv',  -- required: file or directory path
    'format' = 'csv'                    -- required: format for the filesystem connector
);

CREATE TABLE clazz_num_batch (
    clazz STRING,
    num BIGINT
) WITH (
    'connector' = 'filesystem',           -- required: connector type
    'path' = 'hdfs://master:9000/data/clazz_num_batch',  -- required: output path
    'format' = 'csv'                    -- required: format for the filesystem connector
);

-- Write the per-class count.
-- NOTE(review): csv can only hold append-only data, so this aggregate insert
-- is meant for batch execution mode -- confirm 'execution.runtime-mode'.
insert into clazz_num_batch
select 
clazz,
count(1) as num
from
students_hdfs
group by clazz;


-- Create an HDFS source table -- unbounded stream (directory monitoring).
CREATE TABLE students_hdfs_stream (
    id int,
    name STRING,
    age INT,
    gender STRING,
    clazz STRING
) WITH (
    'connector' = 'filesystem',           -- required: connector type
    'path' = 'hdfs://master:9000/data/students',  -- required: directory path
    'format' = 'csv',                    -- required: format for the filesystem connector
    'source.monitor-interval' = '5000' -- interval for rescanning the directory for new files
);

hdfs sink

-- Write an append-only result stream to HDFS.
CREATE TABLE students_hdfs_sink (
    id STRING,
    name STRING,
    age INT,
    sex STRING,
    clazz STRING
) WITH (
    'connector' = 'filesystem',           -- required: connector type
    'path' = 'hdfs://master:9000/data/students_sink',  -- required: output path
    'format' = 'csv'                    -- required: format for the filesystem connector
);
insert into students_hdfs_sink
select id,name,age,sex,clazz from students_kafka;

-- 2. Write an updating (changelog) result to HDFS.
-- canal-json is used because it can encode update/delete operations.
CREATE TABLE clazz_num_hdfs (
    clazz STRING,
    num BIGINT
) WITH (
    'connector' = 'filesystem',           -- required: connector type
    'path' = 'hdfs://master:9000/data/clazz_num',  -- required: output path
    'format' = 'canal-json'                    -- required: format for the filesystem connector
);

insert into clazz_num_hdfs
select 
clazz,
count(1) as num
from
students_kafka
group by clazz;

4、hbase

整合

# 将依赖包上传到flink的lib目录下
flink-sql-connector-hbase-2.2-1.15.2.jar

# 依赖更新后需要重启集群才会生效
yarn application -list
yarn application -kill [appid]
yarn-session.sh -d

sql-client.sh
-- 1. Create the target table in the HBase shell first (column family 'info').
create 'students_flink','info'

-- Create the HBase sink table.
CREATE TABLE students_hbase (
 id STRING, 
 info ROW<name STRING,age INT,sex STRING,clazz STRING>, -- columns inside the 'info' column family
 PRIMARY KEY (id) NOT ENFORCED -- the primary key is used as the HBase rowkey
) WITH (
 'connector' = 'hbase-2.2',
 'table-name' = 'students_flink',
 'zookeeper.quorum' = 'master:2181,node1:2181,node2:2181'
);

-- Pack the value columns into a ROW matching the column family.
insert into students_hbase
select 
id,
ROW(name,age,sex,clazz) as info
from students_kafka;

-- Check the result (from Flink SQL, then from the HBase shell).
select * from students_hbase;
scan 'students_flink'

5、datagen

用于生成测试数据,可以用于高性能测试

-- Create a datagen source table (randomly generated rows, useful for load tests).
CREATE TABLE students_datagen (
    id STRING,
    name STRING,
    age INT,
    sex STRING,
    clazz STRING
) WITH (
    'connector' = 'datagen',
    'rows-per-second'='5', -- number of rows generated per second
    'fields.id.length'='5', -- random string length for each field below
    'fields.name.length'='3',
    'fields.age.min'='1', -- age is drawn from [min, max]
    'fields.age.max'='100',
    'fields.sex.length'='1',
    'fields.clazz.length'='4'
);

6、print

在task manager中打印结果

-- Print sink: writes every incoming row to the task manager's stdout.
CREATE TABLE print_table (
    id STRING,
    name STRING,
    age INT,
    sex STRING,
    clazz STRING
) WITH (
 'connector' = 'print'
);

-- Alternative form: derive the schema from an existing table via LIKE.
-- NOTE(review): this reuses the name print_table and will fail if the table
-- above was already created in this session -- run one form or the other.
CREATE TABLE print_table 
WITH ('connector' = 'print')
-- copy the column definitions of the source table (its options excluded)
LIKE students_datagen (EXCLUDING ALL);

insert into print_table
select * from students_datagen;

7、BlackHole

用于高性能测试

-- BlackHole sink: discards every row; useful for throughput testing.
CREATE TABLE blackhole_table (
    id STRING,
    name STRING,
    age INT,
    sex STRING,
    clazz STRING
) WITH (
  'connector' = 'blackhole'
);

insert into blackhole_table
select * from students_datagen;

5、数据格式

1、csv

数据中字段的顺序需要和建表语句字段的顺序保持一致 (顺序映射) 默认按照逗号分割

-- csv format: fields map to columns by position (order must match the data);
-- the default delimiter is a comma.
CREATE TABLE students_csv (
    id STRING,
    name STRING,
    age INT,
    sex STRING,
    clazz STRING
) WITH (
    'connector' = 'kafka',
    'topic' = 'students', -- source topic
    'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092', -- broker list
    'properties.group.id' = 'testGroup', -- consumer group id
    'scan.startup.mode' = 'earliest-offset', -- start reading from the beginning
    'format' = 'csv', -- payload format
    'csv.field-delimiter' = ',' ,-- field delimiter
    'csv.ignore-parse-errors' ='true' -- skip rows that fail to parse
);

2、json

flink表中的字段和类型需要和json中保持一致(同名映射)

-- json format: table columns are matched to JSON keys by name and type.
CREATE TABLE cars (
    car STRING,
    city_code STRING,
    county_code STRING,
    card BIGINT,
    camera_id STRING,
    orientation STRING,
    road_id BIGINT,
    `time` BIGINT,
    speed DOUBLE
) WITH (
    'connector' = 'kafka',
    'topic' = 'cars', -- source topic
    'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092', -- broker list
    'properties.group.id' = 'testGroup', -- consumer group id
    'scan.startup.mode' = 'earliest-offset', -- start reading from the beginning
    'format' = 'json', -- payload format
    'json.ignore-parse-errors' ='true' -- skip records that fail to parse
);

3、canal-json

用于保存更新更改的结果流

-- canal-json format: persists an updating (changelog) result stream.
-- NOTE(review): clazz_num was already declared with the same definition in the
-- Kafka section above; re-running this CREATE fails if that table still exists.
CREATE TABLE clazz_num (
    clazz STRING,
    num BIGINT
) WITH (
  'connector' = 'kafka',
  'topic' = 'clazz_num',
  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
  'properties.group.id' = 'testGroup',
  'scan.startup.mode' = 'earliest-offset',
  'format' = 'canal-json'
);

-- Continuous per-class count; each change is emitted as a canal-json record.
insert into clazz_num
select 
clazz,
count(1) as num
from 
students_kafka
group by 
clazz;

  • 3
    点赞
  • 7
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值