1、kafka中样例数据
{
"id":1238123899121,
"name":"asdlkjasjkdla998y1122",
"date":"1990-10-14",
"obj":{
"time1":"12:12:43Z",
"str":"sfasfafs",
"lg":2324342345
},
"arr":[
{
"f1":"f1str11",
"f2":134
},
{
"f1":"f1str22",
"f2":555
}
],
"time":"12:12:43Z",
"timestamp":"1990-10-14T12:12:43Z",
"map":{
"flink":123
},
"mapinmap":{
"inner_map":{
"key":234
}
}
}
2、建表语句
-- Kafka-backed source table; column types mirror the JSON sample above.
-- NOTE(review): this uses the legacy 'connector.*' / 'format.type' property
-- style (Flink <= 1.10). Newer Flink versions use flat options such as
-- 'connector' = 'kafka' and 'format' = 'json' — confirm against the target
-- Flink version before reuse.
CREATE TABLE json_source (
id BIGINT,
name STRING,
`date` DATE,  -- `date` is a reserved word, hence the backticks
obj ROW<time1 TIME,str STRING,lg BIGINT>,  -- nested JSON object -> ROW type
arr ARRAY<ROW<f1 STRING,f2 INT>>,  -- JSON array of objects -> ARRAY<ROW<...>>
`time` TIME,
`timestamp` TIMESTAMP(3),
`map` MAP<STRING,BIGINT>,  -- JSON object with uniform value type -> MAP
mapinmap MAP<STRING,MAP<STRING,INT>>,  -- nested JSON objects -> nested MAPs
proctime as PROCTIME()  -- computed processing-time column, not read from Kafka
) WITH (
'connector.type' = 'kafka',
'connector.topic' = 'test',
'connector.properties.zookeeper.connect' = 'localhost:2181',
'connector.properties.bootstrap.servers' = 'localhost:9092',
'connector.properties.group.id' = 'testGroup',
'connector.version'='universal',
'format.type' = 'json',
'connector.startup-mode'='latest-offset'  -- read only records produced after startup
);
3、查询
-- Demonstrates field access on nested types: ROW fields via dot notation,
-- ARRAY elements via [index] (1-based in Flink SQL), MAP values via [key].
SELECT
    id,
    name,
    `date`,
    obj.str,
    arr[1].f1,
    `map`['flink'],
    mapinmap['inner_map']['key']
FROM json_source;
-- 注意：Flink SQL 中数组下标（index）从 1 开始，而不是 0
4、插入数据
-- Writes one row covering every physical column of json_source.
-- Fixes vs. the original snippet:
--   * the select list must supply all 9 physical columns
--     (`date`, `time`, `timestamp` were missing, so the column counts
--     did not match and the statement would be rejected);
--   * MAP value types must match the DDL — `map` is MAP<STRING,BIGINT>
--     and mapinmap's inner map is MAP<STRING,INT>, so the values are
--     numbers, not the strings 'v1'/'v2'/'v';
--   * the computed column proctime is never written.
INSERT INTO json_source
SELECT
    CAST(111 AS BIGINT) AS id,
    'name' AS name,
    DATE '1990-10-14' AS `date`,
    ROW(CURRENT_TIME, 'ss', CAST(123 AS BIGINT)) AS obj,
    ARRAY[ROW('f', 1), ROW('s', 2)] AS arr,
    TIME '12:12:43' AS `time`,
    TIMESTAMP '1990-10-14 12:12:43' AS `timestamp`,
    MAP['k1', CAST(1 AS BIGINT), 'k2', CAST(2 AS BIGINT)] AS `map`,
    MAP['inner_map', MAP['k', 234]] AS mapinmap
;
借鉴于:
Flink实战之Flink SQL中的Map、Array、Row_优优我心的博客-CSDN博客_flinksql row