总结项目中的常用 flink sql 开发模板,供大家参考。
主要分三大步:
- 创建表执行环境。
- 创建输入表、输出表,并进行数据的逻辑处理。
- 任务执行。
其中第二步是重点,细分三小步(也就是三个 tableEnv.executeSql)
- tableEnv.executeSql(inputTableDDL) // 1) create inputTable
- tableEnv.executeSql(outputTableDDL) // 2) create outputTable
- tableEnv.executeSql( ... )// 3) data process and data sink
package template
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
/**
 * Flink SQL job template: reads JSON messages from a Kafka topic and writes
 * them into a MySQL table through the JDBC connector.
 *
 * Structure:
 *   1. create the table execution environment;
 *   2. create the input/output tables and run the SQL pipeline;
 *   3. (no explicit env.execute — see note below).
 *
 * @author shipfei
 * @since 2021/3/15 10:26
 * motto: Saying and doing are two different things.
 */
object MyStructure03 {
  def main(args: Array[String]): Unit = {
    // 1. Create the table execution environment (Blink planner, streaming mode).
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val settings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()
    val tableEnv = StreamTableEnvironment.create(env, settings)

    // 2. Define the input table (Kafka source) and output table (JDBC sink).
    // `type` is backtick-quoted in both DDLs and in the SELECT: it is a SQL
    // keyword, and quoting keeps the two schemas consistent.
    val inputTableDDL: String =
      """
        |CREATE TABLE inputTable (
        |  `magic` STRING,
        |  `type` STRING,
        |  `message` STRING
        |) WITH (
        |  'connector' = 'kafka',
        |  'topic' = 'QlikTest',
        |  'properties.bootstrap.servers' = 'localhost:9092',
        |  'scan.startup.mode' = 'latest-offset',
        |  'format' = 'json',
        |  'json.fail-on-missing-field' = 'false',
        |  'json.ignore-parse-errors' = 'true'
        |)
        |""".stripMargin
    val outputTableDDL: String =
      """
        |CREATE TABLE outputTable (
        |  `magic` STRING,
        |  `type` STRING,
        |  `message` STRING
        |) WITH (
        |  'connector' = 'jdbc',
        |  'url' = 'jdbc:mysql://localhost:3306/my_auto',
        |  'table-name' = 'w_qlik_msg',
        |  'driver' = 'com.mysql.cj.jdbc.Driver',
        |  'username' = 'root',
        |  'password' = '123456'
        |)
        |""".stripMargin
    tableEnv.executeSql(inputTableDDL)  // 1) create inputTable
    tableEnv.executeSql(outputTableDDL) // 2) create outputTable

    // 3) Data processing + sink. executeSql(INSERT ...) submits the job
    //    itself, so no env.execute() call follows: the DataStream topology
    //    is empty and calling env.execute() here would fail with
    //    "No operators defined in streaming topology".
    tableEnv.executeSql(
      """
        |insert into outputTable
        |select `magic`, `type`, `message` from inputTable
        |""".stripMargin)
  }
}