Flink Real-Time Data Warehouse - DWD Layer (Trade Domain: Cart-Add with Dimension Degeneration) Template Code

Introduction

This post collects template code and notes for double-stream (two-stream) joins in Flink.

Prerequisites

intervalJoin

 

public class DoubleJoin {
    public static void main(String[] args) throws Exception {
        // Get the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();


        DataStreamSource<String> socketTextStream = env.socketTextStream("master", 9999);

        // Set parallelism to 1 so the effect is visible: with more than one parallel subtask,
        // some subtasks receive no data and their watermark stays at negative infinity.
        // Since an operator's watermark is the minimum over its input partitions,
        // the overall watermark would then never advance.
        env.setParallelism(1);

        // The first tuple field is the key, the second represents the event time
        SingleOutputStreamOperator<Tuple2<String, Long>> initData = socketTextStream.map(new MapFunction<String, Tuple2<String, Long>>() {
            @Override
            public Tuple2<String, Long> map(String value) throws Exception {
                String[] s = value.split(" ");
                // E.g., for console input "a 15", multiply 15 by 1000 to get the event timestamp in milliseconds
                return Tuple2.of(s[0], Long.parseLong(s[1]) * 1000L);
            }
        });

        // Assign watermarks
        SingleOutputStreamOperator<Tuple2<String, Long>> watermarks = initData.assignTimestampsAndWatermarks(
                // Insert watermarks for a possibly out-of-order stream; the allowed out-of-orderness here is 0 seconds
                WatermarkStrategy.<Tuple2<String, Long>>forBoundedOutOfOrderness(Duration.ofSeconds(0))
                        .withTimestampAssigner(new SerializableTimestampAssigner<Tuple2<String, Long>>() {
                            @Override
                            public long extractTimestamp(Tuple2<String, Long> element, long recordTimestamp) {
                                //Use the tuple's second field as the event time
                                return element.f1;
                            }
                        })
        );


        DataStreamSource<String> socketTextStream2 = env.socketTextStream("master", 9998);

        // (Parallelism was already set to 1 above.)

        // The first tuple field is the key, the second represents the event time
        SingleOutputStreamOperator<Tuple2<String, Long>> initData2 = socketTextStream2.map(new MapFunction<String, Tuple2<String, Long>>() {
            @Override
            public Tuple2<String, Long> map(String value) throws Exception {
                String[] s = value.split(" ");
                // E.g., for console input "a 15", multiply 15 by 1000 to get the event timestamp in milliseconds
                return Tuple2.of(s[0], Long.parseLong(s[1]) * 1000L);
            }
        });

        // Assign watermarks
        SingleOutputStreamOperator<Tuple2<String, Long>> watermarks2 = initData2.assignTimestampsAndWatermarks(
                // Insert watermarks for a possibly out-of-order stream; the allowed out-of-orderness here is 0 seconds
                WatermarkStrategy.<Tuple2<String, Long>>forBoundedOutOfOrderness(Duration.ofSeconds(0))
                        .withTimestampAssigner(new SerializableTimestampAssigner<Tuple2<String, Long>>() {
                            @Override
                            public long extractTimestamp(Tuple2<String, Long> element, long recordTimestamp) {
                                //Use the tuple's second field as the event time
                                return element.f1;
                            }
                        })
        );

        // Interval-join the two keyed streams
        SingleOutputStreamOperator<Tuple2<String, Long>> resultProcess = watermarks.keyBy(data -> data.f0)
                .intervalJoin(watermarks2.keyBy(data -> data.f0))
                .between(Time.seconds(-5), Time.seconds(5))
                .lowerBoundExclusive()
                .process(new ProcessJoinFunction<Tuple2<String, Long>, Tuple2<String, Long>, Tuple2<String, Long>>() {
                    @Override
                    public void processElement(Tuple2<String, Long> left, Tuple2<String, Long> right, Context ctx, Collector<Tuple2<String, Long>> out) throws Exception {
                        System.out.println("left: " + left);
                        System.out.println("right: " + right);
                        out.collect(Tuple2.of(left.f0, left.f1 + right.f1));
                    }
                });

        resultProcess.print("result: ");

        env.execute();
    }
}

Input

nc -lk 9999
a 1

Input

nc -lk 9998
a 2

Output

left: (a,1000)
right: (a,2000)
result: > (a,3000)

Flink SQL

Inner join

<scala.binary.version>2.12</scala.binary.version>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
    <version>${flink.version}</version>
</dependency>

Test code

public class DoubleJoin {
    public static void main(String[] args) throws Exception {
        // Get the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();


        DataStreamSource<String> socketTextStream = env.socketTextStream("master", 9999);

        // Set parallelism to 1 so the example is easier to observe
        env.setParallelism(1);

        // The first tuple field is the key, the second is a timestamp value in milliseconds
        SingleOutputStreamOperator<Tuple2<String, Long>> initData = socketTextStream.map(new MapFunction<String, Tuple2<String, Long>>() {
            @Override
            public Tuple2<String, Long> map(String value) throws Exception {
                String[] s = value.split(" ");
                // E.g., for console input "a 15", multiply 15 by 1000 to get the timestamp in milliseconds
                return Tuple2.of(s[0], Long.parseLong(s[1]) * 1000L);
            }
        });


        DataStreamSource<String> socketTextStream2 = env.socketTextStream("master", 9998);

        // (Parallelism was already set to 1 above.)

        // The first tuple field is the key, the second is a timestamp value in milliseconds
        SingleOutputStreamOperator<Tuple2<String, Long>> initData2 = socketTextStream2.map(new MapFunction<String, Tuple2<String, Long>>() {
            @Override
            public Tuple2<String, Long> map(String value) throws Exception {
                String[] s = value.split(" ");
                // E.g., for console input "a 15", multiply 15 by 1000 to get the timestamp in milliseconds
                return Tuple2.of(s[0], Long.parseLong(s[1]) * 1000L);
            }
        });


        // Use Flink SQL
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        tableEnv.createTemporaryView("t1", initData);
        tableEnv.createTemporaryView("t2", initData2);

        tableEnv.sqlQuery("select * from t1 join t2 on t1.f0=t2.f0")
                .execute()
                .print();
    }
}

Set an expiration time for the join state; if it is not set, the state is kept forever by default:

tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(10));
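
For reference, a minimal sketch of where this call sits relative to the query (it must be applied to the TableConfig before the query; state for a key is then dropped roughly 10 seconds after that key was last accessed, so very late rows for that key no longer find join partners):

tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(10));

        tableEnv.sqlQuery("select * from t1 join t2 on t1.f0=t2.f0")
                .execute()
                .print();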

Input

nc -lk 9999
a 1
a 1
a 1
a 2

Input

nc -lk 9998
a 1
a 1
a 1
a 2

Result

| +I |                              a |                 1000 |                              a |                 2000 |
| +I |                              a |                 1000 |                              a |                 2000 |
| +I |                              a |                 1000 |                              a |                 2000 |
| +I |                              a |                 2000 |                              a |                 2000 |

left join

tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(10));

        tableEnv.sqlQuery("select * from t1 left join t2 on t1.f0=t2.f0")
                .execute()
                .print();

Explanation

With a left outer join, the left table is the main (driving) table. Once a right-side row has been joined with a left-side row, that left row's state survival time (10 seconds here) keeps being refreshed, i.e. the timer restarts every time the row is read/matched.

right join

tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(10));

        tableEnv.sqlQuery("select * from t1 right join t2 on t1.f0=t2.f0")
                .execute()
                .print();

Explanation

With a right outer join it is symmetric: whenever the left side joins with and updates a right-side row, that right-side row's state survival time keeps being refreshed.

full join
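
A minimal sketch for the full outer join, assuming the same t1/t2 views and 10-second idle-state retention as above; it follows the same pattern as the left/right examples:

tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(10));

        tableEnv.sqlQuery("select * from t1 full join t2 on t1.f0 = t2.f0")
                .execute()
                .print();

With a full outer join both sides act as driving tables, so the state survival time of rows on either side is refreshed when they are accessed.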

Lookup Join (MySQL)

Experiment

In the program below, every incoming record triggers a lookup against MySQL to join the dimension row.

POJO

@Data
@AllArgsConstructor
@NoArgsConstructor
public class Event {
    private String id;
    private String base_dic_id;
}

Experiment program

public class LookUpTest {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStreamSource<String> socketTextStream = env.socketTextStream("master", 9999);

        // Set parallelism to 1 so the example is easier to observe
        env.setParallelism(1);

        // Parse "id base_dic_id" lines from the socket into Event objects
        SingleOutputStreamOperator<Event> sockTest = socketTextStream.map(new MapFunction<String, Event>() {
            @Override
            public Event map(String value) throws Exception {
                String[] initData = value.split(" ");
                String id = initData[0];
                String baseDicId = initData[1];
                return new Event(id, baseDicId);
            }
        });

        // Use Flink SQL
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(env);
        // Convert the stream into a Table
        Table dataStream = tableEnvironment.fromDataStream(sockTest,
                // The field names below must match the fields of the POJO
                $("id"),
                $("base_dic_id"),
                // A lookup join requires a processing-time attribute
                $("pt").proctime());

        // Register the table as the main (probe) table
        tableEnvironment.createTemporaryView("t1",dataStream);
//        tableEnvironment.sqlQuery("select * from t1")
//                        .execute().print();

        // Create the lookup table used for dimension degeneration
        TableResult tableResult = tableEnvironment.executeSql(
                // The columns below must match the column names in the MySQL table
                "CREATE TEMPORARY TABLE base_dic( " +
                        "  dic_code STRING, " +
                        "  dic_name STRING " +
                        ") WITH ( " +
                        "  'connector' = 'jdbc', " +
                        "  'username' = 'root', " +
                        "  'password' = 'root', " +
                        // Enable the lookup cache; faster, but cached rows may be stale
                        "  'lookup.cache.max-rows' = '10'," +
                        "  'lookup.cache.ttl' = '1 hour'," +
                        "  'url' = 'jdbc:mysql://master:3306/gmall', " +
                        "  'table-name' = 'base_dic' " +
                        ")"
        );

        tableEnvironment.sqlQuery("SELECT t1.id, t1.base_dic_id,t2.dic_code,t2.dic_name  " +
                        "FROM t1  " +
                // FOR SYSTEM_TIME AS OF t1.pt is required (pt is the processing-time attribute $("pt").proctime() defined above)
                        "JOIN base_dic FOR SYSTEM_TIME AS OF t1.pt AS t2  " +
                        "ON t1.id = t2.dic_code")
                .execute()
                .print();
    }
}

The base_dic table in the database contains (among others) a row with dic_code = 10 and dic_name = 单据状态.

Console input

nc -lk 9999
10 fkadjsf
10 fhkd

Output

+----+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
| op |                             id |                    base_dic_id |                       dic_code |                       dic_name |
+----+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
| +I |                             10 |                           fhkd |                             10 |                       单据状态 |

Helper utility class

public class MysqlUtil {
    public static String getBaseDicLookUpDDL() {

        return "create table `base_dic`(\n" +
                "`dic_code` string,\n" +
                "`dic_name` string,\n" +
                "`parent_code` string,\n" +
                "`create_time` timestamp,\n" +
                "`operate_time` timestamp,\n" +
                "primary key(`dic_code`) not enforced\n" +
                ")" + MysqlUtil.mysqlLookUpTableDDL("base_dic");
    }

    public static String mysqlLookUpTableDDL(String tableName) {

        String ddl = "WITH (\n" +
                "'connector' = 'jdbc',\n" +
                "'url' = 'jdbc:mysql://hadoop102:3306/gmall',\n" +
                "'table-name' = '" + tableName + "',\n" +
                "'lookup.cache.max-rows' = '10',\n" +
                "'lookup.cache.ttl' = '1 hour',\n" +
                "'username' = 'root',\n" +
                "'password' = '000000',\n" +
                "'driver' = 'com.mysql.cj.jdbc.Driver'\n" +
                ")";
        return ddl;
    }
}
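
A minimal usage sketch of this helper (names mirror the LookUpTest program above): register the lookup table once with executeSql, after which it can be joined with FOR SYSTEM_TIME AS OF against a probe table that has a processing-time attribute.

        // Register the base_dic lookup table via the helper class ...
        tableEnvironment.executeSql(MysqlUtil.getBaseDicLookUpDDL());

        // ... then join it exactly as in the LookUpTest program above:
        // JOIN base_dic FOR SYSTEM_TIME AS OF t1.pt AS t2 ON t1.id = t2.dic_code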

Lookup Join (Kafka)

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
        </dependency>

The JSON that Maxwell writes to the Kafka topic topic_db looks like this:

{
    "database": "gmall",
    "table": "base_dic",
    "type": "update",
    "ts": 1658998497,
    "xid": 1602,
    "commit": true,
    "data": {
        "dic_code": "1006",
        "dic_name": "退款完成",
        "parent_code": "10",
        "create_time": null,
        "operate_time": null
    },
    "old": {
        "dic_code": "10066"
    }
}

Utility class (KafkaUtil)

public class KafkaUtil {
    private final static String BOOTSTRAP_SERVERS="master:9092";

    /**
     * Kafka source DDL fragment (WITH clause)
     *
     * @param topic   source topic
     * @param groupId consumer group id
     * @return the assembled WITH clause for a Kafka source table
     */
    public static String getKafkaDDL(String topic, String groupId) {

        return " with ('connector' = 'kafka', " +
                " 'topic' = '" + topic + "'," +
                " 'properties.bootstrap.servers' = '" + BOOTSTRAP_SERVERS + "', " +
                " 'properties.group.id' = '" + groupId + "', " +
                " 'format' = 'json', " +
                " 'scan.startup.mode' = 'group-offsets')";
    }

    /**
     * Kafka sink DDL fragment (WITH clause)
     *
     * @param topic target Kafka topic
     * @return the assembled WITH clause for an Upsert-Kafka sink table
     */
    public static String getUpsertKafkaDDL(String topic) {

        return "WITH ( " +
                "  'connector' = 'upsert-kafka', " +
                "  'topic' = '" + topic + "', " +
                "  'properties.bootstrap.servers' = '" + BOOTSTRAP_SERVERS + "', " +
                "  'key.format' = 'json', " +
                "  'value.format' = 'json' " +
                ")";
    }
}

Implementation

public class KafkaLookup {
    public static void main(String[] args) throws Exception {
        // TODO 1. Prepare the environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Set the session time zone of the Table API to local time
        tableEnv.getConfig().setLocalTimeZone(ZoneId.of("GMT+8"));

//        // TODO 2. Checkpointing and state backend settings (commented out here)
//        env.enableCheckpointing(3000L, CheckpointingMode.EXACTLY_ONCE);
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
//        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000L);
//        env.getCheckpointConfig().enableExternalizedCheckpoints(
//                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
//        );
//        env.setRestartStrategy(RestartStrategies.failureRateRestart(
//                3, Time.days(1), Time.minutes(1)
//        ));
//        env.setStateBackend(new HashMapStateBackend());
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck");
//        System.setProperty("HADOOP_USER_NAME", "atguigu");


        // TODO 3. Read business data from Kafka and expose it as a Flink SQL table
        tableEnv.executeSql("" +
                "create table topic_db(   " +
                "`database` string,   " +
                "`table` string,   " +
                "`type` string,   " +
                "`data` map<string, string>,   " +
                "`old` map<string, string>,   " +
                "`ts` string,   " +
                "`proc_time` as PROCTIME()   " +
                // consume the topic_db data
                ")" + KafkaUtil.getKafkaDDL("topic_db", "dwd_trade_cart_add"));

        // Print for testing
        tableEnv.sqlQuery("select * from topic_db")
                .execute()
                .print();

        env.execute();

    }
}

Output

+----+--------------------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+-------------------------+
| op |                       database |                          table |                           type |                           data |                            old |                             ts |               proc_time |
+----+--------------------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+-------------------------+
| +I |                          gmall |                       base_dic |                         update | {parent_code=10, dic_code=1... |               {dic_code=10066} |                     1658998497 | 2022-07-28 16:54:58.077 |

Trade Domain Cart-Add: Dimension Degeneration with Lookup Join

Utility classes

The KafkaUtil class is identical to the one shown in the Lookup Join (Kafka) section above.

public class MysqlUtils {
    public static String getBaseDicLookUpDDL() {

        return "create table `base_dic`( " +
                "`dic_code` string, " +
                "`dic_name` string, " +
                "`parent_code` string, " +
                "`create_time` timestamp, " +
                "`operate_time` timestamp, " +
                "primary key(`dic_code`) not enforced " +
                ")" + MysqlUtils.mysqlLookUpTableDDL("base_dic");
    }

    public static String mysqlLookUpTableDDL(String tableName) {

        String ddl = "WITH ( " +
                "'connector' = 'jdbc', " +
                "'url' = 'jdbc:mysql://hadoop102:3306/gmall', " +
                "'table-name' = '" + tableName + "', " +
                "'lookup.cache.max-rows' = '10', " +
                "'lookup.cache.ttl' = '1 hour', " +
                "'username' = 'root', " +
                "'password' = '000000', " +
                "'driver' = 'com.mysql.cj.jdbc.Driver' " +
                ")";
        return ddl;
    }
}

Application implementation

public class DwdTradeCartAdd {
    public static void main(String[] args) throws Exception {

        // TODO 1. Prepare the environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Set the session time zone of the Table API to local time
        tableEnv.getConfig().setLocalTimeZone(ZoneId.of("GMT+8"));

        // TODO 2. Checkpointing and state backend settings
        env.enableCheckpointing(3000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000L);
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
        );
        env.setRestartStrategy(RestartStrategies.failureRateRestart(
                3, Time.days(1), Time.minutes(1)
        ));
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck");
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // TODO 3. Read business data from Kafka and expose it as a Flink SQL table
        tableEnv.executeSql("" +
                "create table topic_db( " +
                "`database` string, " +
                "`table` string, " +
                "`type` string, " +
                "`data` map<string, string>, " +
                "`old` map<string, string>, " +
                "`ts` string, " +
                "`proc_time` as PROCTIME() " +
                ")" + KafkaUtil.getKafkaDDL("topic_db", "dwd_trade_cart_add"));

        // TODO 4. Read the cart table data (cart-add records)
        Table cartAdd = tableEnv.sqlQuery("" +
                "select " +
                "data['id'] id, " +
                "data['user_id'] user_id, " +
                "data['sku_id'] sku_id, " +
                "data['source_id'] source_id, " +
                "data['source_type'] source_type, " +
                "if(`type` = 'insert', " +
                "data['sku_num'],cast((cast(data['sku_num'] as int) - cast(`old`['sku_num'] as int)) as string)) sku_num, " +
                "ts, " +
                "proc_time " +
                "from `topic_db`  " +
                "where `table` = 'cart_info' " +
                "and (`type` = 'insert' " +
                "or (`type` = 'update'  " +
                "and `old`['sku_num'] is not null  " +
                "and cast(data['sku_num'] as int) > cast(`old`['sku_num'] as int)))");
        tableEnv.createTemporaryView("cart_add", cartAdd);

        // TODO 5. Create the MySQL lookup dictionary table
        tableEnv.executeSql(MysqlUtils.getBaseDicLookUpDDL());

        // TODO 6. Join the two tables to build the cart-add detail table
        Table resultTable = tableEnv.sqlQuery("select " +
                "cadd.id, " +
                "user_id, " +
                "sku_id, " +
                "source_id, " +
                "source_type, " +
                "dic_name source_type_name, " +
                "sku_num, " +
                "ts " +
                "from cart_add cadd " +
                "left join base_dic for system_time as of cadd.proc_time as dic " +
                "on cadd.source_type=dic.dic_code");
        tableEnv.createTemporaryView("result_table", resultTable);

        // TODO 7. Create the Upsert-Kafka dwd_trade_cart_add table
        tableEnv.executeSql("" +
                "create table dwd_trade_cart_add( " +
                "id string, " +
                "user_id string, " +
                "sku_id string, " +
                "source_id string, " +
                "source_type_code string, " +
                "source_type_name string, " +
                "sku_num string, " +
                "ts string, " +
                "primary key(id) not enforced " +
                ")" + KafkaUtil.getUpsertKafkaDDL("dwd_trade_cart_add"));

        // TODO 8. Write the join result into the Upsert-Kafka table
        tableEnv.executeSql("" +
                "insert into dwd_trade_cart_add select * from result_table");
    }
}
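
The only non-obvious part of step 4 is how sku_num is computed: an insert contributes its full quantity as a cart add, while an update contributes only the increase. A small standalone sketch of the same rule (plain Java, class and method names are hypothetical and only for illustration):

public class SkuNumDeltaSketch {
    // Mirrors the SQL expression:
    //   if(`type` = 'insert', data['sku_num'],
    //      cast(cast(data['sku_num'] as int) - cast(`old`['sku_num'] as int) as string))
    static String cartAddSkuNum(String type, String newSkuNum, String oldSkuNum) {
        if ("insert".equals(type)) {
            return newSkuNum;                      // the whole quantity is newly added
        }
        int delta = Integer.parseInt(newSkuNum) - Integer.parseInt(oldSkuNum);
        return String.valueOf(delta);              // only the increase counts as a cart add
    }

    public static void main(String[] args) {
        System.out.println(cartAddSkuNum("insert", "5", null)); // 5
        System.out.println(cartAddSkuNum("update", "5", "2"));  // 3
    }
}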

pom.xml

<build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>

            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>3.0.0</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

    <properties>
        <flink.version>1.13.0</flink.version>
        <java.version>1.8</java.version>
        <scala.binary.version>2.12</scala.binary.version>
        <slf4j.version>1.7.30</slf4j.version>
        <scala.version>2.12</scala.version>
    </properties>
    <dependencies>
        <!-- 引入 Flink 相关依赖-->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-cep_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <!-- 引入日志管理相关依赖-->
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>${slf4j.version}</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>${slf4j.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-to-slf4j</artifactId>
            <version>2.14.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.bahir</groupId>
            <artifactId>flink-connector-redis_2.11</artifactId>
            <version>1.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-elasticsearch7_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>


        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-jdbc_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.47</version>
        </dependency>


        <!-- This is the Java "bridge" dependency that connects the Table API to the
             underlying DataStream API (there are separate Java and Scala variants). -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java-bridge_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- To run Table API and SQL programs locally in the IDE, the following
             planner dependency is also required. -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>




        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>RELEASE</version>
            <scope>compile</scope>
        </dependency>



        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
        </dependency>

    </dependencies>

Logic diagram

 
