Flink CDC

Environment requirements:
Flink version: 1.12+
Java version: Java 8+
Git: https://github.com/ververica/flink-cdc-connectors

1 Dependencies


<dependencies>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-java</artifactId>
        <version>1.12.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-streaming-java_2.12</artifactId>
        <version>1.12.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-clients_2.12</artifactId>
        <version>1.12.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>5.1.49</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-table-planner-blink_2.12</artifactId>
        <version>1.12.0</version>
    </dependency>
    <dependency>
        <groupId>com.ververica</groupId>
        <artifactId>flink-connector-mysql-cdc</artifactId>
        <version>2.0.0</version>
    </dependency>
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>fastjson</artifactId>
        <version>1.2.75</version>
    </dependency>
</dependencies>
<build>
<plugins>
    <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-assembly-plugin</artifactId>
        <version>3.0.0</version>
        <configuration>
            <descriptorRefs>
                <descriptorRef>jar-with-dependencies</descriptorRef>
            </descriptorRefs>
        </configuration>
        <executions>
            <execution>
                <id>make-assembly</id>
                <phase>package</phase>
                <goals>
                    <goal>single</goal>
                </goals>
            </execution>
        </executions>
    </plugin>
</plugins>
</build>
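
With the assembly plugin bound to the package phase as above, a job jar that bundles all dependencies is produced by a standard Maven build; the exact jar name depends on your project's artifactId and version:

# build the fat jar; the result lands under target/ as <artifactId>-<version>-jar-with-dependencies.jar
mvn clean package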

2 DataStream job

  1. CdcStream
import com.ververica.cdc.connectors.mysql.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.DebeziumSourceFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CdcStream {

    public static void main(String[] args) throws Exception {

        // 1. Create the stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //2. Flink CDC stores the binlog reading position as state in the checkpoint (CK). To resume from where
        //   the job left off, the program must be started from a checkpoint or savepoint.
        //2.1 Enable checkpointing, taking a checkpoint every 15 seconds
        env.enableCheckpointing(15000L);
        //2.2 Set the checkpoint consistency semantics
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        //2.3 Retain the last checkpoint when the job is cancelled
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //2.4 Set the restart strategy used when recovering from a checkpoint
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000L));
        //2.5 Set the state backend
        env.setStateBackend(new FsStateBackend("hdfs://hadoop101:8020/user/develop/flink/cdc"));
        //2.6 Set the user name used to access HDFS
        System.setProperty("HADOOP_USER_NAME", "develop");

        //3. Create the Flink MySQL CDC source

        DebeziumSourceFunction<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("host")
                .port(3306)
                .databaseList("testdb") // set captured database
                .tableList("testdb.user") // set captured table
                .username("root")
                .password("root_5.5")
                .deserializer(new CustomerStringDebeziumDeserializationSchema()) // converts SourceRecord to a JSON String
                // Startup options; the following static factory methods are provided:
                // StartupOptions.initial()        on first start, snapshot the existing data (full load), then keep reading new changes from the binlog; without checkpoints, a restart repeats the full load
                // StartupOptions.earliest()       read from the very beginning of the binlog; note that if the binlog was enabled after the database was created, the CREATE statements may be missing and the job will fail; earliest requires the table creation to be present in the binlog
                // StartupOptions.latest()         read only from the latest binlog position
                // StartupOptions.specificOffset() read from a user-specified binlog offset
                // StartupOptions.timestamp()      read from a user-specified binlog timestamp
                .startupOptions(StartupOptions.initial())
                .build();

        //4. Read data from MySQL using the CDC source
        DataStreamSource<String> mysqlDS = env.addSource(mySqlSource);

        //5. Print the data
        mysqlDS.print("sql bin");

        // 6. Execute the job
        env.execute("cdc job");

    }
}
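
For reference, each change event is printed as one JSON line in the format produced by the deserializer shown next. Assuming testdb.user has columns id and name (hypothetical; the real columns depend on your table), an UPDATE is printed roughly like this (key order may vary; the "sql bin" prefix comes from the sink identifier passed to print):

sql bin> {"database":"testdb","tableName":"user","operate":"update","before":{"id":1,"name":"tom"},"after":{"id":1,"name":"jerry"}}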
  2. CustomerStringDebeziumDeserializationSchema: custom JSON deserializer
import com.alibaba.fastjson.JSONObject;
import com.ververica.cdc.debezium.DebeziumDeserializationSchema;
import io.debezium.data.Envelope;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.util.Collector;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;

public class CustomerStringDebeziumDeserializationSchema implements DebeziumDeserializationSchema<String> {

    /**
     * Convert each change record into a JSON string of the form:
     * {
     * "database":"",
     * "tableName":"",
     * "operate":"",
     * // data before the change
     * "before":{
     * ...
     * },
     * // data after the change
     * "after":{
     * ...
     * }
     * }
     **/
    @Override
    public void deserialize(SourceRecord sourceRecord, Collector<String> collector) throws Exception {
        // 1. Create the result JSON object
        JSONObject result = new JSONObject();
        // 2. Extract the database and table name (topic format: serverName.databaseName.tableName)
        String topic = sourceRecord.topic();
        String[] fields = topic.split("\\.");
        String database = fields[1];
        String tableName = fields[2];

        Struct struct = (Struct) sourceRecord.value();
        // 3. Extract the "before" data
        Struct before = struct.getStruct("before");
        JSONObject beforeJson = new JSONObject();
        if (before != null) {
            Schema beforeSchema = before.schema();
            for (Field field : beforeSchema.fields()) {
                Object beforeValue = before.get(field);
                beforeJson.put(field.name(), beforeValue);
            }
        }
        // 4. Extract the "after" data
        Struct after = struct.getStruct("after");
        JSONObject afterJson = new JSONObject();
        if (after != null) {
            Schema afterSchema = after.schema();
            for (Field field : afterSchema.fields()) {
                Object afterValue = after.get(field);
                afterJson.put(field.name(), afterValue);
            }
        }
        // 5. Get the operation type
        Envelope.Operation operation = Envelope.operationFor(sourceRecord);
        String opName = operation.toString().toLowerCase();
        // normalize "create" to "insert" for easier downstream handling
        if ("create".equals(opName)) {
            opName = "insert";
        }
        // 6. Put the fields into the JSON object
        result.put("database", database);
        result.put("tableName", tableName);
        result.put("before", beforeJson);
        result.put("after", afterJson);
        result.put("operate", opName);
        // 7. Emit the record
        collector.collect(result.toJSONString());
    }

    // kept consistent with StringDebeziumDeserializationSchema
    @Override
    public TypeInformation<String> getProducedType() {
        return BasicTypeInfo.STRING_TYPE_INFO;
    }


}
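
The pom above also pulls in flink-table-planner-blink, so the same table can be consumed through Flink SQL instead of the DataStream API. Below is a minimal sketch, not code from this article: the class name CdcSql and the columns id and name are hypothetical, and the WITH options are those documented for the 'mysql-cdc' table connector in flink-connector-mysql-cdc 2.0.0.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class CdcSql {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Register testdb.user as a dynamic table backed by the mysql-cdc connector
        // (id / name are hypothetical columns; adjust to the real schema)
        tableEnv.executeSql(
                "CREATE TABLE user_source (" +
                "  id INT," +
                "  name STRING," +
                "  PRIMARY KEY (id) NOT ENFORCED" +
                ") WITH (" +
                "  'connector' = 'mysql-cdc'," +
                "  'hostname' = 'host'," +
                "  'port' = '3306'," +
                "  'username' = 'root'," +
                "  'password' = 'root_5.5'," +
                "  'database-name' = 'testdb'," +
                "  'table-name' = 'user'" +
                ")");

        // Print the changelog stream (insert/update/delete rows)
        tableEnv.executeSql("SELECT * FROM user_source").print();
    }
}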

3 Checkpoint and savepoint: resuming a job

Note:

  1. When a Flink job is cancelled normally, its checkpoint files are cleaned up automatically (unless externalized checkpoints are retained, as configured above).
  2. Before cancelling the job, trigger a savepoint manually (a combined stop-with-savepoint command is sketched after this list).
  • Trigger and store a savepoint manually:
# take the savepoint, then cancel the job once it completes
flink savepoint 387bbf770086336de78819d9fee38579 hdfs://hadoop101:8020/user/develop/flink/cdc
  • Resume the job from the savepoint stored under hdfs://hadoop101:8020/user/develop/flink/cdc:
flink run -s hdfs://hadoop101:8020/user/develop/flink/cdc/savepoint-387bbf770086336de78819d9fee38579 -c com.example.flinkcdcmysql.FlinkStreamCdc xxxxx.jar
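
As an alternative to triggering the savepoint and cancelling in two steps, the Flink CLI also offers stop-with-savepoint, which takes a savepoint and then gracefully stops the job in one command. A sketch, assuming the same job id and savepoint directory as above:

# take a savepoint into the given directory and stop the job
flink stop -p hdfs://hadoop101:8020/user/develop/flink/cdc 387bbf770086336de78819d9fee38579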