【Flink-Kafka-To-Mysql】使用 Flink 实现 Kafka 数据写入 Mysql(根据对应操作类型进行增、删、改操作)

需求描述:

1、数据从 Kafka 写入 Mysql。

2、相关配置存放于 Mysql 中,通过 Mysql 进行动态读取。

3、此案例中的 Kafka 是进行了 Kerberos 安全认证的,如果不需要认证,请自行删除相关认证配置。

4、Kafka 数据为 Json 格式,获取到的数据根据操作类型字段进行增删改操作。

5、读取时使用自定义 Source,写入时使用自定义 Sink。

6、消费 Kafka 数据时自定义反序列化。

1)导入依赖

这里的依赖比较冗余,大家可以根据各自需求做删除或保留。

<?xml version="1.0" encoding="UTF-8"?>
<!--
  Build for the Flink job that consumes a Kerberos-secured Kafka cluster and
  writes to MySQL. Several version inconsistencies are flagged inline below.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>gaei.cn.x5l</groupId>
    <artifactId>x8vbusiness</artifactId>
    <version>1.0.0</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <target.java.version>1.8</target.java.version>
        <maven.compiler.source>${target.java.version}</maven.compiler.source>
        <maven.compiler.target>${target.java.version}</maven.compiler.target>

        <scala.binary.version>2.12</scala.binary.version>
        <scala.version>2.12.10</scala.version>
        <flink.version>1.14.0</flink.version>
        <log4j.version>2.17.2</log4j.version>
        <!-- NOTE(review): hadoop-client below is hard-coded to 3.3.1 and does not use
             hadoop.version; hive.version is unused by the dependencies in this POM.
             Confirm which Hadoop line is actually deployed. -->
        <hadoop.version>3.1.2</hadoop.version>
        <hive.version>3.1.2</hive.version>

        <mongo.driver.version>3.12.6</mongo.driver.version>
        <mongo.driver.core.version>4.3.1</mongo.driver.core.version>

    </properties>
    <dependencies>
        <dependency>
            <groupId>com.ververica</groupId>
            <artifactId>flink-connector-mysql-cdc</artifactId>
            <version>2.3.0</version>
        </dependency>

        <dependency>
            <groupId>redis.clients</groupId>
            <artifactId>jedis</artifactId>
            <version>2.9.0</version>
        </dependency>

        <!-- Core Flink dependencies: start -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <!-- Core Flink dependencies: end -->
        <!-- Table API: start -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java-bridge_${scala.binary.version}</artifactId>
            <!-- FIX: was hard-coded 1.14.0; use the shared property so a version
                 bump cannot silently diverge. -->
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <!-- Comment this planner out when running Hive SQL; keep it otherwise. -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-common</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-cep_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <!-- Table API: end -->
        <!-- SQL formats: start -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-csv</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <!-- SQL formats: end -->
        <!-- SQL Kafka connector -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-sql-connector-kafka_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <!-- SQL: end -->
        <!-- Savepoint/checkpoint state inspection -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-state-processor-api_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>


        <dependency>
            <groupId>commons-lang</groupId>
            <artifactId>commons-lang</artifactId>
            <version>2.5</version>
            <scope>compile</scope>
        </dependency>

        <!-- Local Flink web UI for debugging: start -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-runtime-web_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <!-- Local Flink web UI for debugging: end -->
        <!-- Logging (runtime only; excluded from the fat jar below) -->
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-slf4j-impl</artifactId>
            <version>${log4j.version}</version>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-api</artifactId>
            <version>${log4j.version}</version>
            <scope>runtime</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-core</artifactId>
            <version>${log4j.version}</version>
            <scope>runtime</scope>
        </dependency>
        <!-- HDFS client -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <!-- NOTE(review): hard-coded and different from ${hadoop.version} (3.1.2);
                 confirm 3.3.1 is intentional. -->
            <version>3.3.1</version>
            <exclusions>
                <exclusion>
                    <groupId>org.apache.avro</groupId>
                    <artifactId>avro</artifactId>
                </exclusion>
            </exclusions>
        </dependency>


        <!-- Easily overlooked, but required for Kerberos authentication -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-auth</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <!-- RocksDB state backend -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-statebackend-rocksdb_${scala.binary.version}</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <!-- Misc -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <!-- NOTE(review): fastjson 1.1.23 predates the fixes for several well-known
                 autoType deserialization RCEs; upgrade to at least 1.2.83 if any parsed
                 JSON can come from an untrusted source. Not changed here to avoid an
                 unreviewed API bump. -->
            <version>1.1.23</version>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.16.18</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.jyaml</groupId>
            <artifactId>jyaml</artifactId>
            <version>1.3</version>
        </dependency>


        <!-- Table planner (legacy blink artifact) -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
            <!-- NOTE(review): the blink planner was merged into flink-table-planner in
                 Flink 1.14, so this 1.13.5 artifact coexists with the 1.14.0 planner
                 above and risks classpath conflicts — confirm it is still required. -->
            <version>1.13.5</version>
            <scope>provided</scope>
        </dependency>


        <!-- Table planner: end -->


        <dependency>
            <groupId>com.google.code.gson</groupId>
            <artifactId>gson</artifactId>
            <version>2.8.3</version>
        </dependency>

        <dependency>
            <groupId>com.ververica</groupId>
            <artifactId>flink-connector-mongodb-cdc</artifactId>
            <version>2.3.0</version>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>8.0.27</version>
            <scope>runtime</scope>
        </dependency>

        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid</artifactId>
            <version>1.2.8</version>
        </dependency>



        <dependency>
            <groupId>org.mongodb</groupId>
            <artifactId>bson</artifactId>
            <version>${mongo.driver.core.version}</version>
        </dependency>


        <dependency>
            <groupId>org.mongodb</groupId>
            <artifactId>mongodb-driver-core</artifactId>
            <version>${mongo.driver.core.version}</version>
        </dependency>

        <!-- custom-mongo-core repackaged from mongodb-driver -->
        <dependency>
            <groupId>org.mongodb</groupId>
            <artifactId>mongodb-driver</artifactId>
            <!-- FIX: was hard-coded 3.12.6; use the existing property of the same value. -->
            <version>${mongo.driver.version}</version>
        </dependency>

    </dependencies>
    <build>
        <plugins>

            <!-- Java Compiler -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.1</version>
                <configuration>
                    <source>${target.java.version}</source>
                    <target>${target.java.version}</target>
                </configuration>
            </plugin>

            <!-- We use the maven-shade plugin to create a fat jar that contains all necessary dependencies. -->
            <!-- Change the value of <mainClass>...</mainClass> if your program entry point changes. -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.0.0</version>
                <executions>
                    <!-- Run shade goal on package phase -->
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <artifactSet>
                                <excludes>
                                    <exclude>org.apache.flink:force-shading</exclude>
                                    <exclude>com.google.code.findbugs:jsr305</exclude>
                                    <exclude>org.slf4j:*</exclude>
                                    <exclude>org.apache.logging.log4j:*</exclude>
                                    <!-- FIX: was flink-runtime-web_2.11, which cannot match the
                                         _2.12 artifact declared above, so the web UI jar was
                                         never actually excluded from the fat jar. -->
                                    <exclude>org.apache.flink:flink-runtime-web_${scala.binary.version}</exclude>
                                </excludes>
                            </artifactSet>
                            <filters>
                                <filter>
                                    <!-- Do not copy the signatures in the META-INF folder.
                                    Otherwise, this might cause SecurityExceptions when using the JAR. -->
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass>com.owp.flink.kafka.KafkaSourceDemo</mainClass>
                                </transformer>
                                <!-- Needed for Flink SQL -->
                                <!-- The service transformer is needed to merge META-INF/services files -->
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>

        <pluginManagement>
            <plugins>
                <!-- This improves the out-of-the-box experience in Eclipse by resolving some warnings. -->
                <plugin>
                    <groupId>org.eclipse.m2e</groupId>
                    <artifactId>lifecycle-mapping</artifactId>
                    <version>1.0.0</version>
                    <configuration>
                        <lifecycleMappingMetadata>
                            <pluginExecutions>
                                <pluginExecution>
                                    <pluginExecutionFilter>
                                        <groupId>org.apache.maven.plugins</groupId>
                                        <artifactId>maven-shade-plugin</artifactId>
                                        <versionRange>[3.0.0,)</versionRange>
                                        <goals>
                                            <goal>shade</goal>
                                        </goals>
                                    </pluginExecutionFilter>
                                    <action>
                                        <ignore/>
                                    </action>
                                </pluginExecution>
                                <pluginExecution>
                                    <pluginExecutionFilter>
                                        <groupId>org.apache.maven.plugins</groupId>
                                        <artifactId>maven-compiler-plugin</artifactId>
                                        <versionRange>[3.1,)</versionRange>
                                        <goals>
                                            <goal>testCompile</goal>
                                            <goal>compile</goal>
                                        </goals>
                                    </pluginExecutionFilter>
                                    <action>
                                        <ignore/>
                                    </action>
                                </pluginExecution>
                            </pluginExecutions>
                        </lifecycleMappingMetadata>
                    </configuration>
                </plugin>

            </plugins>
        </pluginManagement>

    </build>

</project>

2)resources

2.1.appconfig.yml

# MySQL connection settings read dynamically at runtime.
# NOTE(review): credentials are stored in plaintext — consider externalizing secrets.
mysql.url: "jdbc:mysql://1.1.1.1:3306/test?useSSL=false"
mysql.username: "test"
mysql.password: "123456"
# NOTE(review): com.mysql.jdbc.Driver is the legacy (pre-8.x) driver class; the POM
# bundles mysql-connector-java 8.0.27, whose class is com.mysql.cj.jdbc.Driver —
# confirm which one is intended.
mysql.driver: "com.mysql.jdbc.Driver"

2.2.application.properties

# MongoDB connection (authSource=admin). Plaintext credentials — see security note above.
url=mongodb://test:test123456@10.1.1.1:34516/?authSource=admin
#database=diagnosis
#collection=diagnosisEntiry
maxConnectionIdleTime=1000000
batchSize=1

# Flink checkpoint / restart settings, consumed by KafkaUtils.setupFlinkEnv (values in ms
# unless noted; restartInterval is in seconds, restartStrategy is the attempt count).
checkpoint.interval=300000
checkpoint.minPauseBetweenCheckpoints=10000
checkpoint.checkpointTimeout=400000
maxConcurrentCheckpoints=1
restartInterval=120
restartStrategy=3
checkpointDataUri=hdfs://nameserver/user/flink/rocksdbcheckpoint_mongo

mysql.url=jdbc:mysql://1.1.1.1:3306/test?useSSL=false
mysql.username=test
mysql.password=123456

# Environment selector used by KafkaUtils.getKafkaKerberos: PRE or PRD.
#envType=PRE
envType=PRD

# Druid connection-pool settings for the production MySQL instance.
druid.driverClassName=com.mysql.jdbc.Driver
# production
druid.url=jdbc:mysql://1.1.1.1:3306/test
druid.username=test
druid.password=123456
# initial pool size
druid.initialSize=1
# maximum active connections
druid.maxActive=5
# maximum wait time (ms) when borrowing a connection
druid.maxWait=3000

2.3.log4j.properties

# Log4j 1.x configuration: INFO and above to the console.
# NOTE(review): the POM ships log4j2 + log4j-slf4j-impl; this 1.x file is only read
# if a log4j 1.x bridge is on the classpath — confirm it is still needed.
log4j.rootLogger=info, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n

2.4.log4j2.xml

<?xml version="1.0" encoding="UTF-8"?>
<!-- Log4j2 configuration: ERROR and above to console and to tmp/log/job.log.
     monitorInterval="5" re-reads this file every 5 seconds, so the level can be
     changed without restarting the job. -->
<configuration monitorInterval="5">
    <Properties>
        <property name="LOG_PATTERN" value="%date{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n" />
        <property name="LOG_LEVEL" value="ERROR" />
    </Properties>

    <appenders>
        <console name="console" target="SYSTEM_OUT">
            <PatternLayout pattern="${LOG_PATTERN}"/>
            <ThresholdFilter level="${LOG_LEVEL}" onMatch="ACCEPT" onMismatch="DENY"/>
        </console>
        <!-- append="false": the log file is truncated on every job start. -->
        <File name="log" fileName="tmp/log/job.log" append="false">
            <PatternLayout pattern="%d{HH:mm:ss.SSS} %-5level %class{36} %L %M - %msg%xEx%n"/>
        </File>
    </appenders>

    <loggers>
        <root level="${LOG_LEVEL}">
            <appender-ref ref="console"/>
            <appender-ref ref="log"/>
        </root>
    </loggers>
</configuration>

3)util

3.1.KafkaUtils

/**
 * Helper utilities for the Kafka -> MySQL job:
 * <ul>
 *   <li>builds a Kerberos-authenticated {@code FlinkKafkaConsumer};</li>
 *   <li>loads job configuration from {@code application.properties} on the classpath;</li>
 *   <li>assembles Druid connection-pool properties for MySQL;</li>
 *   <li>applies checkpoint/restart/state-backend settings to a Flink environment.</li>
 * </ul>
 */
public class KafkaUtils {

    /**
     * Builds a FlinkKafkaConsumer for the given topics, configured for
     * SASL_PLAINTEXT/Kerberos and a custom deserialization schema that preserves
     * topic/partition/offset metadata on every record.
     *
     * @param topic topics to subscribe to
     * @return consumer starting from committed group offsets, committing offsets on checkpoints
     * @throws IOException if the configuration resource cannot be read
     */
    public static FlinkKafkaConsumer<ConsumerRecord<String, String>> getKafkaConsumer(List<String> topic) throws IOException {
        Properties prop1 = confFromYaml();
        // Environment selector (PRE / PRD) chooses principal and brokers.
        String envType = prop1.getProperty("envType");

        Properties prop = new Properties();

        // Kerberos client setup; paths follow the standard deployment layout.
        System.setProperty("java.security.krb5.conf", "/opt/conf/krb5.conf");
        prop.put("security.protocol", "SASL_PLAINTEXT");
        prop.put("sasl.jaas.config", "com.sun.security.auth.module.Krb5LoginModule required "
                + "useTicketCache=false  "
                + "serviceName=\"" + "kafka" + "\" "
                + "useKeyTab=true "
                + "keyTab=\"" + "/opt/conf/test.keytab" + "\" "
                + "principal=\"" + getKafkaKerberos(envType).get("principal") + "\";");
        prop.put("bootstrap.servers", getKafkaKerberos(envType).get("bootstrap.servers"));
        prop.put("group.id", "Kafka2Mysql_test");
        prop.put("auto.offset.reset", "earliest");
        // Offsets are committed by Flink on checkpoint completion, not by the client.
        prop.put("enable.auto.commit", "false");
        prop.put("max.poll.interval.ms", "60000");
        prop.put("max.poll.records", "3000");
        prop.put("session.timeout.ms", "600000");

        // FIX: consumers take *deserializer* properties; the original set
        // "key.serializer"/"value.serializer" (producer property names), which a
        // consumer silently ignores.
        prop.put("key.deserializer", "org.apache.flink.kafka.shaded.org.apache.kafka.common.serialization.ByteArrayDeserializer");
        prop.put("value.deserializer", "org.apache.flink.kafka.shaded.org.apache.kafka.common.serialization.ByteArrayDeserializer");

        FlinkKafkaConsumer<ConsumerRecord<String, String>> consumer =
                new FlinkKafkaConsumer<ConsumerRecord<String, String>>(topic, new CustomDeSerializationSchema(), prop);

        // Resume from the group's committed offsets; fall back to auto.offset.reset.
        consumer.setStartFromGroupOffsets();
        consumer.setCommitOffsetsOnCheckpoints(true);

        return consumer;
    }

    /**
     * Smoke test: borrows a pooled connection and runs a trivial count query.
     */
    public static void main(String[] args) throws Exception {
        Properties druidConf = KafkaUtils.getDruidConf();
        if (druidConf == null) {
            throw new RuntimeException("缺少druid相关配置信息,请检查");
        }

        DataSource dataSource = DruidDataSourceFactory.createDataSource(druidConf);
        // FIX: try-with-resources releases the JDBC resources even when the
        // query throws (the original leaked them on any exception).
        try (Connection connection = dataSource.getConnection();
             PreparedStatement showDatabases = connection.prepareStatement("\n" +
                     "select count(*) from tab_factory");
             ResultSet resultSet = showDatabases.executeQuery()) {
            while (resultSet.next()) {
                String string = resultSet.getString(1);
                System.out.println(string);
            }
        }
    }

    /**
     * Builds the property set expected by DruidDataSourceFactory from the
     * druid.* keys of application.properties.
     *
     * @return pool properties, or {@code null} if any required key is missing
     *         or unreadable (callers are expected to null-check)
     */
    public static Properties getDruidConf() {
        try {
            Properties prop = confFromYaml();
            // .toString() on a missing key NPEs into the catch below -> null return.
            String driverClassName = prop.get("druid.driverClassName").toString();
            String url = prop.get("druid.url").toString();
            String username = prop.get("druid.username").toString();
            String password = prop.get("druid.password").toString();
            String initialSize = prop.get("druid.initialSize").toString();
            String maxActive = prop.get("druid.maxActive").toString();
            String maxWait = prop.get("druid.maxWait").toString();

            Properties p = new Properties();
            p.put("driverClassName", driverClassName);
            p.put("url", url);
            p.put("username", username);
            p.put("password", password);
            p.put("initialSize", initialSize);
            p.put("maxActive", maxActive);
            p.put("maxWait", maxWait);
            return p;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Returns the Kerberos principal and bootstrap servers for the given
     * environment.
     *
     * @param envType environment name, {@code PRE} or {@code PRD} (case-insensitive)
     * @return map with keys {@code principal} and {@code bootstrap.servers}
     * @throws RuntimeException if the environment is unknown
     */
    public static Map<String, String> getKafkaKerberos(String envType) {
        Map<String, String> map = new HashMap<>();
        if ("PRD".equalsIgnoreCase(envType)) {
            map.put("principal", "prd@PRD.PRD.COM");
            map.put("bootstrap.servers", "kfk01.prd:9092,kfk02.prd:9092,kfk03.prd:9092,kfk04.prd:9092,kfk05.prd:9092,kfk06.prd:9092");
        } else if ("PRE".equalsIgnoreCase(envType)) {
            map.put("principal", "pre@PRE.PRE.COM");
            map.put("bootstrap.servers", "kfk01.pre:9092,kfk02.pre:9092,kfk03.pre:9092");
        } else {
            System.out.println("没有该" + envType + "环境");
            throw new RuntimeException("没有该" + envType + "环境");
        }

        return map;
    }

    /**
     * Applies checkpointing, restart-strategy and RocksDB state-backend settings
     * from application.properties to the given environment, and returns it.
     */
    public static StreamExecutionEnvironment setupFlinkEnv(StreamExecutionEnvironment env) throws IOException {
        Properties prop = confFromYaml();
        // EXACTLY_ONCE checkpointing delays offset commits until checkpoint completion.
        env.enableCheckpointing(Long.valueOf(prop.getProperty("checkpoint.interval")), CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(Long.valueOf(prop.getProperty("checkpoint.minPauseBetweenCheckpoints")));
        env.getCheckpointConfig().setCheckpointTimeout(Long.valueOf(prop.getProperty("checkpoint.checkpointTimeout")));
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(Integer.valueOf(prop.getProperty("maxConcurrentCheckpoints")));
        // Keep externalized checkpoints on cancel so the job can be resumed from them.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
                Integer.valueOf(prop.getProperty("restartStrategy")), // restart attempts; 3-5 recommended for distributed jobs
                Time.of(Integer.valueOf(prop.getProperty("restartInterval")), TimeUnit.SECONDS) // delay between attempts
        ));
        // RocksDB state backend with incremental checkpoints enabled.
        // NOTE(review): RocksDBStateBackend is deprecated in Flink 1.14 in favor of
        // EmbeddedRocksDBStateBackend — consider migrating.
        env.setStateBackend(new RocksDBStateBackend(String.valueOf(prop.getProperty("checkpointDataUri")), true));
        return env;
    }

    /**
     * Loads {@code application.properties} from the classpath.
     * NOTE: despite the name (kept for compatibility with existing callers),
     * this reads a .properties file, not YAML.
     *
     * @return the loaded properties; empty if the resource is missing or unreadable
     */
    public static Properties confFromYaml() {
        Properties prop = new Properties();
        InputStream resourceStream = Thread.currentThread().getContextClassLoader().getResourceAsStream("application.properties");
        try {
            // FIX: explicit check — the original passed null straight to load(),
            // producing an uninformative NullPointerException when the resource
            // was absent from the classpath.
            if (resourceStream == null) {
                throw new IOException("application.properties not found on classpath");
            }
            prop.load(resourceStream);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (resourceStream != null) {
                    resourceStream.close();
                }
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        return prop;
    }
}

3.2.CustomDeSerializationSchema

/**
 * Kafka deserialization schema that converts the raw byte[]/byte[] record into a
 * {@code ConsumerRecord<String, String>}, preserving topic, partition, offset and
 * timestamp metadata so downstream operators can see where each message came from.
 */
public class CustomDeSerializationSchema implements KafkaDeserializationSchema<ConsumerRecord<String, String>> {
    private static String encoding = "UTF8";

    // Never signals end-of-stream: data is expected to arrive indefinitely.
    @Override
    public boolean isEndOfStream(ConsumerRecord<String, String> nextElement) {
        return false;
    }

    /**
     * Decodes key and value as UTF-8 strings, carrying the Kafka metadata through.
     * FIX: the original passed a possibly-null value straight to {@code new String(...)}
     * and NPE'd on tombstone records (null value) — its own comment warned about this.
     * Null key and null value are both mapped to the empty string.
     */
    @Override
    public ConsumerRecord<String, String> deserialize(ConsumerRecord<byte[], byte[]> record) throws Exception {
        byte[] key = (record.key() == null ? "".getBytes() : record.key());
        byte[] value = (record.value() == null ? "".getBytes() : record.value());
        return new ConsumerRecord<String, String>(
                record.topic(),
                record.partition(),
                record.offset(),
                record.timestamp(),
                record.timestampType(),
                record.checksum(), // deprecated in newer kafka-clients; kept to match this constructor
                record.serializedKeySize(),
                record.serializedValueSize(),
                new  String(key, encoding),
                new  String(value, encoding));
    }

    // Tells Flink's type system what this schema produces.
    @Override
    public TypeInformation<ConsumerRecord<String, String>> getProducedType() {
        return TypeInformation.of(new TypeHint<ConsumerRecord<String, String>>() {
        });
    }
}

4)po

4.1.TableBean

// Descriptor of a target MySQL table: database, table name and primary-key column.
// Lombok @Data generates getters/setters/equals/hashCode/toString.
@Data
public class TableBean {
    private String database;   // stored already wrapped in backticks (see constructor)
    private String table;      // stored already wrapped in backticks (see constructor)
    private String primaryKey; // stored as-is, NOT backtick-quoted — unlike the other two

    // Hidden: instances must carry a database/table/primaryKey triple.
    private TableBean() {
    }

    // Backticks are embedded into the stored values so the fields can be spliced
    // directly into SQL even when the identifier is a reserved word. Note this means
    // the getters return quoted identifiers, and an empty table name becomes "``" —
    // callers are expected to fill the real table name in (see Kafka2MysqlApp).
    public TableBean(String database, String table, String primaryKey) {
        this.database = '`' + database + '`';
        this.table = '`' + table + '`';
        this.primaryKey = primaryKey;
    }
}

5)kafkacdc2mysql

5.1.Kafka2MysqlApp

/**
 * Flink job that consumes JSON change events from Kafka and applies them to MySQL.
 * The "operationType" field of each message decides whether the row is inserted
 * (delete-then-insert for idempotence), updated, or deleted. The topic name is
 * used as the target table name; the target database and primary-key column come
 * from the static topic → {@link TableBean} mapping below.
 */
public class Kafka2MysqlApp {
    // key: topic name; value: target database/table metadata incl. the primary-key column
    private static final Map<String, TableBean> map = new HashMap<>();

    static {
        // Table name is left empty here: the topic name itself is used as the table name.
        map.put("mysql_tab1", new TableBean("db1", "", "alarm_id"));
        map.put("mysql_tab2", new TableBean("db2", "", "id"));
    }

    public static void main(String[] args) throws Exception {

        ArrayList<String> topicList = new ArrayList<>(map.keySet());

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment().disableOperatorChaining();
        KafkaUtils.setupFlinkEnv(env);

        RichSinkFunction<ConsumerRecord<String, String>> sinkFunction =
                new RichSinkFunction<ConsumerRecord<String, String>>() {
                    DataSource dataSource = null;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        initDruidDataSource();
                    }

                    // Builds the Druid connection pool from externally supplied properties.
                    private void initDruidDataSource() throws Exception {
                        Properties druidConf = KafkaUtils.getDruidConf();
                        if (druidConf == null) {
                            throw new RuntimeException("缺少druid相关配置信息,请检查");
                        }

                        dataSource = DruidDataSourceFactory.createDataSource(druidConf);
                    }

                    @Override
                    public void close() throws Exception {
                        // Fix: release the Druid pool on task shutdown (the original
                        // close() was empty and leaked pooled connections).
                        if (dataSource instanceof AutoCloseable) {
                            ((AutoCloseable) dataSource).close();
                        }
                    }

                    @Override
                    public void invoke(ConsumerRecord<String, String> record, Context context) throws Exception {
                        if (dataSource == null) {
                            throw new RuntimeException("连接池未初始化");
                        }
                        String operationType = "";
                        String keyId = "";
                        String sql = "";
                        try (Connection connection = dataSource.getConnection()) {
                            // The topic name doubles as the target table name.
                            String table_name = record.topic();

                            JSONObject jsonObject = JSONObject.parseObject(record.value());
                            operationType = jsonObject.getString("operationType");
                            jsonObject.remove("operationType");
                            TableBean tableBean = map.get(record.topic());
                            String primaryKey = tableBean.getPrimaryKey();
                            String database = tableBean.getDatabase();
                            keyId = jsonObject.getString(primaryKey);
                            List<String> columns = new ArrayList<>();
                            List<String> columnValues = new ArrayList<>();

                            jsonObject.forEach((k, v) -> {
                                columns.add(k);
                                columnValues.add(v.toString());
                            });

                            if ("INSERT".equals(operationType)) {
                                // Idempotent insert: best-effort pre-delete of an existing row.
                                sql = "delete from " + database + "." + table_name + " where " + primaryKey + "= ?";
                                try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) {
                                    preparedStatement.setObject(1, keyId);
                                    preparedStatement.executeUpdate();
                                } catch (Exception ignore) {
                                    // Deliberate best-effort: any real problem will surface
                                    // again when the insert below executes.
                                }

                                // Build "insert into db.table(`c1`,`c2`) values(?,?)" without the
                                // fragile ",)"-replace trick the original used.
                                StringBuilder sb = new StringBuilder();
                                sb.append("insert into ").append(database).append(".").append(table_name).append("(");
                                for (int i = 0; i < columns.size(); i++) {
                                    if (i > 0) {
                                        sb.append(",");
                                    }
                                    sb.append("`").append(columns.get(i)).append("`");
                                }
                                sb.append(") values(");
                                for (int i = 0; i < columnValues.size(); i++) {
                                    if (i > 0) {
                                        sb.append(",");
                                    }
                                    sb.append("?");
                                }
                                sb.append(")");
                                sql = sb.toString();

                                // try-with-resources: the original leaked the statement when
                                // executeUpdate threw.
                                try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) {
                                    for (int i = 0; i < columnValues.size(); i++) {
                                        preparedStatement.setObject(i + 1, columnValues.get(i));
                                    }
                                    preparedStatement.executeUpdate();
                                }

                            } else if ("UPDATE".equals(operationType)) {

                                StringBuilder sb = new StringBuilder();
                                sb.append("update ").append(database).append(".").append(table_name).append(" set ");
                                for (int i = 0; i < columns.size(); i++) {
                                    if (i > 0) {
                                        sb.append(",");
                                    }
                                    sb.append("`").append(columns.get(i)).append("`= ?");
                                }
                                // Fix (SQL injection): the original concatenated keyId directly
                                // into the WHERE clause; it is now bound as a parameter.
                                sql = sb + " where " + primaryKey + "= ?";

                                try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) {
                                    for (int i = 0; i < columnValues.size(); i++) {
                                        preparedStatement.setObject(i + 1, columnValues.get(i));
                                    }
                                    preparedStatement.setObject(columnValues.size() + 1, keyId);
                                    preparedStatement.executeUpdate();
                                }

                            } else if ("DELETE".equals(operationType)) {
                                sql = "delete from " + database + "." + table_name + " where " + primaryKey + "= ?";

                                try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) {
                                    preparedStatement.setObject(1, keyId);
                                    preparedStatement.executeUpdate();
                                }
                            }
                        } catch (Exception e) {
                            System.out.printf("mysql同步操作(%s)有误,主键是%s,原因是%s,对应topic数据是%s%n", operationType, keyId, e.getMessage(), record);
                            System.out.println("执行sql语句为 " + sql);
                            throw new RuntimeException(e);
                        }
                    }
                };

        env.addSource(KafkaUtils.getKafkaConsumer(topicList)).addSink(sinkFunction);

        env.execute("kafka2mysql synchronization " + topicList.toString());
    }
}
  • 32
    点赞
  • 19
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
### 回答1: 下面是一个简单的 Flink 应用程序,将 Kafka 主题中的数据读取并写入到另一个 Kafka 主题: ```java import org.apache.flink.api.common.serialization.SimpleStringSchema; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer; import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.StringSerializer; import java.util.Properties; public class KafkaToFlinkToFlinkKafka { public static void main(String[] args) throws Exception { // set up the execution environment final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // configure Kafka consumer Properties consumerProps = new Properties(); consumerProps.setProperty("bootstrap.servers", "localhost:9092"); consumerProps.setProperty("group.id", "test-group"); // create a Kafka consumer data source FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<>("input-topic", new SimpleStringSchema(), consumerProps); // read from Kafka DataStream<String> stream = env.addSource(consumer); // configure Kafka producer Properties producerProps = new Properties(); producerProps.setProperty("bootstrap.servers", "localhost:9092"); // create a Kafka producer sink FlinkKafkaProducer<String> producer = new FlinkKafkaProducer<>("output-topic", new KafkaSerializationSchema<String>() { @Override public ProducerRecord<byte[], byte[]> serialize(String s, Long aLong) { return new ProducerRecord<>("output-topic", s.getBytes()); } }, producerProps, FlinkKafkaProducer.Semantic.EXACTLY_ONCE); // write to Kafka stream.addSink(producer); // execute the Flink job env.execute("Kafka to Flink to Kafka"); } } ``` 在上面的代码中,我们首先创建了一个 `StreamExecutionEnvironment` 对象,然后使用 Kafka 的消费者API读取一个名为 `input-topic` 的 
Kafka 主题中的数据,并将其转换为 Flink 数据流。接下来,我们创建一个 Kafka 生产者,并将数据写入名为 `output-topic` 的 Kafka 主题。最后,我们调用 `env.execute()` 来启动 Flink 作业。 请注意,上面的代码只是一个简单的入门级示例。在生产环境中,您可能需要更复杂的逻辑来处理数据并将其写入 Kafka 主题。 ### 回答2: Flink是一个流处理框架,它可以处理流式数据,并且可以与Kafka等消息队列相结合,实现数据的实时处理和分析。下面是一个使用FlinkKafka数据写入Kafka的入门级示例。 首先,我们需要引入FlinkKafka的相关依赖包,并创建一个Flink程序入口。 ``` import org.apache.flink.api.common.serialization.SimpleStringSchema; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.producer.ProducerConfig; import java.util.Properties; public class KafkaToKafkaDemo { public static void main(String[] args) throws Exception { // 创建Flink程序入口 StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // 设置Kafka的相关配置 Properties properties = new Properties(); properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka1:9092,kafka2:9092,kafka3:9092"); properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "flink-consumer"); properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka1:9092,kafka2:9092,kafka3:9092"); // 创建Kafka消费者 FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>("input-topic", new SimpleStringSchema(), properties); // 添加Kafka消费者到Flink程序中 DataStream<String> input = env.addSource(kafkaConsumer); // 创建Kafka生产者 FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<>("output-topic", new SimpleStringSchema(), properties); // 将输入数据写入Kafka input.addSink(kafkaProducer); // 执行Flink程序 env.execute("Kafka to Kafka Demo"); } } ``` 上述代码中,我们首先创建了一个Flink程序入口,然后设置了连接Kafka所需的配置信息。之后,我们创建了一个Kafka消费者,并将其添加到Flink程序中。接着,我们创建了一个Kafka生产者,并将输入的数据写入Kafka中。最后,我们执行了Flink程序。 
需要注意的是,在上述代码中,我们需要将`kafka1:9092,kafka2:9092,kafka3:9092`替换为实际的Kafka集群地址,`input-topic`和`output-topic`替换为实际的输入和输出主题名称。 这是一个简单的入门级示例,演示了如何使用FlinkKafka数据写入Kafka。你可以根据自己的需求,在此基础上进行更复杂的流处理操作。 ### 回答3: Flink是一个流计算引擎,可以用来处理大规模的实时数据流。而Kafka是一种高吞吐量的分布式消息队列,常用于构建数据流处理平台。那么如果想要将Kafka中的数据写入到另一个Kafka集群中,可以使用Flink实现。下面是一个入门级的示例代码,演示了如何使用Java编一个简单的Flink作业来实现Kafka数据写入到另一个Kafka集群中。 首先,需要在项目的pom.xml文件中添加FlinkKafka相关的依赖: ```xml <dependencies> <dependency> <groupId>org.apache.flink</groupId> <artifactId>flink-streaming-java_2.12</artifactId> <version>1.11.2</version> </dependency> <dependency> <groupId>org.apache.flink</groupId> <<artifactId>flink-connector-kafka_2.12</artifactId> <version>1.11.2</version> </dependency> </dependencies> ``` 接下来,可以编一个简单的Flink作业,该作业从一个Kafka主题中消费数据,并将其写入到另一个Kafka主题中: ```java import org.apache.flink.api.common.serialization.SimpleStringSchema; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer; import org.apache.flink.streaming.connectors.kafka.internals.KafkaSerializationSchemaWrapper; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import java.util.Properties; public class KafkaToFlinkToFlinkKafkaDemo { public static void main(String[] args) throws Exception { // 创建执行环境 StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); // 设置Kafka消费者的配置 Properties consumerProps = new Properties(); consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka_source:9092"); consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "flink-consumer"); // 创建一个从Kafka读取数据数据流 DataStream<String> kafkaSource = env .addSource(new FlinkKafkaConsumer<>("input_topic", new SimpleStringSchema(), consumerProps)); // 设置Kafka生产者的配置 Properties 
producerProps = new Properties(); producerProps.setProperty("bootstrap.servers", "kafka_target:9092"); // 创建一个写入Kafka数据流 // 注意:这里将消息序列化为字符串,所以消费者也需要使用相同的字符串序列化器来读取数据 DataStream<String> kafkaSink = kafkaSource .addSink(new FlinkKafkaProducer<>("output_topic", new KafkaSerializationSchemaWrapper<>(new SimpleStringSchema()), producerProps)); // 执行作业并等待任务完成 env.execute("Kafka to Flink to Kafka Demo"); } } ``` 在上述代码中,首先通过FlinkKafkaConsumer创建一个从Kafka中读取数据数据流,然后通过FlinkKafkaProducer创建一个将数据写入Kafka中的数据流。注意,需要为消费者和生产者配置正确的Kafka集群地址和主题名称。 以上就是一个简单的使用FlinkKafka数据写入到另一个Kafka集群的示例。当然,实际应用中还需要考虑更多的业务需求和数据转换操作。希望对您有帮助!

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值