POM file
flink-connector-debezium:2.2.0
flink-connector-kafka_2.11:1.13.6
flink-connector-mysql-cdc:2.2.0
Error
java.lang.NoClassDefFoundError: org/apache/kafka/common/utils/ThreadUtils
    at com.ververica.cdc.debezium.internal.FlinkOffsetBackingStore.start(FlinkOffsetBackingStore.java:152) ~[flink-connector-debe
Solution
Add the kafka-clients dependency explicitly. The stack trace shows FlinkOffsetBackingStore (the Debezium layer inside the MySQL CDC connector) calling org.apache.kafka.common.utils.ThreadUtils, a class that ships with kafka-clients; with the dependencies above, the kafka-clients on the classpath is either missing or too old to contain that class, so pin a suitable version directly:
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.7.0</version>
</dependency>
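For reference, a sketch of how the dependencies fit together in the pom. The groupIds are inferred from these connectors' standard Maven coordinates (flink-connector-debezium itself normally arrives transitively through flink-connector-mysql-cdc), so verify them against your own build:

<dependency>
    <groupId>com.ververica</groupId>
    <artifactId>flink-connector-mysql-cdc</artifactId>
    <version>2.2.0</version>
</dependency>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-kafka_2.11</artifactId>
    <version>1.13.6</version>
</dependency>
<!-- the fix: puts org.apache.kafka.common.utils.ThreadUtils on the classpath -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.7.0</version>
</dependency>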
Code example
The job below registers two Kafka JSON sources and a mysql-cdc table, left-joins the two streams, and writes a per-day aggregate into an upsert-kafka sink.
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
public class Testkafka {
    public static void main(String[] args) throws Exception {
        // Local environment with the Flink web UI (requires flink-runtime-web on the classpath)
        Configuration conf = new Configuration();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);
        env.setParallelism(1);
        // StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // The Blink planner is already the default in Flink 1.13; selected explicitly here
        EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().useBlinkPlanner().build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);
        // Optional planner tuning knobs, left disabled
        Configuration configuration = tableEnv.getConfig().getConfiguration();
        // configuration.setString("table.exec.mini-batch.enabled", "true"); // enable mini-batch optimization
        // configuration.setString("table.exec.mini-batch.allow-latency", "5 s"); // use 5 seconds to buffer input records
        // configuration.setString("table.exec.mini-batch.size", "5000"); // the maximum number of records that can be buffered by each aggregate operator task
        // configuration.setString("table.exec.state.ttl", "60000");
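        // Commented-out earlier variant of a MySQL-backed table (jdbc lookup options vs. mysql-cdc):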
        /* String skersker = "CREATE TABLE skersker (\n" +
                " d_id varchar(255),\n" +
                " d_sugg_word varchar(255),\n" +
                " d_type varchar(255),\n" +
                " d_update_time timestamp,\n" +
                " primary key(d_type) NOT ENFORCED \n" +
                ") WITH (\n" +
                *//* " 'connector' = 'jdbc',\n" +
                " 'url' = 'jdbc:mysql://192.168.50.120:3336/mydb?characterEncoding=UTF-8',\n" +
                " 'table-name' = 'flink_test_dt',\n" +
                " 'lookup.cache.max-rows' = '1000000',\n" +
                " 'lookup.cache.ttl' = '60000' ,\n" +
                " 'username' = 'root',\n" +
                " 'password' = '123456'\n" +*//*
                " 'connector' = 'mysql-cdc',\n" +
                " 'hostname' = '192.168.50.120',\n" +
                " 'port' = '3336',\n" +
                " 'username' = 'root',\n" +
                " 'password' = '123456',\n" +
                " 'database-name' = 'mydb',\n" +
                " 'table-name' = 'flink_test_dt'\n" +
                ")";*/
        String source2_1 = "create table source2_1 ( \n" +
                " id BIGINT,\n" +
                " day_time VARCHAR,\n" +
                " amnount BIGINT,\n" +
                " proctime AS PROCTIME()\n" +
                ")\n" +
                " with ( \n" +
                " 'connector' = 'kafka',\n" +
                " 'topic' = 'source1',\n" +
                " 'properties.bootstrap.servers' = '192.168.50.111:9092,192.168.50.112:9092,192.168.50.113:9092', \n" +
                " 'properties.group.id' = 'flink_gp_test2-1',\n" +
                " 'scan.startup.mode' = 'earliest-offset',\n" +
                " 'format' = 'json',\n" +
                " 'json.fail-on-missing-field' = 'false',\n" +
                " 'json.ignore-parse-errors' = 'true'\n" +
                " )";
        String flink_test_mt = "CREATE TABLE flink_test_mt (\n" +
                " m_type varchar(255),\n" +
                " m_id varchar(255),\n" +
                " m_sugg_word varchar(255),\n" +
                " m_update_time timestamp,\n" +
                " primary key(m_type) NOT ENFORCED \n" +
                ") WITH (\n" +
                " 'connector' = 'mysql-cdc',\n" +
                " 'hostname' = '192.168.50.120',\n" +
                " 'port' = '3336',\n" +
                " 'username' = 'root',\n" +
                " 'password' = '123456',\n" +
                " 'database-name' = 'mydb',\n" +
                " 'table-name' = 'flink_test_mt_tmp'\n" +
                ")";
        String source2_2 = "create table source2_2 ( \n" +
                " id BIGINT,\n" +
                " coupon_amnount BIGINT,\n" +
                " proctime AS PROCTIME()\n" +
                ")\n" +
                " with ( \n" +
                " 'connector' = 'kafka',\n" +
                " 'topic' = 'source2',\n" +
                " 'properties.bootstrap.servers' = '192.168.50.111:9092,192.168.50.112:9092,192.168.50.113:9092', \n" +
                " 'properties.group.id' = 'flink_gp_test2-2',\n" +
                " 'scan.startup.mode' = 'earliest-offset',\n" +
                " 'format' = 'json',\n" +
                " 'json.fail-on-missing-field' = 'false',\n" +
                " 'json.ignore-parse-errors' = 'true'\n" +
                " )";
        String sink_kafka = "CREATE TABLE sink (\n" +
                " day_time string,\n" +
                " total_gmv bigint,\n" +
                " PRIMARY KEY (day_time) NOT ENFORCED\n" +
                ") WITH (\n" +
                " 'connector' = 'upsert-kafka',\n" +
                " 'topic' = 'sink',\n" +
                " 'properties.bootstrap.servers' = '192.168.50.111:9092,192.168.50.112:9092,192.168.50.113:9092', \n" +
                " 'key.format' = 'json',\n" +
                " 'value.format' = 'json'\n" +
                ")";
        /* String sink_kafka = "create table skersker ( \n" +
                " Data_d_id varchar(255),\n" +
                " Data_d_sugg_word varchar(255),\n" +
                " Data_d_type varchar(255),\n" +
                " Data_d_update_time BIGINT,\n" +
                " update_time AS TO_TIMESTAMP(FROM_UNIXTIME(Data_d_update_time / 1000, 'yyyy-MM-dd HH:mm:ss')),\n" +
                " proctime AS PROCTIME(),\n" +
                " primary key(Data_d_type) NOT ENFORCED \n" +
                ")\n" +
                " with ( \n" +
                " 'connector' = 'upsert-kafka',\n" +
                " 'topic' = 'mysql_up',\n" +
                " 'properties.bootstrap.servers' = '192.168.50.120:9092', \n" +
                " 'key.format' = 'json',\n" +
                " 'value.format' = 'json'\n" +
                " )";*/
        tableEnv.executeSql(source2_1);
        tableEnv.executeSql(flink_test_mt);
        tableEnv.executeSql(sink_kafka);
        tableEnv.executeSql(source2_2);
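        // Continuous query: left-join the two streams on id, aggregate per day,
        // and emit each update as an upsert into the sink topic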
tableEnv.executeSql(" INSERT INTO sink SELECT \n" +
" day_time, \n" +
" SUM(amnount - coupon_amnount) AS total_gmv \n" +
"FROM \n" +
" (\n" +
" SELECT\n" +
" a.day_time as day_time, \n" +
" a.amnount as amnount, \n" +
" b.coupon_amnount as coupon_amnount \n" +
" FROM \n" +
" source2_1 as a \n" +
" LEFT JOIN source2_2 b on b.id = a.id\n" +
" ) \n" +
"GROUP BY \n" +
" day_time").print();
    }
}
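A quick smoke test: push one JSON record into each source topic (field names must match the DDL above; the ids and amounts below are made up for illustration) and watch the sink topic.

topic source1: {"id": 1, "day_time": "2022-05-01", "amnount": 100}
topic source2: {"id": 1, "coupon_amnount": 10}
topic sink:    {"day_time": "2022-05-01", "total_gmv": 90}

Because of the LEFT JOIN, an intermediate update with total_gmv = null may appear for the day before the matching source2 record arrives; the upsert-kafka sink then overwrites it with 90.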