- First, define the KafkaSourceFunction:
package com.alpha;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.types.Row;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
class KafkaSourceFunction extends RichSourceFunction<Row> {

    // Created in open() rather than serialized with the function, so transient.
    private transient KafkaConsumer<String, String> consumer;
    // Flipped by cancel() to break the poll loop in run().
    private volatile boolean running = true;

    @Override
    public void open(Configuration parameters) throws Exception {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "bigdata006:9092,bigdata007:9092,bigdata008:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "default-009");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("topic_test_flink"));
    }
    @Override
    public void run(SourceContext<Row> sourceContext) throws Exception {
        while (running) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                // Each Kafka value is a JSON object of the shape
                // {"timestamp": "...", "name": "...", "age": "..."}.
                JSONObject json = JSON.parseObject(record.value());
                Row row = Row.of(json.getString("timestamp"), json.getString("name"), json.getString("age"));
                sourceContext.collect(row);
            }
        }
    }

    @Override
    public void cancel() {
        // Lets run() fall out of its loop when the job is cancelled.
        running = false;
    }

    @Override
    public void close() throws Exception {
        if (consumer != null) {
            consumer.close();
        }
    }
}
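For local testing it helps to have something writing matching JSON into the topic. Below is a minimal sketch of such a producer, not part of the original job: the class name TestDataProducer and the sample field values are assumptions, while the broker list and topic name are taken from the source above.

package com.alpha;
import com.alibaba.fastjson.JSONObject;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;

// Hypothetical helper: pushes one sample record into topic_test_flink
// so the KafkaSourceFunction has something to read.
public class TestDataProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "bigdata006:9092,bigdata007:9092,bigdata008:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            JSONObject json = new JSONObject();
            json.put("timestamp", String.valueOf(System.currentTimeMillis()));
            json.put("name", "alice"); // sample values, assumed for testing
            json.put("age", "30");
            producer.send(new ProducerRecord<>("topic_test_flink", json.toJSONString()));
        }
    }
}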
- The main program:
package com.alpha;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.sinks.CsvTableSink;
import org.apache.flink.types.Row;
public class KafkaAvro {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // The custom source emits Rows with three string fields.
        String[] fieldNames = new String[]{"ttime", "name", "age"};
        TypeInformation[] fieldTypes = new TypeInformation[]{Types.STRING, Types.STRING, Types.STRING};
        TypeInformation<Row> typeInformation = new RowTypeInfo(fieldTypes, fieldNames);

        DataStreamSource<Row> streamSource = env.addSource(new KafkaSourceFunction(), typeInformation);
        Table tbl = tableEnv.fromDataStream(streamSource, "ttime,name,age");

        // CSV sink writing to a fresh directory per run.
        CsvTableSink csvSink = new CsvTableSink("/Users/bozhong/Documents/code/flinkStreamSQL/data/" + System.currentTimeMillis() + "/");
        tableEnv.registerTableSink("sink_tbl", csvSink.configure(fieldNames, fieldTypes));

        // Concatenating the Table into the statement registers it under a
        // generated name that the SQL query can reference directly.
        tableEnv.sqlUpdate("INSERT INTO sink_tbl SELECT ttime, name, age FROM " + tbl);
        tableEnv.execute("kafka-to-csv");
    }
}
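With CsvTableSink's default settings, each collected Row becomes one comma-separated line in the output directory. As a rough illustration with assumed sample values (not real output), an input message of

{"timestamp": "1589000000000", "name": "alice", "age": "30"}

should land in the sink directory as:

1589000000000,alice,30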