Background
This post simulates a simple traffic-event processing pipeline. The rough steps are:
1. Consume a dimension stream carrying metadata that describes the traffic events, parse it, and broadcast it.
2. Consume the real-time traffic stream and delay it slightly, so events do not arrive before the metadata they need to join against.
3. Join the traffic stream against the broadcast metadata stream, using the metadata to extract the relevant fields from each event.
4. Serialize the result into Avro format and sink it.
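For concreteness, the two inputs emitted by the demo sources below look like this (one metadata message, then one randomly generated traffic event per second; the event values are illustrative):
{"data":[{"name":"id","comment":"ID"},{"name":"age","comment":"age"},{"name":"sex","comment":"gender"}]}
{"id":"张三","age":"23","sex":"男","time":"1650000000000"}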
Implementation
1. pom dependencies
Flink version: 1.14.4
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.example</groupId>
<artifactId>FlinkCode</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.source>8</maven.compiler.source>
<maven.compiler.target>8</maven.compiler.target>
<jdk.version>1.8</jdk.version>
<jar.name>ubs-data-converter</jar.name>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<!-- Flink version -->
<flink.version>1.14.4</flink.version>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java_2.11</artifactId>
<version>${flink.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-kafka_2.11</artifactId>
<version>${flink.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-runtime-web_2.11</artifactId>
<version>${flink.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.10</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.8</version>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
<version>1.9.2</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>4.4.1</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.16</version>
<scope>provided</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.avro</groupId>
<artifactId>avro-maven-plugin</artifactId>
<version>1.9.2</version>
<executions>
<execution>
<phase>generate-sources</phase>
<goals>
<goal>schema</goal>
</goals>
<configuration>
<sourceDirectory>${project.basedir}/src/main/resources/</sourceDirectory>
<outputDirectory>${project.basedir}/src/main/java/com/msxf</outputDirectory>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.1</version>
<configuration>
<source>${jdk.version}</source>
<target>${jdk.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.1.1</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<finalName>${jar.name}</finalName>
<artifactSet>
<excludes>
<exclude>com.google.code.findbugs:jsr305</exclude>
<exclude>org.slf4j:*</exclude>
<exclude>log4j:*</exclude>
<exclude>org.glassfish.jersey.core:jersey-common</exclude>
</excludes>
</artifactSet>
<relocations>
<relocation>
<pattern>com.google.common</pattern>
<shadedPattern>com.shade.google.common</shadedPattern>
</relocation>
<relocation>
<pattern>org.apache.kafka</pattern>
<shadedPattern>org.shade.apache.kafka</shadedPattern>
</relocation>
</relocations>
<filters>
<filter>
<artifact>*</artifact>
<includes>
<include>org/apache/htrace/**</include>
<include>org/apache/avro/**</include>
<include>com/msxf/**</include>
<include>org/apache/flink/streaming/**</include>
<include>org/apache/flink/connector/**</include>
<include>org/apache/kafka/**</include>
<include>org/apache/hive/**</include>
<include>org/apache/hadoop/hive/**</include>
<include>org/apache/curator/**</include>
<include>org/apache/zookeeper/**</include>
<include>org/apache/jute/**</include>
<include>org/apache/thrift/**</include>
<include>org/apache/http/**</include>
<include>org/I0Itec/**</include>
<include>jline/**</include>
<include>com/yammer/**</include>
<include>kafka/**</include>
<include>org/apache/hadoop/hbase/**</include>
<include>com/alibaba/fastjson/**</include>
<include>org/elasticsearch/action/**</include>
<include>io/confluent/**</include>
<include>com/fasterxml/**</include>
<include>org/elasticsearch/**</include>
<include>hbase-default.xml</include>
<include>hbase-site.xml</include>
</includes>
</filter>
</filters>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
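A note on the avro-maven-plugin configured above: during generate-sources it compiles any .avsc schema files found under src/main/resources into Java classes. This demo builds its Avro schema dynamically at runtime instead, so the plugin only matters if you also keep static schema files around. A minimal illustrative .avsc (hypothetical, not part of the original project):
{
"type": "record",
"name": "FlowEvent",
"namespace": "com.msxf",
"doc": "example static schema",
"fields": [
{"name": "id", "type": ["null", "string"], "default": null}
]
}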
2. Simulating the metadata stream
In a real job the metadata would arrive through some other channel (a database query, a Kafka stream, etc.); for this demo we simply define a custom source.
package source;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
public class StringSource extends RichSourceFunction<String> {
private volatile boolean running = true;
@Override
public void run(SourceContext<String> ctx) throws Exception {
//Emit the metadata payload once, then stop
while (running) {
String value = "{\"data\":[{\"name\":\"id\",\"comment\":\"ID\"}" +
",{\"name\":\"age\",\"comment\":\"age\"}" +
",{\"name\":\"sex\",\"comment\":\"gender\"}]}";
ctx.collect(value);
running = false;
}
}
@Override
public void cancel() {
running = false;
}
}
3. Simulating the traffic stream
The traffic data would likewise come from Kafka in a real job; since this is a simple demo, it is also produced by a custom source.
package source;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
public class FlowSourceFunction extends RichSourceFunction<String> {
private volatile boolean running = true;
//Sample values to draw random events from (BEHAVIOR is unused in this demo)
private final List<String> USERS = Arrays.asList("张三", "李四", "牛二");
private final List<String> BEHAVIOR = Arrays.asList("login", "out", "delete");
private final List<String> SEX = Arrays.asList("男", "女");
Random random = new Random();
@Override
public void run(SourceContext<String> ctx) throws Exception {
//Emit one random traffic event per second until cancelled
while (running) {
String id = USERS.get(random.nextInt(USERS.size()));
String age = String.valueOf(random.nextInt(100));
String sex = SEX.get(random.nextInt(SEX.size()));
String time = String.valueOf(System.currentTimeMillis());
String res = String.format("{\"id\":\"%s\"," +
"\"age\":\"%s\"," +
"\"sex\":\"%s\"," +
"\"time\":\"%s\"}", id, age, sex, time);
ctx.collect(res);
Thread.sleep(1000);
}
}
@Override
public void cancel() {
running = false;
}
}
4. Processing the metadata with a MapFunction
The map function below parses the metadata JSON and builds an Avro schema, along with the set of field names, from it.
package func;
import bean.SchemaInfo;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.avro.Schema;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.avro.SchemaBuilder;
import org.apache.flink.util.StringUtils;
import java.util.HashSet;
import java.util.stream.Stream;
public class MetaDataMapFunction extends RichMapFunction<String,SchemaInfo> {
private final String db;
private final String table;
private final String NAME = "name";
public MetaDataMapFunction(String db, String table) {
this.db = db;
this.table = table;
}
@Override
public SchemaInfo map(String value) throws Exception {
String[] aliases = {db.concat(".").concat(table)};
//Collects the field names for later lookup
HashSet<String> fieldsSet = new HashSet<>();
//Avro supports several schema types; here we build the common record type.
//Initial result:
//{"type":"record","name":"flow","namespace":"com.flow","doc":"flow_event","fields":[],"aliases":["db.table"]}
SchemaBuilder.RecordBuilder<Schema> recordBuilder = SchemaBuilder.record("flow").namespace("com.flow").aliases(aliases).doc("flow_event");
//After initialization, the schema's fields must be filled in; first get the field assembler
SchemaBuilder.FieldAssembler<Schema> fields = recordBuilder.fields();
//Parse the metadata message
JSONObject obj = JSON.parseObject(value);
//Drop entries whose "name" is missing or blank
Stream<JSONObject> data = obj.getJSONArray("data").stream().filter(
o -> !StringUtils.isNullOrWhitespaceOnly(JSON.parseObject(o.toString()).getOrDefault(NAME, "").toString())
).map(o -> JSONObject.parseObject(o.toString()));
data.forEach(
o -> {
String name = o.get("name").toString();
String comment = o.get("comment").toString();
buildFields(fields, name, comment);
fieldsSet.add(name);
}
);
Schema schema = fields.endRecord();
return new SchemaInfo(schema.toString(), fieldsSet);
}
public void buildFields(SchemaBuilder.FieldAssembler<Schema> fields, String name, String comment) {
fields.name(name)//field name
.doc(comment)//description/comment
.orderAscending()//ascending sort order
.type()//field type
.optional()//union of null and the type, defaulting to null
.stringType();
}
}
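To see what this produces, here is a minimal standalone sketch (my addition, assuming the classes above are on the classpath) that runs a one-field metadata message through the function and prints the resulting schema:
package func;
import bean.SchemaInfo;
public class MetaDataMapFunctionDemo {
public static void main(String[] args) throws Exception {
String meta = "{\"data\":[{\"name\":\"id\",\"comment\":\"ID\"}]}";
//map() can be called directly here because the function needs no open() setup
SchemaInfo info = new MetaDataMapFunction("db", "table").map(meta);
//Prints roughly: {"type":"record","name":"flow","namespace":"com.flow","doc":"flow_event",
//"fields":[{"name":"id","type":["null","string"],"doc":"ID","default":null}],"aliases":["db.table"]}
System.out.println(info.getInfo());
}
}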
The SchemaInfo return type:
package bean;
import lombok.AllArgsConstructor;
import lombok.Data;
import java.util.HashSet;
@Data
@AllArgsConstructor
public class SchemaInfo {
public String info;
public HashSet<String> set;
}
5. Delaying the main (traffic) stream
package func;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;
/**
 * 1. The sleep in open() stalls main-stream initialization so the broadcast stream has time
 *    to write the schema into broadcast state, ensuring the main stream can find the schema.
 * 2. map() itself is a pass-through; any per-record normalization would go here.
 */
public class DelayEtlMap extends RichMapFunction<String,String> {
private final long delayTime;
public DelayEtlMap(long delayTime) {
this.delayTime = delayTime;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
Thread.sleep(delayTime);
}
@Override
public String map(String value) throws Exception {
return value;
}
}
6. Connecting the two streams and processing the data
package func;
import bean.FlowData;
import bean.SchemaInfo;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import java.util.HashMap;
/**
 * processElement handles the business (traffic) stream
 * processBroadcastElement handles the broadcast (metadata) stream
 */
public class FlowBrodCastFunction extends BroadcastProcessFunction<String, SchemaInfo, FlowData> {
//Shared descriptor; its name and types must match the descriptor used when broadcasting
private static final MapStateDescriptor<String, SchemaInfo> FLOW_META_DATA = new MapStateDescriptor<>(
"flowMetaData"
, TypeInformation.of(String.class)
, TypeInformation.of(SchemaInfo.class));
@Override
public void processElement(String value, BroadcastProcessFunction<String, SchemaInfo, FlowData>.ReadOnlyContext ctx, Collector<FlowData> out) throws Exception {
ReadOnlyBroadcastState<String, SchemaInfo> flowMetaData = ctx.getBroadcastState(FLOW_META_DATA);
SchemaInfo schema = flowMetaData.get("schema");
//Guard against events that arrive before the broadcast schema despite the delay
if (schema == null) {
return;
}
JSONObject jsonObject = JSON.parseObject(value);
Long time = jsonObject.getLong("time");
//Copy the fields named in the metadata out of the event
HashMap<String, String> map = new HashMap<>();
schema.getSet().forEach(
o -> map.put(o, jsonObject.getString(o))
);
out.collect(new FlowData(schema.getInfo(), map, time));
}
@Override
public void processBroadcastElement(SchemaInfo value, BroadcastProcessFunction<String, SchemaInfo, FlowData>.Context ctx, Collector<FlowData> out) throws Exception {
if (value != null) {
BroadcastState<String, SchemaInfo> flowMetaData = ctx.getBroadcastState(FLOW_META_DATA);
flowMetaData.put("schema", value);
}
}
}
The FlowData object:
package bean;
import lombok.AllArgsConstructor;
import lombok.Data;
import java.util.HashMap;
@Data
@AllArgsConstructor
public class FlowData {
private String schema;
private HashMap<String, String> values;
private long time;
}
7. The main program
package ubs.app;
import bean.FlowData;
import bean.SchemaInfo;
import func.DelayEtlMap;
import func.FlowBrodCastFunction;
import func.FlowSinkFunction;
import func.MetaDataMapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.StringUtils;
import source.FlowSourceFunction;
import source.StringSource;
import java.util.Objects;
public class FlowApp {
public static void main(String[] args) throws Exception{
//Parse the command-line arguments
ParameterTool parameterTool = ParameterTool.fromArgs(args);
//Read the db parameter, defaulting to an empty string
String db = parameterTool.get("db", "");
//Read the table parameter, defaulting to an empty string
String table = parameterTool.get("table", "");
//Initialize the execution environment
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//Simulated metadata stream; its output is broadcast below
DataStreamSource<String> configSource = env.addSource(new StringSource());
//Keep the metadata source at parallelism 1
configSource.setParallelism(1);
//Real-time traffic stream; in production this would be a Kafka source:
// KafkaSource<String> source = SourceGetter.getValueOnlySimpleStrDesSource(parameterTool);
// DataStreamSource<String> realData = env.fromSource(source, WatermarkStrategy.noWatermarks(), "liuliang");
DataStreamSource<String> realData = env.addSource(new FlowSourceFunction());
realData.print("realData: ");
//Process the metadata and broadcast it: turn the metadata stream into a schema
BroadcastStream<SchemaInfo> metaData = configSource.filter(data -> !StringUtils.isNullOrWhitespaceOnly(data))
.setParallelism(1)
.map(new MetaDataMapFunction(db, table))
.broadcast(new MapStateDescriptor<String, SchemaInfo>(
"flowMetaData"
, TypeInformation.of(String.class)
, TypeInformation.of(SchemaInfo.class)
));
//Delay the main stream so the broadcast state is populated before the first event
SingleOutputStreamOperator<String> realData2 = realData.map(new DelayEtlMap(1000))
.filter(Objects::nonNull);
//Connect the two streams and parse the data
SingleOutputStreamOperator<FlowData> res = realData2.connect(metaData).process(new FlowBrodCastFunction());
res.addSink(new FlowSinkFunction());
res.print("res");
env.execute();
}
}
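The main class references FlowSinkFunction, which the post never shows. Below is a minimal sketch of what such a sink could look like (my assumption, not the author's implementation): it re-parses the schema string carried in each FlowData, copies the field values into an Avro GenericRecord, and serializes the record to Avro binary. A real job would write those bytes to Kafka, HDFS, or similar rather than just logging their size.
package func;
import bean.FlowData;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import java.io.ByteArrayOutputStream;
public class FlowSinkFunction extends RichSinkFunction<FlowData> {
@Override
public void invoke(FlowData value, Context context) throws Exception {
//Rebuild the Avro schema from the JSON string carried with the record
Schema schema = new Schema.Parser().parse(value.getSchema());
//Copy the extracted field values into a GenericRecord
GenericRecord record = new GenericData.Record(schema);
value.getValues().forEach(record::put);
//Serialize the record to Avro binary
ByteArrayOutputStream out = new ByteArrayOutputStream();
BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
new GenericDatumWriter<GenericRecord>(schema).write(record, encoder);
encoder.flush();
//Demo sink: just report the payload size
System.out.println("avro bytes: " + out.size());
}
}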