A summary of the Flink DataStream operators Map, FlatMap, Filter, KeyBy, Reduce, Fold, and Aggregate, and how to use them.
Map [DataStream->DataStream]
Map: a one-to-one transformation; each input record is turned into exactly one output record.
package com.bigdata.flink.dataStreamMapOperator;
import com.bigdata.flink.beans.UserAction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.Arrays;
/**
* Summary:
* Map: one-to-one transformation
*/
public class DataStreamMapOperator {
public static void main(String[] args) throws Exception{
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Input: user actions. A user clicked or browsed a product at some point in time, together with the product price.
DataStreamSource<UserAction> source = env.fromCollection(Arrays.asList(
new UserAction("userID1", 1293984000, "click", "productID1", 10),
new UserAction("userID2", 1293984001, "browse", "productID2", 8),
new UserAction("userID1", 1293984002, "click", "productID1", 10)
));
// Transform: multiply the product price by 8
SingleOutputStreamOperator<UserAction> result = source.map(new MapFunction<UserAction, UserAction>() {
@Override
public UserAction map(UserAction value) throws Exception {
int newPrice = value.getProductPrice() * 8;
return new UserAction(value.getUserID(), value.getEventTime(), value.getEventType(), value.getProductID(), newPrice);
}
});
// Output: print to the console
// UserAction(userID=userID1, eventTime=1293984002, eventType=click, productID=productID1, productPrice=80)
// UserAction(userID=userID1, eventTime=1293984000, eventType=click, productID=productID1, productPrice=80)
// UserAction(userID=userID2, eventTime=1293984001, eventType=browse, productID=productID2, productPrice=64)
result.print();
env.execute();
}
}
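The same transformation can be written more compactly with a Java 8 lambda. A minimal sketch, assuming a Flink version whose type extraction needs a returns() hint for lambdas (some versions can infer the type without it):
// Lambda form of the map above; returns() guards against lambda type erasure.
SingleOutputStreamOperator<UserAction> result = source
        .map(value -> new UserAction(value.getUserID(), value.getEventTime(),
                value.getEventType(), value.getProductID(), value.getProductPrice() * 8))
        .returns(UserAction.class);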
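The examples import com.bigdata.flink.beans.UserAction, which the original post never shows. From the constructor calls, the getters, and the printed toString format, a minimal reconstruction might look as follows (an assumption, not the author's actual class):
package com.bigdata.flink.beans;
// Presumed UserAction bean, reconstructed from its usage in the examples.
public class UserAction {
    private String userID;
    private long eventTime;
    private String eventType;
    private String productID;
    private int productPrice;
    public UserAction(String userID, long eventTime, String eventType, String productID, int productPrice) {
        this.userID = userID;
        this.eventTime = eventTime;
        this.eventType = eventType;
        this.productID = productID;
        this.productPrice = productPrice;
    }
    public String getUserID() { return userID; }
    public long getEventTime() { return eventTime; }
    public String getEventType() { return eventType; }
    public String getProductID() { return productID; }
    public int getProductPrice() { return productPrice; }
    @Override
    public String toString() {
        return "UserAction(userID=" + userID + ", eventTime=" + eventTime + ", eventType=" + eventType
                + ", productID=" + productID + ", productPrice=" + productPrice + ")";
    }
}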
FlatMap [DataStream->DataStream]
FlatMap: one input record becomes zero or more output records. In the example below, a sentence (one record) is split into its words (multiple records).
package com.bigdata.flink.dataStreamFlatMapOperator;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
/**
* Summary:
* FlatMap: one record becomes any number of records (zero or more)
*/
public class DataStreamFlatMapOperator {
public static void main(String[] args) throws Exception{
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Input: English movie quotes
DataStreamSource<String> source = env
.fromElements(
"You jump I jump",
"Life was like a box of chocolates"
);
// Transform: split sentences containing "chocolates" into one word per record
SingleOutputStreamOperator<String> result = source.flatMap(new FlatMapFunction<String, String>() {
@Override
public void flatMap(String value, Collector<String> out) throws Exception {
if(value.contains("chocolates")){
String[] words = value.split(" ");
for (String word : words) {
out.collect(word);
}
}
}
});
// Output: print to the console
// Life
// was
// like
// a
// box
// of
// chocolates
result.print();
env.execute();
}
}
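A lambda version of the same flatMap is possible, but because the Collector's generic type is erased at compile time, Flink needs an explicit type hint. A sketch:
// Lambda form of the flatMap above; Types.STRING supplies the output type
// that erasure hides from Flink's type extraction.
source.flatMap((String value, Collector<String> out) -> {
            if (value.contains("chocolates")) {
                for (String word : value.split(" ")) {
                    out.collect(word);
                }
            }
        })
        .returns(org.apache.flink.api.common.typeinfo.Types.STRING)
        .print();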
Filter [DataStream->DataStream]
Filter: keep only the records that match a predicate.
package com.bigdata.flink.dataStreamFilterOperator;
import com.bigdata.flink.beans.UserAction;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.Arrays;
/**
* Summary:
* Filter: keep only the records that match a predicate
*/
public class DataStreamFilterOperator {
public static void main(String[] args) throws Exception{
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Input: user actions. A user clicked or browsed a product at some point in time, together with the product price.
DataStreamSource<UserAction> source = env.fromCollection(Arrays.asList(
new UserAction("userID1", 1293984000, "click", "productID1", 10),
new UserAction("userID2", 1293984001, "browse", "productID2", 8),
new UserAction("userID1", 1293984002, "click", "productID1", 10)
));
// Filter: keep only the actions of user userID1
SingleOutputStreamOperator<UserAction> result = source.filter(new FilterFunction<UserAction>() {
@Override
public boolean filter(UserAction value) throws Exception {
return value.getUserID().equals("userID1");
}
});
// Output: print to the console
// UserAction(userID=userID1, eventTime=1293984002, eventType=click, productID=productID1, productPrice=10)
// UserAction(userID=userID1, eventTime=1293984000, eventType=click, productID=productID1, productPrice=10)
result.print();
env.execute();
}
}
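For comparison, the same predicate as a lambda; no type hint is needed here because filter never changes the element type:
// Lambda form of the filter above.
SingleOutputStreamOperator<UserAction> result = source.filter(value -> value.getUserID().equals("userID1"));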
KeyBy [DataStream->KeyedStream]
KeyBy: repartitions the data by the specified key, so that records with the same key land in the same partition.
Notes:
The observable partitioning is strongly tied to the parallelism of the operator downstream of keyBy: if that operator has a parallelism of 1, everything ends up together no matter how the data is keyed.
For POJO types, keyBy(fieldName) can partition by a named field (see the sketch after the example below).
For Tuple types, keyBy(fieldPosition) can partition by field position (also sketched below).
For arbitrary types, as in the example below, keyBy(new KeySelector<...>() {...}) extracts the key explicitly.
package com.bigdata.flink.dataStreamKeyByOperator;
import com.bigdata.flink.beans.UserAction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.Arrays;
/**
* Summary:
* KeyBy: repartition the data by the specified key; records with the same key go to the same partition
*/
public class DataStreamKeyByOperator {
public static void main(String[] args) throws Exception{
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(1);
// Input: user actions. A user clicked or browsed a product at some point in time, together with the product price.
DataStreamSource<UserAction> source = env.fromCollection(Arrays.asList(
new UserAction("userID1", 1293984000, "click", "productID1", 10),
new UserAction("userID2", 1293984001, "browse", "productID2", 8),
new UserAction("userID1", 1293984002, "click", "productID1", 10)
));
// Transform: repartition by the specified key (here, the user ID) so that records with the same key (user ID) land in the same partition
KeyedStream<UserAction, String> result = source.keyBy(new KeySelector<UserAction, String>() {
@Override
public String getKey(UserAction value) throws Exception {
return value.getUserID();
}
});
// Output: print to the console
//3> UserAction(userID=userID1, eventTime=1293984000, eventType=click, productID=productID1, productPrice=10)
//3> UserAction(userID=userID1, eventTime=1293984002, eventType=click, productID=productID1, productPrice=10)
//2> UserAction(userID=userID2, eventTime=1293984001, eventType=browse, productID=productID2, productPrice=8)
result.print().setParallelism(3);
env.execute();
}
}
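The field-name and field-position variants mentioned in the notes would look roughly like this (a sketch for the Flink version this post targets; later releases deprecate these overloads in favor of KeySelector):
// POJO type: key by field name. Requires org.apache.flink.api.java.tuple.Tuple
// and a UserAction that satisfies Flink's POJO rules.
KeyedStream<UserAction, Tuple> byName = source.keyBy("userID");
// Tuple type: key by position, e.g. on a hypothetical DataStream<Tuple2<String, Integer>> tupleStream:
// KeyedStream<Tuple2<String, Integer>, Tuple> byPosition = tupleStream.keyBy(0);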
Reduce [KeyedStream->DataStream]
Reduce: rolling aggregation based on a ReduceFunction, emitting the result of each rolling step downstream.
Note: Reduce emits every intermediate aggregation result, not just a final one.
package com.bigdata.flink.dataStreamReduceOperator;
import com.bigdata.flink.beans.UserAction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.Arrays;
/**
* Summary:
* Reduce: rolling aggregation based on a ReduceFunction, emitting each intermediate result downstream
*/
public class DataStreamReduceOperator {
public static void main(String[] args) throws Exception{
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Input: user actions. A user clicked or browsed a product at some point in time, together with the product price.
DataStreamSource<UserAction> source = env.fromCollection(Arrays.asList(
new UserAction("userID1", 1293984000, "click", "productID1", 10),
new UserAction("userID2", 1293984001, "browse", "productID2", 8),
new UserAction("userID2", 1293984002, "browse", "productID2", 8),
new UserAction("userID2", 1293984003, "browse", "productID2", 8),
new UserAction("userID1", 1293984002, "click", "productID1", 10),
new UserAction("userID1", 1293984003, "click", "productID3", 10),
new UserAction("userID1", 1293984004, "click", "productID1", 10)
));
// Transform: repartition the data with keyBy
KeyedStream<UserAction, String> keyedStream = source.keyBy(new KeySelector<UserAction, String>() {
@Override
public String getKey(UserAction value) throws Exception {
return value.getUserID();
}
});
// Transform: rolling aggregation with reduce. Here, the running total product price per user.
SingleOutputStreamOperator<UserAction> result = keyedStream.reduce(new ReduceFunction<UserAction>() {
@Override
public UserAction reduce(UserAction value1, UserAction value2) throws Exception {
int newProductPrice = value1.getProductPrice() + value2.getProductPrice();
return new UserAction(value1.getUserID(), -1, "", "", newProductPrice);
}
});
// Output: print each rolling aggregation result to the console.
//3> UserAction(userID=userID2, eventTime=1293984001, eventType=browse, productID=productID2, productPrice=8)
//3> UserAction(userID=userID2, eventTime=-1, eventType=, productID=, productPrice=16)
//3> UserAction(userID=userID2, eventTime=-1, eventType=, productID=, productPrice=24)
//4> UserAction(userID=userID1, eventTime=1293984000, eventType=click, productID=productID1, productPrice=10)
//4> UserAction(userID=userID1, eventTime=-1, eventType=, productID=, productPrice=20)
//4> UserAction(userID=userID1, eventTime=-1, eventType=, productID=, productPrice=30)
//4> UserAction(userID=userID1, eventTime=-1, eventType=, productID=, productPrice=40)
result.print();
env.execute();
}
}
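Since ReduceFunction is a functional interface and reduce preserves the element type, the same rolling aggregation can be written as a lambda without any type hint. A sketch:
// Lambda form of the reduce above.
SingleOutputStreamOperator<UserAction> result = keyedStream.reduce(
        (value1, value2) -> new UserAction(value1.getUserID(), -1, "", "",
                value1.getProductPrice() + value2.getProductPrice()));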
Fold [KeyedStream->DataStream]
Fold: rolling fold based on an initial value and a FoldFunction, emitting the result of each fold step downstream.
Note: Fold emits every intermediate fold result. (fold/FoldFunction is deprecated in newer Flink releases; a state-based alternative is sketched after the example.)
package com.bigdata.flink.dataStreamFoldOperator;
import com.bigdata.flink.beans.UserAction;
import org.apache.flink.api.common.functions.FoldFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.Arrays;
/**
* Summary:
* Fold: rolling fold over an initial value with a custom FoldFunction, emitting a new value for each input
*/
public class DataStreamFoldOperator {
public static void main(String[] args) throws Exception{
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Input: user actions. A user clicked or browsed a product at some point in time, together with the product price.
DataStreamSource<UserAction> source = env.fromCollection(Arrays.asList(
new UserAction("userID1", 1293984000, "click", "productID1", 10),
new UserAction("userID2", 1293984001, "browse", "productID2", 8),
new UserAction("userID2", 1293984002, "browse", "productID2", 8),
new UserAction("userID2", 1293984003, "browse", "productID2", 8),
new UserAction("userID1", 1293984002, "click", "productID1", 10),
new UserAction("userID1", 1293984003, "click", "productID3", 10),
new UserAction("userID1", 1293984004, "click", "productID1", 10)
));
// Transform: repartition the data with keyBy
KeyedStream<UserAction, String> keyedStream = source.keyBy(new KeySelector<UserAction, String>() {
@Override
public String getKey(UserAction value) throws Exception {
return value.getUserID();
}
});
// Transform: rolling fold from an initial value with a FoldFunction
SingleOutputStreamOperator<String> result = keyedStream.fold("Browsed products and prices:", new FoldFunction<UserAction, String>() {
@Override
public String fold(String accumulator, UserAction value) throws Exception {
if(accumulator.startsWith("userID")){
return accumulator + " -> " + value.getProductID()+":"+value.getProductPrice();
}else {
return value.getUserID()+" " +accumulator + " -> " + value.getProductID()+":"+value.getProductPrice();
}
}
});
// Output: print to the console
// every incoming record triggers a new fold result
// userID1 Browsed products and prices: -> productID1:10
// userID1 Browsed products and prices: -> productID1:10 -> productID1:10
// userID1 Browsed products and prices: -> productID1:10 -> productID1:10 -> productID3:10
// userID1 Browsed products and prices: -> productID1:10 -> productID1:10 -> productID3:10 -> productID1:10
// userID2 Browsed products and prices: -> productID2:8
// userID2 Browsed products and prices: -> productID2:8 -> productID2:8
// userID2 Browsed products and prices: -> productID2:8 -> productID2:8 -> productID2:8
result.print();
env.execute();
}
}
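Because fold is deprecated, the usual replacement is to keep the accumulator in keyed state inside a KeyedProcessFunction. The sketch below reproduces the example's behavior under that assumption (it additionally needs imports for KeyedProcessFunction, ValueState, ValueStateDescriptor, Configuration, and Collector):
// Fold-like rolling accumulation with keyed state instead of the deprecated fold().
SingleOutputStreamOperator<String> result = keyedStream.process(
        new KeyedProcessFunction<String, UserAction, String>() {
            private transient ValueState<String> acc; // per-key accumulator

            @Override
            public void open(Configuration parameters) {
                acc = getRuntimeContext().getState(
                        new ValueStateDescriptor<>("acc", String.class));
            }

            @Override
            public void processElement(UserAction value, Context ctx, Collector<String> out) throws Exception {
                String current = acc.value();
                if (current == null) {
                    current = value.getUserID() + " Browsed products and prices:";
                }
                current = current + " -> " + value.getProductID() + ":" + value.getProductPrice();
                acc.update(current);
                out.collect(current);
            }
        });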
Aggregate [KeyedStream->DataStream]
Aggregate: rolling aggregation on a KeyedStream by a specified field, emitting each intermediate result. The built-in aggregations are sum, min, minBy, max, and maxBy.
Notes:
The difference between max(field) and maxBy(field): maxBy returns the whole record that holds the maximum field value, whereas max takes the first record and only replaces its field with the running maximum, returning that first record. The same distinction holds for min and minBy.
The aggregate operators emit the result of every rolling aggregation step.
package com.bigdata.flink.dataStreamAggregateOperator;
import com.bigdata.flink.beans.UserActionLogPOJO;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.ArrayList;
/**
* Summary:
* Aggregate: min(), minBy(), max(), maxBy() rolling aggregation, emitting each intermediate result
*/
public class DataStreamAggregateOperator {
public static void main(String[] args) throws Exception{
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Input: user actions. A user clicked or browsed a product at some point in time, together with the product price.
ArrayList<UserActionLogPOJO> userActionLogs = new ArrayList<>();
UserActionLogPOJO userActionLog1 = new UserActionLogPOJO();
userActionLog1.setUserID("userID1");
userActionLog1.setProductID("productID3");
userActionLog1.setProductPrice(10);
userActionLogs.add(userActionLog1);
UserActionLogPOJO userActionLog2 = new UserActionLogPOJO();
userActionLog2.setUserID("userID2");
userActionLog2.setProductPrice(10);
userActionLogs.add(userActionLog2);
UserActionLogPOJO userActionLog3 = new UserActionLogPOJO();
userActionLog3.setUserID("userID1");
userActionLog3.setProductID("productID5");
userActionLog3.setProductPrice(30);
userActionLogs.add(userActionLog3);
DataStreamSource<UserActionLogPOJO> source = env.fromCollection(userActionLogs);
// Transform: repartition the data with keyBy
// Here, UserActionLogPOJO is a POJO type, so keyBy("userID") would also work
KeyedStream<UserActionLogPOJO, String> keyedStream = source.keyBy(new KeySelector<UserActionLogPOJO, String>() {
@Override
public String getKey(UserActionLogPOJO value) throws Exception {
return value.getUserID();
}
});
// Transform: aggregate and print
// rolling sum
//keyedStream.sum("productPrice").print();
// rolling max (max: emits the first record with its productPrice replaced by the running maximum)
keyedStream.max("productPrice").print();
// rolling max (maxBy: emits the whole record that holds the maximum productPrice)
keyedStream.maxBy("productPrice").print();
// rolling min
//keyedStream.min("productPrice").print();
// rolling min (minBy)
//keyedStream.minBy("productPrice").print();
env.execute();
}
}
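As with UserAction, the original post does not show UserActionLogPOJO. Given the no-argument construction and the setters used above, it is presumably a textbook Flink POJO, roughly (an assumption):
package com.bigdata.flink.beans;
// Presumed UserActionLogPOJO: public class, no-arg constructor, getters/setters,
// which is exactly the shape keyBy("userID") requires of a POJO.
public class UserActionLogPOJO {
    private String userID;
    private String productID;
    private int productPrice;
    public UserActionLogPOJO() { }
    public String getUserID() { return userID; }
    public void setUserID(String userID) { this.userID = userID; }
    public String getProductID() { return productID; }
    public void setProductID(String productID) { this.productID = productID; }
    public int getProductPrice() { return productPrice; }
    public void setProductPrice(int productPrice) { this.productPrice = productPrice; }
}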
————————————————
Copyright notice: this is an original article by CSDN blogger "wangpei1949", licensed under CC 4.0 BY-SA; please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/wangpei1949/article/details/101625394