# Shell-transcript header (was: [root@master pyflink]# cat t201.py)
import argparse
import logging
import sys
from pyflink.common import WatermarkStrategy, Encoder, Types
from pyflink.datastream import StreamExecutionEnvironment, RuntimeExecutionMode
from pyflink.datastream.connectors.file_system import FileSource, StreamFormat, FileSink, OutputFileConfig, RollingPolicy
import time
# Sample input: a single line containing repeated words for the word-count demo.
word_count_data = [" Hello all People this is Flink all People"]
# Entry point of a PyFlink job: obtain the streaming execution environment.
env = StreamExecutionEnvironment.get_execution_environment()
# The input is bounded (a fixed list), so run the pipeline in batch mode.
env.set_runtime_mode(RuntimeExecutionMode.BATCH)
# write all the data to one file
env.set_parallelism(1)
# Build a DataStream from the in-memory Python list.
ds = env.from_collection(word_count_data)
# Debug output: show the DataStream object and the API it exposes.
print(ds)
print(dir(ds))
def split(line):
    """Yield each whitespace-separated word of *line*.

    Uses str.split() with no arguments, so leading, trailing, and
    repeated whitespace are ignored; an empty or all-whitespace line
    yields nothing.

    NOTE: in the pasted transcript the ``yield from`` line had lost its
    indentation (IndentationError); it is restored here.
    """
    yield from line.split()
# flat_map emits one element per word of each input line; map wraps each
# word as a (word, 1) tuple; key_by logically partitions the stream by the
# word itself (field 0 of the tuple).
ds = ds.flat_map(split).map(lambda i: (i, 1), output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \
.key_by(lambda i: i[0])
#print(ds.print())
# Marker lines bracketing the sink registration below, to make the output
# ordering visible in the console.
print('1111111111111')
# NOTE(review): ds.print() presumably only *registers* a print sink and
# returns a sink object — the outer print() shows that object's repr, while
# the actual records appear when env.execute() runs the job; confirm against
# the PyFlink DataStream API docs.
print(ds.print())
print('222222222222')
# Submit and run the assembled dataflow.
env.execute()
# Notes on the key_by function (keyBy):
# Meaning: groups elements by the specified key — logically splits the
# DataStream into disjoint partitions; events with the same key are routed
# to the same partition (implemented internally via hash partitioning).
# Transformation: DataStream -> KeyedStream