writeAsText (rarely used in stream processing, since streaming jobs seldom write results to plain files)
Writes elements as strings, one per line; each string is obtained by calling the element's toString() method.
print() / printToErr(): prints each element's toString() value to standard output or to the standard error stream.
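For example, a minimal sketch of these built-in sinks (the output path and sample elements are made up for illustration):

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SimpleSinkDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // writeAsText: one line per element, produced by toString()
        env.fromElements("flink", "kafka", "redis")
                .writeAsText("/tmp/words");   // hypothetical output path

        // print / printToErr: toString() to stdout / stderr
        env.fromElements(1, 2, 3).print();

        env.execute("SimpleSinkDemo");
    }
}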
Custom output via addSink (e.g. Kafka, Redis); a Redis example follows, and a Kafka sketch appears after it.
<!-- https://mvnrepository.com/artifact/org.apache.bahir/flink-connector-redis -->
<dependency>
    <groupId>org.apache.bahir</groupId>
    <artifactId>flink-connector-redis_2.11</artifactId>
    <version>1.0</version>
</dependency>
Sample code:
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;

/**
 * Receives socket data and saves it to Redis as a list,
 * i.e. for every element it runs: LPUSH l_words <value>
 */
public class StreamingDemoToRedis {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStreamSource<String> text = env.socketTextStream("hadoop", 9000, "\n");

        // Wrap each line into a (key, value) tuple so the sink can run: lpush l_words <word>
        DataStream<Tuple2<String, String>> l_wordsData = text.map(new MapFunction<String, Tuple2<String, String>>() {
            @Override
            public Tuple2<String, String> map(String value) throws Exception {
                return new Tuple2<>("l_words", value);
            }
        });

        // Redis connection configuration
        FlinkJedisPoolConfig conf = new FlinkJedisPoolConfig.Builder().setHost("hadoop").setPort(6379).build();

        // Create the RedisSink and attach it to the stream
        RedisSink<Tuple2<String, String>> redisSink = new RedisSink<>(conf, new MyRedisMapper());
        l_wordsData.addSink(redisSink);

        env.execute("StreamingDemoToRedis");
    }

    public static class MyRedisMapper implements RedisMapper<Tuple2<String, String>> {
        // Which Redis command to issue; LPUSH matches the list semantics described above
        @Override
        public RedisCommandDescription getCommandDescription() {
            return new RedisCommandDescription(RedisCommand.LPUSH);
        }

        // Extract the Redis key from the incoming tuple
        @Override
        public String getKeyFromData(Tuple2<String, String> data) {
            return data.f0;
        }

        // Extract the Redis value from the incoming tuple
        @Override
        public String getValueFromData(Tuple2<String, String> data) {
            return data.f1;
        }
    }
}
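To try it out, start a socket source on the hadoop host (e.g. nc -l 9000), type a few words, and inspect the list with redis-cli lrange l_words 0 -1.

As noted above, addSink also covers Kafka. Below is a minimal sketch of a Kafka sink, not part of the original sample: it assumes the matching flink-connector-kafka_2.11 dependency is on the classpath, and the broker address (hadoop:9092) and topic name (l_words) are made-up placeholders.

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

public class StreamingDemoToKafka {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Same socket source as the Redis demo
        env.socketTextStream("hadoop", 9000, "\n")
                // broker list, target topic, and value serializer (all placeholders)
                .addSink(new FlinkKafkaProducer<String>("hadoop:9092", "l_words", new SimpleStringSchema()));

        env.execute("StreamingDemoToKafka");
    }
}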