Java version
Batch processing
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.util.Collector;
/**
 * Batch word count: reads a text file given via --path, splits each line on
 * spaces, and prints the per-word occurrence counts.
 */
public class WordCount {
    public static void main(String[] args) throws Exception {
        // Set up the batch execution environment.
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // Run with three parallel task slots.
        env.setParallelism(3);
        // Read the input file path from the command line: --path <file>.
        ParameterTool params = ParameterTool.fromArgs(args);
        String inputPath = params.get("path");
        // Load the file as a data set of lines.
        DataSource<String> textLines = env.readTextFile(inputPath);
        // Tokenize each line into (word, 1) pairs, then group by the word
        // (tuple field 0) and sum the counts (tuple field 1).
        AggregateOperator<Tuple2<String, Integer>> wordCounts = textLines
                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String line, Collector<Tuple2<String, Integer>> collector) throws Exception {
                        for (String token : line.split(" ")) {
                            collector.collect(new Tuple2<>(token, 1));
                        }
                    }
                })
                .groupBy(0)
                .sum(1);
        // Print the aggregated counts (triggers execution for batch jobs).
        wordCounts.print();
    }
}
Stream processing
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
/**
 * Streaming word count: connects to a socket text stream given via
 * --host and --port, splits each line on spaces, and continuously prints
 * running per-word counts.
 */
public class StreamWordCount {
    public static void main(String[] args) throws Exception {
        // Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Read host and port from the command line: --host <host> --port <port>.
        // NOTE: the parameter key was previously the typo "post"; fixed to "port"
        // to match the comment intent and the Scala counterpart.
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        String host = parameterTool.get("host");
        int port = parameterTool.getInt("port");
        // Consume an unbounded stream of text lines from the socket.
        DataStreamSource<String> inputDataStream = env.socketTextStream(host, port);
        // Unbounded streams have no groupBy (grouping requires the full data set);
        // keyBy partitions the stream by key instead.
        // BUG FIX: the original used keyBy(1).sum(0), i.e. keyed by the Integer
        // count and summed field 0 (the String word), which fails at runtime.
        // Key by the word (field 0) and sum the count (field 1).
        SingleOutputStreamOperator<Tuple2<String, Integer>> resultStream =
                inputDataStream.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
                        for (String word : value.split(" ")) {
                            out.collect(new Tuple2<>(word, 1));
                        }
                    }
                })
                .keyBy(0)
                .sum(1);
        // Print the running counts.
        resultStream.print();
        // Streaming jobs only run once execute() is called.
        env.execute();
    }
}
Scala version
Batch processing
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.api.scala._
/**
 * Batch word count: reads the text file given via --inputPath, tokenizes
 * each line on spaces, and prints per-word occurrence counts.
 */
object WordCount {
  def main(args: Array[String]): Unit = {
    // Batch execution environment.
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    // Resolve the input file path from the command line: --inputPath <file>.
    val params: ParameterTool = ParameterTool.fromArgs(args)
    val path: String = params.get("inputPath")
    // Read the file as a data set of lines.
    val lines: DataSet[String] = env.readTextFile(path)
    // Tokenize, pair each word with 1, group by the word (tuple field 0),
    // and sum the counts (tuple field 1).
    val counts: DataSet[(String, Int)] = lines
      .flatMap(line => line.split(" "))
      .map(word => (word, 1))
      .groupBy(0)
      .sum(1)
    // Print the aggregated counts (triggers execution for batch jobs).
    counts.print()
  }
}
Stream processing
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.api.scala._
// 流处理word count
/**
 * Streaming word count: connects to the socket text stream given via
 * --host and --port and continuously prints running per-word counts.
 */
object StreamWordCount {
  def main(args: Array[String]): Unit = {
    // Streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // env.setParallelism(8)
    // env.disableOperatorChaining()
    // Resolve socket host and port from the command line: --host <h> --port <p>.
    val params: ParameterTool = ParameterTool.fromArgs(args)
    val hostName: String = params.get("host")
    val portNumber: Int = params.getInt("port")
    // Consume an unbounded stream of text lines from the socket.
    val lines: DataStream[String] = env.socketTextStream(hostName, portNumber)
    // Tokenize, drop empty tokens, pair each word with 1, partition the
    // stream by the word (tuple field 0), and keep a running sum of field 1.
    val counts: DataStream[(String, Int)] = lines
      .flatMap(line => line.split(" "))
      .filter(token => token.nonEmpty)
      .map(token => (token, 1))
      .keyBy(0)
      .sum(1)
    // Print with parallelism 1 so output lines are not interleaved.
    counts.print().setParallelism(1)
    // Streaming jobs only run once execute() is called.
    env.execute("stream word count")
  }
}