1、pom依赖
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-scala_2.11</artifactId>
<version>1.7.2</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-scala_2.11</artifactId>
<version>1.7.2</version>
</dependency>
2、Flink批处理wordcount
import org.apache.flink.api.scala._
//批处理的word count
//Batch word count using Flink's DataSet API.
object Demo01_BatchWordCount {
  def main(args: Array[String]): Unit = {
    //Obtain the batch execution environment.
    val batchEnv: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment

    //Read the input lines from a local text file.
    val lines: DataSet[String] = batchEnv.readTextFile("D:\\test\\b.txt")

    //Split each line into words, pair every word with 1, group by the word
    //(tuple field 0 -- the DataSet API has no groupByKey) and sum the counts
    //(tuple field 1).
    val wordCounts: AggregateDataSet[(String, Int)] = lines
      .flatMap(line => line.split(" "))
      .map(word => (word, 1))
      .groupBy(0)
      .sum(1)

    //Print the aggregated result to stdout.
    wordCounts.print()
  }
}
3、Flink流处理wordcount
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.api.datastream.DataStreamSource
import org.apache.flink.streaming.api.scala._
//流处理word Count
//Streaming word count using Flink's DataStream API.
object Demo02_StreamWordCount {
  def main(args: Array[String]): Unit = {
    //Create a streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    //env.setParallelism(2) //set the parallelism explicitly if needed

    //Read a socket text stream. NOTE(review): host and port are hard-coded;
    //the commented-out ParameterTool variant below is preferable for real use.
    val inputDataStream: DataStream[String] = env.socketTextStream("192.168.136.20",7777)
    /*
    //Alternative: take host/port from the command line instead of hard-coding,
    //e.g. program arguments: --host <host> --port <port>
    val paramTool: ParameterTool = ParameterTool.fromArgs(args)
    val host: String = paramTool.get("host")
    val port: Int = paramTool.getInt("port")
    val inputDataStream: DataStream[String] = env.socketTextStream(host, port)
    */

    //Split each line into words, emit (word, 1), key by the word
    //(tuple field 0 -- keyBy plays the role groupBy has in the batch API)
    //and maintain a running sum of the counts (tuple field 1).
    val resultDataStream: DataStream[(String, Int)] = inputDataStream
      .flatMap(_.split(" "))
      .map((_, 1))
      .keyBy(0)
      .sum(1)
    resultDataStream.print()

    //Launch the job; streaming programs do nothing until execute() is called.
    env.execute("stream word count") //fixed job-name typo: was "stream world count"
  }
}
测试:
# 在7777端口启动监听（需在提交Flink作业之前运行）
nc -lk 7777