批处理单词计数(BatchWordCount)
package com.liao.chapter02
import org.apache.flink.api.scala._
/**
 * Batch word count using the (legacy) DataSet API: reads a text file,
 * splits each line into words, and prints the count per word.
 */
object BatchWordCount {
  def main(args: Array[String]): Unit = {
    // 1. Set up the batch execution environment.
    val env = ExecutionEnvironment.getExecutionEnvironment

    // 2-5. Read lines, tokenize on single spaces, attach an initial count
    // of 1 to each word, group by the word (tuple field 0) and sum the
    // counts (tuple field 1).
    val counts = env
      .readTextFile("input/words.txt")
      .flatMap(_.split(" "))
      .map(word => (word, 1))
      .groupBy(0)
      .sum(1)

    // 6. print() is a sink that triggers execution in the DataSet API,
    // so no explicit execute() call is required here.
    counts.print()
  }
}
有界流单词计数(BoundedStreamWordCount)
package com.liao.chapter02
import org.apache.flink.streaming.api.scala._
/**
 * Bounded-stream word count: processes a finite text file through the
 * DataStream API and prints a running count per word.
 */
object BoundedStreamWordCount {
  def main(args: Array[String]): Unit = {
    // 1. Set up the streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // 2. Read the file as a (bounded) stream of lines.
    val lines = env.readTextFile("input/words.txt")

    // 3-5. Tokenize on single spaces, pair each word with a count of 1,
    // key the stream by the word itself, then keep a running sum of the
    // counts (tuple field 1).
    val counts = lines
      .flatMap(_.split(" "))
      .map(word => (word, 1))
      .keyBy(_._1)
      .sum(1)

    // 6. Emit each updated count to stdout.
    counts.print()

    // DataStream jobs are lazy: nothing runs until execute() is called.
    env.execute()
  }
}
无界流单词计数(StreamWordCount)
package com.liao.chapter02
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.api.scala._
/**
 * Unbounded-stream word count: reads lines from a socket text stream and
 * prints a running count per word.
 *
 * The socket address is taken from the command line (--host ... --port ...).
 * Defaults are supplied for both flags: previously a missing --host made
 * ParameterTool.get return null (passed straight into socketTextStream) and
 * a missing --port made getInt throw, so the job crashed instead of starting.
 */
object StreamWordCount {
  def main(args: Array[String]): Unit = {
    // 1. Set up the streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // 2. Resolve host/port from program arguments, falling back to
    //    localhost:7777 (the values used by the commented-out original)
    //    when a flag is absent.
    val parameterTool = ParameterTool.fromArgs(args)
    val hostname = parameterTool.get("host", "localhost")
    val port = parameterTool.getInt("port", 7777)

    // 3-5. Connect to the socket, tokenize each line on single spaces,
    //      pair each word with a count of 1, key by the word, and keep a
    //      running sum of the counts (tuple field 1).
    val sum = env
      .socketTextStream(hostname, port)
      .flatMap(_.split(" "))
      .map(word => (word, 1))
      .keyBy(_._1)
      .sum(1)

    // 6. Emit updates and launch the job (DataStream jobs are lazy until
    //    execute() is called).
    sum.print()
    env.execute()
  }
}