package hgs.flink_lesson
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.api.scala._
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.core.fs.FileSystem.WriteMode
import org.apache.flink.api.common.accumulators.Accumulator
import org.apache.flink.api.common.accumulators.IntCounter
import scala.collection.immutable.List
import scala.collection.mutable.ListBuffer
import scala.collection.immutable.HashMap
//import StreamExecutionEnvironment.class
object WordCount {
  /**
   * Batch word count over a text file, driven by CLI parameters.
   *
   * Expects `--input <path>` (required) and optionally `--output` (currently
   * only echoed; results are written to a hard-coded CSV path below).
   */
  def main(args: Array[String]): Unit = {
    val params = ParameterTool.fromArgs(args)

    // 1. Obtain an execution environment. For a streaming job this would be
    //    StreamExecutionEnvironment instead.
    val env = ExecutionEnvironment.getExecutionEnvironment
    // Register the CLI parameters as global job parameters so they are
    // visible in the job's runtime configuration.
    env.getConfig.setGlobalJobParameters(params)

    println(params.get("input"))
    println(params.get("output"))

    // Guard clause instead of a `return` buried inside a val initializer.
    if (!params.has("input")) {
      println("Please specify the input file directory.")
      return
    }

    // 2. Load the initial data set.
    val text = env.readTextFile(params.get("input"))
    // NOTE: count() is an eager sink in the DataSet API — it triggers an
    // execution of the plan up to this point.
    println("lines " + text.count())

    // 3. Declare the transformations: tokenize to lowercase words, pair each
    //    with 1, group, and sum. Unlike Spark's groupBy, the DataSet API's
    //    groupBy takes a tuple-field index; sum(1) replaces the original
    //    hand-rolled reduceGroup accumulation loop.
    val counts = text
      .flatMap(_.toLowerCase.split("\\W+").filter(_.nonEmpty))
      .map((_, 1))
      .groupBy(0)
      .sum(1)

    // 4. Declare where the computed result is written.
    counts.writeAsCsv("file:/d:/re.txt", "\n", " ", WriteMode.OVERWRITE)

    // 5. Trigger execution of the job.
    env.execute("Scala WordCount Example")
  }
}
// Adapted from ITPUB blog: http://blog.itpub.net/31506529/viewspace-2564530/
// (credit the original source when reproducing this code)