WordCount in Java
package com.bjsxt.scala;
import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;
public class Java_WordCount {
    public static void main(String[] args) {
        /**
         * SparkConf holds the Spark configuration:
         * 1. sets the application name shown on the Spark web UI
         * 2. sets the run mode (the master):
         *   local: mostly for local testing, e.g. running code from Eclipse or IDEA
         *   standalone: Spark's built-in resource scheduler; supports distributed
         *     clusters, and a Spark application can run on it
         *   yarn: the resource scheduler of the Hadoop ecosystem; Spark applications
         *     can also run on YARN
         *   mesos: another resource scheduler
         * (Alternate master URLs are sketched below.)
         */
        SparkConf conf = new SparkConf();
        conf.setAppName("wc");
        conf.setMaster("local");
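        // A sketch of alternate master settings for the modes listed above; the
        // host names and ports are placeholders, not values from this post:
        // conf.setMaster("spark://node1:7077");   // standalone cluster
        // conf.setMaster("yarn-client");          // YARN (Spark 1.x client mode)
        // conf.setMaster("mesos://node1:5050");   // Mesos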
        /**
         * SparkContext is the Spark context, the only gateway to the cluster.
         * To process data with Spark, you must create the context first.
         */
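        // Note: Spark allows only one active SparkContext per JVM; stop the
        // current one before creating another.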
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Read the input file; the relative path "./words" resolves against the working directory.
        JavaRDD<String> lines = sc.textFile("./words");
        // Split each line into words. This is the Spark 1.x API, where
        // FlatMapFunction.call returns Iterable; in Spark 2.x+ it returns Iterator.
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Iterable<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" "));
            }
        });
        // Map each word to a (word, 1) pair.
        JavaPairRDD<String, Integer> pairWords = words.mapToPair(new PairFunction<String, String, Integer>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<String, Integer>(word, 1);
            }
        });
        // reduceByKey first groups by key, then aggregates the values within each group.
        JavaPairRDD<String, Integer> reduceResult = pairWords.reduceByKey(new Function2<Integer, Integer, Integer>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
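        // For example (the input words here are illustrative, not from the post):
        //   pairWords:    (hello,1), (spark,1), (hello,1)
        //   reduceResult: (hello,2), (spark,1)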
        // Swap to (count, word) so the pairs can be sorted by count via sortByKey.
        JavaPairRDD<Integer, String> mapToPair = reduceResult.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tuple) throws Exception {
                return new Tuple2<Integer, String>(tuple._2, tuple._1);
            }
        });
        // false = descending order, so the most frequent words come first.
        JavaPairRDD<Integer, String> sortByKey = mapToPair.sortByKey(false);
        // Swap back to (word, count) for output.
        JavaPairRDD<String, Integer> result = sortByKey.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> tuple) throws Exception {
                return new Tuple2<String, Integer>(tuple._2, tuple._1);
            }
        });
        result.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            private static final long serialVersionUID = 1L;
            @Override
            public void call(Tuple2<String, Integer> tuple) throws Exception {
                System.out.println(tuple);
            }
        });
        sc.stop();
    }
}
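Since every one of these operator interfaces has a single abstract method, the same pipeline can be written much more compactly with Java 8 lambdas. A minimal sketch, assuming Java 8 and the same Spark 1.x API as above (where flatMap expects an Iterable); `lines` is the JavaRDD<String> read earlier:
// The same pipeline, with lambdas replacing the anonymous classes:
JavaRDD<String> words = lines.flatMap(line -> Arrays.asList(line.split(" ")));
JavaPairRDD<String, Integer> pairWords = words.mapToPair(word -> new Tuple2<>(word, 1));
JavaPairRDD<String, Integer> counts = pairWords.reduceByKey((v1, v2) -> v1 + v2);
counts.foreach(tuple -> System.out.println(tuple));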
————————————————————————————————————————————————————————
WordCount in Scala
package com.bjsxt.scala
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
object SparkWordCount {
  def main(args: Array[String]): Unit = {
    /* Version 1 (commented out): step-by-step transformations, sorted by count
    val conf = new SparkConf()
    conf.setAppName("wordcount")
    conf.setMaster("local")
    val sc = new SparkContext(conf)
    val lines = sc.textFile("./words")
    val words = lines.flatMap(line => {
      line.split(" ")
    })
    val pairWords = words.map(word => {
      new Tuple2(word, 1)
    })
    // reduceByKey first groups by key, then aggregates the values within each group
    val reduceResult = pairWords.reduceByKey((v1, v2) => { v1 + v2 })
    // sortBy sorts by an arbitrary key function; false = descending
    val result = reduceResult.sortBy(tuple => { tuple._2 }, false)
    result.foreach(tuple => {
      println(tuple)
    })
    */
println("*************************************************")
/* val conf =new SparkConf().setAppName("wordCount").setMaster("local")
val sc=new SparkContext(conf)
sc.textFile("./words").flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_).foreach(println)
sc.stop()*/
println("""""""""""""""""""""""""""""""""""""""""""""""""""""""""")
    // Version 3: the version that actually runs; counts are printed unsorted
    val conf = new SparkConf()
    conf.setAppName("wordCount")
    conf.setMaster("local")
    val sc = new SparkContext(conf)
    val lines = sc.textFile("./words")
    val words = lines.flatMap(line => {
      line.split(" ")
    })
    val pairWords = words.map(word => {
      new Tuple2(word, 1)
    })
    /**
     * reduceByKey first groups by key, then aggregates the values within each group.
     */
    val result = pairWords.reduceByKey((v1, v2) => { v1 + v2 })
    result.foreach(tuple => {
      println(tuple)
    })
    sc.stop()
  }
}
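Note that Version 1's sortBy(tuple => tuple._2, false) achieves in a single call what the Java version above needs two mapToPair swaps around sortByKey for: sortBy orders the RDD by an arbitrary key function, while sortByKey can only sort a pair RDD by its key.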