Original code
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    // Run locally
    conf.setMaster("local")
    conf.setAppName("WC")
    // Create the context object
    val sc = new SparkContext(conf)
    val lines: RDD[String] = sc.textFile("./data/words")
    // Split each line into words: hello, scala, ...
    val words: RDD[String] = lines.flatMap(line => line.split(" "))
    // Pair each word with a 1: (hello, 1)
    val pairs: RDD[(String, Int)] = words.map(word => (word, 1))
    // Sum the 1s per word: (hello, 33)
    val result: RDD[(String, Int)] = pairs.reduceByKey((v1: Int, v2: Int) => v1 + v2)
    result.foreach(tp => println(tp))
  }
}
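To sanity-check the pipeline without creating ./data/words on disk, the same three stages can run against an in-memory collection. This is a minimal sketch; the sample lines, the WordCountDemo name, and the local[*] master are illustrative assumptions, not part of the original program:

import org.apache.spark.{SparkConf, SparkContext}

object WordCountDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("WC-demo").setMaster("local[*]"))
    // Hypothetical input standing in for ./data/words
    val lines = sc.parallelize(Seq("hello scala", "hello spark"))
    val counts = lines.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    // collect() brings the small result back to the driver:
    // (hello,2), (scala,1), (spark,1), in no fixed order
    counts.collect().foreach(println)
    sc.stop()
  }
}

Note that nothing runs until collect() (or foreach) is called: parallelize/textFile, flatMap, map, and reduceByKey are all lazy transformations, which is why the chained versions below behave exactly like this step-by-step one.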
Evolution
import org.apache.spark.{SparkConf, SparkContext}

object WordCount {
  def main(args: Array[String]): Unit = {
    new SparkContext(new SparkConf().setAppName("WC").setMaster("local")).textFile("./data/words").flatMap(line => line.split(" ")).map(word => (word, 1)).reduceByKey((v1: Int, v2: Int) => v1 + v2).foreach(tp => println(tp))
  }
}
Super evolution
import org.apache.spark.{SparkConf, SparkContext}

object WordCount {
  def main(args: Array[String]): Unit = {
    new SparkContext(new SparkConf().setAppName("WC").setMaster("local")).textFile("./data/words").flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).foreach(println)
  }
}
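One thing both chained versions give up is a reference to the SparkContext, so the application never calls stop(). A sketch that keeps the terse style but still shuts the context down cleanly; only the val name sc is added:

import org.apache.spark.{SparkConf, SparkContext}

object WordCount {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("WC").setMaster("local"))
    sc.textFile("./data/words").flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).foreach(println)
    // Holding on to the context lets us release its resources when the job is done
    sc.stop()
  }
}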
Java implementation
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.*;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;

/**
 * WordCount in Java, with the results sorted by count in descending order.
 */
public class App {
    public static void main(String[] args) {
        // Equivalent lambda version (without the sorting step):
        // SparkConf conf = new SparkConf();
        // conf.setMaster("local");
        // conf.setAppName("XXX");
        // JavaSparkContext sc = new JavaSparkContext(conf);
        // JavaRDD<String> lines = sc.textFile("./data/words");
        // JavaRDD<String> words = lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator());
        // JavaPairRDD<String, Integer> pairWords = words.mapToPair(word -> new Tuple2<>(word, 1));
        // JavaPairRDD<String, Integer> result = pairWords.reduceByKey((v1, v2) -> v1 + v2);
        // result.foreach(tp -> System.out.println(tp));
        SparkConf conf = new SparkConf();
        conf.setMaster("local");
        conf.setAppName("xxx");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<String> lines = sc.textFile("./data/words");
        // Split each line into words
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });
        // Pair each word with a 1
        JavaPairRDD<String, Integer> pairWords = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<>(word, 1);
            }
        });
        // Sum the 1s per word
        JavaPairRDD<String, Integer> reduce = pairWords.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        // Swap (word, count) to (count, word) so we can sort by key
        JavaPairRDD<Integer, String> transRDD = reduce.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tp) throws Exception {
                return new Tuple2<Integer, String>(tp._2, tp._1);
            }
        });
        // Sort by count, descending
        JavaPairRDD<Integer, String> sortByRDD = transRDD.sortByKey(false);
        // Swap back to (word, count)
        JavaPairRDD<String, Integer> result = sortByRDD.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> tp) throws Exception {
                return tp.swap();
            }
        });
        result.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> tuple2) throws Exception {
                System.out.println(tuple2);
            }
        });
        sc.stop();
    }
}
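For comparison, the swap → sortByKey(false) → swap-back sequence in the Java version collapses to a single sortBy in Scala, since sortBy takes the sort key as a function. A sketch in the super-evolution style, assuming the same ./data/words input; the SortedWordCount name is mine:

import org.apache.spark.{SparkConf, SparkContext}

object SortedWordCount {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("WC").setMaster("local"))
    sc.textFile("./data/words")
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      // Sort by the count (descending) directly; no swap/swap-back needed
      .sortBy(_._2, ascending = false)
      .foreach(println)
    sc.stop()
  }
}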