第一个例子:WordCount
package com;
/**
* Created by kcz on 2017/2/18.
*/
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.regex.Pattern;
import static java.util.Arrays.asList;
public class WordCountLocal {
public static void main(String[] args) {
SparkConf conf = new SparkConf()
.setAppName("WordCountLocal")
.setMaster("local");
JavaSparkContext sc = new JavaSparkContext(conf);
JavaRDD<String> lines = sc.textFile("C:\\Users\\kcz\\Desktop\\test.txt");
JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
private static final long serialVersionUID = 1L;
public Iterable<String> call(String line) throws Exception {
return Arrays.asList(line.split(" "));
}
});
JavaPairRDD<String, Integer> pairs = words.mapToPair(
new PairFunction<String, String, Integer>() {
private static final long serialVersionUID = 1L;
public Tuple2<String, Integer> call(String word) throws Exception {
return new Tuple2<String, Integer>(word, 1);
}
});
JavaPairRDD<String, Integer> wordCounts = pairs.reduceByKey(
new Function2<Integer, Integer, Integer>() {
private static final long serialVersionUID = 1L;
public Integer call(Integer v1, Integer v2) throws Exception {
return v1 + v2;
}
});
wordCounts.foreach(new VoidFunction<Tuple2<String, Integer>>() {
private static final long serialVersionUID = 1L;
public void call(Tuple2<String, Integer> wordCount) throws Exception {
System.out.println(wordCount._1 + " appeared " + wordCount._2 + " times.");
}
});
sc.close();
}
}
难点介绍:
RDD编程,难点如下:
1. 采用对象方式来封装逻辑,和stream中处理方式一致,但是由于算子较多,每个算子要求不同,需要有一定练习来熟悉
2. 输入输出为JavaRDD,但reduce、group、sort等操作需要中间使用mapToPair将JavaRDD转成JavaPairRDD才能操作,会涉及到多次转换
3. reduce操作中,算子是更高层次的抽象,有一定的理解难度
编程步骤:
Step1:获取RDD数据
JavaRDD<PracticePojo> inputTradeRecords = this.getInputRDD(PracticePojo.class);
获取数据之后可以进行
filter操作
map操作
join操作
Step2:将数据转为Key-Value格式
因为在Spark处理中,reduce、sort、group等操作涉及到在分布式机器间的数据交互,数据必须要有Key来作为分布操作的依据,所以我们首先要将数据格式进行转换。
JavaPairRDD<String, Integer> mappedTradeRecords = inputTradeRecords.mapToPair(
new PairFunction<PracticePojo, String, Integer>() {
@Override
public Tuple2<String, Integer> call(PracticePojo practicePojo) throws Exception {
return new Tuple2<String, Integer>(practicePojo.getSecurityId(), 1);
}
});
Step3:执行计数操作。
这里会用到reduceByKey方法。需要注意的是,reduce是一个非常常用的操作,它有两个操作步骤:
1. 对数据按照Key进行分组
2. 对同一Key分组内的Value依次执行传入的聚合函数(这里是相加),得到每个Key的汇总结果
JavaPairRDD<String, Integer> reducedTradeRecords = mappedTradeRecords.reduceByKey(
new Function2<Integer, Integer, Integer>() {
@Override
public Integer call(Integer v1, Integer v2) throws Exception {
return v1 + v2;
}
});
Step4:执行排序
这里有两步操作:
1. 首先是使用mapToPair方法进行数据变形,因为sortByKey方法仅是针对key来排序,而我们原始数据的key并不是我们要的排序字段,所以首先需要将key和value换一下
2. 用sortByKey执行排序操作,需要注意的是,参数是一个布尔值,默认为升序,false表示降序
JavaPairRDD<Integer, String> reversedTradeRecords = reducedTradeRecords.mapToPair(
new PairFunction<Tuple2<String, Integer>, Integer, String>() {
@Override
public Tuple2<Integer, String> call(Tuple2<String, Integer> stringIntegerTuple2) throws Exception {
return new Tuple2<Integer, String>(stringIntegerTuple2._2, stringIntegerTuple2._1);
}
});
JavaPairRDD<Integer, String> sortedTradeRecords = reversedTradeRecords.sortByKey(false);
Step5:输出结果
一般我们都是输出一个JavaRDD,这里采用了预先定义的model结构,会将排序完的结果映射到这个结构上。
JavaRDD<PracticeResultPojo> resultTradeRecords = sortedTradeRecords.map(
new Function<Tuple2<Integer, String>, PracticeResultPojo>() {
@Override
public PracticeResultPojo call(Tuple2<Integer, String> v1) throws Exception {
PracticeResultPojo resultPojo = new PracticeResultPojo();
resultPojo.setSecurityId(v1._2);
resultPojo.setCount(v1._1);
return resultPojo;
}
});