Running Spark locally from IDEA with the Java API -- word count
The program below reads a tab-separated text file, splits each line into words, counts how often each word occurs, and prints the results sorted by count, all in Spark's local mode so it can be launched directly from IDEA.
package lesson03;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.*;
import scala.Tuple2;
import java.util.Arrays;
import java.util.Iterator;
/**
* Created by Administrator on 2017/7/31.
*/
public class JavaWordCount7 {
public static void main(String[] args) {
SparkConf conf = new SparkConf().setAppName("
wordcount").setMaster("local");// spark程序入口是JavaSparkContext
JavaSparkContext sc = new JavaSparkContext(conf);
// Read the file contents into an RDD of lines
JavaRDD<String> fileRDD = sc.textFile("D:\\文档\\hello.txt");
// Next, apply flatMap: split each line, flatten the pieces, and return an Iterator, which yields a new RDD of words
// The first String type parameter is the input element type
// The second String type parameter is the output element type
final JavaRDD<String> wordRDD = fileRDD.flatMap(new FlatMapFunction<String, String>() {
//Iterator<String> 输出的数据类型
//String s 输入的数据类型
@Override
public Iterator<String> call(String s) throws Exception {
return Arrays.asList(s.split("\t")).iterator();
}
});
// A plain map to (word, 1) tuples would also work, but mapToPair returns a JavaPairRDD directly
final JavaPairRDD<String, Integer> wordOneRDD = wordRDD.mapToPair(new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) throws Exception {
return new Tuple2<String, Integer>(s, 1);
}
});
final JavaPairRDD<String, Integer> WordCountRDD = wordOneRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
@Override
public Integer call(Integer v1, Integer v2) throws Exception {
return v1 + v2;
}
});
final JavaPairRDD<Integer, String> resultRDD = WordCountRDD.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
@Override
public Tuple2<Integer, String> call(Tuple2<String, Integer> t) throws Exception {
return new Tuple2<Integer, String>(t._2, t._1);
}
});
// Sort by count (ascending) and print each word with its count
resultRDD.sortByKey().foreach(new VoidFunction<Tuple2<Integer, String>>() {
@Override
public void call(Tuple2<Integer, String> t) throws Exception {
System.out.println("单词: "+t._2 + "次数: "+t._1);
}
});
// Release Spark resources when the job finishes
sc.stop();
}
}
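As a usage example, suppose D:\文档\hello.txt contains the two tab-separated lines below (the file content is not given here, so this input is only assumed). Because the pairs are swapped to (count, word) and sortByKey sorts ascending by default, the less frequent words are printed first:

hello	world
hello	spark

Expected console output (the relative order of words with the same count is not guaranteed):

word: world  count: 1
word: spark  count: 1
word: hello  count: 2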
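For comparison, here is a minimal sketch of the same pipeline rewritten with Java 8 lambdas instead of anonymous inner classes. It assumes Spark 2.x (where flatMap returns an Iterator, as in the code above) and the same input file; the class name JavaWordCountLambda is only illustrative.

package lesson03;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

public class JavaWordCountLambda {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("wordcount").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Read the file and split each line on tabs into individual words
        JavaRDD<String> wordRDD = sc.textFile("D:\\文档\\hello.txt")
                .flatMap(line -> Arrays.asList(line.split("\t")).iterator());

        // Pair each word with 1 and sum the counts per word
        JavaPairRDD<String, Integer> wordCountRDD = wordRDD
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey(Integer::sum);

        // Swap to (count, word), sort ascending by count, and print
        wordCountRDD
                .mapToPair(t -> new Tuple2<>(t._2, t._1))
                .sortByKey()
                .foreach(t -> System.out.println("word: " + t._2 + "  count: " + t._1));

        sc.stop();
    }
}

The steps are identical -- read, flatMap, mapToPair, reduceByKey, swap, sort, print -- only the boilerplate of the anonymous inner classes disappears.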