1. Writing a Spark program with Java 8 lambdas
package cn.edu360.spark;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import java.util.Arrays;
/**
* Created by zx on 2017/10/5.
*/
public class JavaLambdaWordCount {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("JavaLambdaWordCount");
        // Create the JavaSparkContext
        JavaSparkContext jsc = new JavaSparkContext(conf);
        // Specify where to read the input data from
        JavaRDD<String> lines = jsc.textFile(args[0]);
        // Split each line and flatten into words
        JavaRDD<String> words = lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator());
        // Pair each word with a 1
        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(w -> new Tuple2<>(w, 1));
        // Aggregate the counts by key
        JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey((m, n) -> m + n);
        // Swap to (count, word) so the pairs can be sorted by count
        JavaPairRDD<Integer, String> swapped = reduced.mapToPair(tp -> tp.swap());
        // Sort by key (the count) in descending order
        JavaPairRDD<Integer, String> sorted = swapped.sortByKey(false);
        // Swap back to (word, count)
        JavaPairRDD<String, Integer> result = sorted.mapToPair(tp -> tp.swap());
        // Save the result to HDFS
        result.saveAsTextFile(args[1]);
        // Release resources
        jsc.stop();
    }
}
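To sanity-check this pipeline without a cluster or HDFS, a minimal local variant is sketched below. The local[*] master, the in-memory input, and the class name LocalWordCountCheck are illustrative assumptions, not part of the original job; parallelize() and collect() are standard JavaSparkContext/JavaRDD methods.

package cn.edu360.spark;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import java.util.Arrays;
// A minimal local sanity check of the same pipeline (a sketch, not part of
// the original job): the local[*] master and in-memory input are assumptions.
public class LocalWordCountCheck {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("LocalWordCountCheck").setMaster("local[*]");
        JavaSparkContext jsc = new JavaSparkContext(conf);
        JavaPairRDD<String, Integer> counts = jsc
                .parallelize(Arrays.asList("hello spark", "hello world"))
                .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                .mapToPair(w -> new Tuple2<>(w, 1))
                .reduceByKey((m, n) -> m + n);
        // collect() pulls the result to the driver; prints tuples such as (hello,2)
        counts.collect().forEach(System.out::println);
        jsc.stop();
    }
}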
2. Writing a Spark program in Java (anonymous inner classes)
package cn.edu360.spark;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.util.Arrays;
import java.util.Iterator;
/**
* Created by zx on 2017/10/5.
*/
public class JavaWordCount {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("JavaWordCount");
        // Create the JavaSparkContext
        JavaSparkContext jsc = new JavaSparkContext(conf);
        // Specify where to read the input data from
        JavaRDD<String> lines = jsc.textFile(args[0]);
        // Split each line and flatten into words
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });
        // Pair each word with a 1
        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<>(word, 1);
            }
        });
        // Aggregate the counts by key
        JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        // Swap to (count, word) so the pairs can be sorted by count
        JavaPairRDD<Integer, String> swapped = reduced.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tp) throws Exception {
                //return new Tuple2<>(tp._2, tp._1);
                return tp.swap();
            }
        });
        // Sort by key (the count) in descending order
        JavaPairRDD<Integer, String> sorted = swapped.sortByKey(false);
        // Swap back to (word, count)
        JavaPairRDD<String, Integer> result = sorted.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> tp) throws Exception {
                return tp.swap();
            }
        });
        // Save the result to HDFS
        result.saveAsTextFile(args[1]);
        // Release resources
        jsc.stop();
    }
}
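The swap → sortByKey → swap sequence is needed because JavaPairRDD exposes sortByKey but no direct sort-by-value method. When only the N most frequent words are wanted on the driver, rather than a fully sorted output file, top() with a serializable comparator is an alternative. The sketch below assumes the reduced RDD from the program above; CountComparator is a made-up name, and it is declared Serializable because Spark ships the comparator to executors.

import java.io.Serializable;
import java.util.Comparator;
import java.util.List;
// A made-up serializable comparator that orders (word, count) pairs by count.
class CountComparator implements Comparator<Tuple2<String, Integer>>, Serializable {
    @Override
    public int compare(Tuple2<String, Integer> a, Tuple2<String, Integer> b) {
        return Integer.compare(a._2, b._2);
    }
}
// Inside main(), after reduceByKey: fetch the 10 largest pairs driver-side.
List<Tuple2<String, Integer>> top10 = reduced.top(10, new CountComparator());
top10.forEach(System.out::println);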
3. Writing a Spark program in Scala
package cn.edu360.spark
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
/**
* Created by zx on 2017/10/5.
*
*/
object ScalaWordCount {
  def main(args: Array[String]): Unit = {
    // Create the Spark configuration and set the application name.
    // setMaster("local[4]") is for local testing only; when submitting to a
    // cluster, use the commented-out line and pass --master to spark-submit.
    //val conf = new SparkConf().setAppName("ScalaWordCount")
    val conf = new SparkConf().setAppName("ScalaWordCount").setMaster("local[4]")
    // Create the entry point for Spark execution
    val sc = new SparkContext(conf)
    // Specify where to read the data from and create an RDD (Resilient Distributed Dataset)
    //sc.textFile(args(0)).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_+_).sortBy(_._2, false).saveAsTextFile(args(1))
    val lines: RDD[String] = sc.textFile(args(0))
    // Split each line and flatten into words
    val words: RDD[String] = lines.flatMap(_.split(" "))
    // Pair each word with a 1
    val wordAndOne: RDD[(String, Int)] = words.map((_, 1))
    // Aggregate by key
    val reduced: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
    // Sort by the count in descending order
    val sorted: RDD[(String, Int)] = reduced.sortBy(_._2, false)
    // Save the result to HDFS
    sorted.saveAsTextFile(args(1))
    // Release resources
    sc.stop()
  }
}
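Two things worth noting here: the Scala RDD API has sortBy, so this version sorts by the count field directly instead of the swap/sortByKey/swap sequence the Java versions use, and the commented-out one-liner above shows the whole job as a single chained expression; the named vals are kept only for readability.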
Upload the jar to the Linux server and submit it. The --class value must be the fully qualified name of the class to run (ScalaWordCount is assumed here; substitute JavaWordCount or JavaLambdaWordCount to run those versions):
spark-submit --master spark://jiqun01:7077 --class cn.edu360.spark.ScalaWordCount /root/original-SparkDemo-1.0-SNAPSHOT.jar hdfs://jiqun01:9000/wordcount hdfs://jiqun01:9000/wordrs
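After the job finishes, the output directory holds one part-NNNNN file per partition plus a _SUCCESS marker, and each line is the tuple's toString, e.g. (hello,2); you can inspect it with hdfs dfs -cat hdfs://jiqun01:9000/wordrs/part-*. Note that saveAsTextFile fails if the output directory already exists, so remove it before rerunning the job.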