1. Preface
In a Spark Streaming + Kafka pipeline, a common pattern is for Spark Streaming to consume data from Kafka and write it to HDFS, and then map the HDFS files into Hive to build the data warehouse; alternatively, Spark SQL can write into Hive directly. When writing to HDFS, a plain RDD uses the saveAsTextFile() API, while a PairRDD uses saveAsHadoopFile() (newer Spark versions may unify the two). If these APIs are used in Spark Streaming without customization, the files written by each new batch will overwrite the HDFS files written by earlier batches. The rest of this article reproduces that problem.
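To make the two save APIs concrete before diving into the streaming job, here is a minimal standalone sketch. It is not taken from the test job below; the class name SaveApiSketch, the local master, the sample data, and the output paths are illustrative assumptions.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import java.util.Arrays;

public class SaveApiSketch {
    public static void main(String[] args) {
        // Illustrative local setup; a real job would run on the cluster.
        SparkConf conf = new SparkConf().setAppName("SaveApiSketch").setMaster("local[2]");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Plain RDD: saveAsTextFile() writes one part-* file per partition under the directory.
        JavaRDD<String> lines = sc.parallelize(Arrays.asList("a", "b", "c"));
        lines.saveAsTextFile("hdfs:///tmp/demo/text_output");

        // PairRDD: saveAsHadoopFile() writes key/value records through a Hadoop OutputFormat.
        JavaPairRDD<Text, Text> pairs = sc.parallelizePairs(Arrays.asList(
                new Tuple2<>(new Text("k1"), new Text("v1")),
                new Tuple2<>(new Text("k2"), new Text("v2"))));
        pairs.saveAsHadoopFile("hdfs:///tmp/demo/pair_output",
                Text.class, Text.class, TextOutputFormat.class);

        // When such a call runs once per micro-batch against the same fixed path,
        // the previous batch's output is not preserved, which is the problem
        // reproduced in the streaming test code below.
        sc.stop();
    }
}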
2. Test code: saveAsTextFile() overwrites previously written HDFS files with newly written ones
package com.surfilter.dp.timer.job;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Seconds;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import java.text.SimpleDateFormat;
import java.util.*;
public class TestStreaming extends BaseParams {
    public static void main(String[] args) {
        // The first program argument carries the full parameter string for the job.
        String totalParameterString = null;
        if (null != args && args.length > 0) {
            totalParameterString = args[0];
        }
        if (null != totalParameterString && !"".equals(totalParameterString)) {
            // Parse the parameter string and build the Spark configuration from it.
            ParameterParse parameterParse = new ParameterParse(totalParameterString);
            SparkConf conf = new SparkConf().setAppName(parameterParse.getSpark_app_name());
            setSparkConf(parameterParse, conf);
            // Create the streaming context with the configured batch interval.
            JavaSparkContext sparkContext = new JavaSparkContext(conf);
            JavaStreamingContext streamingContext = new JavaStreamingContext(sparkContext, Seconds.apply(Long.parseLong(parameterParse.getSpark_streaming_