Spark Streaming NotSerializableException

When using Spark Streaming, the job fails with a NotSerializableException. The code that triggers it is as follows:

		SparkConf conf = new SparkConf().setAppName("NetworkWordCount");
		JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(5000));
		JavaReceiverInputDStream<String> lines = jssc.socketTextStream("10.0.3.19", 9999);

		// Split each incoming line into words. The anonymous FlatMapFunction is
		// shipped to the executors, so everything it references must be serializable.
		JavaDStream<String> words = lines.flatMap(
		  new FlatMapFunction<String, String>() {
		    public Iterable<String> call(String x) {
		      System.out.println(x);
		      String str = JSON.toJSONString(x);
		      System.out.println("json:" + str);
		      return Arrays.asList(x.split(" "));
		    }
		  });
		new FunNext(words).next();


public class FunNext {
	JavaDStream<String> words;

	public FunNext(JavaDStream<String> words) {
		this.words = words;
	}

	public JavaDStream<String> getWords() {
		return words;
	}

	public void setWords(JavaDStream<String> words) {
		this.words = words;
	}

	public void next() {
		// The anonymous inner classes below implicitly capture the enclosing
		// FunNext instance, so Spark must serialize FunNext to ship them.
		JavaPairDStream<String, Integer> pairs = words.mapToPair(
		  new PairFunction<String, String, Integer>() {
		    public Tuple2<String, Integer> call(String s) throws Exception {
		      return new Tuple2<String, Integer>(s, 1);
		    }
		  });
		JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(
		  new Function2<Integer, Integer, Integer>() {
		    public Integer call(Integer i1, Integer i2) throws Exception {
		      return i1 + i2;
		    }
		  });
		wordCounts.print();
	}
}

The exception is as follows:

5/01/24 16:56:20 ERROR JobScheduler: Error running job streaming job 1422089780000 ms.0
org.apache.spark.SparkException: Job aborted due to stage failure: Task not serializable: java.io.NotSerializableException: com.kingsoft.spark.FunNext
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1033)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1017)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1015)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1015)
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$submitMissingTasks(DAGScheduler.scala:770)
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$submitStage(DAGScheduler.scala:713)
at org.apache.spark.scheduler.DAGScheduler.handleJobSubmitted(DAGScheduler.scala:697)
at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1176)
at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
at akka.actor.ActorCell.invoke(ActorCell.scala:456)
at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
at akka.dispatch.Mailbox.run(Mailbox.scala:219)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)


The trace shows that the class FunNext cannot be serialized, so the first attempt was to make FunNext implement Serializable. After repackaging and rerunning, the exception still occurred. After a long look, the cause turned out to be the words field inside the class: a DStream (i.e., an RDD handle) cannot be serialized. The fix is simply to mark words as transient. Alternatively, pass words in as a parameter of next() instead of storing it as a field; then FunNext can also be serialized.
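
For reference, here is a minimal sketch of the corrected class, assuming the same Spark 1.x Java API as the snippets above; only the Serializable marker and the transient modifier are new relative to the original code:

	import java.io.Serializable;

	import org.apache.spark.api.java.function.Function2;
	import org.apache.spark.api.java.function.PairFunction;
	import org.apache.spark.streaming.api.java.JavaDStream;
	import org.apache.spark.streaming.api.java.JavaPairDStream;

	import scala.Tuple2;

	public class FunNext implements Serializable {

		// transient: the DStream handle lives only on the driver; excluding it
		// from serialization lets the FunNext instance captured by the anonymous
		// inner classes be shipped to the executors without error.
		private transient JavaDStream<String> words;

		public FunNext(JavaDStream<String> words) {
			this.words = words;
		}

		public void next() {
			JavaPairDStream<String, Integer> pairs = words.mapToPair(
			  new PairFunction<String, String, Integer>() {
			    public Tuple2<String, Integer> call(String s) throws Exception {
			      return new Tuple2<String, Integer>(s, 1);
			    }
			  });
			JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(
			  new Function2<Integer, Integer, Integer>() {
			    public Integer call(Integer i1, Integer i2) throws Exception {
			      return i1 + i2;
			    }
			  });
			wordCounts.print();
		}
	}

The parameter-passing alternative works the same way: change the signature to next(JavaDStream<String> words) and drop the field. FunNext still needs to implement Serializable either way, because the anonymous PairFunction and Function2 instances hold an implicit reference to it.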
