The WordCount program written with Spark is as follows:
package rdd

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by leboop on 2019/3/11.
  */
object WordCount {
  // HDFS address, Spark standalone master, and input/output paths
  private val hdfsUrl = "hdfs://bigdata111:9000"
  private val sparkMaster = "spark://bigdata111:7077"
  private val filePath = "/input/data.txt"
  private val savePath = "hdfs://bigdata111:9000/output/wc"

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("word-count").setMaster(sparkMaster)
    val sc = new SparkContext(conf)
    // Read the input file from HDFS, split each line into words, pair each word
    // with 1, sum the counts per word, and write the result back to HDFS
    sc.textFile(hdfsUrl + filePath)
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .saveAsTextFile(savePath)
    sc.stop()
  }
}
Running it directly from the IDE throws the following error:
Caused by: java.lang.ClassCastException: cannot assign instance of scala.collection.immutable.List$SerializationProxy to field org.apache.spark.rdd.RDD.org$apache$spark$rdd$RDD$$dependencies_ of type scala.collection.Seq in instance of org.apache.spark.rdd.MapPartitionsRDD
at java.io.ObjectStreamClass$FieldReflector.setObjFieldValues(ObjectStreamClass.java:2133)
at java.io.ObjectStreamClass.setObjFieldValues(ObjectStreamClass.java:1305)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2251)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:422)
at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75)
at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:85)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
at org.apache.spark.scheduler.Task.run(Task.scala:99)
However, when the program is packaged and run on the Linux system (the Spark cluster) with spark-submit, it works normally. The reason lies in saveAsTextFile(savePath)
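(For reference, the cluster run mentioned above would be submitted roughly as follows; this is only a sketch, where the jar path is an assumption, while the master URL and the main class rdd.WordCount come from the program itself.)

# Hypothetical jar location; use the artifact produced by your own build
spark-submit \
  --master spark://bigdata111:7077 \
  --class rdd.WordCount \
  /path/to/word-count.jar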