This post is a short summary of what I've picked up while learning Scala and Spark over the past little while: partly to review and consolidate, and partly to make things easy to look up later.
A small introductory Spark Scala example
The text file UserPurchaseHistory.csv:

John,iPhone Cover,9.99
John,Headphones,5.49
Jack,iPhone Cover,9.99
Jill,Samsung Galaxy Cover,8.95
Bob,iPad Cover,5.49

Now read the file and run the analysis:

package com.ghc.bigdata

import org.apache.spark.{SparkConf, SparkContext}

/**
 * Created by Yu Li on 12/5/2017.
 */
object ScalaApp {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
    sparkConf.setMaster("local[2]")
    sparkConf.setAppName("Scala App")
    val sc = new SparkContext(sparkConf)
    val textFromFile = sc.textFile("./src/main/scala/UserPurchaseHistory.csv")
    val data = textFromFile.map(x => x.split(",")).map(p => (p(0), p(1), p(2)))
    // number of purchases
    val countOfPurchase = data.count()
    // how many distinct customers bought something
    val distinctCountOfPurchase = data.map(x => x._1).distinct.count()
    println(distinctCountOfPurchase + " distinct customers bought products")
    println(data.map { case (user, product, price) => user }.distinct.count() + " distinct customers bought products")
    val totalRevenue = data.map { case (user, product, price) => price.toDouble }.sum() // only Double can be summed
    println("totalRevenue: " + totalRevenue)
    // best-selling product: group by product and count, which is exactly reduceByKey
    val mostPopularProduct = data.map { case (user, product, price) => (product, 1) }.reduceByKey(_ + _).collect().sortBy(-_._2) // the minus sign means descending order
    println(mostPopularProduct.mkString(","))
    println("mostPopularProduct: " + mostPopularProduct(0))
    println("purchases in total: " + countOfPurchase)
    println(data.map { case (user, product, price) => user }.distinct.count() + " distinct customers bought products")
    println("total revenue: " + totalRevenue)
    println("the best-selling product is %s with %d sold".format(mostPopularProduct.take(1)(0)._1, mostPopularProduct.take(1)(0)._2))
  }
}

The same analysis once more, refactored so the computation lives in a reusable printResult method:

package com.ghc.bigdata

import org.apache.spark.{SparkConf, SparkContext}

/**
 * Created by Yu Li on 12/6/2017.
 */
object SparkScalaApp {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf()
    sparkConf.setAppName("Spark Scala App")
    sparkConf.setMaster("local[2]")
    val sc: SparkContext = new SparkContext(sparkConf)
    val fileName: String = "./src/main/scala/UserPurchaseHistory.csv"
    printResult(sc, fileName)
  }

  def printResult(sc: SparkContext, fileName: String): Unit = {
    // how many products were bought in total
    val rawRDD = sc.textFile(fileName).map(x => x.split(",")).map(purchase => (purchase(0), purchase(1), purchase(2)))
    val countOfPurchase = rawRDD.count()
    // how many distinct customers bought products
    val countOfDistinctPurchase = rawRDD.map { case (user, item, price) => user }.distinct.count()
    // total revenue
    val sumRevenue = rawRDD.map { case (user, item, price) => price.toDouble }.sum()
    // revenue per product: sort descending by revenue and take the largest entry
    val sumGroupByItemRevenue = rawRDD.map { case (user, item, price) => (item, price.toDouble) }.reduceByKey(_ + _).sortBy(-_._2).collect()(0)
    // the best-selling product
    val mostPopularItem = rawRDD.map { case (user, item, price) => (item, 1) }.reduceByKey(_ + _).sortBy(-_._2).collect()(0)
    println(countOfPurchase + " products bought in total")
    println(countOfDistinctPurchase + " distinct customers bought products")
    println("total revenue: " + sumRevenue)
    println("the highest-revenue product is %s with %.2f in sales".format(sumGroupByItemRevenue._1, sumGroupByItemRevenue._2))
    println("the best-selling product is %s with %d sold".format(mostPopularItem._1, mostPopularItem._2))
  }
}
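Both versions find the best seller by sorting the whole aggregated result and taking the first element. For a toy dataset that is fine; as a minimal alternative sketch (mostPopular is a name of my choosing, and rawRDD is assumed to have the same (user, item, price) shape as in printResult above), RDD.top keeps only the current maximum instead of performing a full sort:

import org.apache.spark.rdd.RDD

// Sketch: best-selling product via top(1) instead of a full sortBy + collect.
// Assumes rawRDD: RDD[(String, String, String)] of (user, item, price).
def mostPopular(rawRDD: RDD[(String, String, String)]): (String, Int) = {
  rawRDD
    .map { case (user, item, price) => (item, 1) }
    .reduceByKey(_ + _)
    .top(1)(Ordering.by(_._2)) // top tracks only the largest element, no global sort
    .head
}

On five rows the difference is invisible, but on a large RDD this avoids the shuffle that sortBy triggers.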
Another small example, this time exercising Scala generics:

package com.ghc.bigdata

import org.apache.spark.{SparkConf, SparkContext}

/**
 * Created by Yu Li on 12/6/2017.
 */
object SparkScalaApp {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf()
    sparkConf.setAppName("Spark Scala App")
    sparkConf.setMaster("local[2]")
    val sc: SparkContext = new SparkContext(sparkConf) // sc is created here but not used by the examples below
    println("position of 2 in the List: " + getPosition(List(1, 2, 3), 2))
    for (i <- map[Int, String](List(1, 2, 3), { num: Int => num + "2" })) {
      println(i)
    }
  }

  // generic method: works for a List of any element type A
  def getPosition[A](l: List[A], v: A): Int = l.indexOf(v)

  // higher-order generic method: applies func to every element of list
  def map[A, B](list: List[A], func: A => B) = list.map(func)
}
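The same type-parameter machinery also supports constraints. A minimal sketch (maxOf is a hypothetical name, not from the code above) using a context bound, which requires an implicit Ordering[A] to be in scope:

// Sketch: a generic method with a context bound.
// [A: Ordering] means an implicit Ordering[A] must exist (Int, String, etc. provide one).
def maxOf[A: Ordering](elems: List[A]): A =
  elems.reduceLeft((a, b) => if (implicitly[Ordering[A]].lt(a, b)) b else a)

With that in place, maxOf(List(1, 2, 3)) returns 3 and maxOf(List("a", "c", "b")) returns "c".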
Predicting murder rates with Spark's linear regression
package com.ghc.bigdata

import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Created by Yu Li on 12/7/2017.
 */
// spark.ml recommends DataFrames, so everything below works with DataFrames
object LRDemo {
  def main(args: Array[String]): Unit = {
    // silence the logs
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
    // set up a local Spark environment
    val sparkConf = new SparkConf().setAppName("Regression Demo").setMaster("local[2]")
    val sc = new SparkContext(sparkConf)
    // turn the real (training) text file into a vector DataFrame
    val realFile = "./src/main/scala/com/ghc/bigdata/LR_data"
    val delimiter = ","
    val vecDF_real = transform2VectorDFFromFile(sc, realFile, delimiter)
    println("start print df from real: ")
    println(vecDF_real.collect.mkString("\t"))
    // turn the prediction text file into a vector DataFrame; columns can be selected from it
    val predictFile = "./src/main/scala/com/ghc/bigdata/LR_data_for_predict"
    val vecDF_predict = transform2VectorDFFromFile(sc, predictFile, delimiter)
    println("start print df from predict: ")
    println(vecDF_predict.collect.mkString("\t"))
    // the data has 50 rows; each row holds the eight raw columns plus the assembled "features" column, e.g.
    // [3615.0,3624.0,2.1,69.05,15.1,41.3,20.0,50708.0,[3615.0,3624.0,2.1,69.05,41.3,20.0,50708.0]]

    // build a linear regression model to predict the murder rate
    val lr = new LinearRegression()
    lr.setFeaturesCol("features") // the feature column
    lr.setLabelCol("Murder")      // the column to predict
    lr.setFitIntercept(true)      // fit an intercept term
    lr.setMaxIter(20)             // maximum number of iterations
    lr.setRegParam(0.4)           // regularization parameter
    lr.setElasticNetParam(0.8)    // elastic-net mixing parameter
    // fit the model on the real (training) data
    val lrModel = lr.fit(vecDF_real)
    lrModel.extractParamMap() // all of the model's parameters
    println(s"coefficients: ${lrModel.coefficients}, intercept: ${lrModel.intercept}")
    // evaluate the training result
    val trainingSummary = lrModel.summary
    println(s"numIterations: ${trainingSummary.totalIterations}") // total number of iterations
    println(s"objectiveHistory: ${trainingSummary.objectiveHistory.toList}") // loss at each iteration, decreasing
    trainingSummary.residuals.show() // residuals
    println(s"RMSE: ${trainingSummary.rootMeanSquaredError}") // root mean squared error
    println(s"r2: ${trainingSummary.r2}") // coefficient of determination
    val predictions: DataFrame = lrModel.transform(vecDF_predict) // apply the same model to the prediction set
    println("prediction results:")
    val predict_result: DataFrame = predictions.selectExpr("features", "Murder", "round(prediction,1) as prediction")
    predict_result.foreach(println(_))
    sc.stop()
  }

  // build a vector DataFrame from a file; both the real data and the prediction data need this, so it is factored out for reuse
  def transform2VectorDFFromFile(sc: SparkContext, fileName: String, delimiter: String): DataFrame = {
    val sqc = new SQLContext(sc) // SQLContext is deprecated; SparkSession replaces it
    val raw_data = sc.textFile(fileName)
    val map_data = raw_data.map { x =>
      val split_list = x.split(delimiter)
      (split_list(0).toDouble, split_list(1).toDouble, split_list(2).toDouble, split_list(3).toDouble,
       split_list(4).toDouble, split_list(5).toDouble, split_list(6).toDouble, split_list(7).toDouble)
    }
    val df = sqc.createDataFrame(map_data)
    val data = df.toDF("Population", "Income", "Illiteracy", "LifeExp", "Murder", "HSGrad", "Frost", "Area")
    val colArray = Array("Population", "Income", "Illiteracy", "LifeExp", "HSGrad", "Frost", "Area")
    val assembler = new VectorAssembler().setInputCols(colArray).setOutputCol("features")
    val vecDF: DataFrame = assembler.transform(data)
    vecDF
  }
}
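Since the comment in transform2VectorDFFromFile notes that SQLContext is deprecated in favor of SparkSession, here is a minimal sketch of the same loader written against SparkSession (the method name transform2VectorDF and the builder settings are assumptions of mine, not part of the original code):

import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.{DataFrame, SparkSession}

// Sketch: the same column naming + VectorAssembler step on top of SparkSession.
def transform2VectorDF(spark: SparkSession, fileName: String, delimiter: String): DataFrame = {
  import spark.implicits._
  val cols = Seq("Population", "Income", "Illiteracy", "LifeExp", "Murder", "HSGrad", "Frost", "Area")
  val data = spark.sparkContext.textFile(fileName)
    .map { line =>
      val f = line.split(delimiter)
      (f(0).toDouble, f(1).toDouble, f(2).toDouble, f(3).toDouble,
       f(4).toDouble, f(5).toDouble, f(6).toDouble, f(7).toDouble)
    }
    .toDF(cols: _*)
  // assemble everything except the label column "Murder" into the features vector
  val featureCols = cols.filterNot(_ == "Murder").toArray
  new VectorAssembler().setInputCols(featureCols).setOutputCol("features").transform(data)
}

// The SparkSession itself would come from something like:
// val spark = SparkSession.builder().appName("Regression Demo").master("local[2]").getOrCreate()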