Noting a demo here for later review.
Demo background: Alibaba Tianchi competition series, Koubei merchant customer flow prediction.
Step 5: linear regression (not a good fit for this scenario; among other issues, shop_id is an arbitrary ID with no meaningful linear relationship to the pay count, and the SGD-based trainer is sensitive to unscaled count features).
package com.huadian.bigdata.ijcai
import org.apache.spark.mllib.feature.{StandardScaler, StandardScalerModel}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
/**
  * Linear regression
  */
object IJCAISparkRFPrecisionV3 {
  /**
    * Loan pattern: the lender function
    * @param args
    *             command-line arguments
    * @param operation
    *             the user function
    */
  def sparkOperation(args: Array[String])(operation: SparkSession => Unit): Unit = {
    if (args.length != 2) {
      println("Usage: IJCAISparkRFPrecisionV3 <appName> <master>")
      System.exit(1)
    }
    val spark = SparkSession
      .builder()
      .appName(args(0))
      .master(args(1))
      .getOrCreate()
    spark.sparkContext.setLogLevel("WARN")
    try {
      // invoke the user function
      operation(spark)
    } catch {
      case e: Exception => e.printStackTrace()
    } finally {
      spark.stop()
    }
  }
  /**
    * The user function for the loan pattern.
    * For machine learning this means data preprocessing, model training,
    * model evaluation, and model testing.
    * @param spark
    */
  def modelTrain(spark: SparkSession): Unit = {
    // load the data
    val userVisitPayRDD: RDD[Row] = spark.read
      .option("header", "true")
      .csv("file:///F:\\IJCAI\\train_user_visit_pay")
      .select("day_week", "shop_id", "count_visit", "count_pay")
      .rdd
    // Linear regression cannot consume categorical features directly, so the
    // category values are converted with 1-of-K (one-hot) encoding
    val categoryMap: Map[String, Int] = userVisitPayRDD
      .map(row => row.getString(0)).distinct()
      .collect()
      .zipWithIndex // e.g. Array((Monday, 0), (Tuesday, 1), ...)
      .toMap
    val lpRDD: RDD[LabeledPoint] = userVisitPayRDD.map {
      case Row(day_week: String, shop_id: String, count_visit: String, count_pay: String) => {
        // label
        val label: Double = count_pay.toDouble
        // look up the category index
        val categoryIndex = categoryMap(day_week)
        val categoryFeature: Array[Double] = Array.ofDim[Double](categoryMap.size)
        categoryFeature(categoryIndex) = 1.0
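        // e.g. with 7 weekday categories and categoryIndex = 2, the one-hot
        // vector becomes [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]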
        // feature vector
        val features: Vector = Vectors.dense(
          // add or drop feature values as the data requires
          categoryFeature ++ Array(shop_id.toDouble, count_visit.toDouble)
        )
        // return the labeled point
        LabeledPoint(label, features)
      }
    }
    // for simplicity, split the data into two parts: one for training, one for testing
    val Array(trainingRDD, testingRDD) = lpRDD.randomSplit(Array(0.8, 0.2), 123L)
    // linear regression trained with stochastic gradient descent (SGD)
    val lrModel = LinearRegressionWithSGD.train(trainingRDD, 20)
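    // LinearRegressionWithSGD is sensitive to feature scale and can diverge
    // (NaN weights) on raw counts, which is part of why it fits this scenario
    // poorly. A minimal sketch (an assumption, not part of the original flow)
    // that uses the imported StandardScaler to standardize features first:
    //   val scaler: StandardScalerModel = new StandardScaler(withMean = true, withStd = true)
    //     .fit(trainingRDD.map(_.features))
    //   val scaledTrainingRDD = trainingRDD.map(lp => LabeledPoint(lp.label, scaler.transform(lp.features)))
    //   val scaledModel = LinearRegressionWithSGD.train(scaledTrainingRDD, 20)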
    // use the model to predict on the test set and pair actual values with predictions
    val actualAndPredictRDD: RDD[(Double, Double)] = testingRDD.map {
      case LabeledPoint(label, features) => {
        val predictValue = lrModel.predict(features)
        (label, predictValue)
      }
    }
    actualAndPredictRDD.take(30).foreach(println)
    // evaluate the predictive performance of the model
    def modelEvaluate(apRDD: RDD[(Double, Double)]): Unit = {
      // number of samples
      val count = apRDD.count().toDouble
      // MSE: mean squared error
      val mseValue = apRDD
        .map { case (actual, predict) => Math.pow(actual - predict, 2) }
        .sum() / count
      // RMSE: root mean squared error
      val rmseValue = Math.sqrt(mseValue)
      // MAE: mean absolute error
      val maeValue = apRDD
        .map { case (actual, predict) => Math.abs(actual - predict) }
        .sum() / count
      println(s"MSE:$mseValue,RMSE:${rmseValue},MAE:${maeValue}")
    }
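    // The same metrics are available from MLlib's built-in evaluator (a sketch;
    // RegressionMetrics expects (prediction, observation) pairs, hence the swap):
    //   import org.apache.spark.mllib.evaluation.RegressionMetrics
    //   val metrics = new RegressionMetrics(actualAndPredictRDD.map(_.swap))
    //   println(s"MSE:${metrics.meanSquaredError},RMSE:${metrics.rootMeanSquaredError},MAE:${metrics.meanAbsoluteError}")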
    modelEvaluate(actualAndPredictRDD)
  }

  def main(args: Array[String]): Unit = {
    sparkOperation(args)(modelTrain)
  }
}
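To run the demo, the two positional arguments map to appName and master, exactly as parsed in sparkOperation. A minimal sketch of a local run (the jar name ijcai-demo.jar is illustrative, not from the original):

spark-submit \
  --class com.huadian.bigdata.ijcai.IJCAISparkRFPrecisionV3 \
  ijcai-demo.jar \
  "IJCAI-LinearRegression" "local[2]"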