LR, XGB, and RF case studies

##LR https://www.cnblogs.com/wuchuanying/p/6243987.html

##XGB http://blog.csdn.net/sb19931201/article/details/52577592

##RF http://www.oschina.net/translate/random-forests-in-python?cmp
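
The LR and RF links above are Python walkthroughs; only the XGBoost case is worked out in Spark/Scala below. For comparison, here is a minimal Spark ML sketch of a logistic regression and a random forest on the same kind of libsvm input; the object name, input path, and parameter values are illustrative placeholders rather than anything taken from the linked posts.

package com.sf.demo

import org.apache.spark.ml.classification.{LogisticRegression, RandomForestClassifier}
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.sql.SparkSession

object lr_rf_sketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("lr_rf_sketch").getOrCreate()
    // Placeholder path -- point this at a libsvm-format training file
    val data = spark.read.format("libsvm").load("/user/spark/security/sample_libsvm_data")
    val Array(train, test) = data.randomSplit(Array(0.8, 0.2), seed = 123)

    // AUC evaluator shared by both models ("rawPrediction" is the default output column)
    val evaluator = new BinaryClassificationEvaluator()
      .setLabelCol("label")
      .setRawPredictionCol("rawPrediction")
      .setMetricName("areaUnderROC")

    // Logistic regression; parameter values are illustrative only
    val lr = new LogisticRegression().setMaxIter(100).setRegParam(0.01)
    println("LR AUC = " + evaluator.evaluate(lr.fit(train).transform(test)))

    // Random forest; parameter values are illustrative only
    val rf = new RandomForestClassifier().setNumTrees(100).setMaxDepth(10)
    println("RF AUC = " + evaluator.evaluate(rf.fit(train).transform(test)))

    spark.stop()
  }
}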

##XGBoost training with cross-validation (XGBoost4J-Spark)

package com.sf.demo

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.PipelineModel
import ml.dmlc.xgboost4j.scala.spark.{XGBoostEstimator, XGBoostClassificationModel}
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.tuning.{ParamGridBuilder, CrossValidator}
import scala.collection.mutable
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object random_neg_wg_train {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName("random_neg_xgb_Wg_train_32")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("yarn.nodemanager.vmem-check-enabled", "false")
    val sparkSession = SparkSession.builder().config(sparkConf).getOrCreate()
    val inputTrainPath = "/user/spark/security/Random_negative_data_included_678_train_data_target_TR2"
    val data = sparkSession.sqlContext.read.format("libsvm").option("numFeatures", "28124").load(inputTrainPath)
    val Array(trainingData, testData) = data.randomSplit(Array(0.8, 0.2))
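    // Note: randomSplit is not deterministic across runs; pass a seed,
    // e.g. data.randomSplit(Array(0.8, 0.2), seed = 123), if the split needs to be reproducible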

    // Create default param map for XGBoost
    def get_param(): mutable.HashMap[String, Any] = {
      val params = new mutable.HashMap[String, Any]()
      params += "eta" -> 0.1
      params += "scale_pos_weight" -> 0.1
      params += "gamma" -> 0.0
      params += "colsample_bylevel" -> 1
      params += "objective" -> "binary:logistic"
      params += "num_class" -> 2
      params += "booster" -> "gbtree"
      params += "num_rounds" -> 1
      params += "nWorkers" -> 5
      params += "seed" -> 0
      return params
    }


    // Create an XGBoost Classifier
    val xgb = new XGBoostEstimator(get_param().toMap).setLabelCol("label").setFeaturesCol("features")


    // XGBoost parameter grid
    val xgbParamGrid = (new ParamGridBuilder()
      .addGrid(xgb.round, Array(15))
      .addGrid(xgb.maxDepth, Array(20))
      .addGrid(xgb.scalePosWeight, Array(10.0))
      .addGrid(xgb.eta, Array(0.1))
      .addGrid(xgb.subSample, Array(0.85,0.9))
      .addGrid(xgb.alpha, Array(0.8))
      .addGrid(xgb.lambda, Array(0.9))
      .addGrid(xgb.nWorkers, Array(100))
      .build())
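    // Values set in the grid above (round, maxDepth, scalePosWeight, eta, subSample,
    // alpha, lambda, nWorkers) override the corresponding defaults from get_param()
    // for each cross-validation fit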

    // Create the XGBoost pipeline
    val pipeline = new Pipeline().setStages(Array(xgb))

    // Set up the binary classification evaluator
    val evaluator = (new BinaryClassificationEvaluator()
      .setLabelCol("label")
      .setRawPredictionCol("prediction")
      .setMetricName("areaUnderROC"))
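    // Caveat: "prediction" holds the hard 0/1 label, so this AUC (and the one CrossValidator
    // uses to pick the best model) is computed on thresholded outputs; pointing
    // setRawPredictionCol at the model's "probabilities" column should give a smoother ROC estimate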


    // Create the Cross Validation pipeline, using XGBoost as the estimator, the
    // Binary Classification evaluator, and xgbParamGrid for hyperparameters
    val cv = (new CrossValidator()
      .setEstimator(pipeline)
      .setEvaluator(evaluator)
      .setEstimatorParamMaps(xgbParamGrid)
      .setNumFolds(3))


    // Create the model by fitting the training data
    val xgbModel = cv.fit(trainingData)


    // Score the held-out test data with the fitted model
    val results = xgbModel.transform(testData)


    // Print out a copy of the parameters used by XGBoost
    (xgbModel.bestModel.asInstanceOf[PipelineModel]
      .stages(0).asInstanceOf[XGBoostClassificationModel]
      .extractParamMap().toSeq.foreach(println))

    println("-------------------")
    results.stat.crosstab("label","prediction").show()

    // Overall performance of the model on the test split, measured as AUC
    val auc = evaluator.evaluate(results)
    println("-------------------")
    println("auc="+auc)

    // Save the best model
    val bestPipelineModel = xgbModel.bestModel.asInstanceOf[PipelineModel]
    bestPipelineModel.save("/user/spark/security/TR2_best_model_random_neg_20171128_32")
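    // Note: save() fails if the target path already exists; use
    // bestPipelineModel.write.overwrite().save(...) to replace an earlier run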

    //load the best model
    //val loadedModel = PipelineModel.load("/user/spark/security/TR2_best_model_random_neg_20171128_32")

    sparkSession.stop()
  }
}
##Scoring with the saved model

package com.sf.demo

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.ml.PipelineModel
import org.apache.spark.ml.linalg.DenseVector

object wg_prediction_streaming {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName("Oct_wg_pred")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("yarn.nodemanager.vmem-check-enabled", "false")
    val sparkSession = SparkSession.builder().config(sparkConf).getOrCreate()
    import sparkSession.implicits._ // required for the tuple Encoder used by .map below
    val inputTestPath = "/user/spark/security/Oct_wg_included_maxsendconsname_added678_test_data_target_TR2"
    val data = sparkSession.sqlContext.read.format("libsvm").option("numFeatures", "28124").load(inputTestPath)

    //load the best model
    val best_Model = PipelineModel.load("/user/spark/security/TR2_best_model_random_neg_20171112_01")

    val results = best_Model.transform(data)
    results.createOrReplaceTempView("myresult")

    val new_result = sparkSession.sqlContext.sql("select label, probabilities, prediction from myresult")
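    // "probabilities" is a 2-element vector for this binary model; toArray(1) below pulls out
    // the probability of the positive class (label 1.0)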
    val split_probabilities_result = new_result.map(row => (row.getDouble(0), row.get(1).asInstanceOf[DenseVector].toArray(1), row.getDouble(2)))
    val final_result = split_probabilities_result.withColumnRenamed(existingName="_1",newName="label")
                         .withColumnRenamed(existingName="_2",newName="probabilities")
                         .withColumnRenamed(existingName="_3",newName="prediction")


    //final_result.filter("probabilities > '0.75'").count()
    //final_result.filter("probabilities > '0.75' and label=='1.0'").count()
    //println("-------------------")
    //results.stat.crosstab("label","prediction").show()

    sparkSession.stop()
  }
}

##Spark SQL reference: http://www.infoq.com/cn/articles/apache-spark-sql (the snippets below are spark-shell style)
results.createOrReplaceTempView("myresult")
val bb = spark.sql("select label, probabilities, prediction from myresult")
val cc = bb.select("probabilities")
cc.map(t => "probabilities: " + t(0)).collect().foreach(println)

import org.apache.spark.ml.linalg.DenseVector
cc.map(row => row.get(0).asInstanceOf[DenseVector].toArray(1)).show()
bb.map(row => row.get(1).asInstanceOf[DenseVector].toArray(1)).show()

bb.map(row => (row.getDouble(0), row.get(1).asInstanceOf[DenseVector].toArray(1), row.getDouble(2))).show()

val dd = bb.map(row => (row.getDouble(0), row.get(1).asInstanceOf[DenseVector].toArray(1), row.getDouble(2)))

##Column renaming
##https://docs.microsoft.com/zh-cn/azure/machine-learning/team-data-science-process/scala-walkthrough
dd.withColumnRenamed(existingName="_3",newName="prediction")
dd.withColumnRenamed(existingName="_3",newName="prediction").withColumnRenamed(existingName="_2",newName="probabilities").withColumnRenamed(existingName="_1",newName="label").show()

val final_result = dd.withColumnRenamed(existingName="_3",newName="prediction").withColumnRenamed(existingName="_2",newName="probabilities").withColumnRenamed(existingName="_1",newName="label")

##Save the results to the cluster (HDFS)
final_result.rdd.repartition(1).saveAsTextFile("/user/spark/security/iris_final_result")

##Save as CSV
final_result.write.format("csv").save("/user/spark/security/iris_final_result_002")

##Read a single CSV file
val data = spark.read.format("csv").load("/user/spark/security/iris_final_result_001111/part-00000-353abec2-c5ac-434e-a68b-ff9b64033a99.csv")

##Read multiple CSV files under a directory; the ordering across the generated part files is not guaranteed to match the order of the original records
val d1 = spark.read.format("csv").load("/user/spark/security/iris_final_result_001111")
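
##Optional variant (not in the original post): write the CSV with a header row so the columns keep their names when read back; the output path below is a placeholder, and the part-file ordering caveat above still applies
final_result.write.option("header", "true").format("csv").save("/user/spark/security/iris_final_result_with_header")
val reread = spark.read.option("header", "true").format("csv").load("/user/spark/security/iris_final_result_with_header")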

Reposted from: https://my.oschina.net/kyo4321/blog/1572367
