# 转载于:原文 (Reposted from the original article)
from pyspark.sql import SparkSession
from pyspark.ml.feature import StringIndexer
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.evaluation import BinaryClassificationEvaluator
spark = SparkSession.builder.appName("Spark_mllearn_example").getOrCreate()

# Load the raw feature table from HDFS (Parquet).
dpath = 'hdfs:///user/sparknode/data/GLM_64_300/'
data = spark.read.parquet(dpath)

# Cast every column to float so VectorAssembler/LogisticRegression accept them.
# NOTE: `col_name` avoids shadowing pyspark.sql.functions.col.
for col_name in data.columns:
    data = data.withColumn(col_name, data[col_name].cast('float'))

# Feature columns: everything except the label column '64'.
lst_col = data.columns
lst_col.remove('64')

# Build the preprocessing pipeline: assemble features into one vector
# column, then index the raw label column into a numeric "label" column.
vecAssembler = VectorAssembler(inputCols=lst_col, outputCol="features")
stringIndexer = StringIndexer(inputCol="64", outputCol="label")
pipeline = Pipeline(stages=[vecAssembler, stringIndexer])
pipelineFit = pipeline.fit(data)
dataset = pipelineFit.transform(data)

# 70/30 train/test split; fixed seed for reproducibility.
(trainingData, testData) = dataset.randomSplit([0.7, 0.3], seed=123)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))

# Train an L2-regularized logistic regression (elasticNetParam=0 -> ridge).
lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)
lrModel = lr.fit(trainingData)

# Score the held-out test set.
prediction = lrModel.transform(testData)

# Evaluate: area under the ROC curve. The original discarded this value;
# capture and print it so the run actually reports its result.
evaluator = BinaryClassificationEvaluator(metricName="areaUnderROC")
auc = evaluator.evaluate(prediction)
print("Test areaUnderROC: " + str(auc))

# Release cluster resources cleanly.
spark.stop()