# -*- coding: utf-8 -*-
"""Train, evaluate, save and reload a DecisionTree classifier with Spark MLlib.

Loads a LibSVM-format dataset, holds out 30% for testing, trains a binary
decision-tree classifier, reports the test error, then round-trips the model
through save/load.
"""
from pyspark import SparkConf, SparkContext
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel
from pyspark.mllib.util import MLUtils

sc = SparkContext('local')

# Load and parse the data file into an RDD of LabeledPoint.
# Each line of a LibSVM file encodes one labeled sparse feature vector:
#     label index1:value1 index2:value2 ...
# Example (from the MLUtils.loadLibSVMFile doctest):
#     >>> tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\n-1\n-1 2:4.0 4:5.0 6:6.0")
#     >>> tempFile.flush()
#     >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
#     >>> tempFile.close()
#     >>> examples[0]
#     LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
#     >>> examples[1]
#     LabeledPoint(-1.0, (6,[],[]))
#     >>> examples[2]
#     LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')

# Split the data into training and test sets (30% held out for testing).
(trainingData, testData) = data.randomSplit([0.7, 0.3])

# Train a DecisionTree model.
# An empty categoricalFeaturesInfo indicates all features are continuous.
model = DecisionTree.trainClassifier(trainingData, numClasses=2,
                                     categoricalFeaturesInfo={},
                                     impurity='gini', maxDepth=5, maxBins=32)

# Evaluate the model on test instances and compute the test error.
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(
    lambda lp: lp[0] != lp[1]).count() / float(testData.count())
print('Test Error = ' + str(testErr))  # e.g. Test Error = 0.04

# Save the model, then reload it and verify it still predicts.
model.save(sc, "myDecisionTreeClassificationModel")
sameModel = DecisionTreeModel.load(sc, "myDecisionTreeClassificationModel")
print(sameModel.predict(data.collect()[0].features))  # e.g. 0.0