在终端打开spark-shell
然后在Scala REPL(交互式解释器)提示符下依次输入以下命令:
// Import the k-means clustering algorithm and dense-vector support from Spark MLlib.
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors

// Load the raw text data, then parse each line of space-separated numbers
// into a dense vector. cache() keeps the parsed RDD in memory because it is
// reused three times below: by train(), predict() and computeCost().
val data = sc.textFile("data/mllib/kmeans_data.txt")
val parsedData = data.map(line => Vectors.dense(line.split(' ').map(_.toDouble))).cache()

// Cluster the data into two groups, running k-means for at most 20 iterations.
val numClusters = 2
val numIterations = 20
val clusters = KMeans.train(parsedData, numClusters, numIterations)

// Extract the fitted cluster centers from the trained model.
val clusterCenters = clusters.clusterCenters

// Assign each input vector to its nearest cluster center (one label per point).
val labels = clusters.predict(parsedData)

// Persist the per-point cluster labels as text.
// NOTE(review): saveAsTextFile throws if the output path already exists —
// delete /output/kmeansTest/result before re-running this script.
labels.saveAsTextFile("/output/kmeansTest/result")

// Evaluate clustering quality via the Within Set Sum of Squared Errors (WSSSE):
// the sum of squared distances of points to their nearest cluster center.
val WSSSE = clusters.computeCost(parsedData)
println(s"Within Set Sum of Squared Errors = $WSSSE")