I recently started learning Spark. After spending some time on the official site, I decided to work through the examples in the GitHub repo it links to. These are my own study notes; maybe they'll be useful to someone else too. Anyway, I'm still a rookie...
Not sure how many posts I'll manage to write, but I'll do my best.
import sys
import numpy as np
from pyspark.sql import SparkSession
def parseVector(line):
return np.array([float(x) for x in line.split(' ')])
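# Example (illustrative): parseVector("1.0 2.0 3.0") -> array([1., 2., 3.])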
# Compute the center closest to point p and return that center's index
def closestPoint(p, centers):
bestIndex = 0
closest = float("+inf")
for i in range(len(centers)):
tempDist = np.sum((p - centers[i]) ** 2)
if tempDist < closest:
closest = tempDist
bestIndex = i
return bestIndex
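# Example (illustrative): with centers = [np.array([0., 0.]), np.array([2., 2.])],
# closestPoint(np.array([2.0, 1.0]), centers) returns 1, since the squared
# distance to centers[1] is 1 while the distance to centers[0] is 5.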
if __name__ == "__main__":
if len(sys.argv) != 4:
print("Usage: kmeans <file> <k> <convergeDist>", file=sys.stderr)
sys.exit(-1)
print("""WARN: This is a naive implementation of KMeans Clustering and is given
as an example! Please refer to examples/src/main/python/ml/kmeans_example.py for an
example on how to use ML's KMeans implementation.""", file=sys.stderr)
    # Create the SparkSession
spark = SparkSession\
.builder\
.appName("PythonKMeans")\
.getOrCreate()
lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])
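    # read.text gives a DataFrame with a single string column; .rdd.map(lambda r: r[0])
    # pulls the raw text line out of each Row.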
data = lines.map(parseVector).cache()
K = int(sys.argv[2])
convergeDist = float(sys.argv[3])
    # Randomly sample K points as the initial centers
kPoints = data.takeSample(False, K, 1)
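    # takeSample(withReplacement=False, num=K, seed=1) returns a list of K points
    # to the driver; the fixed seed makes the initial centers reproducible.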
tempDist = 1.0
    # Iterate until the centers move less than the convergence threshold
while tempDist > convergeDist:
        # Find the closest center for every point in data
closest = data.map(
lambda p: (closestPoint(p, kPoints), (p, 1)))
        # reduceByKey groups the points by their closest center: p1_c1[0] + p2_c2[0]
        # sums the point coordinates (used later to compute the new center), and
        # p1_c1[1] + p2_c2[1] counts how many points belong to each center
pointStats = closest.reduceByKey(
lambda p1_c1, p2_c2: (p1_c1[0] + p2_c2[0], p1_c1[1] + p2_c2[1]))
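        # e.g. if p1 = array([1., 2.]) and p2 = array([3., 4.]) both map to center 0,
        # the records (0, (p1, 1)) and (0, (p2, 1)) reduce to (0, (array([4., 6.]), 2)):
        # the coordinate sum and the point count for that cluster.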
        # Compute the new centers (coordinate sum / point count)
newPoints = pointStats.map(
lambda st: (st[0], st[1][0] / st[1][1])).collect()
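        # collect() brings the small list of (index, newCenter) pairs back to the driver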
        # Total squared distance between each new center and its old center
tempDist = sum(np.sum((kPoints[iK] - p) ** 2) for (iK, p) in newPoints)
        # Update the centers
for (iK, p) in newPoints:
kPoints[iK] = p
print("Final centers: " + str(kPoints))
spark.stop()
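To see concretely what one loop iteration computes, here is a minimal local sketch of the map / reduceByKey / new-center steps without Spark. The point values and the two initial centers are made up for illustration, and the snippet assumes the closestPoint function defined above is in scope:

import numpy as np

# Toy data: four 2-D points and two made-up initial centers (K = 2).
points = [np.array([0.0, 0.0]), np.array([0.0, 1.0]),
          np.array([9.0, 8.0]), np.array([8.0, 9.0])]
kPoints = [np.array([0.0, 0.0]), np.array([9.0, 9.0])]

# map step: tag every point with the index of its closest center
mapped = [(closestPoint(p, kPoints), (p, 1)) for p in points]

# reduceByKey step, done by hand: per center, sum coordinates and counts
stats = {}
for i, (p, c) in mapped:
    s, n = stats.get(i, (0, 0))
    stats[i] = (s + p, n + c)

# new centers: coordinate sum divided by point count
newPoints = [(i, s / n) for i, (s, n) in stats.items()]
print(newPoints)  # [(0, array([0. , 0.5])), (1, array([8.5, 8.5]))]

To run the real script, submit it with spark-submit, e.g. spark-submit kmeans.py <file> 2 0.1, where <file> contains one point per line as space-separated coordinates (the Spark source tree ships a small sample at data/mllib/kmeans_data.txt). As the warning in the script itself says, this is a naive teaching implementation; for real work use the KMeans in Spark ML (see examples/src/main/python/ml/kmeans_example.py).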