1. Generate an Array of N DataPoint(Vector, Double) instances. Each DataPoint holds a D-element feature Vector filled with values from rand; the Gaussian features are shifted by y * R so that the two label classes are separable.
def generateData: Array[DataPoint] = {
  def generatePoint(i: Int): DataPoint = {
    // Alternate the label: even indices get -1, odd indices get +1
    val y = if (i % 2 == 0) -1 else 1
    // D Gaussian features, shifted by y * R so the two classes are separable
    val x = DenseVector.fill(D){rand.nextGaussian + y * R}
    DataPoint(x, y)
  }
  Array.tabulate(N)(generatePoint)
}
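The snippet references N, D, R, and rand without defining them. A minimal set of surrounding definitions (a sketch; the constant values mirror those in Spark's bundled SparkLR example) would be:

import java.util.Random
import scala.math.exp
import breeze.linalg.{Vector, DenseVector}

val N = 10000        // number of data points
val D = 10           // dimensionality of each feature vector
val R = 0.7          // offset separating the two classes
val ITERATIONS = 5   // number of gradient-descent steps
val rand = new Random(42)   // fixed seed for reproducibility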
2. Define DataPoint: a feature vector x paired with its label y (-1 or +1).
case class DataPoint(x: Vector[Double], y: Double)
3. Randomly initialize the weight vector w, with the same dimensionality D as the feature vectors; each component is uniform in [-1, 1).
var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
4. Iterate ITERATIONS times, accumulating the logistic-regression gradient and adjusting the weights. Each point p contributes p.x scaled by (1 / (1 + e^(-y * (w . x))) - 1) * y; a short derivation follows the loop below.
for (i <- 1 to ITERATIONS) {
  println("On iteration " + i)
  var gradient = DenseVector.zeros[Double](D)
  for (p <- data) {
    // Per-point contribution to the logistic-loss gradient
    val scale = (1 / (1 + math.exp(-p.y * (w.dot(p.x)))) - 1) * p.y
    gradient += p.x * scale
  }
  // Plain gradient-descent step with step size 1, as in the original example
  w -= gradient
}
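Why the scale factor has that form: for labels y in {-1, +1}, the per-point logistic loss and its gradient with respect to w are

L(w) = \log\left(1 + e^{-y\, w^\top x}\right)

\nabla L(w) = \frac{e^{-y\, w^\top x}}{1 + e^{-y\, w^\top x}}\,(-y\, x) = \left(\frac{1}{1 + e^{-y\, w^\top x}} - 1\right) y\, x

which is exactly p.x * scale in the code above.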
5. Parallelize with Spark: distribute the points as a cached RDD, compute each point's gradient contribution with map, and sum the contributions with reduce.
import org.apache.spark.{SparkConf, SparkContext}

val sparkConf = new SparkConf().setAppName("SparkLR")
val sc = new SparkContext(sparkConf)
val numSlices = if (args.length > 0) args(0).toInt else 2
// Cache the RDD: every iteration re-reads the same points
val points = sc.parallelize(generateData, numSlices).cache()

// Initialize w to a random value
var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
println("Initial w: " + w)

for (i <- 1 to ITERATIONS) {
  println("On iteration " + i)
  // Executors compute per-point contributions; reduce sums them into one vector
  val gradient = points.map { p =>
    p.x * (1 / (1 + exp(-p.y * (w.dot(p.x)))) - 1) * p.y
  }.reduce(_ + _)
  w -= gradient
}

println("Final w: " + w)
sc.stop()
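Note that the current w is captured in the map closure, so it is re-serialized into every task on every iteration. For larger models, a common variant (a sketch, not part of the original example) ships w once per iteration as a broadcast variable:

for (i <- 1 to ITERATIONS) {
  // Broadcast w once per iteration instead of re-serializing it in each task closure
  val wBroadcast = sc.broadcast(w)
  val gradient = points.map { p =>
    p.x * (1 / (1 + exp(-p.y * (wBroadcast.value.dot(p.x)))) - 1) * p.y
  }.reduce(_ + _)
  wBroadcast.unpersist()
  w -= gradient
}

For a vector of only D = 10 doubles this makes no practical difference, but it keeps task sizes constant as D grows.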