object sgdDemo {
  /**
   * Stochastic gradient descent for simple linear regression.
   *
   * Fits theta so that features . theta ~= label, cycling through the four
   * training samples one at a time (classic SGD), for at most 1000 update
   * steps or until the half-sum-of-squared-errors loss drops to <= 0.01.
   */
  def main(args: Array[String]): Unit = {
    // Feature matrix, one row per sample. To fit an intercept as well,
    // prepend a constant-1 column: List(1, 1, 4), List(1, 2, 5), ...
    val featuresMatrix: List[List[Double]] = List(List(1, 4), List(2, 5), List(5, 1), List(4, 2))
    val labelMatrix: List[Double] = List(19, 26, 19, 20) // target values, one per sample
    var theta: List[Double] = List(0, 0)                 // parameter vector, initialised to zero
    var loss: Double = 10.0                              // start above the threshold so the loop runs
    val alpha = 0.001                                    // learning rate
    println("features: " + featuresMatrix + "\n" + "label: " + labelMatrix + "\n" + "alpha: " + alpha + "\n")

    for {
      i <- 0 until 1000 // maximum number of update steps ("until" excludes the end)
      if loss > 0.01    // convergence guard: stop updating once loss <= 0.01
    } {
      val j = i % featuresMatrix.length // cycle through the samples: 0,1,2,3,0,1,...

      // Prediction for sample j: h(j) = x_j . theta (dot product over the 2 features).
      val h_j = featuresMatrix(j).zip(theta).map { case (x, t) => x * t }.sum
      val error = labelMatrix(j) - h_j // prediction error for sample j

      // SGD update: theta_k += alpha * error * x_jk.
      // BUG FIX: the original accumulated the updated components with
      // `updaterTheta +: cacheTheta`, which PREPENDS and therefore reversed
      // the parameter vector on every iteration (theta(0) got theta(1)'s
      // update and vice versa). Build the new vector in index order instead.
      theta = theta.zip(featuresMatrix(j)).map { case (t, x) => t + alpha * error * x }
      theta.foreach(t => print(t + ","))
      print("\t")

      // Recompute the loss over ALL samples: (1/2) * sum of squared residuals.
      var sumLoss: Double = 0
      for (s <- featuresMatrix.indices) {
        val h = featuresMatrix(s).zip(theta).map { case (x, t) => x * t }.sum
        val r = h - labelMatrix(s)
        sumLoss += r * r
      }
      loss = sumLoss / 2
      println("loss->>>>" + loss + " ,i>>>>>" + i + "\n")
    }
  }
}
// Stochastic gradient descent implementation for simple linear regression: Linear Regression - SGD
// (Latest related article published 2024-07-25 12:14:33)