简单线性回归的随机梯度下降算法实现:Linear Regression - SGD

// Stochastic gradient descent (SGD) for simple linear regression, no intercept term.
// To model an intercept, prepend a constant 1.0 feature to every sample row
// (e.g. List(1, 1, 4), List(1, 2, 5), ...).
object sgdDemo {

  /** Trains linear-regression weights with cyclic stochastic gradient descent.
    *
    * Bug fixed versus the original: the per-feature updated weights were collected
    * with prepend (`updaterTheta +: cacheTheta`), which reversed the component order
    * of theta on every iteration and thereby swapped the two weights. Here the update
    * is built with `zip`/`map`, which preserves component order.
    *
    * @param features  training matrix, one row (feature vector) per sample
    * @param labels    target value for each sample; must align with `features`
    * @param alpha     learning rate (step size)
    * @param maxIter   maximum number of SGD steps; each step uses one sample, cycling
    * @param tolerance stop early once the half residual-sum-of-squares loss is <= this
    * @return the learned weight vector, same dimension as one feature row
    */
  def fit(features: List[List[Double]],
          labels: List[Double],
          alpha: Double = 0.001,
          maxIter: Int = 1000,
          tolerance: Double = 0.01): List[Double] = {
    require(features.length == labels.length, "features and labels must have the same length")
    if (features.isEmpty) List.empty[Double]
    else {
      val sampleCount = features.length
      // theta starts at the zero vector, one weight per feature dimension.
      var theta = List.fill(features.head.length)(0.0)
      var loss = Double.MaxValue
      var step = 0
      while (step < maxIter && loss > tolerance) {
        val j = step % sampleCount // cycle through the samples in order
        val x = features(j)
        // Residual for sample j: y(j) - h(j), where h(j) = x . theta.
        val error = labels(j) - dot(x, theta)
        // Gradient step; zip/map keeps weight k aligned with feature k.
        theta = theta.zip(x).map { case (t, xk) => t + alpha * error * xk }
        // Full-dataset loss: half the residual sum of squares.
        loss = features.zip(labels).map { case (xi, yi) =>
          val r = dot(xi, theta) - yi
          r * r
        }.sum / 2
        step += 1
      }
      theta
    }
  }

  /** Inner product of two equally-sized vectors. */
  private def dot(a: List[Double], b: List[Double]): Double =
    a.zip(b).map { case (p, q) => p * q }.sum

  def main(args: Array[String]): Unit = {
    // Demo dataset: the exact solution is theta = (3, 4).
    val featuresMatrix: List[List[Double]] = List(List(1, 4), List(2, 5), List(5, 1), List(4, 2))
    val labelMatrix: List[Double] = List(19, 26, 19, 20)
    val alpha = 0.001

    println(s"features: $featuresMatrix\nlabel: $labelMatrix\nalpha: $alpha\n")

    val theta = fit(featuresMatrix, labelMatrix, alpha)
    println(s"theta ->>>> $theta")
  }
}
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值