Applying SGD, Momentum, Vanilla SGD, RMSprop, Adam, and Other Optimization Algorithms to a Simple Logistic Classification Task

This post looks at how optimization algorithms behave when searching for the extrema of a function, covering SGD, Momentum, RMSprop, and Adam. The core code of each algorithm is implemented and run experimentally, the loss curves over training are shown, and the convergence speed and final results of the different algorithms are compared. In these experiments, Adam generally performs best.

Reference post: "Applying SGD, Momentum, Vanilla SGD, RMSprop, Adam, and other optimization algorithms to finding function extrema" (tcuuuqladvvmm454's blog on CSDN).

Here some data points are randomly generated to form two classes; a minimal sketch of this generation step is shown below.

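A minimal sketch of this step, assuming cluster centers, the random seed, and the variable names inputs and targets (all our choices; the loss functions later in the post read inputs and targets as globals):

import autograd.numpy as np  # autograd's NumPy wrapper, used throughout the post

np.random.seed(0)
n = 50
class0 = np.random.randn(n, 2) + np.array([2.0, 2.0])    # first cluster near (2, 2)
class1 = np.random.randn(n, 2) + np.array([-2.0, -2.0])  # second cluster near (-2, -2)
X = np.vstack([class0, class1])
inputs = np.hstack([X, np.ones((2 * n, 1))])             # append a bias column
targets = np.hstack([np.zeros(n), np.ones(n)])           # labels: 0 for class0, 1 for class1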

The core code is as follows. The original listing starts at __init__, so the imports and a class wrapper (the class name Optimizer is our own) are restored here:

import autograd.numpy as np
from autograd import grad
import matplotlib.pyplot as plt

EPOCHS = 200  # assumed value; the original post does not show this constant

class Optimizer:
    def __init__(self, loss, weights, lr=2.1, beta1=0.9, beta2=0.999, epsilon=1e-8):
        self.loss = loss
        self.theta = weights            # parameter vector being optimized
        self.lr = lr                    # base learning rate
        self.beta1 = beta1              # first-moment (momentum) decay rate
        self.beta2 = beta2              # second-moment decay rate
        self.epsilon = epsilon          # small constant to avoid division by zero
        self.get_gradient = grad(loss)  # autograd gradient of the loss
        self.m = 0                      # first-moment estimate
        self.v = 0                      # second-moment estimate
        self.t = 0                      # time step

    def minimize_raw(self, epochs=EPOCHS):  # Adam with explicit bias correction
        ee = []  # per-epoch loss history
        for _ in range(epochs):
            self.t += 1
            g = self.get_gradient(self.theta)
            self.m = self.beta1 * self.m + (1 - self.beta1) * g        # biased first moment
            self.v = self.beta2 * self.v + (1 - self.beta2) * (g * g)  # biased second moment
            self.m_cat = self.m / (1 - self.beta1 ** self.t)           # bias-corrected first moment
            self.v_cat = self.v / (1 - self.beta2 ** self.t)           # bias-corrected second moment
            self.theta -= self.lr * self.m_cat / (self.v_cat ** 0.5 + self.epsilon)
            final_loss = self.loss(self.theta)
            ee.append(final_loss)
        plt.figure()
        plt.plot(ee)
        plt.show()
        print("adam_final loss:{} weights0:{}".format(final_loss, self.theta[0]))
    def minimize_raw1(self, epochs=EPOCHS):  # vanilla SGD
        ee = []  # per-epoch loss history
        for _ in range(epochs):
            self.t += 1
            g = self.get_gradient(self.theta)
            # The moment updates below are kept from the original listing,
            # but the vanilla SGD step does not use them.
            self.m = self.beta1 * self.m + (1 - self.beta1) * g
            self.v = self.beta2 * self.v + (1 - self.beta2) * (g * g)
            self.m_cat = self.m / (1 - self.beta1 ** self.t)
            self.v_cat = self.v / (1 - self.beta2 ** self.t)
            self.theta -= self.lr * g  # plain gradient step
            final_loss = self.loss(self.theta)
            ee.append(final_loss)
        plt.figure()
        plt.plot(ee)
        plt.show()
        print("sgd_final loss:{} weights0:{}".format(final_loss, self.theta[0]))

    def minimize(self, epochs=EPOCHS):  # Adam with bias correction folded into the learning rate
        ee = []  # per-epoch loss history
        for _ in range(epochs):
            self.t += 1
            g = self.get_gradient(self.theta)
            # Fold both bias-correction terms into a per-step learning rate
            lr = self.lr * (1 - self.beta2 ** self.t) ** 0.5 / (1 - self.beta1 ** self.t)
            self.m = self.beta1 * self.m + (1 - self.beta1) * g
            self.v = self.beta2 * self.v + (1 - self.beta2) * (g * g)
            self.theta -= lr * self.m / (self.v ** 0.5 + self.epsilon)
            final_loss = self.loss(self.theta)
            ee.append(final_loss)
        plt.figure()
        plt.plot(ee)
        plt.show()
        print("rmsprop_final loss:{} weights0:{}".format(final_loss, self.theta[0]))
    def minimize2(self, epochs=EPOCHS):  # momentum
        ee = []  # per-epoch loss history
        for _ in range(epochs):
            self.t += 1
            g = self.get_gradient(self.theta)
            lr = self.lr * (1 - self.beta2 ** self.t) ** 0.5 / (1 - self.beta1 ** self.t)
            self.m = self.beta1 * self.m + lr * g  # velocity accumulates the scaled gradient
            self.theta -= self.m                   # heavy-ball momentum step
            final_loss = self.loss(self.theta)
            ee.append(final_loss)
        plt.figure()
        plt.plot(ee)
        plt.show()
        print("momentum_final loss:{} weights0:{}".format(final_loss, self.theta[0]))
        

    def minimize_show(self, epochs=EPOCHS):
        # Run the optimizer three times in a row with different base learning
        # rates to compare their effect (state carries over between sweeps).
        lr1 = [0.1, 0.3, 0.0001]
        for uu in range(3):
            for _ in range(epochs):
                self.t += 1
                g = self.get_gradient(self.theta)
                # Apply the folded bias correction to this sweep's base rate
                lr = lr1[uu] * (1 - self.beta2 ** self.t) ** 0.5 / (1 - self.beta1 ** self.t)
                self.m = self.beta1 * self.m + (1 - self.beta1) * g
                self.v = self.beta2 * self.v + (1 - self.beta2) * (g * g)
                self.theta -= lr * self.m / (self.v ** 0.5 + self.epsilon)
            final_loss = self.loss(self.theta)
            print("lr={} final loss:{} weights:{}".format(lr1[uu], final_loss, self.theta))


def sigmoid(x):
    return 1 / (np.exp(-x) + 1)

def plot_sigmoid_dao():
    # Plot the sigmoid alongside its derivative sigmoid(x) * (1 - sigmoid(x)).
    x = np.arange(-8, 8, 0.1)
    y = sigmoid(x) * (1 - sigmoid(x))
    y1 = sigmoid(x)
    plt.plot(x, y, label='sigmoid derivative')
    plt.plot(x, y1, label='sigmoid')
    plt.legend()
    plt.show()

plot_sigmoid_dao()
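The curve labeled "sigmoid derivative" uses the identity that the derivative of the sigmoid can be expressed through the sigmoid itself:

$$\sigma(x) = \frac{1}{1+e^{-x}}, \qquad \sigma'(x) = \frac{e^{-x}}{(1+e^{-x})^2} = \sigma(x)\,\bigl(1-\sigma(x)\bigr).$$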
    
def logistic_predictions(weights, inputs):
    # Outputs probability of a label being true according to logistic model.
    return sigmoid(np.dot(inputs, weights))

def training_loss1(weights):
    # Mean squared error between the predicted probabilities and the labels.
    preds = logistic_predictions(weights, inputs)
    return np.sum((preds - targets) ** 2) / preds.shape[0]
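In symbols, training_loss1 is the mean squared error of the predicted probabilities over the $N$ training points:

$$L(w) = \frac{1}{N}\sum_{i=1}^{N}\bigl(\sigma(x_i \cdot w) - t_i\bigr)^2.$$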

def training_loss(weights):
    # Sum of squared errors, accumulated element by element.
    preds = sigmoid(np.dot(inputs, weights))
    ee = []
    for i in range(preds.shape[0]):
        ee.append((preds[i] - targets[i]) ** 2)
    return sum(ee)
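A minimal driver tying the pieces together, assuming the Optimizer class name and the inputs/targets generated earlier (the original post does not show this wiring). Each run gets a fresh copy of the weights because every method updates theta in place:

weights = np.zeros(inputs.shape[1])

Optimizer(training_loss, weights.copy()).minimize_raw()   # Adam, explicit bias correction
Optimizer(training_loss, weights.copy()).minimize_raw1()  # vanilla SGD
Optimizer(training_loss, weights.copy()).minimize()       # Adam, folded learning rate
Optimizer(training_loss, weights.copy()).minimize2()      # momentum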
