GTAdam: distributed online logistic regression with gradient tracking and Adam-style updates

import numpy as np
import networkx as nx
import matplotlib.pyplot as plt

N = 50         # number of agents
mi = 5         # data points per agent
C = 1          # regularization coefficient
T = 2000       # number of iterations
alpha = 0.1    # step size
beta1 = 0.9    # first-moment decay rate
beta2 = 0.999  # second-moment decay rate
eps = 1e-8     # numerical stability / convergence tolerance

# Generate the centers pc around which the data points will orbit
pc = np.random.rand(N, mi, 2) * 100 - 50
# Generate an Erdős-Rényi graph and a row-stochastic weight matrix.
# Note: row normalization yields a doubly stochastic matrix only when all
# node degrees are equal, which an ER graph does not guarantee.
def generate_ER_graph(N):
    G = nx.erdos_renyi_graph(N, 0.5)
    W = nx.adjacency_matrix(G).toarray() + np.eye(N)
    row_sums = W.sum(axis=1)
    W = W / row_sums[:, np.newaxis]
    return G, W
def generate_ring_graph(N):
    G = nx.cycle_graph(N)
    # Adjacency matrix plus self-loops
    W = nx.adjacency_matrix(G).toarray() + np.eye(N)

    # Normalize the rows; every node of the ring has degree 2, so the
    # normalized matrix is doubly stochastic
    row_sums = W.sum(axis=1)
    W = W / row_sums[:, np.newaxis]

    return G, W
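
# A minimal sanity check (our own addition, never called by the script):
# gradient tracking relies on W being doubly stochastic, i.e. both the row
# sums and the column sums must equal 1.
def _check_doubly_stochastic(W, tol=1e-12):
    ok_rows = np.allclose(W.sum(axis=1), 1.0, atol=tol)
    ok_cols = np.allclose(W.sum(axis=0), 1.0, atol=tol)
    return ok_rows and ok_cols
# Example: _check_doubly_stochastic(generate_ring_graph(8)[1]) is True, while
# the ER construction above is only guaranteed to pass the row-sum test.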


# Logistic regression loss for a single sample
def logistic_loss(w, b, x, y):  # weights w = (w1, w2), bias b, point x = (x1, x2), label y in {+1, -1}
    z = np.dot(x, w) + b
    # logaddexp(0, -y*z) equals log(1 + exp(-y*z)) but avoids overflow for large |z|
    return np.logaddexp(0, -y * z)

# Gradient of the logistic loss
def gradient_logistic_loss(w, b, x, y):
    z = np.dot(x, w) + b
    common_term = -y / (1 + np.exp(y * z))
    grad_w = common_term * x
    grad_b = common_term
    return grad_w, grad_b
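
# A finite-difference check of the analytic gradient (our own addition, never
# called by the script); central differences should match gradient_logistic_loss
# to about 1e-6 for moderate inputs.
def _check_gradient(w, b, x, y, h=1e-6):
    grad_w, grad_b = gradient_logistic_loss(w, b, x, y)
    num_w = np.array([
        (logistic_loss(w + h * e, b, x, y) - logistic_loss(w - h * e, b, x, y)) / (2 * h)
        for e in np.eye(len(w))
    ])
    num_b = (logistic_loss(w, b + h, x, y) - logistic_loss(w, b - h, x, y)) / (2 * h)
    return np.allclose(grad_w, num_w, atol=1e-5) and np.isclose(grad_b, num_b, atol=1e-5)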

# Centralized gradient descent on the full regularized problem; used as the
# benchmark solution at each time step
def optimal_solution(pt, labels, max_iter=100000):
    d = pt[0][0].shape[0]
    w_opt = np.zeros(d)
    b_opt = 0

    iteration = 0
    while True:
        iteration += 1
        # Reset the accumulated gradient at the start of every iteration
        g_w = np.zeros(d)
        g_b = 0
        for i in range(N):
            for j in range(mi):
                grad_w, grad_b = gradient_logistic_loss(w_opt, b_opt, pt[i][j], labels[i][j])
                g_w += grad_w  # accumulate the gradients of all N*mi samples
                g_b += grad_b
        g_w = g_w / (N * mi) + C * w_opt
        g_b = g_b / (N * mi) + C * b_opt
        w_opt -= alpha * g_w
        b_opt -= alpha * g_b

        # Stop once every gradient entry is below eps; max_iter is a safeguard
        # against the loop running forever
        if (np.all(np.abs(g_w) < eps) and np.all(np.abs(g_b) < eps)) or iteration >= max_iter:
            break

    return w_opt, b_opt
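
# Stationarity check for the benchmark (our own addition, never called by the
# script): recompute the full regularized gradient at (w, b); near the minimizer
# of this strongly convex objective its norm should be close to zero.
def _residual_gradient_norm(w, b, pt, labels):
    g_w, g_b = np.zeros_like(w), 0.0
    for i in range(N):
        for j in range(mi):
            gw, gb = gradient_logistic_loss(w, b, pt[i][j], labels[i][j])
            g_w += gw
            g_b += gb
    g_w = g_w / (N * mi) + C * w
    g_b = g_b / (N * mi) + C * b
    return np.sqrt(np.dot(g_w, g_w) + g_b ** 2)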


# Generate the data points pt at time t: every center in pc is shifted by a
# point on the unit circle, so the data drift slowly over time
def generate_data_points(pc, t):
    pt = pc + np.array([np.cos(t / 100), np.sin(t / 100)])
    return pt
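
# Illustration (our own addition, never called by the script): every data point
# orbits its own center on a unit circle, so the whole cloud shifts rigidly;
# between any two times all points move by the same distance.
def _demo_drift():
    p0 = generate_data_points(pc, 0)   # offset (cos 0, sin 0) = (1, 0)
    p1 = generate_data_points(pc, 50)  # offset (cos 0.5, sin 0.5)
    shifts = np.linalg.norm(p1 - p0, axis=-1)
    print("all points shifted equally:", np.allclose(shifts, shifts.flat[0]))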


# GTAdam algorithm, following the pseudocode: each agent i keeps a local
# solution x_i = (w_i, b_i), Adam moments (m_i, v_i), a local gradient g_i,
# and a gradient tracker s_i
def GTAdam(data_points, labels, G, W, T, alpha, beta1, beta2, eps):
    relative_cost_error = []
    d = data_points[0][0].shape[0]  # dimension of a data point: 2 features, coordinates (x, y)

    # Initialize x = (w, b) and the Adam moments m, v
    w_list = [np.zeros(d) for _ in range(N)]    # local solutions
    b_list = [0 for _ in range(N)]
    m_w_list = [np.zeros(d) for _ in range(N)]  # first moments (w part)
    m_b_list = [0 for _ in range(N)]            # first moments (b part)
    v_w_list = [np.zeros(d) for _ in range(N)]  # second moments
    v_b_list = [0 for _ in range(N)]

    # g and s need a special initialization: at t = 0 each tracker starts
    # at the agent's own local gradient
    g_w_list = [np.zeros(d) for _ in range(N)]  # local gradients
    g_b_list = [0 for _ in range(N)]
    s_w_list = [np.zeros(d) for _ in range(N)]  # gradient trackers
    s_b_list = [0 for _ in range(N)]

    pt = generate_data_points(pc, 0)
    for i in range(N):  # initialization at t = 0
        sum_g_w = np.zeros(d)
        sum_g_b = 0
        for j in range(mi):
            grad_w, grad_b = gradient_logistic_loss(w_list[i], b_list[i], pt[i][j], labels[i][j])
            sum_g_w += grad_w
            sum_g_b += grad_b
        # The regularization terms C*w_list[i] and C*b_list[i] are zero here
        g_w_list[i] = sum_g_w / mi + C * w_list[i]
        g_b_list[i] = sum_g_b / mi + C * b_list[i]
        s_w_list[i] = g_w_list[i]
        s_b_list[i] = g_b_list[i]

    for t in range(1, T + 1):
        # The step from t to t+1 simply overwrites the old data with new data
        pt = generate_data_points(pc, t)  # refresh the (time-varying) data

        # Update the first and second moments m, v, then take the consensus step
        w_list_pre = w_list.copy()  # keep all x_i at time t for the mixing term W_ij * x_j
        b_list_pre = b_list.copy()
        for i in range(N):
            m_w_list[i] = beta1 * m_w_list[i] + (1 - beta1) * s_w_list[i]
            m_b_list[i] = beta1 * m_b_list[i] + (1 - beta1) * s_b_list[i]
            v_w_list[i] = beta2 * v_w_list[i] + (1 - beta2) * (s_w_list[i] ** 2)
            v_b_list[i] = beta2 * v_b_list[i] + (1 - beta2) * (s_b_list[i] ** 2)

            # Exchange local solutions with the neighbors (consensus), then
            # take an Adam-style step along the tracked direction
            w_list[i] = sum(W[i][j] * w_list_pre[j] for j in range(N)) - alpha * m_w_list[i] / np.sqrt(v_w_list[i] + eps)
            b_list[i] = sum(W[i][j] * b_list_pre[j] for j in range(N)) - alpha * m_b_list[i] / np.sqrt(v_b_list[i] + eps)

        # Update the local gradients g and the trackers s
        s_w_list_pre = s_w_list.copy()
        s_b_list_pre = s_b_list.copy()
        for i in range(N):
            sum_g_w = np.zeros(d)  # accumulated gradient
            sum_g_b = 0
            g_w_prev = g_w_list[i].copy()  # keep g at time t; s is updated with g(t+1) - g(t)
            g_b_prev = g_b_list[i]
            for j in range(mi):
                grad_w, grad_b = gradient_logistic_loss(w_list[i], b_list[i], pt[i][j], labels[i][j])
                sum_g_w += grad_w
                sum_g_b += grad_b
            g_w_list[i] = sum_g_w / mi + C * w_list[i]  # C is the regularization coefficient
            g_b_list[i] = sum_g_b / mi + C * b_list[i]

            s_w_list[i] = sum(W[i][j] * s_w_list_pre[j] for j in range(N)) + g_w_list[i] - g_w_prev
            s_b_list[i] = sum(W[i][j] * s_b_list_pre[j] for j in range(N)) + g_b_list[i] - g_b_prev

        # loss2: cost of the optimal solution at time t
        w_opt, b_opt = optimal_solution(pt, labels)

        # Total cost over all N*mi data points; the sample losses are averaged
        # so the objective matches the gradients used in the updates above
        loss1 = 0
        loss2 = 0
        for i in range(N):
            for j in range(mi):
                loss2 += logistic_loss(w_opt, b_opt, pt[i][j], labels[i][j])
        loss2 = loss2 / (N * mi) + 0.5 * C * (np.dot(w_opt, w_opt) + b_opt ** 2)

        # loss1: cost of the agents' average solution at time t
        w_average = sum(w_list[i] for i in range(N)) / N
        b_average = sum(b_list[i] for i in range(N)) / N
        if t % 100 == 0:
            print(f"t = {t}")
            print(f"w_opt = {w_opt}")
            print(f"w_average = {w_average}")
        for i in range(N):
            for j in range(mi):
                loss1 += logistic_loss(w_average, b_average, pt[i][j], labels[i][j])
        loss1 = loss1 / (N * mi) + 0.5 * C * (np.dot(w_average, w_average) + b_average ** 2)
        relative_cost_error.append(abs((loss1 - loss2) / loss2))

    return w_list, b_list, relative_cost_error, w_opt, b_opt
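
# Consensus diagnostic (our own addition, never called by the script): if the
# mixing step is working, every agent's local solution should be close to the
# network average; this reports the largest deviation.
def _consensus_gap(w_list, b_list):
    w_avg = sum(w_list) / len(w_list)
    b_avg = sum(b_list) / len(b_list)
    gap_w = max(np.linalg.norm(w - w_avg) for w in w_list)
    gap_b = max(abs(b - b_avg) for b in b_list)
    return gap_w, gap_b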

# Main function
def main():
    # Generate the graph and weight matrix
    G, W = generate_ring_graph(N)
    print(G)
    print(W)

    # Generate the data points pt
    data_points = generate_data_points(pc, 0)
    # Random labels: labels[n][m] is the label of agent n's m-th data point
    labels = np.random.choice([1, -1], size=(N, mi))

    # Run GTAdam
    w_list, b_list, relative_cost_error, w_opt, b_opt = GTAdam(data_points, labels, G, W, T, alpha, beta1, beta2, eps)
    print(w_list)
    print(b_list)
    print(w_opt)
    print(b_opt)
    print(relative_cost_error)

    # x-axis: iterations 1..T; y-axis: the relative cost error
    x_values = list(range(1, T + 1))
    y_values = relative_cost_error

    # Plot on a logarithmic y-scale
    plt.semilogy(x_values, y_values, '-', markersize=4, label='Relative Cost Error')
    plt.xlabel('T')
    plt.ylabel('Relative Cost Error(T)')
    plt.title('Relationship between Relative Cost Error and T')
    plt.legend()
    plt.grid(True)
    plt.show()


if __name__ == "__main__":
    main()
