【联邦学习——手动搭建简易联邦学习】

1. 目的

用于记录自己在手写联邦学习相关实验时碰到的一些问题,方便自己进行回顾。

2. 代码

2.1 本地模型计算梯度更新

# Compare parameters before and after training.
def compare_weights(new_model, old_model):
    """Return the per-layer parameter delta {layer_name: new - old}.

    The old state dict is fetched once up front: ``state_dict()`` rebuilds the
    whole mapping on every call, so calling it inside the loop (as the original
    did) cost O(n) per layer. Indexing with ``[layer_name]`` also raises a clear
    KeyError for a missing layer instead of the original's confusing
    ``params - None`` TypeError from ``.get()``.
    """
    old_state = old_model.state_dict()
    weight_updates = {}
    for layer_name, params in new_model.state_dict().items():
        weight_updates[layer_name] = params - old_state[layer_name]
    return weight_updates

测试代码如下:
测试代码如下。有意思的一点在于:我先计算出了 update = model2 - model1,
但随后验证 model1 + update == model2 时发现两者并不相等。
思考之后,推测原因是浮点运算过程中存在精度丢失。

import torch
from torch import nn
from torchvision import transforms, datasets, models
from torch.utils.data import DataLoader

def weight_init(m):
    """Initialize one module's parameters according to its layer type.

    Intended to be passed to ``model.apply(weight_init)``:
    - Linear: Xavier-normal weights, zero bias.
    - Conv2d: Kaiming-normal weights (fan_out, ReLU gain).
    - BatchNorm2d: unit scale, zero shift.
    Any other module type is left untouched.
    """
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)
        nn.init.constant_(m.bias, 0)
        return
    # Convolutions get a ReLU-aware Kaiming initialization.
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        return
    # Batch-norm layers start as the identity affine transform.
    if isinstance(m, nn.BatchNorm2d):
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)

if __name__ == '__main__':
    # Build model1 and model2 as the "before"/"after" models.
    # FIX: the original mixed tab- and space-indented lines in this suite,
    # which is a TabError under Python 3; indentation normalized to 4 spaces.
    model1 = models.get_model("resnet18")
    model1.apply(weight_init)
    model2 = models.get_model("resnet18")
    model2.apply(weight_init)
    weight_updates = compare_weights(model2, model1)
    # Temporary state dict holding the patched parameters.
    # NOTE: dict.copy() is shallow — the tensors still alias model1's storage,
    # so the in-place += below already mutates model1 directly.
    updated_params = model1.state_dict().copy()

    for layer_name, update in weight_updates.items():
        # Guard: the layer must exist in model1 with a matching shape.
        if layer_name in updated_params and update.shape == updated_params[layer_name].shape:
            # Apply the delta in place.
            updated_params[layer_name] += update
        else:
            print(f"Warning: Layer {layer_name} not found or shape mismatch, skipping update.")

    # Load the patched parameters back into model1 (redundant given the
    # aliasing noted above, but harmless and keeps the intent explicit).
    model1.load_state_dict(updated_params)

    for layer_name, params in model1.state_dict().items():
        ts = params - model2.state_dict().get(layer_name)
        # Compare against a tolerance, not exact zero: float arithmetic loses
        # precision. FIX: use the max absolute element instead of the plain
        # sum — positive and negative differences cancel in a sum, and a large
        # negative total would wrongly pass the original `> 1e-6` test.
        max_abs_diff = torch.max(torch.abs(ts)).item()
        if max_abs_diff > 1e-6:
            # FIX: the original message ended at "差距为" without the value.
            print(f"{layer_name}更新后与原有的不匹配,差距为{max_abs_diff}")
        else:
            print(f"{layer_name}更新后与原有的匹配")

2.2 客户端代码

import numpy as np
import torch.utils.data
from tqdm import tqdm

'''
conf 配置文件
model 模型
train_dataset 数据集
class_ratios 从数据集中筛选出一部分 class_ratios = {0: 0.5, 1: 0.5,..., 8: 0.5, 9: 0.5}
id 客户端的标识
'''

class Client(object):
    """A federated-learning client.

    Per round: overwrite the local model with the server's global weights,
    train locally, then record the per-layer weight delta (local - global)
    for server-side aggregation.
    """

    def __init__(self, conf, model, device, train_loader, id=1):
        self.client_id = id                 # client identifier
        self.conf = conf                    # config dict: lr, momentum, local_epochs, weight, ...
        self.local_model = model            # the client's local model
        self.train_loader = train_loader    # DataLoader with this client's training data
        self.grad_update = dict()           # per-layer weight delta after local training
        self.weight = conf['weight']        # this client's weight in global aggregation
        self.device = device                # training device

        self.local_model.to(self.device)    # move the model onto the training device

    def train(self, model):
        """Run one federated round against the given global model."""
        self._before_train(model)
        self._local_train()
        self._after_train(model)

    def _before_train(self, model):
        self._load_global_model(model)

    # Overwrite the local model with the server-supplied global model.
    def _load_global_model(self, model):
        # FIX: one load_state_dict call copies every tensor at once; the
        # original rebuilt self.local_model.state_dict() for every layer,
        # which is O(n) work per layer. load_state_dict also handles
        # cross-device copies internally.
        self.local_model.load_state_dict(model.state_dict())

    def _local_train(self):
        """Train the local model for conf['local_epochs'] epochs with SGD."""
        optimizer = torch.optim.SGD(
            self.local_model.parameters(),
            lr=self.conf['lr'],
            momentum=self.conf['momentum']
        )
        self.local_model.train()
        loss = 0
        for epoch in range(self.conf['local_epochs']):
            for batch in tqdm(self.train_loader, desc=f"Epoch {epoch + 1}/{self.conf['local_epochs']}"):
                data, target = batch
                # Move the batch onto the training device.
                data = data.to(self.device)
                target = target.to(self.device)
                # Standard step: zero grads, forward, loss, backward, update.
                optimizer.zero_grad()
                output = self.local_model(data)
                loss = torch.nn.functional.cross_entropy(output, target)
                loss.backward()
                optimizer.step()
            # `loss` here is the last batch's loss of the epoch.
            print(f"Client{self.client_id}----Epoch {epoch} done.Loss {loss}")

    def _after_train(self, model):
        self._cal_update_weights(model)

    def _cal_update_weights(self, old_model):
        """Store {layer: local - old} into self.grad_update."""
        # Fetch the old state dict once; state_dict() rebuilds the whole
        # mapping on every call, so the per-layer .get() lookup was O(n^2).
        old_state = old_model.state_dict()
        self.grad_update = {
            layer_name: params - old_state[layer_name]
            for layer_name, params in self.local_model.state_dict().items()
        }

2.3 服务器代码

import torch.utils.data
import torchvision.datasets as datasets
from torchvision import models
from torchvision.transforms import transforms

from utils.CommonUtils import copy_model_params


# 服务端
class Server(object):
    def __init__(self, conf, eval_dataset, device):
        self.conf = conf
        # 全局老模型
        self.old_model = models.get_model(self.conf["model_name"])
        # 全局的新模型
        self.global_model = models.get_model(self.conf["model_name"])
        # 创建时保持新老模型的参数是一致的
        copy_model_params(self.old_model,self.global_model)
        # 根据客户端上传的梯度进行排列组合,用于测量贡献度的模型
        self.sub_model = models.get_model(self.conf["model_name"])
        self.eval_loader = torch.utils.data.DataLoader(eval_dataset,batch_size=self.conf["batch_size"],shuffle=True)
        self.accuracy_history = []  # 保存accuracy的数组
        self.loss_history = []  # 保存loss的数组

        self.device = device
        self.old_model.to(device)
        self.global_model.to(device)
        self.sub_model.to(device)


    # 模型重构
    def model_aggregate(self, clients, target_model):
        if target_model == self.global_model:
            print("++++++++全局模型更新++++++++")
            # 更新一下老模型参数
            copy_model_params(self.old_model,self.global_model)
        else:
            print("========子模型重构========")
        sum_weight = 0
        # 计算总的权重
        for client in clients:
            sum_weight += client.weight

        # 将old_model的模型参数赋值给sub_model
        copy_model_params(self.sub_model, self.old_model)

        # 初始化一个空字典来累积客户端的模型更新
        aggregated_updates = {}
        # 遍历每个客户端
        for client in clients:
            # 根据客户端的权重比例聚合更新
            for name, update in client.grad_update.items():
                if name not in aggregated_updates:
                    aggregated_updates[name] = update * client.weight / sum_weight
                else:
                    aggregated_updates[name] += update * client.weight / sum_weight

        # 应用聚合后的更新到sub_model
        for name, param in target_model.state_dict().items():
            if name in aggregated_updates:
                param.copy_(param + aggregated_updates[name])  # 累加更新到当前层参数上


    # 定义模型评估函数
    def model_eval(self,target_model):
        target_model.eval()
        total_loss = 0.0
        correct = 0
        dataset_size = 0
        for batch_id,batch in enumerate(self.eval_loader):
            data,target = batch
            dataset_size += data.size()[0]
            # 放入和模型对应的设备
            data = data.to(self.device)
            target = target.to(self.device)
            # 模型预测
            output = target_model(data)
            # 把损失值聚合起来
            total_loss += torch.nn.functional.cross_entropy(output,target,reduction='sum').item()
            # 获取最大的对数概率的索引值
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
        # 计算准确率
        acc = 100.0 * (float(correct) / float(dataset_size))
        # 计算损失值
        total_l = total_loss / dataset_size
        # 将accuracy和loss保存到数组中
        self.accuracy_history.append(acc)
        self.loss_history.append(total_l)
        if target_model == self.global_model:
            print(f"++++++++全局模型评估++++++++acc:{acc}  loss:{total_l}")
        else:
            print(f"========子模型评估========acc:{acc}  loss:{total_l}")
        return acc,total_l

    def save_results_to_file(self):
        # 将accuracy和loss保存到文件中
        with open("fed_accuracy_history.txt", "w") as f:
            for acc in self.accuracy_history:
                f.write("{:.2f}\n".format(acc))

        with open("fed_loss_history.txt", "w") as f:
            for loss in self.loss_history:
                f.write("{:.4f}\n".format(loss))

2.4 Utils

def copy_model_params(target_model, source_model):
    """Copy every parameter/buffer from source_model into target_model in place.

    The target state dict is fetched once up front: ``state_dict()`` rebuilds
    the whole mapping on every call, so the original per-layer lookup inside
    the loop was O(n^2). The redundant ``.clone()`` is dropped — ``copy_``
    already copies the data, so cloning first only allocated a throwaway tensor.
    """
    target_state = target_model.state_dict()
    for name, param in source_model.state_dict().items():
        target_state[name].copy_(param)

3. 运行测试代码

import json

import torch
from torch import nn
from torch.utils.data import DataLoader, Subset
from torchvision import models
from torchvision import transforms, datasets
from client.Client import Client
from server.Server import Server

with open("../conf/client1.json", 'r') as f:
    conf = json.load(f)

with open("../conf/server1.json", 'r') as f:
    serverConf = json.load(f)

# FIX: the original normalized with MNIST statistics ((0.1307,), (0.3081,)),
# which are both the wrong values and single-channel; CIFAR-10 images have
# three channels, so use the standard per-channel CIFAR-10 mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))
])

train_dataset = datasets.CIFAR10(root='../data/', train=True, download=True, transform=transform)
eval_dataset = datasets.CIFAR10(root='../data/', train=False, download=True, transform=transform)

# Split the training set evenly between the two clients.
total_samples = len(train_dataset)
# The split below assumes an even sample count; adjust if there is a remainder.
assert total_samples % 2 == 0, "数据集样本数需为偶数以便完全平分"

split_point = total_samples // 2

# First half for client 1, second half for client 2.
train_dataset_first_half = Subset(train_dataset, range(0, split_point))
train_dataset_second_half = Subset(train_dataset, range(split_point, total_samples))

batch_size = 32

train_loader_first_half = DataLoader(train_dataset_first_half, shuffle=True, batch_size=batch_size, num_workers=2)
train_loader_second_half = DataLoader(train_dataset_second_half, shuffle=True, batch_size=batch_size, num_workers=2)

# Prefer the GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

local_model = models.get_model("resnet18")
local_model2 = models.get_model("resnet18")

server = Server(serverConf, eval_dataset, device)
client1 = Client(conf, local_model, device, train_loader_first_half, 1)
client2 = Client(conf, local_model2, device, train_loader_second_half, 2)

# Two federated rounds: train both clients, aggregate into the global model,
# then rebuild/evaluate single-client sub-models to gauge each client's contribution.
for i in range(2):
    client1.train(server.global_model)
    client2.train(server.global_model)
    server.model_aggregate([client1, client2], server.global_model)
    server.model_eval(server.global_model)
    server.model_aggregate([client1], server.sub_model)
    server.model_eval(server.sub_model)
    server.model_aggregate([client2], server.sub_model)
    server.model_eval(server.sub_model)

  • 16
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 完全去中心化的联邦学习实例python源码+项目说明+模型+数据.zip 【资源说明】 1、该资源内项目代码都是经过测试运行成功,功能正常的情况下才上传的,请放心下载使用。 2、适用人群:主要针对计算机相关专业(如计科、信息安全、数据科学与大数据技术、人工智能、通信、物联网、数学、电子信息等)的同学或企业员工下载使用,具有较高的学习借鉴价值。 3、不仅适合小白学习实战练习,也可作为大作业、课程设计、毕设项目、初期项目立项演示等,欢迎下载,互相学习,共同进步!

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值