A simple BP network implemented in PyTorch and plain Python (NumPy)

Build a BP (backpropagation) network with hand-specified parameters, then run one forward pass, one backward pass, and one test pass.

Input: [0.05, 0.10]
Target output: [0.01, 0.99]
Hidden layer: 2 neurons
w1-w8: 0.35, 0.20, 0.55, 0.95, 0.60, 0.75, 0.60, 0.95
b1, b2 = 0.75, 0.55
Learning rate: 0.5
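To make the setup concrete, here is the first forward pass written out by hand (a minimal sketch; the scalar names i1, i2, h1, h2, o1, o2 are local to this example, and the weight-to-neuron mapping follows the matrix layout used in the code below):

import numpy as np

sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

i1, i2 = 0.05, 0.10
w1, w2, w3, w4 = 0.35, 0.20, 0.55, 0.95
w5, w6, w7, w8 = 0.60, 0.75, 0.60, 0.95
b1, b2 = 0.75, 0.55

h1 = sigmoid(w1 * i1 + w2 * i2 + b1)  # hidden neuron 1
h2 = sigmoid(w3 * i1 + w4 * i2 + b1)  # hidden neuron 2
o1 = sigmoid(w5 * h1 + w6 * h2 + b2)  # output neuron 1
o2 = sigmoid(w7 * h1 + w8 * h2 + b2)  # output neuron 2
print(h1, h2, o1, o2)

Both implementations below compute exactly these values, just in matrix form.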

1. Implementation in PyTorch

import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset,DataLoader

device = "cuda" if torch.cuda.is_available() else "cpu"


data = np.array([np.array([0.05, 0.10]).reshape(-1, 1)])  # sample, shape (1, 2, 1)
label = np.array([np.array([0.01, 0.99]).reshape(-1)])  # label, shape (1, 2)


W1 = np.array([[0.35, 0.20], [0.55, 0.95]])  # [w1, w2], [w3, w4]
b1 = np.array([0.75, 0.75])  # [b1, b1]
W2 = np.array([[0.60, 0.75], [0.60, 0.95]])  # [w5, w6], [w7, w8]
b2 = np.array([0.55, 0.55])  # [b2, b2]
www1 = torch.tensor(W1)  # convert the numpy arrays to (float64) tensors
bbb1 = torch.tensor(b1)
www2 = torch.tensor(W2)
bbb2 = torch.tensor(b2)


# custom dataset
class MyDataset(Dataset):
    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __getitem__(self, index):
        return self.data[index], self.label[index]

    def __len__(self):
        return len(self.label)


training = MyDataset(data, label)
batch_size = 1

# Create data loaders.
# batch the dataset with the given batch_size
train_dataloader = DataLoader(training, batch_size=batch_size)
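# Optional sanity check: peek at one batch to confirm the shapes.
# X comes out as (1, 2, 1) float64; nn.Flatten in the model turns it into (1, 2).
# X_peek, y_peek = next(iter(train_dataloader))
# print(X_peek.shape, y_peek.shape)  # torch.Size([1, 2, 1]) torch.Size([1, 2])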



# define the network
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        # a fully connected network with a single hidden layer:
        # 2 neurons per layer, sigmoid activation after each linear layer
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(2, 2)
        self.activate1 = nn.Sigmoid()
        self.fc2 = nn.Linear(2, 2)
        self.activate2 = nn.Sigmoid()


    def forward(self, x):
        # forward pass
        x = self.flatten(x)  # flatten the input sample into a row vector, shape (1, 2)
        x = self.fc1(x)
        x = self.activate1(x)
        x = self.fc2(x)
        logits = self.activate2(x)
        return logits



    def initialize_weights(self):
        # custom weight initialization: wrapping a tensor in nn.Parameter
        # replaces the default random initialization
        self.fc1.weight = torch.nn.Parameter(www1)
        self.fc1.bias = torch.nn.Parameter(bbb1, requires_grad=False)  # biases are frozen (not trained)
        self.fc2.weight = torch.nn.Parameter(www2)
        self.fc2.bias = torch.nn.Parameter(bbb2, requires_grad=False)



model = NeuralNetwork().to(device)
model.initialize_weights()
# print('fc1', model.fc1.weight, model.fc1.bias)  # print the parameters to verify the custom init
# print('fc2', model.fc2.weight, model.fc2.bias)

loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.5)  # learning rate 0.5; with a much smaller step the weights barely change after one update in this example
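# Note: nn.MSELoss defaults to reduction='mean', so the loss is averaged over the
# 2 output elements and the gradient w.r.t. each output is 2/2 * (pred - y) = (pred - y).
# The manual NumPy version below uses the same convention, so both runs should
# yield the same updated weights.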


def train(dataloader, model, loss_fn, optimizer):
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        pred = model(X)
        # print('pred', pred.shape, pred)  # prediction, shape (1, 2)
        loss = loss_fn(pred, y)  # the loss is computed on row vectors

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(model.fc1.weight, model.fc1.bias)
        print(model.fc2.weight, model.fc2.bias)  # print the parameters after one training step




def test(dataloader, model, loss_fn):

    model.eval()
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            print(pred)
           

train(train_dataloader, model, loss_fn, optimizer)
test(train_dataloader, model, loss_fn)

When building the custom dataset, each sample should be a column vector and each label a 1-D array. Here the single sample is [0.05, 0.10]^T (a 2x1 column vector) and the label is the 1-D array (0.01, 0.99).
When assigning custom parameters, first check the format PyTorch uses by default: print the shapes of the weights and biases of a freshly constructed network (see the short snippet after the parameter listing below).
Following that format, the custom parameters are:

W1 = np.array([[0.35, 0.20], [0.55, 0.95]])  # [w1,w2],[w3,w4]
b1 = np.array([0.75, 0.75])  # [b1,b1]
W2 = np.array([[0.60, 0.75], [0.60, 0.95]])  # [w5,w6],[w7,w8]
b2 = np.array([0.55, 0.55])  # [b2,b2]
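A quick way to inspect the default format (a minimal standalone sketch):

import torch.nn as nn

layer = nn.Linear(2, 2)
print(layer.weight.shape)  # torch.Size([2, 2]) -- one row per output neuron
print(layer.bias.shape)    # torch.Size([2])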

2. Implementation in plain Python (NumPy):

import numpy as np


f = lambda x: 1.0/(1.0 + np.exp(-x))  # sigmoid activation
# define the input vector, network weights, and biases
x = np.array([0.05, 0.10]).reshape([-1, 1])
label = np.array([0.01, 0.99]).reshape([-1,1])
W1 = np.array([0.35, 0.20, 0.55, 0.95]).reshape([-1, 2])
b1 = np.array([0.75, 0.75]).reshape([-1, 1])
W2 = np.array([0.60, 0.75, 0.60, 0.95]).reshape([-1, 2])
b2 = np.array([0.55, 0.55]).reshape([-1, 1])

# forward pass (first)
h = f(np.dot(W1, x) + b1)
output = f(np.dot(W2, h) + b2)
# print("h:{}\nout:{}".format(h, output))

# print the hidden-layer and output-layer activations
for i in range(len(h)):
    print("h{}:{}".format(i + 1, h[i][0]))

for i in range(len(output)):
    print("o{}:{}".format(i + 1, output[i][0]))

# backward pass
a = np.multiply(output, 1 - output)  # derivative of the sigmoid at the output layer
b = np.multiply(a, output - label)   # delta of the output layer
c = np.dot(W2.T, b)                  # back-propagate the delta through W2 (note the transpose)
d = np.multiply(h, 1 - h)            # derivative of the sigmoid at the hidden layer
dW1 = np.dot(np.multiply(c, d), np.transpose(x))  # gradient w.r.t. W1
dW2 = np.dot(b, np.transpose(h))                  # gradient w.r.t. W2
# parameter update
lr = 0.5  # learning rate
W1 = W1 - lr * dW1
W2 = W2 - lr * dW2
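# Note: b1 and b2 are deliberately left unchanged, mirroring requires_grad=False
# on the biases in the PyTorch version above.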
# print("W1:{}\nW2:{}".format(W1, W2))

# print the updated weights
for i in range(W1.size):
    print("w{}:{}".format(i + 1, W1.reshape(1, -1)[0, i]))
    
for i in range(W2.size):
    print("w{}:{}".format(i + 5, W2.reshape(1, -1)[0, i]))

# forward pass (second, with the updated weights)
h = f(np.dot(W1, x) + b1)
output = f(np.dot(W2, h) + b2)
# print("h:{}\nout:{}".format(h, output))

# print the hidden-layer and output-layer activations after the update
for i in range(len(h)):
    print("new_h{}:{}".format(i + 1, h[i][0]))

for i in range(len(output)):
    print("new_o{}:{}".format(i + 1, output[i][0]))
