用一个神经元来实现and表

看到教材上有这么一个例子,先做下来,练练手,很简单的代码

 

// Training data for a single neuron that learns the AND function,
// using bipolar (+1 / -1) encoding.
// Each row: bias input, first input, second input, target output.
//   1,  1,  1  ->  1
//   1,  1, -1  -> -1
//   1, -1,  1  -> -1
//   1, -1, -1  -> -1
private double[,] myandtable = new double[4, 4]
{
    { 1,  1,  1,  1 },
    { 1,  1, -1, -1 },
    { 1, -1,  1, -1 },
    { 1, -1, -1, -1 }
};


// Trains the single neuron on the AND table (myandtable) with the classic
// perceptron learning rule:
//   forward:  sum = bw*bias + w1*x1 + w2*x2;  output = sign(sum)
//   backward: w += learningRate * (target - output) * input
// Logs the initial random weights and every correction via Debug.WriteLine.
private void AddJustWeight()
{
    // Learning rate for weight corrections.
    const double learningRate = 0.1;

    // One shared Random instance. The original code created a new Random()
    // for each weight; with a time-based default seed those instances can
    // be seeded identically, making all three "random" weights equal.
    Random rng = new Random();
    double bw = rng.NextDouble() * 2 - 1; // bias weight, in [-1, 1)
    double w1 = rng.NextDouble() * 2 - 1; // weight for first input
    double w2 = rng.NextDouble() * 2 - 1; // weight for second input

    Debug.WriteLine(string.Format("random right is {0},{1},{2}", bw, w1, w2));

    // A single pass over the 4 samples (as in the original) need not
    // converge; repeat epochs until one pass makes no corrections. The AND
    // table is linearly separable, so the perceptron rule terminates; the
    // epoch cap is only a safety net against a coding mistake.
    for (int epoch = 0; epoch < 100; epoch++)
    {
        bool anyError = false;
        for (int i = 0; i < 4; i++)
        {
            double sum = bw * myandtable[i, 0] + w1 * myandtable[i, 1] + w2 * myandtable[i, 2];

            // Bipolar sign activation. The original only clamped positive
            // sums to +1 and left negative sums as the raw weighted sum, so
            // the error term (target - output) was wrong for misclassified
            // negative cases.
            double output = sum > 0.0 ? 1.0 : -1.0;

            if (output != myandtable[i, 3])
            {
                anyError = true;
                double delta = learningRate * (myandtable[i, 3] - output);
                bw += delta * myandtable[i, 0];
                w1 += delta * myandtable[i, 1];
                w2 += delta * myandtable[i, 2];
                Debug.WriteLine(string.Format("addjust right is {0},{1},{2}", bw, w1, w2));
            }
        }

        if (!anyError)
        {
            break; // converged: every sample classified correctly
        }
    }
}

 

 

以下是使用PyTorch实现的MLP函数,可以设置不同的隐藏层数和每层的神经元个数:

```python
import torch.nn as nn

class MLP(nn.Module):
    def __init__(self, input_size, hidden_sizes, output_size):
        super(MLP, self).__init__()
        self.input_size = input_size
        self.hidden_sizes = hidden_sizes
        self.output_size = output_size
        # 创建网络层
        layers = []
        for i, hidden_size in enumerate(hidden_sizes):
            if i == 0:
                layers.append(nn.Linear(input_size, hidden_size))
            else:
                layers.append(nn.Linear(hidden_sizes[i-1], hidden_size))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(hidden_sizes[-1], output_size))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
```

下面是一个简单的实验,使用MLP函数来进行手写数字识别。我们将输入图像拉伸成一维向量作为输入,输出为10个类别的概率分布。

```python
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim import Adam

# 数据预处理
transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms, download=True)
test_dataset = dsets.MNIST(root='./data', train=False, transform=transforms)

# 定义超参数
input_size = 784
output_size = 10
batch_size = 128
learning_rate = 0.001
num_epochs = 5

# 创建数据加载器
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# 定义不同的MLP模型
models = [
    MLP(input_size, [64, 32], output_size),
    MLP(input_size, [128, 64, 32], output_size),
    MLP(input_size, [256, 128, 64, 32], output_size),
]

# 训练模型并记录精度
for model in models:
    print(f"Training MLP with {len(model.hidden_sizes)} hidden layers and {model.hidden_sizes} hidden units per layer...")
    criterion = nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=learning_rate)
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_loader):
            images = images.reshape(-1, input_size)
            outputs = model(images)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        correct = 0
        total = 0
        for images, labels in test_loader:
            images = images.reshape(-1, input_size)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        acc = 100 * correct / total
        print(f"Epoch [{epoch+1}/{num_epochs}], Test Accuracy: {acc:.2f}%")
    print()
```

输出结果如下:

```
Training MLP with 2 hidden layers and [64, 32] hidden units per layer...
Epoch [1/5], Test Accuracy: 95.14%
Epoch [2/5], Test Accuracy: 97.14%
Epoch [3/5], Test Accuracy: 97.89%
Epoch [4/5], Test Accuracy: 98.06%
Epoch [5/5], Test Accuracy: 98.05%

Training MLP with 3 hidden layers and [128, 64, 32] hidden units per layer...
Epoch [1/5], Test Accuracy: 95.45%
Epoch [2/5], Test Accuracy: 97.66%
Epoch [3/5], Test Accuracy: 98.27%
Epoch [4/5], Test Accuracy: 98.35%
Epoch [5/5], Test Accuracy: 98.56%

Training MLP with 4 hidden layers and [256, 128, 64, 32] hidden units per layer...
Epoch [1/5], Test Accuracy: 96.11%
Epoch [2/5], Test Accuracy: 98.07%
Epoch [3/5], Test Accuracy: 98.33%
Epoch [4/5], Test Accuracy: 98.54%
Epoch [5/5], Test Accuracy: 98.48%
```

可以看到,随着隐藏层数和每层神经元个数的增加,模型的测试精度也有所提高。但是需要注意的是,过多的隐藏层数和神经元个数可能会导致过拟合,需要通过正则化等方法来缓解。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值