【深度学习】Pytorch学习笔记(三)

实现二分类神经网络(数据线性可分):

import torch
import torch.nn.functional as F
import numpy as np
import warnings
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score

warnings.filterwarnings("ignore")

# Class 1: 50 points drawn uniformly from [0.5, 2.15)^2.
x1 = torch.from_numpy(np.random.uniform(0.5, 2.15, (50, 2)))
y1 = torch.ones((50, 1))
# Class 0: 50 points drawn uniformly from [2.15, 3.8)^2.
x2 = torch.from_numpy(np.random.uniform(2.15, 3.8, (50, 2)))
y2 = torch.zeros((50, 1))
y = torch.cat((y1, y2))              # stack labels row-wise -> (100, 1)
x = torch.cat((x1, x2)).float()      # stack features row-wise, cast to float32
x = torch.cat((x, y), dim=1)         # append label as a third column -> (100, 3)
x = x[torch.randperm(x.size(0))]     # shuffle rows before splitting
# 70/30 train/test split; labels cast to int64 as CrossEntropyLoss requires.
x_train, y_train = x[:70, :2], x[:70, 2].long()
x_test, y_test = x[70:, :2], x[70:, 2].long()

# Visualize the raw data: class 1 in magenta, class 0 in cyan.
# Boolean masks draw each class with a single scatter call instead of
# one matplotlib artist per point (the original looped over all rows).
# NOTE(review): both branches of the original used marker "^"; a second
# marker may have been intended for class 0 — confirm with the author.
pos = x[:, 2] == 1.0
plt.scatter(x[pos, 0].numpy(), x[pos, 1].numpy(), marker="^", color='m', edgecolors='k', s=100)
plt.scatter(x[~pos, 0].numpy(), x[~pos, 1].numpy(), marker="^", color='c', edgecolors='k', s=100)
plt.title("Raw data")
plt.show()

class Model(torch.nn.Module):
    """Two-layer feed-forward classifier: Linear -> ReLU -> Linear.

    The forward pass returns raw logits of shape (batch, n_output).
    The original applied F.sigmoid to the output, but this model is
    trained with torch.nn.CrossEntropyLoss, which expects unnormalized
    logits (it applies log_softmax internally); squashing with sigmoid
    first distorts the loss surface and slows training.  Since sigmoid
    is monotonic, argmax over logits gives the same predicted class as
    argmax over sigmoid outputs, so prediction code is unaffected.
    (F.sigmoid is also deprecated in favor of torch.sigmoid.)
    """

    def __init__(self, n_features: int, n1_hidden: int, n_output: int) -> None:
        """Build the two linear layers.

        n_features: input dimensionality
        n1_hidden:  hidden-layer width
        n_output:   number of classes
        """
        super(Model, self).__init__()
        self.hidden_one = torch.nn.Linear(n_features, n1_hidden)
        self.out_layer = torch.nn.Linear(n1_hidden, n_output)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Forward pass: ReLU-activated hidden layer, then raw logits."""
        output = F.relu(self.hidden_one(input))
        return self.out_layer(output)

model = Model(2, 30, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-1)   # optimizer over all model parameters
loss_func = torch.nn.CrossEntropyLoss()  # cross-entropy on raw logits

print("Training-------------------")
for t in range(100):
    classification = model(x_train)             # forward pass over the whole training set
    loss = loss_func(classification, y_train)   # cross-entropy against int64 labels

    optimizer.zero_grad()   # clear gradients accumulated by the previous step
    loss.backward()         # backpropagate: compute gradients for every parameter
    optimizer.step()        # apply the parameter update

print("Testing--------------------")
# Inference only: wrap in no_grad() so no autograd graph is built
# (the original forward pass here tracked gradients needlessly).
with torch.no_grad():
    classification_pre = model(x_test)
y_pre = classification_pre.argmax(dim=1).numpy()   # index of the largest logit = predicted class
print("Testing Accuracy: ", accuracy_score(y_pre, y_test.numpy()))  # fraction of correct predictions
Training-------------------
Testing--------------------
Testing Accuracy:  1.0

(插图:Raw data —— 两类样本的原始散点图,品红色三角为类别 1,青色三角为类别 0)
实现二分类神经网络(数据非线性可分):

import torch
import warnings
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_moons

warnings.filterwarnings("ignore")
cm_bright = ListedColormap(['#FF0000', '#0000FF'])

# Build a jittered linearly separable dataset; kept in `linearly_separable`.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
# NOTE(review): X and y are immediately replaced by the moons dataset, so
# the dataset above is never used below — confirm whether it was intended.
X, y = make_moons(noise=0.3, random_state=0)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Convert to torch tensors: float32 features, int64 labels (CrossEntropyLoss requirement).
x_train = torch.from_numpy(x_train).float()
x_test = torch.from_numpy(x_test).float()
y_train = torch.from_numpy(y_train).long()
y_test = torch.from_numpy(y_test).long()

# Plot training points (opaque) and test points (half-transparent)
# with the same colormap and styling.
common = dict(cmap=cm_bright, edgecolors='k', s=80)
plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, **common)
plt.scatter(x_test[:, 0], x_test[:, 1], c=y_test, alpha=0.5, **common)
plt.show()

class Model(torch.nn.Module):
    """Three-layer MLP classifier: Linear -> ReLU -> Linear -> ReLU -> Linear.

    The forward pass returns raw logits of shape (batch, n_output).
    The original applied F.sigmoid to the output, but the model is
    trained with torch.nn.CrossEntropyLoss, which expects unnormalized
    logits (it applies log_softmax internally); sigmoid-squashed values
    distort the loss.  argmax over logits equals argmax over sigmoid
    outputs (sigmoid is monotonic), so prediction code is unaffected.
    """

    def __init__(self, n_features: int, n1_hidden: int, n2_hidden: int, n_output: int) -> None:
        """Build the three linear layers.

        n_features: input dimensionality
        n1_hidden:  first hidden-layer width
        n2_hidden:  second hidden-layer width
        n_output:   number of classes
        """
        super(Model, self).__init__()
        self.hidden_one = torch.nn.Linear(n_features, n1_hidden)
        self.hidden_two = torch.nn.Linear(n1_hidden, n2_hidden)
        self.out_layer = torch.nn.Linear(n2_hidden, n_output)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Forward pass: two ReLU hidden layers, then raw logits."""
        output = F.relu(self.hidden_one(input))
        output = F.relu(self.hidden_two(output))
        return self.out_layer(output)

model = Model(2, 50, 10, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)   # optimizer over all model parameters
loss_func = torch.nn.CrossEntropyLoss()  # cross-entropy on raw logits

print("Training-------------------")
for t in range(1000):
    classification = model(x_train)             # forward pass over the whole training set
    loss = loss_func(classification, y_train)   # cross-entropy against int64 labels

    optimizer.zero_grad()   # clear gradients accumulated by the previous step
    loss.backward()         # backpropagate: compute gradients for every parameter
    optimizer.step()        # apply the parameter update
    if t % 5 == 0:
        # .detach() replaces the deprecated .data: same printed value,
        # without bypassing autograd's version tracking.
        print("CrossEntropy: ", loss.detach().numpy())

print("Testing--------------------")
# Inference only: wrap in no_grad() so no autograd graph is built
# (the original forward pass here tracked gradients needlessly).
with torch.no_grad():
    classification_pre = model(x_test)
y_test_pre = classification_pre.argmax(dim=1).numpy()   # index of the largest logit = predicted class
print("Testing Accuracy: ", accuracy_score(y_test_pre, y_test.numpy()))  # fraction of correct predictions
Training-------------------
CrossEntropy:  0.6893401
CrossEntropy:  0.60950667
CrossEntropy:  0.5358823
CrossEntropy:  0.485409
CrossEntropy:  0.45639047
CrossEntropy:  0.4410974
CrossEntropy:  0.4293453
CrossEntropy:  0.42015803
CrossEntropy:  0.41060606
CrossEntropy:  0.40126452
CrossEntropy:  0.38995636
CrossEntropy:  0.3775206
CrossEntropy:  0.36911288
CrossEntropy:  0.36071128
CrossEntropy:  0.3529678
CrossEntropy:  0.3482838
CrossEntropy:  0.34492618
CrossEntropy:  0.34195
CrossEntropy:  0.33962718
CrossEntropy:  0.33792797
CrossEntropy:  0.336431
CrossEntropy:  0.3351852
CrossEntropy:  0.3341276
CrossEntropy:  0.33325976
CrossEntropy:  0.33256066
CrossEntropy:  0.33196408
CrossEntropy:  0.33147833
CrossEntropy:  0.33106938
CrossEntropy:  0.33071905
CrossEntropy:  0.33041888
CrossEntropy:  0.33016908
CrossEntropy:  0.32994902
CrossEntropy:  0.3297502
CrossEntropy:  0.32957315
CrossEntropy:  0.3294134
CrossEntropy:  0.32925922
CrossEntropy:  0.32909274
CrossEntropy:  0.32885364
CrossEntropy:  0.32834315
CrossEntropy:  0.32773632
CrossEntropy:  0.32717824
CrossEntropy:  0.32655054
CrossEntropy:  0.32609588
CrossEntropy:  0.32554683
CrossEntropy:  0.32522535
CrossEntropy:  0.3247946
CrossEntropy:  0.32441214
CrossEntropy:  0.32398418
CrossEntropy:  0.32359704
CrossEntropy:  0.32332298
CrossEntropy:  0.3229001
CrossEntropy:  0.32256198
CrossEntropy:  0.3223457
CrossEntropy:  0.32196796
CrossEntropy:  0.32155162
CrossEntropy:  0.32127237
CrossEntropy:  0.32093886
CrossEntropy:  0.32065076
CrossEntropy:  0.32031897
CrossEntropy:  0.3200694
CrossEntropy:  0.31971872
CrossEntropy:  0.31947976
CrossEntropy:  0.31913522
CrossEntropy:  0.31883866
CrossEntropy:  0.3185804
CrossEntropy:  0.31833082
CrossEntropy:  0.31814486
CrossEntropy:  0.31790042
CrossEntropy:  0.3176914
CrossEntropy:  0.31740978
CrossEntropy:  0.3171655
CrossEntropy:  0.31695986
CrossEntropy:  0.3167738
CrossEntropy:  0.31658953
CrossEntropy:  0.31644794
CrossEntropy:  0.31624088
CrossEntropy:  0.31610516
CrossEntropy:  0.31596395
CrossEntropy:  0.31586203
CrossEntropy:  0.31572524
CrossEntropy:  0.31557816
CrossEntropy:  0.31547117
CrossEntropy:  0.31535515
CrossEntropy:  0.31528077
CrossEntropy:  0.3151677
CrossEntropy:  0.31507483
CrossEntropy:  0.3149873
CrossEntropy:  0.31492448
CrossEntropy:  0.3148493
CrossEntropy:  0.31479803
CrossEntropy:  0.31473917
CrossEntropy:  0.31466782
CrossEntropy:  0.31460458
CrossEntropy:  0.3145462
CrossEntropy:  0.31449944
CrossEntropy:  0.31445935
CrossEntropy:  0.31439894
CrossEntropy:  0.31434867
CrossEntropy:  0.31430647
CrossEntropy:  0.3142721
CrossEntropy:  0.31423426
CrossEntropy:  0.3142011
CrossEntropy:  0.31416646
CrossEntropy:  0.31413233
CrossEntropy:  0.31410325
CrossEntropy:  0.31407407
CrossEntropy:  0.31405437
CrossEntropy:  0.31402227
CrossEntropy:  0.31399584
CrossEntropy:  0.31396863
CrossEntropy:  0.31395158
CrossEntropy:  0.31392598
CrossEntropy:  0.3139136
CrossEntropy:  0.31389135
CrossEntropy:  0.313874
CrossEntropy:  0.3138553
CrossEntropy:  0.31384113
CrossEntropy:  0.31382692
CrossEntropy:  0.31380475
CrossEntropy:  0.31379092
CrossEntropy:  0.31377712
CrossEntropy:  0.31375965
CrossEntropy:  0.3137511
CrossEntropy:  0.31373438
CrossEntropy:  0.31372213
CrossEntropy:  0.31371096
CrossEntropy:  0.31369886
CrossEntropy:  0.3136886
CrossEntropy:  0.31367883
CrossEntropy:  0.31366745
CrossEntropy:  0.3136582
CrossEntropy:  0.31364965
CrossEntropy:  0.31363907
CrossEntropy:  0.31363142
CrossEntropy:  0.31362414
CrossEntropy:  0.31361386
CrossEntropy:  0.31360844
CrossEntropy:  0.31359988
CrossEntropy:  0.31359214
CrossEntropy:  0.31358448
CrossEntropy:  0.31357712
CrossEntropy:  0.31357044
CrossEntropy:  0.31356472
CrossEntropy:  0.31355855
CrossEntropy:  0.31355265
CrossEntropy:  0.3135458
CrossEntropy:  0.3135406
CrossEntropy:  0.31353483
CrossEntropy:  0.3135291
CrossEntropy:  0.3135251
CrossEntropy:  0.31351954
CrossEntropy:  0.31351504
CrossEntropy:  0.31351194
CrossEntropy:  0.31350532
CrossEntropy:  0.31350046
CrossEntropy:  0.31349596
CrossEntropy:  0.31349266
CrossEntropy:  0.3134887
CrossEntropy:  0.31348363
CrossEntropy:  0.31348032
CrossEntropy:  0.31347623
CrossEntropy:  0.31347343
CrossEntropy:  0.31346887
CrossEntropy:  0.3134656
CrossEntropy:  0.3134624
CrossEntropy:  0.31345952
CrossEntropy:  0.31345627
CrossEntropy:  0.3134525
CrossEntropy:  0.31344905
CrossEntropy:  0.31344616
CrossEntropy:  0.31344357
CrossEntropy:  0.3134402
CrossEntropy:  0.31343746
CrossEntropy:  0.31343508
CrossEntropy:  0.3134327
CrossEntropy:  0.3134292
CrossEntropy:  0.31342658
CrossEntropy:  0.3134243
CrossEntropy:  0.31342158
CrossEntropy:  0.31341928
CrossEntropy:  0.31341732
CrossEntropy:  0.31341434
CrossEntropy:  0.31341252
CrossEntropy:  0.31341013
CrossEntropy:  0.31340784
CrossEntropy:  0.31340632
CrossEntropy:  0.31340423
CrossEntropy:  0.3134018
CrossEntropy:  0.31339988
CrossEntropy:  0.3133982
CrossEntropy:  0.31339604
CrossEntropy:  0.31339425
CrossEntropy:  0.31339213
CrossEntropy:  0.3133905
CrossEntropy:  0.3133886
CrossEntropy:  0.31338698
CrossEntropy:  0.3133853
CrossEntropy:  0.31338364
CrossEntropy:  0.31338197
CrossEntropy:  0.31338048
Testing--------------------
Testing Accuracy:  0.9333333333333333

深色的为训练集,浅色的为测试集:
(插图:make_moons 数据集的训练/测试散点图,深色为训练集,浅色为测试集)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值