Getting Started with PyTorch Code

Code typed along while following the video:
https://www.bilibili.com/video/BV12741177Cu?p=1
Attempt 1: randomly generate a 64×1000 input, fit a two-layer network to random targets, then output a class prediction.

import torch.nn as nn
import torch

N,input,H,output = 64,1000,100,10   # batch size, input dim, hidden dim, output dim
x = torch.randn(N,input)            # random inputs
y = torch.randn(N,output)           # random targets

class TwoLayerNet(torch.nn.Module):
    def __init__(self,input,H,output):
        super(TwoLayerNet,self).__init__()
        self.linear1 = nn.Linear(input,H,bias=False)
        self.linear2 = nn.Linear(H,output,bias=False)
    def forward(self,x):
        # clamp(min=0) acts as a ReLU between the two linear layers
        y_pred = self.linear2(self.linear1(x).clamp(min=0))
        return y_pred
net = TwoLayerNet(input,H,output)
loss_fn = nn.MSELoss(reduction = 'sum')
learning_rate = 1e-4
optimizer = torch.optim.Adam(net.parameters(),lr = learning_rate)

for epoch in range(500):
    y_pred = net(x)
    loss = loss_fn(y_pred,y)
    print(epoch+1,loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

with torch.no_grad():
    x_test = torch.randn(64,1000)
    y_test = net(x_test)
    print(x_test)
    print(y_test.shape)  # torch.Size([64, 10])
    # torch.max along dim=1 returns (values, indices); the indices are the predicted classes
    _, predicted = torch.max(y_test, 1)
    print(y_test)
    print(predicted)
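
For comparison, the same two-layer network can also be written with nn.Sequential instead of a custom Module; a minimal sketch (nn.ReLU() plays the role of the clamp(min=0) call above):

import torch
import torch.nn as nn

# Equivalent two-layer model built from nn.Sequential
net = nn.Sequential(
    nn.Linear(1000, 100, bias=False),
    nn.ReLU(),
    nn.Linear(100, 10, bias=False),
)
y_pred = net(torch.randn(64, 1000))
print(y_pred.shape)  # torch.Size([64, 10])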

Attempt 2
The fizz buzz game
Multiples of 3 return "buzz", multiples of 5 return "fizz", and multiples of 15 return "fizzbuzz" (note: these labels are swapped relative to the classic game, but the code below is internally consistent).

import numpy as np
import torch
import torch.nn as nn

def fizz_buzz_encode(i):
    if i % 15 == 0 : return 3
    elif i % 3 ==0 : return 2
    elif i % 5 ==0 : return 1
    else : return 0

def fizz_buzz_decode(i,prediction):
    return [str(i),'fizz','buzz','fizzbuzz'][prediction]
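
# Quick sanity check of the mapping above (example values, not from the original post):
#   fizz_buzz_encode(7)  -> 0, fizz_buzz_decode(7, 0)  -> '7'
#   fizz_buzz_encode(9)  -> 2, fizz_buzz_decode(9, 2)  -> 'buzz'
#   fizz_buzz_encode(10) -> 1, fizz_buzz_decode(10, 1) -> 'fizz'
#   fizz_buzz_encode(30) -> 3, fizz_buzz_decode(30, 3) -> 'fizzbuzz'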

# Convert a number to its binary representation, which is an easier input for the network
def binary_encode(i,num_digits):
    return np.array([i>>d &1 for d in range(num_digits)][::-1])

NUM_DIGITS = 10
# trX holds the binary encodings of the numbers 101..1023 (923 numbers), shape [923, 10];
#   e.g. 101 becomes [0., 0., 0., 1., 1., 0., 0., 1., 0., 1.]
# trY holds the corresponding fizz buzz class labels for 101..1023, shape [923]
trX = torch.Tensor([binary_encode(i,NUM_DIGITS) for i in range(101,2**NUM_DIGITS)])
trY = torch.LongTensor([fizz_buzz_encode(i) for i in range(101,2**NUM_DIGITS)])

NUM_HIDDEN = 100
class NET(nn.Module):
    def __init__(self,NUM_DIGITS,NUM_HIDDEN):
        super(NET,self).__init__()
        self.linear1 = nn.Linear(NUM_DIGITS,NUM_HIDDEN,bias=False)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(NUM_HIDDEN,4,bias=False)

    def forward(self,x):
        return self.linear2(self.relu(self.linear1(x)))

net = NET(NUM_DIGITS,NUM_HIDDEN)
loss_fn = torch.nn.CrossEntropyLoss()   # expects raw logits and integer class labels
learning_rate = 0.001
optimizer = torch.optim.Adam(net.parameters(),lr = learning_rate)
Batch_Size = 100
for epoch in range(1000):
    # iterate over the 923 training examples in mini-batches of Batch_Size
    for start in range(0,len(trX),Batch_Size):
        end = start+Batch_Size
        batch_trX = trX[start:end]
        batch_trY = trY[start:end]
        batch_trY_pred = net(batch_trX)
        loss = loss_fn(batch_trY_pred,batch_trY)
        print(epoch+1,loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


with torch.no_grad():
    test_trX_array = []
    true_trY = []
    for i in range(0,101):
        test_trX_array.append(binary_encode(i,NUM_DIGITS))
        true_trY.append(fizz_buzz_decode(i,fizz_buzz_encode(i)))
    #test_trX = torch.Tensor([binary_encode(i, NUM_DIGITS) for i in range(0, 101)])
    test_trX = torch.Tensor(test_trX_array)
    test_trY = net(test_trX)
    print('true_y', true_trY)
    print('test_y', test_trY)
    # test_trY is a 101x4 matrix of logits; take the argmax over the 4 classes as the prediction
    print(len(test_trY.max(1)[1].data.tolist()))
    predict = list(fizz_buzz_decode(i,x) for i,x in zip(range(0, 101), test_trY.max(1)[1].data.tolist()))
    predict_array = np.array(predict)
    print(predict_array)
    true = 0
    for i in range(len(true_trY)):
        if true_trY[i] == predict_array[i]:
            true +=1
    print("accuracy",100.0*true/len(true_trY))

The accuracy reaches about 94%.