# -*- coding: utf-8 -*-
"""
Created on Sat May 22 09:55:44 2021
@author: lwf
"""
import torch
import numpy as np
# Encode an integer into its FizzBuzz class:
# 0 -> plain number, 1 -> fizz (%3), 2 -> buzz (%5), 3 -> fizzbuzz (%15)
def fizzbuzz_encode(i):
    """Return the FizzBuzz class label (0-3) for integer *i*.

    3 = divisible by 15, 2 = divisible by 5, 1 = divisible by 3, 0 = neither.
    """
    # Check the most specific divisor first, mirroring classic FizzBuzz rules.
    for divisor, label in ((15, 3), (5, 2), (3, 1)):
        if i % divisor == 0:
            return label
    return 0
# Decode a class label back to display text:
# 0 -> the label itself as text, 1 -> "fizz", 2 -> "buzz", 3 -> "fizzbuzz"
def fizzbuzz_decode(i):
    """Map class label *i* (0-3) to its FizzBuzz word.

    NOTE(review): for label 0 this returns ``str(0) == "0"``, not the original
    number -- the caller below handles the label-0 case itself.
    """
    if i == 0:
        return str(i)
    return ("fizz", "buzz", "fizzbuzz")[i - 1]
# Convert an integer to a fixed-width binary vector (most significant bit first).
def binary_encode(i, num_digits):
    """Return *i* as a numpy int array of 0/1 digits, zero-padded to *num_digits*.

    Bug fix: ``np.long`` was removed in NumPy 1.24+ (it was a deprecated alias
    for the builtin ``int``); use ``np.int64`` explicitly instead.
    """
    bits = np.array([int(d) for d in bin(i)[2:]], dtype=np.int64)
    # Left-pad with zeros so every encoded number has the same width.
    # (Raises if i needs more than num_digits bits, same as the original.)
    return np.append(np.zeros(num_digits - len(bits), dtype=np.int64), bits)
# Training data: inputs are 10-bit binary encodings of the numbers 101..1000,
# targets are the corresponding FizzBuzz class labels (0-3).
trX = torch.tensor(
    [binary_encode(n, 10) for n in range(101, 1001)], dtype=torch.float32
)
trY = torch.tensor(
    [fizzbuzz_encode(n) for n in range(101, 1001)], dtype=torch.long
)
# Model: 10-bit binary input -> 100 hidden units with ReLU -> 4 class logits.
model = torch.nn.Sequential(
    torch.nn.Linear(10, 100),
    torch.nn.ReLU(),
    torch.nn.Linear(100, 4),
)
# Cross-entropy loss over the 4 classes; plain SGD on all model parameters.
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# Training loop: full-batch gradient descent for 10000 steps.
# Fix: the loop body had lost its indentation (syntactically broken as pasted),
# and the loss was printed as a full tensor repr -- use .item() for the scalar.
for epoch in range(10000):
    preY = model(trX)            # logits, one row per training number
    loss = loss_fn(preY, trY)
    optimizer.zero_grad()        # clear gradients accumulated by the last step
    loss.backward()
    optimizer.step()
    print("Loss", loss.item())
# Save / reload the trained model:
# torch.save(model, r'C:\Users\lwf\Documents\Python Scripts\神经网络\fizzbuzz.pkl')
# model = torch.load(r'C:\Users\lwf\Documents\Python Scripts\神经网络\fizzbuzz.pkl')
# Evaluation data: the numbers 1..100, which the model never saw in training.
startnumber = 1
stopnumer = 101  # exclusive upper bound (original variable name kept)
testX = torch.tensor(
    [binary_encode(n, 10) for n in range(startnumber, stopnumer)],
    dtype=torch.float32,
)
# Predicted class = index of the largest logit: 0, 1(fizz), 2(buzz), 3(fizzbuzz).
# Improvement: inference needs no gradients, so skip building the autograd graph.
with torch.no_grad():
    testY = model(testX).argmax(dim=1)
# Decode predictions into display form: the number itself for class 0,
# otherwise the fizz/buzz/fizzbuzz word.
# Improvement: the original called testY.numpy() on every loop iteration;
# convert once up front.
labels = testY.numpy()
a = []
for idx in range(stopnumer - startnumber):
    if labels[idx] == 0:
        a.append(idx + startnumber)
    else:
        a.append(fizzbuzz_decode(labels[idx]))
# Accuracy: fraction of 1..100 whose predicted class matches the true class.
# Improvement: hoist the testY.numpy() conversion out of the loop and count
# matches with a generator-sum instead of a manual counter.
predicted = testY.numpy()
duide = sum(
    1
    for n in range(startnumber, stopnumer)
    if fizzbuzz_encode(n) == predicted[n - startnumber]
)
print(duide / (stopnumer - startnumber))
# NOTE (scraped blog footer, translated): "fizzbuzz + pytorch -- this game is
# not well suited to machine learning." Latest recommended article published
# 2023-02-16 23:02:44. (Commented out: this text is not Python code.)