Python 学习笔记代码
打怪游戏示例代码
class Game():
    """A minimal RPG-style player: name, sex, hit points and experience."""

    def __init__(self, name, sex, hp, exp):
        self.name = name
        self.sex = sex
        self.hp = hp
        self.exp = exp

    def showhp(self):
        """Apply 20 points of damage and return the remaining hit points."""
        self.hp = self.hp - 20
        if self.hp <= 0:
            print('your hero is dead...')
        return self.hp

    def showexp(self):
        """Gain 50 experience points and return the new total."""
        self.exp = self.exp + 50
        # 'level up' message fires only when the total reaches exactly 50,
        # i.e. on the first gain starting from zero experience.
        if self.exp == 50:
            print('你升级了')
        return self.exp
if __name__ == '__main__':
    Button = str(input('是否开始游戏 y/n '))
    if Button == 'y':
        Player1 = Game(name='Little Boy', sex='male', hp=100, exp=0)
        # Three hits of damage, then five experience gains.
        for i in range(3):
            Player1.showhp()
        for i in range(5):
            Player1.showexp()
        # Report remaining HP only when a hero was actually created.
        # Previously these prints ran unconditionally, so answering
        # anything but 'y' raised NameError: Player1 is undefined.
        print('你剩余的生命值为')
        print(Player1.hp)
从60开始循环倒计时,每一秒自减一;按下按键'b'开始计时,按下按键't'暂停计时。
import time
import threading
# Shared start/stop flag, toggled by the key-listener thread.
begin = False


def numCounter():
    """Count down from 60 once per second while `begin` is True; wrap back to 60 at 0."""
    num = 60
    global begin
    while True:
        if begin:
            num = num - 1
            print(f'num={num}, begin={begin}')
            if num <= 0:
                num = 60
        # Sleep on every pass, not only while counting: the original slept
        # inside the `if begin` branch, so the paused state busy-spun and
        # burned an entire CPU core doing nothing.
        time.sleep(1)
def keyDectect():
    """Poll stdin forever: 'b' starts the countdown, 't' pauses it."""
    # TODO: need to be revised by using the threading lock
    global begin
    toggles = {'b': True, 't': False}
    while True:
        key = input()
        if key in toggles:
            begin = toggles[key]
        else:
            print('wrong input..')
if __name__ == '__main__':
    # One thread counts, the other listens for start/stop keys.
    t1 = threading.Thread(target=numCounter)
    t2 = threading.Thread(target=keyDectect)
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # `daemon` attribute instead (False is also the default, meaning the
    # process waits for both threads).
    t1.daemon = False
    t2.daemon = False
    t1.start()
    t2.start()
猜数字:
随机产生一个1~100的整数, 从键盘输入一个正整数,程序判断并提示:输入数据大了还是小了,最终找到这个数字。
import random

# Guessing game: the secret is an integer in [1, 100].
randNum = random.randint(1, 100)
while True:
    # Parse the guess as int: the original used float(), which accepted
    # values such as 3.5 that can never equal randint's integer output,
    # and contradicted the stated "enter a positive integer" contract.
    number = int(input("请输入一个100以内的数:"))
    if number > randNum:
        print("输入值偏大")
    elif number < randNum:
        print("输入值偏小")
    else:
        print("数值正确")
        break
输出1~100之间能够被3整除的整数:
# Print the integers in 1..100 that are divisible by 3, tab-separated.
# The original looped over range(100), which wrongly printed 0 (not in
# the stated 1..100 range); stepping by 3 from 3 to 100 visits exactly
# the intended numbers and skips the per-item modulo test.
for num in range(3, 101, 3):
    print(num, end='\t')
汽车类型代码
class Car():
    """A car with public kind/length attributes and a private weight."""

    def __init__(self, kind='BMW', length=4.5, weight=1.6):
        self.kind = kind
        self.length = length
        # Name-mangled: reachable from outside only via showWeight().
        self.__weight = weight

    def showWeight(self):
        """Return the private weight attribute."""
        return self.__weight
# Demonstrate attribute access on a default-constructed Car.
BMW = Car()
for value in (BMW.kind, BMW.showWeight(), BMW.length):
    print(value)
采用穷举搜索(网格搜索)的方法来实现线性回归的效果
给定训练集为 x=1, y=6.8
x=2, y=9.8
x=3, y=13.2
x=4, y=16.2
测试集 x=5, y=?
import numpy as np
from matplotlib import pyplot as plt
# Four training pairs, roughly on the line y = 3.1x + 3.5.
x_data = [1, 2, 3, 4]
y_data = [6.8, 9.8, 13.2, 16.2]
loss_list = list()


def forward(a, x, b):
    """Linear model prediction: a*x + b."""
    return x * a + b


def lossFunction(a, x, y, b):
    """Squared error between the prediction a*x+b and the target y."""
    residual = forward(a, x, b) - y
    return residual ** 2


# Parameter values tried by the grid search, parallel to loss_list.
a_list = list()
b_list = list()
if __name__ == '__main__':
    # Brute-force grid search: slope a and intercept b both over [0, 6)
    # in steps of 0.1, recording the mean loss for every (a, b) pair.
    for a in np.arange(0, 6, 0.1):
        for b in np.arange(0, 6, 0.1):
            sum_loss = 0
            for xi, yi in zip(x_data, y_data):
                sum_loss += lossFunction(a, xi, yi, b)
            loss_list.append(sum_loss / 4)
            a_list.append(a)
            b_list.append(b)
    plt.plot(a_list, loss_list)
    plt.xlabel('a')
    plt.ylabel('loss')
    best_loss = min(loss_list)
    print(best_loss)
    loss_min_index = loss_list.index(best_loss)
    print(loss_min_index)
    a_wanted = a_list[loss_min_index]
    b_wanted = b_list[loss_min_index]
    print(f'a_wanted = {a_wanted}, b_wanted ={b_wanted}')
    # plt.show()
    # Predict the held-out test point x=5 with the best parameters found.
    print(forward(a_wanted, 5, b_wanted))
反向计算梯度
from matplotlib import pyplot as plt
import torch
# Training pairs on the exact line y = 2x.
data_x = [1, 2, 3]
data_y = [2, 4, 6]
loss_list = list()
a_list = list()
alpha = 0.01  # learning rate


def forward(x):
    """Model prediction a*x; reads the module-level parameter `a`."""
    return a * x


def lossFunction(x, y):
    """Squared error of the prediction forward(x) against the target y."""
    return (forward(x) - y) ** 2
if __name__ == '__main__':
    # Learnable slope, deliberately initialised far from the optimum (2).
    a = torch.Tensor([7.0])
    a.requires_grad = True
    for epoch in range(1000):
        sum_loss = 0.0
        for i in range(3):
            # Evaluate the loss once per sample: the original computed the
            # identical loss twice (once into sum_loss, once for backward),
            # doubling the forward work each step.
            l = lossFunction(data_x[i], data_y[i])
            sum_loss += l.item()
            l.backward()
            # Plain SGD step on the raw data tensor, then clear the grad
            # so gradients do not accumulate across samples.
            a.data = a.data - alpha * a.grad
            a.grad = None
            # Store plain floats (.item()) instead of tensors so the lists
            # do not pin autograd state and plot cleanly.
            a_list.append(a.item())
        loss_list.append(sum_loss / 3)
    print(a_list)
    plt.subplot(211)
    plt.plot(a_list)
    plt.subplot(212)
    plt.plot(loss_list)
    plt.show()
pytorch实现线性回归
import torch
from matplotlib import pyplot as plt
# Training pairs for y = 2x, shaped (3, 1) as nn.Linear expects;
# float literals give the same float32 dtype as the explicit dtype= form.
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])
class LinearExample(torch.nn.Module):
    """One-feature, one-output linear regression model: y = w*x + b."""

    def __init__(self):
        super(LinearExample, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Map an (N, 1) batch through the single linear layer."""
        return self.linear(x)
model = LinearExample()
criterion = torch.nn.MSELoss(reduction='sum')  # summed squared error
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# Epoch indices and the weight value per epoch, for plotting convergence.
epoch_list = []
a_list = []
if __name__ == '__main__':
    # Standard loop: forward, loss, zero grads, backward, step.
    for epoch in range(100):
        prediction = model(x_data)
        loss = criterion(prediction, y_data)
        # Record the current weight so its convergence can be plotted.
        a_list.append(model.linear.weight.item())
        epoch_list.append(epoch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    plt.plot(epoch_list, a_list)
    plt.show()
糖尿病例子的预测问题:
import numpy as np
from matplotlib import pyplot as plt
import torch
# Load the whole diabetes table; np.loadtxt decompresses .gz transparently.
# NOTE(review): hard-coded absolute path — runs only on that one machine.
data_xy = np.loadtxt('/home/chasing/Documents/pytorchbooklit/diabetes.csv.gz', delimiter=',', dtype=np.float32)
# Features are every column except the last; labels are the last column,
# reshaped to (N, 1) so they align with the model's (N, 1) output.
x_data = torch.from_numpy(data_xy[:,:-1])
y_data = torch.from_numpy(data_xy[:,-1]).reshape(-1,1)
class LinearExample(torch.nn.Module):
    """Binary classifier for 8 input features: 8 -> 6 -> 4 -> 1 MLP."""

    def __init__(self):
        super(LinearExample, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        """Return a probability in (0, 1) for each row of x, shape (N, 8) -> (N, 1)."""
        x = self.relu(self.linear1(x))
        x = self.relu(self.linear2(x))
        x = self.linear3(x)
        # The output must be a probability: this model is trained with
        # BCELoss, which requires inputs in [0, 1]. The previous ReLU
        # output is unbounded above (BCELoss raises on values > 1) and
        # has zero gradient below 0, so training was broken.
        return self.sigmoid(x)
model = LinearExample()
criterion = torch.nn.BCELoss(reduction='mean')  # mean binary cross-entropy
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
loss_list = []
if __name__ == '__main__':
    # Full-batch training: one forward/backward pass over all rows per epoch.
    for epoch in range(300):
        predictions = model(x_data)
        batch_loss = criterion(predictions, y_data)
        loss_list.append(batch_loss.item())
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
    plt.plot(loss_list)
    plt.show()
手写数字识别例题:
import torch
from matplotlib import pyplot as plt
from torchvision import datasets
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.optim as optim
import numpy as np
batch_size = 64
batch_size_test = 100
# Convert images to tensors, then normalize with the MNIST global mean/std.
data_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
mnist_train_set = datasets.MNIST(root='./', train=True, download=True, transform=data_transform)
mnist_test_set = datasets.MNIST(root='./', train=False, download=True, transform=data_transform)
trainloader = DataLoader(mnist_train_set, batch_size=batch_size, shuffle=True)
testloader = DataLoader(mnist_test_set, batch_size=batch_size_test, shuffle=False)
class Model(torch.nn.Module):
    """Fully-connected MNIST classifier: 784 -> 512 -> 256 -> 128 -> 64 -> 10."""

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(784, 512)
        self.linear2 = torch.nn.Linear(512, 256)
        self.linear3 = torch.nn.Linear(256, 128)
        self.linear4 = torch.nn.Linear(128, 64)
        self.linear5 = torch.nn.Linear(64, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        """Flatten to (N, 784) and return raw class logits, shape (N, 10)."""
        x = x.view(-1, 784)
        # ReLU after every hidden layer; the last layer stays linear
        # because CrossEntropyLoss expects unnormalized logits.
        for layer in (self.linear1, self.linear2, self.linear3, self.linear4):
            x = self.relu(layer(x))
        return self.linear5(x)
model = Model()
criterion = torch.nn.CrossEntropyLoss()  # expects raw logits + int labels
optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)
loss_list = []
def test_accuracy():
    """Print the classification accuracy of `model` over the whole test set."""
    correct = 0
    total_num = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            pred = model(images)
            # argmax over the class dimension gives the predicted digit.
            # Accumulate across ALL batches: the original reset both
            # counters inside the loop, so it only ever reported the
            # accuracy of a single batch. Tensor argmax also replaces the
            # slow per-element list conversion.
            predicted = pred.argmax(dim=1)
            correct += (predicted == labels).sum().item()
            total_num += labels.size(0)
    print(f'Accuracy = {correct/total_num}')
if __name__ == '__main__':
    for epoch in range(10):
        for i, data in enumerate(trainloader, 0):
            inputs, label = data
            outputs = model(inputs)
            optimizer.zero_grad()
            loss = criterion(outputs, label)
            # Append the float value, not the tensor: storing the raw loss
            # keeps every batch's autograd graph alive (a memory leak) and
            # matplotlib cannot plot grad-tracking tensors directly.
            loss_list.append(loss.item())
            loss.backward()
            optimizer.step()
        print(f'[{epoch}]: loss = {loss}')
    plt.plot(loss_list)
    plt.show()
    test_accuracy()
通过PIL识别图像
import numpy as np
from PIL import Image
# Open an image, convert a copy to 8-bit grayscale ('L'), and compare the
# ndarray shapes of the color and grayscale versions.
a = Image.open('test.jpg')
c = a.convert('L')
c.show()
im = np.array(a)       # color: (H, W, channels) — depends on test.jpg
im_gray = np.array(c)  # grayscale: (H, W)
print(im_gray.shape)
print(im_gray)
print(im.shape)
# A small hand-built (2, 3, 3) array for shape experiments.
b = np.array([[[1, 2, 3], [2, 3, 3], [3, 4, 5]],
              [[2, 1, 2], [3, 4, 5], [4, 5, 6]]])
卷积神经网络
首先需要完成卷积网络的维度的推断
import torch
# Trace tensor shapes through conv -> pool -> conv -> pool -> linear for a
# single 1x28x28 input (the MNIST geometry).
width, height = 28, 28
in_channle = 1
batch_size = 1
inputs = torch.randn(batch_size, in_channle, width, height)
print(inputs.shape)

conv_lay1 = torch.nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)
output1 = conv_lay1(inputs)      # (1, 10, 24, 24): 28 - 5 + 1
print(output1.shape)

maxpool_lay = torch.nn.MaxPool2d(kernel_size=2)
output2 = maxpool_lay(output1)   # (1, 10, 12, 12): halved
print(output2.shape)

conv_lay2 = torch.nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5)
output3 = conv_lay2(output2)     # (1, 20, 8, 8)
print(output3.shape)

output4 = maxpool_lay(output3)   # (1, 20, 4, 4)
print(output4.shape)

output5 = output4.view(1, -1)    # flatten: 20*4*4 = 320 features
linear_lay = torch.nn.Linear(320, 10)
output6 = linear_lay(output5)    # (1, 10) class scores
print(output6.shape)
下面将手写数字识别的程序修改成带有卷积操作的深度神经网络结构
import torch
from matplotlib import pyplot as plt
from torchvision import datasets
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.optim as optim
import numpy as np
batch_size = 64
# Convert images to tensors, then normalize with the MNIST global mean/std.
data_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
mnist_train_set = datasets.MNIST(root='./', train=True, download=True, transform=data_transform)
mnist_test_set = datasets.MNIST(root='./', train=False, download=True, transform=data_transform)
trainloader = DataLoader(mnist_train_set, batch_size=batch_size, shuffle=True)
testloader = DataLoader(mnist_test_set, batch_size=batch_size, shuffle=False)
class Model(torch.nn.Module):
    """Small CNN for MNIST: two conv+pool stages, then a 320 -> 10 classifier."""

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(kernel_size=2)
        self.linear = torch.nn.Linear(320, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        """Map (N, 1, 28, 28) images to (N, 10) raw class logits."""
        n = x.size(0)
        x = self.relu(self.pooling(self.conv1(x)))  # (N, 10, 12, 12)
        x = self.relu(self.pooling(self.conv2(x)))  # (N, 20, 4, 4)
        # Flatten to (N, 320) for the final linear classifier.
        return self.linear(x.view(n, -1))
model = Model()
criterion = torch.nn.CrossEntropyLoss()  # raw logits in, int labels in
optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)
loss_list = []
def test_accuracy():
    """Print the classification accuracy of `model` over the whole test set."""
    correct = 0
    total_num = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            pred = model(images)
            # Use the actual batch length rather than range(batch_size):
            # the MNIST test set has 10000 samples, so the last batch
            # holds 10000 % 64 = 16 images and indexing up to 63 raised
            # IndexError. Also accumulate across ALL batches — the
            # original reset its counters inside the loop, so it only
            # measured a single batch.
            predicted = pred.argmax(dim=1)
            correct += (predicted == labels).sum().item()
            total_num += labels.size(0)
    print(f'Accuracy = {correct / total_num}')
if __name__ == '__main__':
    for epoch in range(3):
        for i, data in enumerate(trainloader, 0):
            inputs, label = data
            outputs = model(inputs)
            optimizer.zero_grad()
            loss = criterion(outputs, label)
            # Append the float value, not the tensor: storing the raw loss
            # keeps every batch's autograd graph alive (a memory leak) and
            # matplotlib cannot plot grad-tracking tensors directly.
            loss_list.append(loss.item())
            loss.backward()
            optimizer.step()
        print(f'[{epoch}]: loss = {loss}')
    plt.plot(loss_list)
    plt.show()
    test_accuracy()