Linear regression
Implementation from scratch
import torch
import matplotlib.pyplot as plt
import numpy as np
import random
from IPython import display
print(torch.__version__)
num_inputs = 2
num_examples = 1000
true_w = [2,-3.4]
true_b = 4.2
features = torch.randn(num_examples, num_inputs, dtype=torch.float32)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()),
                       dtype=torch.float32)
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
plt.show()
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # read the examples in random order
    for i in range(0, num_examples, batch_size):
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])
        yield features.index_select(0, j), labels.index_select(0, j)
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
def linreg(X, w, b):
    return torch.mm(X, w) + b

def squared_loss(y_hat, y):
    # reshape y to y_hat's shape so the subtraction is elementwise
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
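A quick sanity check may help here (the toy tensors below are made up for illustration, not from the original notes): without the view, a (3, 1) prediction against a (3,) target would silently broadcast into a (3, 3) loss matrix.
y_hat_demo = torch.tensor([[2.33], [1.07], [1.23]])  # hypothetical predictions
y_demo = torch.tensor([3.14, 0.98, 1.32])            # hypothetical targets
print(squared_loss(y_hat_demo, y_demo))  # shape (3, 1): one loss per example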
def sgd(params, lr, batch_size):
    for param in params:
        # update .data in place so the step itself is not tracked by autograd
        param.data -= lr * param.grad / batch_size
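A minimal illustration of the in-place update, with a made-up tensor standing in for a parameter: after one step with lr=0.1 and batch_size=1, each entry moves by -0.1 times its gradient.
p = torch.tensor([1.0, 2.0], requires_grad=True)
p.grad = torch.tensor([0.5, 0.5])  # pretend backward() produced this gradient
sgd([p], 0.1, 1)
print(p)  # tensor([0.9500, 1.9500], requires_grad=True)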
lr = 0.03
num_epochs = 5
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()
        l.backward()
        sgd([w, b], lr, batch_size)
        # clear the gradients so the next batch starts from zero
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
print(w, true_w, b, true_b)
PyTorch version
import torch
from torch import nn
import numpy as np
torch.manual_seed(1)
print(torch.__version__)
torch.set_default_tensor_type('torch.FloatTensor')
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
import torch.utils.data as Data
batch_size = 10
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(
    dataset=dataset,
    batch_size=batch_size,
    shuffle=True,     # reshuffle the data at every epoch
    num_workers=2,    # read the data with two subprocesses
)
for X, y in data_iter:
    print(X, '\n', y)
    break
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)

    def forward(self, x):
        y = self.linear(x)
        return y
net = LinearNet(num_inputs)
print(net)
# Three equivalent ways to build the same network with nn.Sequential:
net = nn.Sequential(nn.Linear(num_inputs, 1))        # way 1

net = nn.Sequential()                                # way 2
net.add_module('linear', nn.Linear(num_inputs, 1))

from collections import OrderedDict                  # way 3
net = nn.Sequential(OrderedDict(
    [('linear', nn.Linear(num_inputs, 1))]
))
print(net)
print(net[0])
from torch.nn import init
init.normal_(net[0].weight,mean=0.0,std = 0.01)
init.constant_(net[0].bias,val=0.0)
for param in net.parameters():
    print(param)
loss = nn.MSELoss()
import torch.optim as optim
optimizer = optim.SGD(net.parameters(),lr=0.03)
print(optimizer)
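As a brief aside (standard torch.optim behavior, not something the original notes show): the learning rate lives in optimizer.param_groups, which is also where it would be adjusted later.
print(optimizer.param_groups[0]['lr'])  # 0.03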
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # reset gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))
dense = net[0]
print(true_w,dense.weight.data)
print(true_b,dense.bias.data)
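As a hedged usage sketch (the two input points below are invented for the demo), the trained net should map fresh inputs close to X @ true_w + true_b:
with torch.no_grad():
    X_new = torch.tensor([[1.0, 1.0], [2.0, -1.0]])  # hypothetical inputs
    print(net(X_new))  # roughly [[2.8], [11.6]]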
Softmax regression
From scratch
import torch
import torchvision
import numpy as np
import sys
sys.path.append('/home/kesci/input')
import d2lzh1981 as d2l
print(torch.__version__)
print(torchvision.__version__)
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784    # each 28x28 image is flattened into a vector
print(28 * 28)
num_outputs = 10    # ten clothing classes
W = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_outputs)), dtype=torch.float)
b = torch.zeros(num_outputs, dtype=torch.float)
W.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
X = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(X.sum(dim=0, keepdim=True))   # sum over rows, keep dim: shape (1, 3)
print(X.sum(dim=1, keepdim=True))   # sum over columns, keep dim: shape (2, 1)
print(X.sum(dim=0, keepdim=False))  # shape (3,)
print(X.sum(dim=1, keepdim=False))  # shape (2,)
def softmax(X):
    X_exp = X.exp()
    partition = X_exp.sum(dim=1, keepdim=True)
    return X_exp / partition  # broadcasting: each row now sums to 1
X = torch.rand((2, 5))
X_prob = softmax(X)
print(X_prob, '\n', X_prob.sum(dim=1))
def net(X):
    return softmax(torch.mm(X.view((-1, num_inputs)), W) + b)
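A hedged shape check (the fake image batch is invented for the demo): four 28x28 inputs should come out as four rows of class probabilities, each summing to 1.
X_demo = torch.rand((4, 1, 28, 28))  # hypothetical batch of images
out = net(X_demo)
print(out.shape, out.sum(dim=1))  # torch.Size([4, 10]), four ones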
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y = torch.LongTensor([0, 2])
print(y_hat.gather(1, y.view(-1, 1)))  # probability assigned to each true label
def cross_entropy(y_hat, y):
    # pick out the predicted probability of the true class, then take -log
    return - torch.log(y_hat.gather(1, y.view(-1, 1)))
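A quick check on the toy batch above (not in the original notes): gather picks out y_hat[0][0] = 0.1 and y_hat[1][2] = 0.5.
print(cross_entropy(y_hat, y))  # roughly [[2.3026], [0.6931]], i.e. -log(0.1) and -log(0.5)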
def accuracy(y_hat, y):
    return (y_hat.argmax(dim=1) == y).float().mean().item()
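On the same toy batch, only the second prediction's argmax (class 2) matches its label, so accuracy should come out to 0.5:
print(accuracy(y_hat, y))  # 0.5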
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n
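A sanity check one might run here: with randomly initialized weights the model should sit near random-guess accuracy, about 0.1 for ten classes.
print(evaluate_accuracy(test_iter, net))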
num_epochs,lr = 5,0.1
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # clear the gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                d2l.sgd(params, lr, batch_size)
            else:
                optimizer.step()
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr)
X, y = next(iter(test_iter))
true_labels = d2l.get_fashion_mnist_labels(y.numpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
d2l.show_fashion_mnist(X[0:9], titles[0:9])
PyTorch version
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
sys.path.append('/home/kesci/input')
import d2lzh1981 as d2l
print(torch.__version__)
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        y = self.linear(x.view(x.shape[0], -1))
        return y

class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):  # flatten each example to a vector
        return x.view(x.shape[0], -1)
from collections import OrderedDict
net = nn.Sequential(OrderedDict(
    [('flatten', FlattenLayer()),
     ('linear', nn.Linear(num_inputs, num_outputs))]
))
init.normal_(net.linear.weight,mean = 0,std = 0.01)
init.constant_(net.linear.bias,val = 0)
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(),lr = 0.1)
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
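As a hedged follow-up mirroring the from-scratch section, one could visualize a few test predictions with the same d2l helpers:
X, y = next(iter(test_iter))
true_labels = d2l.get_fashion_mnist_labels(y.numpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())
titles = [t + '\n' + p for t, p in zip(true_labels, pred_labels)]
d2l.show_fashion_mnist(X[0:9], titles[0:9])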