Dive into Deep Learning Notes: Linear Regression & Softmax Regression

Linear Regression

Implementation from scratch

import torch
import matplotlib.pyplot as plt
import numpy as np
import random
from IPython import display

### print the torch version
print(torch.__version__)

## generate a dataset of 1000 examples
num_inputs = 2
num_examples = 1000

true_w = [2,-3.4]
true_b = 4.2
features = torch.randn(num_examples, num_inputs, dtype=torch.float32)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()),
                       dtype=torch.float32)
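
In other words, the synthetic data follows a linear model with small Gaussian noise; with the true parameters above:

y = 2 x_1 - 3.4 x_2 + 4.2 + \epsilon, \qquad \epsilon \sim \mathcal{N}(0,\ 0.01^2)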

# visualize the generated data with a scatter plot
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)

## read the dataset
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # shuffle so examples are read in random order
    for i in range(0, num_examples, batch_size):
        j = torch.LongTensor(indices[i:min(i + batch_size, num_examples)])
        yield features.index_select(0, j), labels.index_select(0, j)

batch_size = 10
for X, y in data_iter(batch_size, features, labels):
    print(X,'\n',y)
    break


## initialize model parameters
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)

w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)

## define the model
def linreg(X, w, b):
    return torch.mm(X, w) + b
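
In matrix form, linreg computes

\hat{y} = Xw + b

where X is the n \times d batch of features, w the d \times 1 weight vector, and the scalar b is broadcast across the batch.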

## define the loss function: squared loss
def squared_loss(y_hat, y):
    # reshape y to match y_hat so subtraction does not broadcast unexpectedly
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
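
This is the per-example squared loss; the factor 1/2 just makes the gradient cleaner:

\ell^{(i)}(w, b) = \frac{1}{2}\left(\hat{y}^{(i)} - y^{(i)}\right)^2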

## define the optimization algorithm: mini-batch stochastic gradient descent
def sgd(params, lr, batch_size):
    for param in params:
        # update .data directly so the step itself is not tracked by autograd
        param.data -= lr * param.grad / batch_size
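
Each step updates the parameters with the averaged gradient over a mini-batch \mathcal{B}:

(w, b) \leftarrow (w, b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \nabla_{(w,b)}\, \ell^{(i)}(w, b)

Since the loss l below is summed over the batch before backward(), dividing the gradient by batch_size yields the batch average.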
    
## train the model
lr = 0.03
num_epochs = 5

net = linreg
loss = squared_loss

# training
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()
        l.backward()
        # use mini-batch stochastic gradient descent to update the parameters
        sgd([w, b], lr, batch_size)
        # reset the gradients to zero after each update
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
print(w, true_w, b, true_b)

PyTorch version

import torch
from torch import nn
import numpy as np
torch.manual_seed(1)

print(torch.__version__)
torch.set_default_tensor_type('torch.FloatTensor')

## generate the dataset
num_inputs = 2
num_examples = 1000

true_w = [2, -3.4]
true_b = 4.2

features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)

## read the dataset
import torch.utils.data as Data
batch_size = 10
dataset = Data.TensorDataset(features, labels)

data_iter = Data.DataLoader(
    dataset=dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=2,  # read the data with multiple worker processes
)


for X,y in data_iter:
    print(X,'\n',y)
    break


## define the model
class LinearNet(nn.Module):
    def __init__(self,n_feature):
        super(LinearNet,self).__init__()
        self.linear = nn.Linear(n_feature,1)
    
    def forward(self,x):
        y = self.linear(x)
        return y
    
net = LinearNet(num_inputs)
print(net)

## three ways to construct the network with nn.Sequential
## method one
net = nn.Sequential(nn.Linear(num_inputs,1))
## method two
net = nn.Sequential()
net.add_module('linear',nn.Linear(num_inputs,1))
## method three
from  collections import OrderedDict
net = nn.Sequential(OrderedDict(
    [('linear',nn.Linear(num_inputs,1))]
))
print(net)
print(net[0])

## initialize model parameters
from torch.nn import init
init.normal_(net[0].weight,mean=0.0,std = 0.01)
init.constant_(net[0].bias,val=0.0)
for param in net.parameters():
    print(param)

## define the loss function
loss = nn.MSELoss()
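
Note that nn.MSELoss defaults to reduction='mean', so it already averages over the batch (the division by batch_size that the from-scratch sgd applied explicitly), and it omits the 1/2 factor of the hand-written squared_loss.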

## define the optimizer
import torch.optim as optim

optimizer = optim.SGD(net.parameters(),lr=0.03)
print(optimizer)

## train the model
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))

dense = net[0]
print(true_w,dense.weight.data)
print(true_b,dense.bias.data)

Softmax Regression

From scratch

## from scratch
import torch
import torchvision
import numpy as np
import sys
sys.path.append('/home/kesci/input')
import d2lzh1981 as d2l
print(torch.__version__)
print(torchvision.__version__)

## get the training and test sets
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

## initialize model parameters
num_inputs = 784
print(28 * 28)
num_outputs = 10
W = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_outputs)), dtype=torch.float)
b = torch.zeros(num_outputs, dtype=torch.float)

W.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)

## operating on a multi-dimensional tensor along a dimension
X = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(X.sum(dim=0, keepdim=True))   # dim=0: sum entries in the same column, keep the column dimension
print(X.sum(dim=1, keepdim=True))   # dim=1: sum entries in the same row, keep the row dimension
print(X.sum(dim=0, keepdim=False))  # dim=0: sum entries in the same column, drop the dimension
print(X.sum(dim=1, keepdim=False))  # dim=1: sum entries in the same row, drop the dimension

## define the softmax operation
def softmax(X):
    X_exp = X.exp()
    partition = X_exp.sum(dim=1, keepdim=True)
    return X_exp / partition  # broadcasting divides each row by its sum

X = torch.rand((2, 5))
X_prob = softmax(X)
print(X_prob, '\n', X_prob.sum(dim=1))  # each row sums to 1
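
For each row of logits o, softmax turns them into a probability distribution:

\mathrm{softmax}(o)_j = \frac{\exp(o_j)}{\sum_k \exp(o_k)}

Calling exp() directly can overflow for large logits. A common fix, shown here as a minimal sketch and not part of the original notes, subtracts the row-wise max first; the shift cancels in the ratio, so the result is unchanged:

def stable_softmax(X):  # hypothetical helper, not used elsewhere in these notes
    # subtract each row's max before exponentiating to avoid overflow
    X_exp = (X - X.max(dim=1, keepdim=True)[0]).exp()
    return X_exp / X_exp.sum(dim=1, keepdim=True)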

## the softmax regression model
def net(X):
    return softmax(torch.mm(X.view((-1, num_inputs)), W) + b)

## define the loss function
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y = torch.LongTensor([0, 2])
# gather picks, for each row, the predicted probability of the true class
y_hat.gather(1, y.view(-1, 1))

def cross_entropy(y_hat, y):
    return -torch.log(y_hat.gather(1, y.view(-1, 1)))
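
With one-hot labels, the cross-entropy of example i reduces to the negative log of the predicted probability of the true class, which is exactly the entry gather extracts:

H\left(y^{(i)}, \hat{y}^{(i)}\right) = -\sum_j y^{(i)}_j \log \hat{y}^{(i)}_j = -\log \hat{y}^{(i)}_{y^{(i)}}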

## define the accuracy metric
def accuracy(y_hat, y):
    return (y_hat.argmax(dim=1) == y).float().mean().item()
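
With the y_hat and y defined above, the first prediction (argmax 2, label 0) is wrong and the second (argmax 2, label 2) is right, so:

print(accuracy(y_hat, y))  # 0.5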

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n

## train the model
num_epochs, lr = 5, 0.1

def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()

            ## zero the gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                d2l.sgd(params, lr, batch_size)
            else:
                optimizer.step()
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr)
X, y = next(iter(test_iter))
true_labels = d2l.get_fashion_mnist_labels(y.numpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
d2l.show_fashion_mnist(X[0:9], titles[0:9])

PyTorch version

import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
sys.path.append('/home/kesci/input')
import d2lzh1981 as d2l
print(torch.__version__)
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10

class LinearNet(nn.Module):
    def __init__(self,num_inputs,num_outputs):
        super(LinearNet,self).__init__()
        self.linear = nn.Linear(num_inputs,num_outputs)
    def forward(self,x):
        y = self.linear(x.view(x.shape[0],-1))
        return y

class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer,self).__init__()
    def forward(self,x):
        return x.view(x.shape[0],-1)
    
from collections import OrderedDict
net = nn.Sequential(OrderedDict(
    [('flatten',FlattenLayer()),
    ('linear',nn.Linear(num_inputs,num_outputs))]
))
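
As a quick sanity check (my addition, using a made-up random batch), FlattenLayer reshapes each 1 x 28 x 28 image into a 784-dimensional vector before the linear layer:

X = torch.rand(2, 1, 28, 28)  # a fake batch of two Fashion-MNIST-sized images
print(net(X).shape)           # torch.Size([2, 10])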

## initialize model parameters
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

## define the loss function
loss = nn.CrossEntropyLoss()
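
nn.CrossEntropyLoss combines log-softmax and negative log-likelihood in a single op, so the network outputs raw logits and needs no explicit softmax layer; this also sidesteps the exp() overflow issue noted in the from-scratch section.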

## define the optimizer
optimizer = torch.optim.SGD(net.parameters(),lr = 0.1)

## train the model
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)