# Scratch notes — code typed along while following the "PyTorch 深度学习实践" video tutorial series.
# demo_4 — the number refers to the episode in the multi-part video series.
import torch

# Toy training data for y = 2x (the quadratic model should learn w1≈0, w2≈2, b≈0).
x_data = [1, 2, 3]
y_data = [2, 4, 6]

# Trainable parameters of the quadratic hypothesis y = w1*x^2 + w2*x + b.
# torch.tensor(..., requires_grad=True) is the modern one-step idiom; the
# original created a Tensor and then mutated .requires_grad afterwards.
w1 = torch.tensor([1.0], requires_grad=True)
w2 = torch.tensor([1.0], requires_grad=True)
b = torch.tensor([1.0], requires_grad=True)

def forward(x):
    """Quadratic hypothesis w1*x^2 + w2*x + b, using the module-level parameters."""
    quadratic_term = w1 * (x ** 2)
    linear_term = w2 * x
    return quadratic_term + linear_term + b

def loss(x, y):
    """Squared error between the model's prediction for x and the target y."""
    residual = forward(x) - y
    return residual ** 2

# Sanity check: prediction at x=4 with the untrained (all-ones) parameters.
print('Predict (before training):', 4, forward(4).item())

for epoch in range(100):
    # Stochastic gradient descent: one (x, y) sample per update.
    for x, y in zip(x_data, y_data):
        sample_loss = loss(x, y)
        sample_loss.backward()

        # Manual SGD step (lr = 0.01) on .data so the update itself is not
        # tracked by autograd, then reset each gradient for the next sample.
        for param in (w1, w2, b):
            param.data -= 0.01 * param.grad.data
            param.grad.data.zero_()

    # .item() converts the 1-element loss tensor to a plain Python scalar.
    print('Progress:', epoch, sample_loss.item())

print('Predict (after training):', 4, forward(4).item())

#demo_5.py
import torch

# Training data for y = 2x as column vectors: one sample per row, shape (3, 1),
# matching the (N, in_features) layout torch.nn.Linear expects.
x_data=torch.Tensor([[1],[2],[3]])
y_data=torch.Tensor([[2],[4],[6]])

class LinearModel(torch.nn.Module):
    """Minimal linear-regression model: y = w*x + b via one Linear(1, 1) layer."""

    def __init__(self):
        super(LinearModel, self).__init__()
        # One input feature -> one output feature; weight and bias are learnable.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # Delegate straight to the affine layer.
        return self.linear(x)

model = LinearModel()

# Sum-of-squares loss. The original `size_average=False` argument is deprecated
# (and removed in recent PyTorch); reduction='sum' is the documented equivalent.
criterion = torch.nn.MSELoss(reduction='sum')
# (renamed from the original typo "optimeter")
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for epoch in range(1000):
    # Full-batch gradient descent over the 3-sample dataset.
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    # Standard PyTorch step: clear stale grads, backprop, apply the update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())

# Inference on an unseen input x=4; .data prints the tensor, .item() the scalar.
x_test = torch.Tensor([[4]])
print('x_test_pred=', model(x_test).data)
print('x_test_pred=', model(x_test).item())
    
#demo_8.py
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

class TitanicDataset(Dataset):
    """Map-style Dataset backed by a numeric CSV file loaded fully into memory.

    NOTE(review): the slicing looks suspicious — the target is column 1, yet the
    feature slice ``1:`` also includes column 1, and rows 0-1 are skipped while
    ``__len__`` still counts them. Verify against the actual CSV layout.
    """

    def __init__(self, filepath):
        # Load the whole CSV as float32; requires an all-numeric file.
        raw = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = raw.shape[0]
        self.x_data = torch.from_numpy(raw[2:, 1:])
        self.y_data = torch.from_numpy(raw[2:, [1]])

    def __getitem__(self, item):
        # One (features, target) pair.
        return self.x_data[item], self.y_data[item]

    def __len__(self):
        return self.len

# Build the dataset from the local CSV and wrap it in a shuffling mini-batch loader.
# NOTE(review): num_workers=2 spawns worker processes; on Windows/macOS spawn-based
# platforms this requires an `if __name__ == "__main__":` guard — confirm the
# execution environment.
dataset=TitanicDataset('train.csv')
train_loader=DataLoader(dataset,batch_size=16,shuffle=True,num_workers=2)

# class Model(torch.Module):
#     def __init__(self):
#         super(Model,self).__init__()
#         self.linear1=torch.nn.Linear()
#
# for epoch in range(100):
#     for i,data in enumerate(train_loader,0):
#demo_12.py
import torch
# RNN shape walkthrough: push a random (seq_len, batch, input_size) tensor
# through a single-layer RNN and inspect every resulting shape.
batch_size, seq_len = 2, 3
input_size, hidden_size, num_layers = 4, 4, 1

cell = torch.nn.RNN(input_size, hidden_size, num_layers)

# Sequence-first layout: (seq_len, batch_size, input_size).
inputs = torch.randn(seq_len, batch_size, input_size)
# Initial hidden state: (num_layers, batch_size, hidden_size), all zeros.
hidden = torch.zeros(num_layers, batch_size, hidden_size)

# out holds the output at every time step; hidden is the final hidden state.
out, hidden = cell(inputs, hidden)

print(cell)
print('Input=', inputs)
print('Input_shape=', inputs.shape)
print('Output=', out)
print('Output_shape=', out.shape)
print('Hidden=', hidden)
print('Hidden_shape=', hidden.shape)
  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值