1. PyTorch Tensors, Linear Regression, Computational Graphs, and autograd

1 Three Ways to Create Tensors

# -*- coding:utf-8 -*-
import torch
import numpy as np
torch.manual_seed(1)

# ===============================  example 1 ===============================
# [1] Create a tensor with torch.tensor
#
# flag = True
flag = False
if flag:
    arr = np.ones((3, 3))
    print("ndarray dtype:", arr.dtype)

    # t = torch.tensor(arr, device='cuda')
    t = torch.tensor(arr)

    print(t)
'''
ndarray dtype: float64
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)
'''


# ===============================  example 2 ===============================
# [2] Create a tensor with torch.from_numpy
# flag = True
flag = False
if flag:
    arr = np.array([[1, 2, 3], [4, 5, 6]])
    t = torch.from_numpy(arr)
    print("numpy array: ", arr)
    print("tensor : ", t)

    # print("\nmodify arr")
    # arr[0, 0] = 0
    # print("numpy array: ", arr)
    # print("tensor : ", t)

    print("\nmodify tensor")
    t[0, 0] = -1
    print("numpy array: ", arr)
    print("tensor : ", t)

'''
numpy array:  [[1 2 3]
 [4 5 6]]
tensor :  tensor([[1, 2, 3],
        [4, 5, 6]], dtype=torch.int32)

modify tensor
numpy array:  [[-1  2  3]
 [ 4  5  6]]
tensor :  tensor([[-1,  2,  3],
        [ 4,  5,  6]], dtype=torch.int32)
'''
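
The commented-out lines above point at the key difference between the two constructors: torch.tensor copies the NumPy data, while torch.from_numpy shares memory with the ndarray. A minimal sketch (not part of the original script) confirming this:

import torch
import numpy as np

arr = np.ones(3)
t_copy = torch.tensor(arr)       # copies the data
t_share = torch.from_numpy(arr)  # shares memory with arr

arr[0] = -1
print(t_copy)    # tensor([1., 1., 1.], dtype=torch.float64) -- unaffected
print(t_share)   # tensor([-1., 1., 1.], dtype=torch.float64) -- follows arr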


# ===============================  example 3 ===============================
# Create an all-zeros tensor with torch.zeros
# flag = True
flag = False
if flag:
    out_t = torch.tensor([1])
    # out_t will receive the tensor we are about to create

    # out= writes the tensor produced by torch.zeros into out_t
    t = torch.zeros((3, 3), out=out_t)

    print(t, '\n', out_t)
    print(id(t), id(out_t), id(t) == id(out_t))
# t and out_t are the same object, holding the same data.

'''
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]]) 
 tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
1936006519832 1936006519832 True
'''

# ===============================  example 4 ===============================
# Create a tensor filled with a given value via torch.full (here, all ones)
# flag = True
flag = False
if flag:
    t = torch.full((3, 3), 1.)   # float fill value; an integer fill yields an integer tensor in newer PyTorch
    print(t)
# Creates a 3x3 tensor in which every element is 1
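
torch.full is not limited to ones: it fills a new tensor with any scalar, and torch.full_like reuses another tensor's shape. A quick sketch (the values are only illustrative):

import torch

print(torch.full((2, 3), 3.14))   # 2x3 tensor, every element 3.14
ref = torch.zeros((2, 3))
print(torch.full_like(ref, 7.))   # same shape as ref, filled with 7.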

# ===============================  example 5 ===============================
# Create an arithmetic sequence with torch.arange
# flag = True
flag = False
if flag:
    t = torch.arange(2, 10, 2)
    print(t)
# Output: tensor([2, 4, 6, 8])
# the interval is [start, end)

# ===============================  example 6 ===============================
# Create an evenly spaced sequence with torch.linspace
# flag = True
flag = False
if flag:
    # t = torch.linspace(2, 10, 5)
    t = torch.linspace(2, 10, 6)
    print(t)
# the interval is [start, end], 6 points in total; step = (10 - 2) / (6 - 1) = 1.6
# tensor([ 2.0000,  3.6000,  5.2000,  6.8000,  8.4000, 10.0000])

# ===============================  example 7 ===============================
# [3] Create a normally distributed tensor with torch.normal
flag = True
# flag = False
if flag:

    # mean: tensor, std: tensor -- each element is drawn from N(mean[i], std[i])
    mean = torch.arange(1, 5, dtype=torch.float)
    std = torch.arange(1, 5, dtype=torch.float)
    t_normal = torch.normal(mean, std)
    print("mean:{}\nstd:{}".format(mean, std))
    print(t_normal)

    # mean: scalar, std: scalar; size=(4,) sets the output shape
    t_normal = torch.normal(0., 1., size=(4,))
    print(t_normal)
    # tensor([0.6614, 0.2669, 0.0617, 0.6213])

    # mean: tensor, std: scalar -- the scalar std is shared by every element
    mean = torch.arange(1, 5, dtype=torch.float)
    std = 1
    t_normal = torch.normal(mean, std)
    print("mean:{}\nstd:{}".format(mean, std))
    print(t_normal)

'''
mean:tensor([1., 2., 3., 4.])
std:tensor([1., 2., 3., 4.])
tensor([1.6614, 2.5338, 3.1850, 6.4853])
tensor([-0.4519, -0.1661, -1.5228,  0.3817])
mean:tensor([1., 2., 3., 4.])
std:1
tensor([-0.0276,  1.4369,  2.1077,  3.9417])
'''

2 Basic Tensor Operations

# -*- coding:utf-8 -*-
import torch
torch.manual_seed(1)

# ======================================= example 1 =======================================
# torch.cat

# flag = True
flag = False

if flag:
    t = torch.ones((2, 3))

    t_0 = torch.cat([t, t], dim=0)
    t_1 = torch.cat([t, t, t], dim=1)

    print("t_0:{} shape:{}\nt_1:{} shape:{}".format(t_0, t_0.shape, t_1, t_1.shape))
'''
t_0:tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]]) shape:torch.Size([4, 3])
t_1:tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1.],
        [1., 1., 1., 1., 1., 1., 1., 1., 1.]]) shape:torch.Size([2, 9])
'''

# ======================================= example 2 =======================================
# torch.stack

# flag = True
flag = False

if flag:
    t = torch.ones((2, 3))

    t_stack_0 = torch.stack([t, t], dim=2)
    # create a new dimension at dim=2 and stack the tensors along it

    t_stack_1 = torch.stack([t, t], dim=0)

    print("\nt_stack0:{} shape0:{}".format(t_stack_0, t_stack_0.shape))
    print("\nt_stack1:{} shape1:{}".format(t_stack_1, t_stack_1.shape))
'''
t_stack0:tensor([[[1., 1.],
         [1., 1.],
         [1., 1.]],

        [[1., 1.],
         [1., 1.],
         [1., 1.]]]) shape0:torch.Size([2, 3, 2])

t_stack1:tensor([[[1., 1., 1.],
         [1., 1., 1.]],

        [[1., 1., 1.],
         [1., 1., 1.]]]) shape1:torch.Size([2, 2, 3])
'''
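
Side by side, the difference is that cat joins tensors along an existing dimension while stack first inserts a new dimension and joins along it. A minimal sketch (shapes only):

import torch

t = torch.ones((2, 3))
print(torch.cat([t, t], dim=0).shape)    # torch.Size([4, 3])    -- still 2-D
print(torch.stack([t, t], dim=0).shape)  # torch.Size([2, 2, 3]) -- new leading dim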

# ======================================= example 3 =======================================
# torch.chunk

# flag = True
flag = False

if flag:
    a = torch.ones((2, 7))  # dim 1 has length 7
    list_of_tensors = torch.chunk(a, dim=1, chunks=3)   # 7/3 rounds up to 3 per chunk, so the last chunk gets the remaining 1

    for idx, t in enumerate(list_of_tensors):
        print("tensor {}: {}, shape is {}".format(idx+1, t, t.shape))
'''
tensor 1: tensor([[1., 1., 1.],
        [1., 1., 1.]]), shape is torch.Size([2, 3])
tensor 2: tensor([[1., 1., 1.],
        [1., 1., 1.]]), shape is torch.Size([2, 3])
tensor 3: tensor([[1.],
        [1.]]), shape is torch.Size([2, 1])
'''

# ======================================= example 4 =======================================
# torch.split

# flag = True
flag = False

if flag:
    t = torch.ones((2, 5))

    # each list entry gives the length of one piece along dim
    list_of_tensors = torch.split(t, [2, 1, 2], dim=1)  # pieces of length 2, 1, 2
    for idx, t in enumerate(list_of_tensors):
        print("tensor {}: {}, shape is {}".format(idx+1, t, t.shape))

    # list_of_tensors = torch.split(t, [2, 1, 1], dim=1)
    # for idx, t in enumerate(list_of_tensors):
    #     print("tensor {}: {}, shape is {}".format(idx, t, t.shape))
    # raises an error: the list must sum to the length of the split dimension (2+1+1=4 != 5)
'''
tensor 1: tensor([[1., 1.],
        [1., 1.]]), shape is torch.Size([2, 2])
tensor 2: tensor([[1.],
        [1.]]), shape is torch.Size([2, 1])
tensor 3: tensor([[1., 1.],
        [1., 1.]]), shape is torch.Size([2, 2])
'''
# ======================================= example 5 =======================================
# torch.index_select

# flag = True
flag = False

if flag:
    # 3x3 tensor of random integers drawn uniformly from [0, 9)
    t = torch.randint(0, 9, size=(3, 3))

    # indices 0 and 2; index must be a torch.long tensor (a float index raises an error)
    idx = torch.tensor([0, 2], dtype=torch.long)

    # select rows 0 and 2 of t along dim 0, then concatenate them along dim 0
    t_select = torch.index_select(t, dim=0, index=idx)
    print("t:\n{}\nt_select:\n{}".format(t, t_select))

'''
t:
tensor([[4, 5, 0],
        [5, 7, 1],
        [2, 5, 8]])
t_select:
tensor([[4, 5, 0],
        [2, 5, 8]])

'''
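
The note about the index dtype is worth demonstrating: index must be an integer tensor, and a float index is rejected. A quick sketch (assumption: the exact error text varies across PyTorch versions):

import torch

t = torch.arange(9).reshape(3, 3)
bad_idx = torch.tensor([0., 2.])   # float index -- not allowed
try:
    torch.index_select(t, dim=0, index=bad_idx)
except RuntimeError as e:
    print("RuntimeError:", e)      # index_select expects a long (integer) index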

# ======================================= example 6 =======================================
# torch.masked_select

# flag = True
flag = False

if flag:

    t = torch.randint(0, 9, size=(3, 3))
    mask = t.le(5)  # le: less than or equal; lt: less than; ge: greater than or equal; gt: greater than
    t_select = torch.masked_select(t, mask)   # always returns a flattened 1-D tensor
    print("t:\n{}\nmask:\n{}\nt_select:\n{} ".format(t, mask, t_select))

'''
t:
tensor([[4, 5, 0],
        [5, 7, 1],
        [2, 5, 8]])
mask:
tensor([[ True,  True,  True],
        [ True, False,  True],
        [ True,  True, False]])
t_select:
tensor([4, 5, 0, 5, 1, 2, 5]) 
'''
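
The comment above lists the whole comparison family. A short sketch of ge/gt/le/lt side by side, each returning an elementwise boolean mask:

import torch

t = torch.arange(4)   # tensor([0, 1, 2, 3])
print(t.ge(2))        # tensor([False, False,  True,  True])   >= 2
print(t.gt(2))        # tensor([False, False, False,  True])   >  2
print(t.le(2))        # tensor([ True,  True,  True, False])   <= 2
print(t.lt(2))        # tensor([ True,  True, False, False])   <  2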

# ======================================= example 7 =======================================
# torch.reshape

# flag = True
flag = False

if flag:
    t = torch.randperm(8)                       # random permutation of 0..7, 8 elements
    t_reshape = torch.reshape(t, (-1, 2, 2))    # -1 is inferred from the other dims: 8 / (2*2) = 2
    print("t:{}\nt_reshape:\n{}".format(t, t_reshape))

    t[0] = 1024
    print("t:{}\nt_reshape:\n{}".format(t, t_reshape))
    print("t.data memory address: {}".format(id(t.data)))
    print("t_reshape.data memory address: {}".format(id(t_reshape.data)))

'''
t:tensor([5, 4, 2, 6, 7, 3, 1, 0])
t_reshape:
tensor([[[5, 4],
         [2, 6]],

        [[7, 3],
         [1, 0]]])
t:tensor([1024,    4,    2,    6,    7,    3,    1,    0])
t_reshape:
tensor([[[1024,    4],
         [   2,    6]],

        [[   7,    3],
         [   1,    0]]])
t.data memory address: 2479583132048
t_reshape.data memory address: 2479583131904

'''
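
Note that the two printed addresses differ even though reshape shares storage with the original tensor, as the t[0] = 1024 experiment proves. The catch is that every access to .data builds a fresh Python wrapper object, so id() does not compare the underlying memory. Tensor.data_ptr() does; a minimal sketch:

import torch

t = torch.randperm(8)
t_reshape = torch.reshape(t, (-1, 2, 2))
print(t.data_ptr() == t_reshape.data_ptr())   # True: same underlying storage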

# ======================================= example 8 =======================================
# torch.transpose

# flag = True
flag = False

if flag:
    # torch.transpose
    t = torch.rand((2, 3, 4))
    t_transpose = torch.transpose(t, dim0=1, dim1=2)    # swap dims 1 and 2; common in image preprocessing
    print("t shape:{}\nt_transpose shape: {}".format(t.shape, t_transpose.shape))

'''
t shape:torch.Size([2, 3, 4])
t_transpose shape: torch.Size([2, 4, 3])
'''

# ======================================= example 9 =======================================
# torch.squeeze

flag = True
# flag = False

if flag:
    t = torch.rand((1, 2, 3, 1))
    t_sq = torch.squeeze(t)      # [1] no dim specified
    t_0 = torch.squeeze(t, dim=0)
    t_1 = torch.squeeze(t, dim=1)
    print(t.shape)
    print(t_sq.shape)
    print(t_0.shape)
    print(t_1.shape)

'''
[1] No dim specified: every axis of length 1 is removed
torch.Size([1, 2, 3, 1])
torch.Size([2, 3])

[2] With a dim, the axis is removed only if its length is 1
torch.Size([2, 3, 1])     <- dim=0 had length 1, so it was removed
torch.Size([1, 2, 3, 1])  <- dim=1 has length 2, so the shape is unchanged
'''

# ======================================= example 10 =======================================
# torch.add

flag = True
# flag = False

if flag:
    t_0 = torch.randn((3, 3))
    t_1 = torch.ones_like(t_0)
    t_add = torch.add(t_0, t_1, alpha=10)   # t_0 + 10 * t_1; the old torch.add(t_0, 10, t_1) form is deprecated

    print("t_0:\n{}\nt_1:\n{}\nt_add_10:\n{}".format(t_0, t_1, t_add))


'''
t_0:
tensor([[ 0.5636,  1.1431,  0.8590],
        [ 0.7056, -0.3406, -1.2720],
        [-1.1948,  0.0250, -0.7627]])
t_1:
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])
t_add_10:
tensor([[10.5636, 11.1431, 10.8590],
        [10.7056,  9.6594,  8.7280],
        [ 8.8052, 10.0250,  9.2373]])
'''

 

3 Linear Regression

import torch
import matplotlib.pyplot as plt
torch.manual_seed(10)

lr = 0.05  # learning rate (revised 2019-10-15)

# create training data
x = torch.rand(20, 1) * 10  # x data (tensor), shape=(20, 1)
y = 2*x + (5 + torch.randn(20, 1))  # y data (tensor), shape=(20, 1)

# initialize the linear regression parameters
w = torch.randn((1), requires_grad=True)
b = torch.zeros((1), requires_grad=True)

for iteration in range(1000):

    # forward pass
    wx = torch.mul(w, x)
    y_pred = torch.add(wx, b)

    # compute the MSE loss
    loss = (0.5 * (y - y_pred) ** 2).mean()

    # backward pass
    loss.backward()

    # update the parameters
    b.data.sub_(lr * b.grad)
    w.data.sub_(lr * w.grad)

    # zero the gradients so they do not accumulate (added 2019-10-15)
    w.grad.zero_()
    b.grad.zero_()

    # plot
    if iteration % 20 == 0:

        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), y_pred.data.numpy(), 'r-', lw=5)
        plt.text(2, 20, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color':  'red'})
        plt.xlim(1.5, 10)
        plt.ylim(8, 28)
        plt.title("Iteration: {}\nw: {} b: {}".format(iteration, w.data.numpy(), b.data.numpy()))
        plt.pause(0.5)

        if loss.data.numpy() < 1:
            break
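
Since the data is generated as y = 2x + 5 + noise, gradient descent should drive w toward 2 and b toward 5. One way to sanity-check the result is the closed-form least-squares solution from the normal equation theta = (X^T X)^(-1) X^T y; a sketch, not part of the original script:

import torch
torch.manual_seed(10)

x = torch.rand(20, 1) * 10
y = 2 * x + (5 + torch.randn(20, 1))

X = torch.cat([x, torch.ones(20, 1)], dim=1)   # design matrix [x, 1]
theta = torch.inverse(X.t() @ X) @ X.t() @ y   # normal equation
print(theta.flatten())   # close to [2., 5.] up to noise: w near 2, b near 5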

4 Computational Graph

import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)

a = torch.add(w, x)     # retain_grad()

# a.retain_grad()
b = torch.add(w, 1)
y = torch.mul(a, b)

#  Set a breakpoint on the next line and debug: Step Into descends into the function
#  to trace the computation, while Step Over simply executes the line
y.backward()
print(w.grad)    # tensor([5.])

# check which tensors are leaf nodes
print("is_leaf:\n", w.is_leaf, x.is_leaf, a.is_leaf, b.is_leaf, y.is_leaf)

# check the gradients
print("gradient:\n", w.grad, x.grad, a.grad, b.grad, y.grad)

'''
is_leaf:
 True True False False False
gradient:
 tensor([5.]) tensor([2.]) None None None
Gradients of non-leaf nodes are freed after backward, which is why they print as None.
'''

# check grad_fn
print("grad_fn:\n", w.grad_fn, x.grad_fn, a.grad_fn, b.grad_fn, y.grad_fn)

'''
None None : leaf nodes created by the user have grad_fn None
 <AddBackward0 object at 0x000002A5C316DF28> 
 <AddBackward0 object at 0x000002A5C316DEB8> 
 <MulBackward0 object at 0x000002A5C3177080>
'''
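
The commented-out a.retain_grad() in the script above is how non-leaf gradients can be kept: calling retain_grad() on an intermediate tensor before backward stores its gradient instead of freeing it. A minimal sketch:

import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)

a = torch.add(w, x)
a.retain_grad()          # ask autograd to keep a's gradient after backward
b = torch.add(w, 1)
y = torch.mul(a, b)

y.backward()
print(a.grad)            # tensor([2.]) -- dy/da = b = w + 1 = 2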

5 autograd

import torch
torch.manual_seed(10)


# ====================================== retain_graph ==============================================
# flag = True
flag = False
if flag:
    w = torch.tensor([1.], requires_grad=True)
    x = torch.tensor([2.], requires_grad=True)

    a = torch.add(w, x)
    b = torch.add(w, 1)
    y = torch.mul(a, b)

    y.backward(retain_graph=True)  # first backward; the leaf gradients are computed, w.grad = tensor([5.])
    # Tensor.backward() simply forwards to torch.autograd.backward();
    # retain_graph=True keeps the graph alive so backward can be called again

    # print(w.grad)
    y.backward()
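
Without retain_graph=True, the graph is freed after the first backward and a second call fails. A sketch of the failure mode (assumption: the exact message varies by version, but it complains about backpropagating through the graph a second time):

import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
y = (w + x) * (w + 1)

y.backward()             # first call succeeds, then the graph is freed
try:
    y.backward()         # no graph left to traverse
except RuntimeError as e:
    print("RuntimeError:", e)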

# ====================================== grad_tensors ==============================================
# grad_tensors sets the weights used to combine multiple gradients

# flag = True
flag = False
if flag:
    w = torch.tensor([1.], requires_grad=True)
    x = torch.tensor([2.], requires_grad=True)

    a = torch.add(w, x)     # retain_grad()
    b = torch.add(w, 1)

    y0 = torch.mul(a, b)    # y0 = (x+w) * (w+1)
    y1 = torch.add(a, b)    # y1 = (x+w) + (w+1)    dy1/dw = 2

    loss = torch.cat([y0, y1], dim=0)       # [y0, y1]
    grad_tensors = torch.tensor([1., 2.])

    loss.backward(gradient=grad_tensors)    # `gradient` is passed to torch.autograd.backward() as grad_tensors

    print(w.grad)
#   tensor([9.]) = 1 * dy0/dw + 2 * dy1/dw = 1 * 5 + 2 * 2

# ====================================== autograd.grad ==============================================
# flag = True
flag = False
if flag:

    x = torch.tensor([3.], requires_grad=True)
    y = torch.pow(x, 2)     # y = x**2

    # create_graph=True builds a graph for the derivative itself, so the derivative can be differentiated again
    grad_1 = torch.autograd.grad(y, x, create_graph=True)   # grad_1 = dy/dx = 2x = 2 * 3 = 6
    print(grad_1)

    grad_2 = torch.autograd.grad(grad_1[0], x)              # grad_2 = d(dy/dx)/dx = d(2x)/dx = 2
    print(grad_2)

'''
(tensor([6.], grad_fn=<MulBackward0>),)
(tensor([2.]),)
'''

# ====================================== tips: 1 ==============================================
# [1] autograd does not clear gradients automatically; they accumulate unless zeroed
# flag = True
flag = False
if flag:

    w = torch.tensor([1.], requires_grad=True)
    x = torch.tensor([2.], requires_grad=True)

    for i in range(4):
        a = torch.add(w, x)
        b = torch.add(w, 1)
        y = torch.mul(a, b)

        y.backward()
        print(w.grad)
        w.grad.zero_()
'''
[1] without w.grad.zero_(): gradients accumulate across iterations
tensor([5.])
tensor([10.])
tensor([15.])
tensor([20.])

[2] with w.grad.zero_() (as in the code above): the gradient is recomputed fresh each time
tensor([5.])
tensor([5.])
tensor([5.])
tensor([5.])
'''
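
This accumulation is exactly why training loops clear gradients every step; with the built-in optimizers the idiomatic form is optimizer.zero_grad(). A minimal sketch (the toy loss y = 2w is only for illustration):

import torch

w = torch.tensor([1.], requires_grad=True)
optimizer = torch.optim.SGD([w], lr=0.1)

for i in range(4):
    y = 2 * w
    optimizer.zero_grad()   # clear accumulated gradients first
    y.backward()
    optimizer.step()
    print(w.grad)           # tensor([2.]) every time, not 2, 4, 6, 8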


# ====================================== tips: 2 ==============================================
# [2] Tensors computed from leaf nodes that require grad get requires_grad=True automatically
# flag = True
flag = False
if flag:

    w = torch.tensor([1.], requires_grad=True)
    x = torch.tensor([2.], requires_grad=True)

    a = torch.add(w, x)
    b = torch.add(w, 1)
    y = torch.mul(a, b)

    print(a.requires_grad, b.requires_grad, y.requires_grad)


# ====================================== tips: 3 ==============================================
# [3] Leaf nodes must not be modified in place (an in-place op keeps the same memory address).
# To get w's gradient we need dy/da = b = w + 1 and da/dw = 1. The forward pass records
# the address of w, and backward reads the data at that address to compute the gradients;
# changing that data in place before backward would silently corrupt them, so PyTorch forbids it.

flag = True
# flag = False
if flag:

    a = torch.ones((1, ))
    print(id(a), a)

    a = a + torch.ones((1, ))
    print(id(a), a)

    a += torch.ones((1, ))
    print(id(a), a)

'''
2023834146640 tensor([1.])
2023834146928 tensor([2.])
2023834146928 tensor([3.])
'''

# flag = True
flag = False
if flag:

    w = torch.tensor([1.], requires_grad=True)
    x = torch.tensor([2.], requires_grad=True)

    a = torch.add(w, x)
    b = torch.add(w, 1)
    y = torch.mul(a, b)

    w.add_(1)
    # raises "RuntimeError: a leaf Variable that requires grad is being used in an
    # in-place operation" -- the in-place op itself is rejected, so backward never runs

    y.backward()

 
