How PyTorch computes derivatives: the Variable autograd mechanism

Automatic differentiation is one of PyTorch's most important features: it frees you from computing derivatives by hand and saves time when building models. This post walks through the basic usage of the autograd mechanism. (Note: since PyTorch 0.4, Variable has been merged into Tensor, so requires_grad can be set on a tensor directly; the Variable-based code below still runs, but Variable is now just a thin wrapper.)

# Automatic differentiation

import torch

from torch.autograd import Variable

# 1. A simple derivative (differentiating a scalar output)

x = Variable(torch.Tensor([2]),requires_grad=True)

y = (x + 2) ** 2 + 3

print(y)

y.backward()

print(x.grad)
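The analytic derivative is dy/dx = 2(x + 2), which is 8 at x = 2, matching x.grad above. For reference, the same example can be written without the Variable wrapper; a minimal sketch, assuming PyTorch 0.4 or later:

import torch

x = torch.tensor([2.0], requires_grad=True)  # no Variable needed on 0.4+
y = (x + 2) ** 2 + 3  # a one-element output, so backward() needs no explicit gradient argument
y.backward()
print(x.grad)  # tensor([8.])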

# Differentiating with respect to matrices

x1 = Variable(torch.randn(10,20),requires_grad=True)

y1 = Variable(torch.randn(10,1),requires_grad=True)

W = Variable(torch.randn(20,1),requires_grad=True)

J = torch.mean(y1 - torch.matmul(x1, W))  # matmul performs matrix multiplication

J.backward()

print(x1.grad)

print(y1.grad)

print(W.grad)

Output (the first two lines are y and x.grad from the scalar example above):

tensor([19.], grad_fn=<AddBackward0>)
tensor([8.])
tensor([[-0.1636,  0.0904,  0.0446, -0.1052, -0.2323,  0.0129, -0.1532,  0.0544,
          0.0231, -0.0993, -0.0387, -0.1762,  0.0477,  0.1552,  0.0493,  0.0144,
         -0.1581,  0.1986, -0.0226, -0.0454],
        ...])  # x1.grad, shape (10, 20); all 10 rows are identical
tensor([[0.1000],
        ...
        [0.1000]])  # y1.grad, shape (10, 1); every entry is 0.1
tensor([[ 0.0224], [ 0.0187], [-0.2078], [ 0.5092], [ 0.0677],
        [ 0.3497], [-0.4575], [-0.5480], [ 0.4228], [-0.0869],
        [ 0.2876], [-0.1714], [ 0.0985], [-0.1364], [-0.1502],
        [-0.1372], [-0.0999], [-0.0006], [-0.0544], [-0.0678]])  # W.grad, shape (20, 1), shown compacted
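Since J is a mean over 10 scalar entries, these gradients have a simple closed form: every row of x1.grad is -Wᵀ/10, every entry of y1.grad is 1/10, and W.grad is minus the column-means of x1. A quick sanity check (a sketch, assuming a recent PyTorch and that the variables from the snippet above are still in scope):

# each row of dJ/dx1 equals -W^T / 10
print(torch.allclose(x1.grad, -W.data.t() / 10))  # True
# dJ/dy1 is 1/10 for every entry
print(torch.allclose(y1.grad, torch.full((10, 1), 0.1)))  # True
# dJ/dW is minus the column-wise mean of x1
print(torch.allclose(W.grad, -x1.data.mean(dim=0, keepdim=True).t()))  # True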

# Autograd in more complex cases: the autograd mechanism for multi-dimensional arrays

import torch

from torch.autograd import Variable

x = Variable(torch.FloatTensor([3]),requires_grad=True)

y = x ** 2 + x * 2 + 3

y.backward(retain_graph=True)  # keep the computation graph so backward can be called again

print(x.grad)

y.backward()  # this time the graph is freed after the call

print(x.grad)  # gradients accumulate: the first backward's 8 plus the second's 8

Output:

tensor([8.])
tensor([16.])
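Because gradients accumulate across backward calls, training code resets them before each pass, either with optimizer.zero_grad() or by zeroing .grad directly. A minimal sketch of the latter, assuming a recent PyTorch:

import torch

x = torch.tensor([3.0], requires_grad=True)
y = x ** 2 + x * 2 + 3
y.backward(retain_graph=True)
print(x.grad)   # tensor([8.])
x.grad.zero_()  # reset the accumulated gradient in place
y.backward()
print(x.grad)   # tensor([8.]) again, not 16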

# Exercise: differentiating a vector with respect to a vector

import torch

from torch.autograd import Variable

x = Variable(torch.Tensor([2, 3]), requires_grad=True)

k = Variable(torch.zeros_like(x))

k[0] = x[0]**2 + 3 * x[1]

k[1] = 2*x[0] + x[1] ** 2

print(k)

j = torch.zeros(2,2)

k.backward(torch.FloatTensor([1, 0]), retain_graph=True)  # v = [1, 0]: backward computes vᵀ·J, i.e. the gradient of k[0]

j[0] = x.grad.data

x.grad.zero_()  # clear the accumulated gradient before the next backward

k.backward(torch.FloatTensor([0, 1]), retain_graph=True)  # v = [0, 1]: the gradient of k[1]

j[1] = x.grad.data

print(j)

Output:

tensor([13., 13.], grad_fn=<CopySlices>)

tensor([[4., 3.],

[2., 6.]])
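The result matches the analytic Jacobian: ∂k[0]/∂x = (2·x[0], 3) = (4, 3) and ∂k[1]/∂x = (2, 2·x[1]) = (2, 6) at x = (2, 3). On PyTorch 1.5+, the same Jacobian can be obtained in one call with torch.autograd.functional.jacobian; a sketch, assuming that version is available:

import torch
from torch.autograd.functional import jacobian

def k_fn(x):
    # same vector-valued function as above
    return torch.stack([x[0] ** 2 + 3 * x[1], 2 * x[0] + x[1] ** 2])

print(jacobian(k_fn, torch.tensor([2.0, 3.0])))
# tensor([[4., 3.],
#         [2., 6.]])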
