# import torch
# x=torch.arange(4.0)
# x.requires_grad_(True)
# # x.grad.zero_()
# y=x*x
# u=y.detach()
# z=u*x
# z.sum().backward()
# print(x.grad==u)
#
# x.grad.zero_()
# y.sum().backward()
# print(x.grad==2*x)
#
#
#
# def f(a):
# b=a*2
# while b.norm()<1000:
# b=b*2
# if b.sum()>0:
# c=b
# else:
# c=100*b
# return c
#
# a=torch.randn(size=(),requires_grad=True)
# d=f(a)
# d.backward()
# print(a.grad==d/a)  # 没搞懂? (not yet understood) — f is piecewise linear in a, so d = k*a and a.grad = k = d/a
import torch
from torch.distributions import multinomial
from d2l import torch as d2l
# Simulate rolling a fair six-sided die with a multinomial distribution
# and estimate face probabilities from observed relative frequencies.
fair_probs = torch.ones([6]) / 6  # uniform probability over the 6 faces
print(fair_probs)

# Print the draws: in a script a bare expression statement discards its
# value, so the original `.sample()` calls here were silent no-ops.
print(multinomial.Multinomial(1, fair_probs).sample())   # one roll
print(multinomial.Multinomial(10, fair_probs).sample())  # ten rolls at once

counts = multinomial.Multinomial(1000, fair_probs).sample()
print(counts)
print(counts / 1000)  # relative frequency as the probability estimate

# 500 groups of 10 rolls each -> shape (500, 6); cumulative counts over
# the group axis show the estimates converging toward 1/6.
counts = multinomial.Multinomial(10, fair_probs).sample((500,))
cum_counts = counts.cumsum(dim=0)
# estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)
# d2l.set_figsize((6, 4.5))
# for i in range(6):
# d2l.plt.plot(estimates[:, i].numpy(),
# label=("P(die=" + str(i + 1) + ")"))
# d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
# d2l.plt.gca().set_xlabel('Groups of experiments')
# d2l.plt.gca().set_ylabel('Estimated probability')
# d2l.plt.legend();
# d2l.plt.show()
# 动手学深度学习 2.2 数据预处理 (Dive into Deep Learning 2.2: data preprocessing)
# 于 2024-05-24 20:00:51 首次发布 (first published 2024-05-24 20:00:51)