# 练习手稿,记录&分享 (practice notes; the leading "#" is required — without it this line is a SyntaxError)
# -------------------------------------------------------------------------------
# Description: 创建 NDArray、NDAarry 运算、广播机制
# Reference:
# Author: Sophia
# Date: 2021/1/28
# -------------------------------------------------------------------------------
from mxnet import nd
# 1、创建 NDArray
# x = nd.arange(12)
# print(x)
# # [ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11.]
# # <NDArray 12 @cpu(0)>
# print(x.shape) # (12,)
# print(x.size) # 12
# x = x.reshape((3, 4)) # reshape((-1, 4)) or reshape((3, -1))
# print(x)
# # [[ 0. 1. 2. 3.]
# # [ 4. 5. 6. 7.]
# # [ 8. 9. 10. 11.]]
# # <NDArray 3x4 @cpu(0)>
# print(nd.zeros((2, 3, 4))) # 全零张量
# print(nd.ones((3, 4))) # 全1张量
# print(nd.array([[2, 1, 4, 3], [4, 3, 2, 1], [1, 3, 4, 2]])) # 用list初始化
# print(nd.random.normal(0, 1, shape=(3, 4))) # 随机生成3*4的数组,每个元素随机采样与均值为0,标准差为1的正态分布
# 2、NDAarry 运算
# x = nd.arange(12).reshape((3, 4))
# y = nd.array([[2, 1, 4, 3], [4, 3, 2, 1], [1, 3, 4, 2]])
# print(x + y) # 按元素加法
# print(x * y) # 按元素乘法
# print(x / y) # 按元素除法
# print(y.exp()) # 按元素做指数运算 2.7182817
# print(nd.dot(x, y.T)) # 矩阵乘法
# print(nd.concat(x, y, dim=0))
# print(nd.concat(x, y, dim=1))
# print(x == y) # 得到一个数组
# print(x.sum())
# z = nd.array([1.5])
# print(z.asscalar()) # asscalar 函数将向量X转换成标量,且向量X只能为一维含单个元素的向量
# b = nd.array([2, 3])
# print(nd.norm(b)) # b的每个元素平方和求根号
# print(b.norm()) # b的每个元素平方和求根号
# 3、广播机制
# A = nd.arange(3).reshape((3, 1))
# B = nd.arange(2).reshape((1, 2))
# print(A, B)
# print(A + B) # 将A,B分别通过复制扩展成3*2的矩阵,然后按元素相加
# A = nd.arange(12).reshape((3, 4))
# B = nd.arange(12).reshape((6, 2))
# print(A + B) # 报错:广播要求两个数组从末尾对齐的每一维要么相等、要么为 1;(3,4) 与 (6,2) 两个维度都不满足,故无法广播
# 4、索引
# x = nd.arange(12).reshape((3, 4))
# print(x[1:3]) # 1、2行
# print(x[1, 2]) # or x[1][2]
# x[1:2, :] = 12 # or x[1, :] = 12 把1行全部变为12
# print(x)
# 5、替换操作
# x = nd.ones((3, 4))
# y = nd.ones((3, 4))
# before = id(y)
# y = y + x # y 在新指定的内存中
# print(id(y) == before) # False
#
# z = nd.zeros_like(x)
# before = id(z)
# z[:] = x + y # 切片的方式
# print(id(z) == before) # True 过程中还是开辟了临时空间,只是最后复制回去了
#
# nd.elemwise_add(x, y, out=z) # 可以真正地避免新的内存开销
# print(id(z) == before) # True
#
# before = id(x)
# x += y # 复合赋值操作符(增量赋值)也可避免额外的内存开销
# print(id(x) == before) # True
#
# before = id(x)
# x = x + y
# print(id(x) == before) # False
# 6、和 Numpy 相互转换
# import numpy as np
# x = np.ones((2, 3))
# y = nd.array(x)
# z = y.asnumpy()
# print(x, y, z)
# 7、自动求梯度
# from mxnet import autograd, nd
# x = nd.arange(4).reshape((4, 1))
# x.attach_grad() # 申请梯度所需要的内存
# with autograd.record(): # 存储函数信息
# y = 2 * nd.dot(x.T, x)
# y = y * 2
# y.backward() # 求梯度
# print(x.grad) # 输出梯度信息
# print(x.grad == 8 * x) # 验证求出的梯度信息是否等于实际梯度
#
# print(autograd.is_training())
# with autograd.record():
# print(autograd.is_training()) # autograd在record情况下会从预测模式变成训练模式
#8、对控制流求梯度(自动求梯度很强大,对控制流也有效)
# from mxnet import autograd, nd
# def f(a):
# b = a * 2
# while b.norm().asscalar() < 1000:
# b = b * 2
# if b.sum().asscalar() > 0:
# c = b
# else:
# c = 100 * b
# return c
#
# a = nd.random.normal(shape=1) # 从正态分布采样
# print(a)
# a.attach_grad()
# with autograd.record():
# c = f(a)
# c.backward()
# print(a.grad == c / a)
#9、查找模块里所有的函数与类
# 9. List every function/class in a module with dir().
#    dir() on the *module* (nd.random) is what actually enumerates its members;
#    nd.random_normal is only a deprecated legacy alias of nd.random.normal.
print(dir(nd.random))
print(dir(nd.random.normal))
# 10. Inspect a specific function/class with help().
# help(nd.ones_like)
# NOTE: help() prints the documentation itself and returns None, so wrapping it
# in print() would emit a spurious "None" line after the doc text.
help(nd.random.normal)