# MXNet NDArray tutorial/demo script

import mxnet as mx
import pickle as pkl
import numpy as np
import math

# create a 1-dimensional array with a python list
def demo1():
	"""Create NDArrays from Python lists and from a numpy array, and
	print their basic attributes (shape, dtype, size, context)."""
	vec = mx.nd.array([1, 2, 3])
	mat = mx.nd.array([[1, 2, 3], [2, 3, 4]])

	# Attributes of the 1-D array, then the 2-D array, in the same order.
	for attr in (vec.shape, vec.dtype, vec.size, vec.context):
		print(attr)

	for attr in (mat.shape, mat.dtype, mat.size, mat.context):
		print(attr)

	print("##########################33")
	src = np.arange(15).reshape(3, 5)
	print(np.arange(15))
	print(src)
	converted = mx.nd.array(src)	# default float32
	print(converted)
	
def demo2():
	"""Show dtype selection at creation time and the basic NDArray
	factory functions (zeros/ones/full/empty/arange)."""
	f32 = mx.nd.array([1, 2, 3])                     # float32 is the default
	i32 = mx.nd.array([1, 2, 3], dtype=np.int32)     # explicit int32
	f16 = mx.nd.array([1.2, 2.3], dtype=np.float16)  # explicit 16-bit float
	print(f32.dtype, i32.dtype, f16.dtype)

	print("***********************************")
	zeros = mx.nd.zeros((2, 3))    # 2x3, all zeros
	ones = mx.nd.ones((2, 3))      # same shape, all ones
	sevens = mx.nd.full((2, 3), 7)  # same shape, every element 7
	# Uninitialized: contents depend on whatever was in memory.
	uninit = mx.nd.empty((2, 3))
	ramp = mx.nd.arange(18).reshape((3, 2, 3))

	# Print each array under its original label, in the original order.
	for label, arr in (("a: ", zeros), ("b: ", ones), ("c: ", sevens),
			("d: ", uninit), ("e: ", ramp)):
		print(label)
		print(arr.asnumpy())
	
def demo3():
	"""Show elementwise arithmetic, transcendental ops, matrix product,
	and an in-place augmented assignment."""
	x = mx.nd.ones((2, 3))
	y = mx.nd.ones((2, 3))

	total = x + y            # elementwise addition
	print("c:")
	print(total.asnumpy())

	negated = -total         # elementwise negation
	print("d:")
	print(negated.asnumpy())

	# Elementwise square, then sine, then transpose.
	trig = mx.nd.sin(total ** 2).T
	print("e:")
	print(trig.asnumpy())

	# Elementwise maximum.
	peak = mx.nd.maximum(x, total)
	print(peak.asnumpy())

	print("###################################33333")
	square = mx.nd.arange(4).reshape((2, 2))
	print(square.asnumpy())
	elemwise = square * square            # elementwise (Hadamard) product
	matprod = mx.nd.dot(square, square)   # true matrix product
	print("b: %s, \n c: %s" % (elemwise.asnumpy(), matprod.asnumpy()))

	#############################
	print("####################################3")
	base = mx.nd.ones((2, 2))
	acc = mx.nd.ones(base.shape)
	acc += base              # in-place elementwise add
	print(acc.asnumpy())

def demo4():
	"""Show indexing/slicing, reshape, concat, reductions, broadcasting,
	and the difference between aliasing and copying NDArrays."""
	# Indexing and slicing.
	arr = mx.nd.array(np.arange(6).reshape(3, 2))
	print(arr.asnumpy())
	arr[1:2] = 1             # assign a scalar into a row slice
	print(arr[:].asnumpy())
	col = mx.nd.slice_axis(arr, axis=1, begin=1, end=2)
	print(col.asnumpy())

	# Reshaping.
	flat = mx.nd.array(np.arange(24))
	cube = flat.reshape((2, 3, 4))
	print(cube.asnumpy())

	print("##################################")
	left = mx.nd.ones((2, 3))
	right = mx.nd.ones((2, 3)) * 2
	joined = mx.nd.concat(left, right)
	print(joined.asnumpy())

	# Reductions.
	ones2x3 = mx.nd.ones((2, 3))
	grand_total = mx.nd.sum(ones2x3)       # sum over all elements
	print(grand_total.asnumpy())
	row_sums = mx.nd.sum_axis(ones2x3, axis=1)  # sum along each row
	print(row_sums.asnumpy())

	# Broadcasting.
	col_vec = mx.nd.array(np.arange(6).reshape(6, 1))
	wide = col_vec.broadcast_to((6, 4))
	print(wide.asnumpy())

	shaped = col_vec.reshape((2, 1, 1, 3))
	grown = shaped.broadcast_to((2, 2, 2, 3))
	print(grown.asnumpy())

	# Copying: plain assignment aliases, copy() allocates new memory.
	src = mx.nd.ones((2, 2))
	alias = src
	print("b is a:", alias is src)
	alias = src.copy()
	print("b is a:", alias is src)

	print("##################")
	dst = mx.nd.ones(src.shape)
	view = dst
	view[:] = src            # writes into dst's existing memory
	target = dst
	src.copyto(target)       # same effect: copy into an existing array
	# Both `view is dst` and `target is dst` are True.
	
def demo5():
	"""Show device contexts: the CPU default, a GPU context block,
	explicit placement at creation, and cross-device copies."""
	gpu_device = mx.gpu()  # Change this to mx.cpu() in absence of GPUs.

	# mx.cpu() is the default context.
	x = mx.nd.ones((100, 100))
	y = mx.nd.ones((100, 100))
	z = x + y
	print(x.context)
	print(z)

	# Repeat the computation under the GPU context.
	with mx.Context(gpu_device):
		x = mx.nd.ones((100, 100))
		y = mx.nd.ones((100, 100))
		z = x + y
		print(x.context)
		print(y.context)
		print(z.context)
		print(z)

	# The context can also be given explicitly at creation time.
	x = mx.nd.ones((100, 100), gpu_device)
	print(x.context)

	# MXNet computations generally require both operands on the same
	# device; data can be moved between devices in several ways.
	cpu_arr = mx.nd.ones((100, 100), mx.cpu())
	gpu_arr = mx.nd.ones((100, 100), gpu_device)
	gpu_dst = mx.nd.ones((100, 100), gpu_device)
	cpu_arr.copyto(gpu_dst)  # copy from CPU to GPU
	_ = gpu_arr + gpu_dst
	_ = gpu_arr.as_in_context(gpu_dst.context) + gpu_dst  # same as above
	
# Serialization to/from a (distributed) file system
def demo6():
	"""Serialize NDArrays: round-trip through pickle, and through
	mx.nd.save/load for both a list and a dict of arrays.

	Writes tmp.pickle, temp.ndarray and tempDic.ndarray in the current
	working directory.
	"""
	a = mx.nd.ones((2, 3))
	# Pack the array with pickle, then dump the packed bytes to disk.
	data = pkl.dumps(a)
	# Fix: use context managers so the file handles are closed
	# deterministically (the original relied on garbage collection).
	with open('tmp.pickle', 'wb') as f:
		pkl.dump(data, f)

	# Load from disk, then unpack back into an NDArray.
	with open('tmp.pickle', 'rb') as f:
		data = pkl.load(f)
	b = pkl.loads(data)
	print(b.asnumpy())

	a = mx.nd.ones((2, 3))
	b = mx.nd.ones((5, 6))
	mx.nd.save("temp.ndarray", [a, b])  # save a list of arrays
	c = mx.nd.load("temp.ndarray")
	print(c)
	print(len(c))
	print(c[0])
	print(c[1])

	print("####################################")
	# A dict of arrays round-trips the same way, keyed by name.
	d = {'a': a, 'b': b}
	mx.nd.save("tempDic.ndarray", d)
	c = mx.nd.load("tempDic.ndarray")
	print(c)
	print(len(c))
	print()
	print(c['a'])
	print(c['b'])
	
def demo7():
	"""Build a tiny symbolic graph: two named placeholders and their sum."""
	lhs = mx.sym.Variable('a')
	rhs = mx.sym.Variable('b')
	combined = lhs + rhs
	print(lhs, rhs, combined)
	
def main():
	"""Entry point: run the currently enabled demo."""
	# Re-enable any of the other demos as needed:
	# demo1(); demo2(); demo3(); demo4(); demo5(); demo6()
	demo7()


if __name__ == '__main__':
	main()

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值