Python numpy.float16 方法代码示例

本文整理汇总了Python中numpy.float16方法的典型用法代码示例。如果您正苦于以下问题:Python numpy.float16方法的具体用法?Python numpy.float16怎么用?Python numpy.float16使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块numpy的用法示例。

在下文中一共展示了numpy.float16方法的29个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: test_ndarray_elementwise

点赞 6

# 需要导入模块: import numpy [as 别名]

# 或者: from numpy import float16 [as 别名]

def test_ndarray_elementwise():
    """Compare elementwise NDArray arithmetic against numpy on random inputs.

    Repeats the comparison over several trials and over tensor ranks 1..3,
    exercising both integer and floating dtypes where each op is defined.
    """
    np.random.seed(0)  # deterministic random data across runs
    n_trials = 10
    max_ndim = 4
    every_dtype = [np.float32, np.float64, np.float16, np.uint8, np.int32]
    float_dtypes = [np.float32, np.float64, np.float16]
    for _ in range(n_trials):
        for ndim in range(1, max_ndim):
            # Binary arithmetic over all supported dtypes.
            check_with_uniform(lambda a, b: a + b, 2, ndim, type_list=every_dtype)
            check_with_uniform(lambda a, b: a - b, 2, ndim, type_list=every_dtype)
            check_with_uniform(lambda a, b: a * b, 2, ndim, type_list=every_dtype)
            # Division is checked on float dtypes only ...
            check_with_uniform(lambda a, b: a / b, 2, ndim, type_list=float_dtypes)
            # ... and on all dtypes with rmin=1 (presumably to keep operands
            # away from zero for the integer case — TODO confirm helper docs).
            check_with_uniform(lambda a, b: a / b, 2, ndim, rmin=1, type_list=every_dtype)
            # Unary ops compared against their numpy counterparts; rmin=0
            # keeps sqrt's domain non-negative.
            check_with_uniform(mx.nd.sqrt, 1, ndim, np.sqrt, rmin=0)
            check_with_uniform(mx.nd.square, 1, ndim, np.square, rmin=0)
            check_with_uniform(lambda a: mx.nd.norm(a).asscalar(), 1, ndim, np.linalg.norm)

开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:18,

示例2: create_state

点赞 6

# 需要导入模块: import numpy [as 别名]

# 或者: from numpy import float16 [as 别名]

def create_state(self, index, weight):
    """Create per-weight optimizer state.

    Returns a ``(momentum, weight_master_copy)`` pair when multi-precision
    is active for a float16 weight, otherwise just the momentum NDArray
    (or ``None`` when momentum is disabled).
    """
    is_fp16 = weight.dtype == numpy.float16

    if self.multi_precision and is_fp16:
        # Keep a float32 shadow of the fp16 weight; momentum (if any) is
        # accumulated in float32 as well.
        master_copy = array(weight, ctx=weight.context, dtype=numpy.float32)
        mom = None
        if self.momentum != 0.0:
            mom = zeros(weight.shape, weight.context, dtype=numpy.float32,
                        stype=weight.stype)
        return (mom, master_copy)

    if is_fp16 and not self.multi_precision:
        # fp16 accumulation without a master copy is numerically risky.
        warnings.warn("Accumulating with float16 in optimizer can lead to "
                      "poor accuracy or slow convergence. "
                      "Consider using multi_precision=True option of the "
                      "SGD optimizer")

    mom = None
    if self.momentum != 0.0:
        mom = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)
    return mom

开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:19,

示例3: create_state

点赞 6

# 需要导入模块: import numpy [as 别名]

# 或者: from numpy import float16 [as 别名]

def create_state(self, index, weight):
    """Create additional optimizer state: momentum.

    Parameters
    ----------
    weight : NDArray
        The weight data.

    Returns
    -------
    ``(momentum, weight_master_copy)`` when multi-precision applies to a
    float16 weight, otherwise the momentum NDArray (``None`` if momentum
    is disabled).
    """
    if self.multi_precision and weight.dtype == np.float16:
        # Multi-precision path: momentum and master weight both in float32.
        mom = None
        if self.momentum != 0.0:
            mom = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)
        master_copy = array(weight, ctx=weight.context, dtype=np.float32)
        return (mom, master_copy)

    # Plain path: momentum matches the weight's own dtype.
    mom = None
    if self.momentum != 0.0:
        mom = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)
    return mom

开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:23,

示例4: test_pooling_with_type2

点赞 6

# 需要导入模块: import numpy [as 别名]

# 或者: from numpy import float16 [as 别名]

def test_pooling_with_type2():
    """Cross-check Pooling results across devices and input dtypes."""
    data_shape = (10, 2, 10, 10)
    # GPU is exercised with fp64/fp32/fp16; CPU with fp64/fp32 only.
    device_dtypes = [(mx.gpu(0), np.float64),
                     (mx.gpu(0), np.float32),
                     (mx.gpu(0), np.float16),
                     (mx.cpu(0), np.float64),
                     (mx.cpu(0), np.float32)]
    ctx_list = [{'ctx': dev, 'pool_data': data_shape, 'type_dict': {'pool_data': dt}}
                for dev, dt in device_dtypes]

    # Max pooling: rand_type=np.float16 presumably draws the random input in
    # fp16 so all contexts see identical data — TODO confirm helper docs.
    pooling = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
    check_consistency(pooling, ctx_list, rand_type=np.float16)

    pooling = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
    check_consistency(pooling, ctx_list)

    pooling = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
    check_consistency(pooling, ctx_list, rand_type=np.float16)

    pooling = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
    check_consistency(pooling, ctx_list)

开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:20,

示例5: test_elementwisesum_with_type

点赞 6

# 需要导入模块: import numpy [as 别名]

# 或者: from numpy import float16 [as 别名]

def test_elementwisesum_with_type():
    """Cross-check ElementWiseSum across devices/dtypes for 1..5 inputs."""
    # GPU supports fp16 here; CPU is checked with fp64/fp32 only.
    per_device_dtypes = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
                         [mx.cpu(0), [np.float64, np.float32]] ]
    for num_args in range(1, 6):
        # One (2, 10) input shape per argument, keyed ews_arg0..ews_argN-1.
        arg_shapes = {'ews_arg' + str(i): (2, 10) for i in range(num_args)}
        sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
        ctx_list = []
        for dev, dtypes in per_device_dtypes:
            for dtype in dtypes:
                # All arguments share the same dtype for a given context entry.
                entry = {'ctx': dev}
                entry.update(arg_shapes)
                entry['type_dict'] = {'ews_arg' + str(i): dtype
                                      for i in range(num_args)}
                ctx_list.append(entry)
        check_consistency(sym, ctx_list)

开发者ID:awslabs,项目名称:dynamic-training-with-apache-mxnet-on-aws,代码行数:21,

示例6: test_embedding_with_type

点赞 6

# 需要导入模块: import numpy [as 别名]

# 或者: from numpy import float16 [as 别名]

def test_embedding_with_type():

def test_embedding_helper(data_types, weight_types, low_pad, high_pad):

NVD = [[20, 10, 20], [200, 10, 300]]

for N, V, D in NVD:

sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)

ctx_list = []

for data_type in data_types:

for weight_type in weight_types:

ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),

'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})

ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),

'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})

arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}

check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},

arg_params=arg_params)

data_types = [np.float16, np.float32, np.float64, np.int32]

weight_types = [np.float16, np.float32, np.float64]

test_embedding_helper(data_types, weight_types, 5, 5)

data_types = [np.uint8]

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值