paddle复现pytorch踩坑(二):paddleAPI对照表(百度论文复现营活动提供)

百度提供了paddlepaddle对应pytorch的API对照表
环境:paddlepaddle1.8
百度论文复现营活动:论文复现营

Pytorch的API名称Paddle的API名称
torch.absfluid.layers.abs(x, name=None)
torch.acosfluid.layers.acos(x, name=None)
torch.addfluid.layers.elementwise_add(x, y, axis=-1, act=None, name=None)
torch.allclosefluid.layers.allclose
torch.arangefluid.layers.range(start, end, step, dtype)
torch.argmaxfluid.layers.argmax
torch.argminfluid.layers.argmin
torch.argsortfluid.layers.argsort(input, axis=-1, descending=False, name=None)
torch.as_stridedfluid.layers.strided_slice(input, axes, starts, ends, strides)
torch.asinfluid.layers.asin(x, name=None)
torch.atanfluid.layers.atan(x, name=None)
torch.catfluid.layers.concat(input, axis=0, name=None)
torch.ceilfluid.layers.ceil
torch.chunkfluid.layers.unstack(x, axis=0, num=None)
torch.cosfluid.layers.cos
torch.cumsumfluid.layers.cumsum(x, axis=None, exclusive=None, reverse=None)
torch.diagfluid.layers.diag(diagonal)
torch.diag_embedfluid.layers.diag_embed
torch.divfluid.layers.elementwise_div(x, y, axis=-1, act=None, name=None)
torch.eqfluid.layers.equal(x, y, cond=None)
torch.equalfluid.layers.elementwise_equal(x, y, name=None)
torch.equalfluid.layers.equal(x, y, cond=None)
torch.erffluid.layers.erf(x)
torch.expfluid.layers.exp(x, name=None)
torch.eyefluid.layers.eye(num_rows, num_columns=None, batch_shape=None, dtype=‘float32’)
torch.flattenfluid.layers.flatten
torch.flipfluid.layers.flip
torch.floorfluid.layers.floor(x, name=None)
torch.fmodfluid.layers.elementwise_mod(x, y, axis=-1, act=None, name=None)
torch.from_numpyfluid.dygraph.to_variable(value, block=None, name=None)
torch.fullfluid.layers.full(shape, fill_value, out=None, dtype=None, device=None, stop_gradient=True, name=None)
torch.full_likefluid.layers.full_like
torch.gatherfluid.layers.gather(input, axis, index, out=None, sparse_grad=False, name=None)
torch.gefluid.layers.greater_equal
torch.gtfluid.layers.greater_than
torch.lefluid.layers.less_equal
torch.linspacefluid.layers.linspace(start, stop, num, dtype)
torch.logfluid.layers.log(x, name=None)
torch.logical_notfluid.layers.logical_not(x, out=None, name=None)
torch.logical_xorfluid.layers.logical_xor(x, y, out=None, name=None)
torch.ltfluid.layers.less_than
torch.masked_selectfluid.layers.masked_select
torch.matmulfluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None)
torch.maxfluid.layers.max(input, axis=None, keepdim=False, out=None, name=None) -> (Tensor, LongTensor)
torch.meanfluid.layers.reduce_mean(input, dim=None, keep_dim=False, name=None)
torch.minfluid.layers.min
torch.mmfluid.layers.mm
torch.mulfluid.layers.mul
torch.mvfluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None)
torch.nefluid.layers.not_equal
torch.nn.AdaptiveAvgPool2dfluid.layers.adaptive_pool2d
torch.nn.AdaptiveAvgPool3dfluid.layers.adaptive_pool3d
torch.nn.AdaptiveMaxPool2dfluid.layers.adaptive_pool2d
torch.nn.AdaptiveMaxPool3dfluid.layers.adaptive_pool3d
torch.nn.add_modulefluid.dygraph.Layer.add_sublayer(name, sublayer)
torch.nn.appendfluid.dygraph.LayerList.append(sublayer)
torch.nn.appendfluid.dygraph.ParameterList.append(parameter)
torch.nn.AvgPool2dfluid.layers.pool2d(pool_type为’avg’)
torch.nn.AvgPool3dfluid.layers.pool3d(pool_type为’avg’)
torch.nn.BatchNorm1dfluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout=‘NCHW’, in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False)
torch.nn.BatchNorm2dfluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout=‘NCHW’, in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False)
torch.nn.BatchNorm3dfluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout=‘NCHW’, in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False)
torch.nn.Bilinearfluid.layers.bilinear_tensor_product(x, y, size, act=None, name=None, param_attr=None, bias_attr=None)
torch.nn.ConstantPad1dfluid.layers.pad
torch.nn.ConstantPad2dfluid.layers.pad / fluid.layers.pad2d
torch.nn.ConstantPad3dfluid.layers.pad
torch.nn.Conv2dfluid.dygraph.Conv2D
torch.nn.Conv3dfluid.dygraph.Conv3D
torch.nn.ConvTranspose2dfluid.layers.conv2d_transpose
torch.nn.ConvTranspose3dfluid.layers.conv3d_transpose
torch.nn.doublefluid.layers.cast(x, dtype)
torch.nn.Dropoutfluid.layers.dropout(x, dropout_prob, is_test=False, seed=None, name=None, dropout_implementation=‘downgrade_in_infer’)
torch.nn.ELUfluid.layers.elu(x, alpha=1.0, name=None)
torch.nn.Embeddingfluid.dygraph.Embedding
torch.nn.evalfluid.dygraph.Layer.eval()
torch.nn.floatfluid.layers.cast(x, dtype)
torch.nn.Foldfluid.layers.unfold
torch.nn.functional.adaptive_avg_pool2dfluid.layers.adaptive_pool2d
torch.nn.functional.adaptive_avg_pool3dfluid.layers.adaptive_pool3d
torch.nn.functional.adaptive_max_pool2dfluid.layers.adaptive_pool2d
torch.nn.functional.adaptive_max_pool3dfluid.layers.adaptive_pool3d
torch.nn.functional.affine_gridfluid.layers.affine_grid
torch.nn.functional.avg_pool2dfluid.layers.pool2d(input, pool_size=-1, pool_type=“max”, pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format=“NCHW”)设置pool_type=“avg”
torch.nn.functional.avg_pool3dfluid.layers.pool3d(input, pool_size=-1, pool_type=“max”, pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format=“NCDHW”)设置pool_type=“avg”
torch.nn.functional.cross_entropyfluid.layers.softmax_with_cross_entropy
torch.nn.functional.ctc_lossfluid.layers.warpctc
torch.nn.functional.elufluid.layers.elu
torch.nn.functional.embeddingfluid.embedding
torch.nn.functional.embedding_bagfluid.contrib.fused_embedding_seq_pool
torch.nn.functional.glufluid.nets.glu(input, dim=-1)
torch.nn.functional.grid_samplefluid.layers.grid_sampler
torch.nn.functional.hardshrinkfluid.layers.hard_shrink
torch.nn.functional.interpolatefluid.layers.interpolate
torch.nn.functional.kl_divfluid.layers.kldiv_loss
torch.nn.functional.leaky_relufluid.layers.leaky_relu
torch.nn.functional.logsigmoidfluid.layers.logsigmoid
torch.nn.functional.margin_ranking_lossfluid.layers.margin_rank_loss(label, left, right, margin=0.1, name=None)
torch.nn.functional.max_pool2dfluid.layers.pool2d(input, pool_size=-1, pool_type=“max”, pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format=“NCHW”)设置pool_type=“max”
torch.nn.functional.max_pool3dfluid.layers.pool3d(input, pool_size=-1, pool_type=“max”, pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format=“NCDHW”)设置pool_type=“max”
torch.nn.functional.mse_lossfluid.layers.mse_loss(input, label)
torch.nn.functional.one_hotfluid.one_hot
torch.nn.functional.padfluid.layers.pad(x, paddings, pad_value=0.0, name=None)
torch.nn.functional.pixel_shufflefluid.layers.pixel_shuffle(x, upscale_factor)
torch.nn.functional.prelufluid.layers.prelu(x, mode, param_attr=None, name=None)
torch.nn.functional.relufluid.layers.relu(x, name=None)
torch.nn.functional.relu6fluid.layers.relu6
torch.nn.functional.selufluid.layers.selu
torch.nn.functional.sigmoidfluid.layers.sigmoid
torch.nn.functional.smooth_l1_lossfluid.layers.smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None)
torch.nn.functional.softmaxfluid.layers.softmax
torch.nn.functional.softplusfluid.layers.softplus
torch.nn.functional.softshrinkfluid.layers.softshrink
torch.nn.functional.softsignfluid.layers.softsign
torch.nn.functional.tanhfluid.layers.tanh
torch.nn.functional.tanhshrinkfluid.layers.tanh_shrink
torch.nn.functional.thresholdfluid.layers.thresholded_relu
torch.nn.functional.unfoldfluid.layers.unfold(x, kernel_size, strides=1, paddings=0, dilation=1, name=None)
torch.nn.GroupNormfluid.layers.group_norm(input, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout=‘NCHW’, name=None)
torch.nn.GRUfluid.layers.dynamic_gru
torch.nn.GRUCellfluid.layers.GRUCell
torch.nn.halffluid.layers.cast(x, dtype)
torch.nn.Hardshrinkfluid.layers.hard_shrink(x, threshold=None)
torch.nn.init.constant_fluid.initializer.ConstantInitializer(value=0.0, force_cpu=False)
torch.nn.init.eye_fluid.layers.eye(num_rows, num_columns=None, batch_shape=None, dtype=‘float32’)
torch.nn.init.kaiming_normal_fluid.initializer.MSRAInitializer(uniform=True, fan_in=None, seed=0)
torch.nn.init.kaiming_uniform_fluid.initializer.MSRAInitializer(uniform=True, fan_in=None, seed=0)
torch.nn.init.normal_fluid.initializer.NormalInitializer(loc=0.0, scale=1.0, seed=0)
torch.nn.init.ones_fluid.layers.ones(shape, dtype, force_cpu=False)
torch.nn.init.uniform_fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0)
torch.nn.init.xavier_normal_fluid.initializer.XavierInitializer(uniform=True, fan_in=None, fan_out=None, seed=0)
torch.nn.init.xavier_uniform_fluid.initializer.XavierInitializer(uniform=True, fan_in=None, fan_out=None, seed=0)
torch.nn.init.zeros_fluid.layers.zeros(shape, dtype, force_cpu=False)
torch.nn.InstanceNorm1dfluid.layers.instance_norm
torch.nn.InstanceNorm2dfluid.layers.instance_norm
torch.nn.InstanceNorm3dfluid.layers.instance_norm
torch.nn.LayerNormfluid.layers.layer_norm(input, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, name=None)
torch.nn.LeakyReLUfluid.layers.leaky_relu(x, alpha=0.02, name=None)
torch.nn.Linearfluid.dygraph.Linear(input_dim, output_dim, param_attr=None, bias_attr=None, act=None, dtype=‘float32’)
torch.nn.load_state_dictfluid.dygraph.Layer.set_dict(stat_dict, include_sublayers=True)
torch.nn.LogSigmoidfluid.layers.logsigmoid(x, name=None)
torch.nn.LSTMfluid.layers.lstm
torch.nn.LSTMCellfluid.layers.LSTMCell
torch.nn.MarginRankingLossfluid.layers.margin_rank_loss(label, left, right, margin=0.1, name=None)
torch.nn.MaxPool2dfluid.layers.pool2d(pool_type为’max’)
torch.nn.MaxPool3dfluid.layers.pool3d(pool_type为’max’)
torch.nn.Modulefluid.dygraph.Layer()
torch.nn.modulesfluid.dygraph.Layer.sublayers(include_sublayers=True)
torch.nn.MSELossfluid.dygraph.MSELoss(input, label)
torch.nn.MultiheadAttentionfluid.nets.scaled_dot_product_attention
torch.nn.named_modulesfluid.dygraph.Layer.named_sublayers(prefix=’’,include_sublayers=True,include_self=False,layers_set=None)
torch.nn.named_parametersfluid.dygraph.Layer.named_parameters(prefix=’’, include_sublayers=True)
torch.nn.ParameterListfluid.dygraph.ParameterList
torch.nn.parametersfluid.dygraph.Layer.parameters(include_sublayers=True)
torch.nn.PixelShufflefluid.layers.pixel_shuffle
torch.nn.PReLUfluid.dygraph.Prelu
torch.nn.ReflectionPad2dfluid.layers.pad2d
torch.nn.register_parameterfluid.dygraph.Layer.add_parameter(name, parameter)
torch.nn.ReLUfluid.layers.relu(x, name=None)
torch.nn.ReLU6fluid.layers.relu6
torch.nn.ReplicationPad2dfluid.layers.pad2d
torch.nn.requires_grad_fluid.Variable().stop_gradient
torch.nn.RNNfluid.layers.rnn
torch.nn.RNNCellfluid.layers.RNNCell
torch.nn.SELUfluid.layers.selu
torch.nn.Sequentialfluid.dygraph.Sequential(*layers)
torch.nn.Sigmoidfluid.layers.sigmoid(x, name=None)
torch.nn.SmoothL1Lossfluid.layers.smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None)
torch.nn.Softmaxfluid.layers.softmax(input, use_cudnn=False, name=None, axis=-1)
torch.nn.Softplusfluid.layers.softplus
torch.nn.Softshrinkfluid.layers.softshrink(x, alpha=None)
torch.nn.Softsignfluid.layers.softsign(x, name=None)
torch.nn.state_dictfluid.dygraph.Layer.state_dict
torch.nn.Tanhfluid.layers.tanh(x, name=None)
torch.nn.Tanhshrinkfluid.layers.tanh_shrink
torch.nn.Thresholdfluid.layers.thresholded_relu(x, threshold=None)
torch.nn.trainfluid.dygraph.Layer.train()
torch.nn.typefluid.layers.cast(x, dtype)
torch.nn.UpsamplingBilinear2dfluid.layers.resize_bilinear
torch.nn.UpsamplingNearest2dfluid.layers.resize_nearest
torch.nn.zero_gradfluid.dygraph.Layer.clear_gradients()
torch.nn.ZeroPad2dfluid.layers.pad / fluid.layers.pad2d
torch.normfluid.layers.l2_normalize
torch.normalfluid.layers.gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype=‘float32’)
torch.numelfluid.layers.size(input)
torch.onesfluid.layers.ones
torch.ones_likefluid.layers.ones_like
torch.powfluid.layers.pow
torch.prodfluid.layers.reduce_prod(input, dim=None, keep_dim=False, name=None)
torch.rand_likefluid.layers.gaussian_random_batch_size_like(input, shape, input_dim_idx=0, output_dim_idx=0, mean=0.0, std=1.0, seed=0, dtype=‘float32’)
torch.randintfluid.layers.randint(low, high=None, shape=None, out=None, dtype=None, device=None, stop_gradient=False, seed=0, name=None)
torch.randnfluid.layers.randn
torch.randn_likefluid.layers.gaussian_random_batch_size_like
torch.randpermfluid.layers.randperm
torch.rangefluid.layers.range
torch.reciprocalfluid.layers.reciprocal(x, name=None)
torch.reshapefluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None)
torch.rollfluid.layers.roll
torch.roundfluid.layers.round(x, name=None)
torch.rsqrtfluid.layers.rsqrt(x, name=None)
torch.seedfluid.Program.random_seed
torch.set_num_interop_threadsfluid.ExecutionStrategy.num_threads
torch.set_num_threadsfluid.cpu_places(device_count=None)
torch.sigmoidfluid.layers.sigmoid(x, name=None)
torch.signfluid.layers.sign(x)
torch.sinfluid.layers.sin(x, name=None)
torch.splitfluid.layers.split(input, num_or_sections, dim=-1, name=None)
torch.sqrtfluid.layers.sqrt(x, name=None)
torch.squeezefluid.layers.squeeze(input, axes, name=None)
torch.stackfluid.layers.stack(x, axis=0)
torch.std_meanfluid.layers.mean
torch.sumfluid.layers.sum(x)
torch.sumfluid.layers.elementwise_add(x, y, axis=-1, act=None, name=None)
torch.tanfluid.layers.tanh(x, name=None)
torch.tanhfluid.layers.tanh(x, name=None)
torch.Tensor.absfluid.layers.abs(x, name=None)
torch.Tensor.abs_fluid.layers.abs(x, name=None)
torch.Tensor.acosfluid.layers.acos(x, name=None)
torch.Tensor.acos_fluid.layers.acos(x, name=None)
torch.Tensor.add_fluid.layers.elementwise_add(x, y, axis=-1, act=None, name=None)
torch.Tensor.asin_fluid.layers.asin(x, name=None)
torch.Tensor.atan_fluid.layers.atan(x, name=None)
torch.Tensor.backwardfluid.gradients(targets, inputs, target_gradients=None, no_grad_set=None)paddle 目标变量和起始
torch.Tensor.bfloat16fluid.layers.cast(x, float16)
torch.Tensor.bmmfluid.layers.matmul
torch.Tensor.boolfluid.layers.cast(x, bool)
torch.Tensor.bytefluid.layers.cast(x, uint8)
torch.Tensor.ceilfluid.layers.ceil
torch.Tensor.clampfluid.layers.clip
torch.Tensor.clamp_fluid.layers.clip(x, min, max, name=None)
torch.Tensor.clonefluid.layers.assign(input, output=None)
torch.Tensor.cosfluid.layers.cos
torch.Tensor.cumsumfluid.layers.cumsum
torch.Tensor.dequantizefluid.layers.dequantize()
torch.Tensor.diagfluid.layers.diag
torch.Tensor.divfluid.layers.elementwise_div(x, y, axis=-1, act=None, name=None)
torch.Tensor.eqfluid.layers.equal
torch.Tensor.eq_fluid.layers.equal(x, y, cond=None)
torch.Tensor.expfluid.layers.exp(x, name=None)
torch.Tensor.exp_fluid.layers.exp(x, name=None)
torch.Tensor.expandfluid.layers.expand(x, expand_times, name=None)
torch.Tensor.expand_asfluid.layers.expand_as(x, target_tensor, name=None)
torch.Tensor.fill_diagonal_fluid.layers.diag(diagonal)
torch.Tensor.flattenfluid.layers.flatten
torch.Tensor.flipfluid.layers.reverse
torch.Tensor.floorfluid.layers.floor
torch.Tensor.floor_fluid.layers.floor(x, name=None)
torch.Tensor.fmod_fluid.layers.elementwise_mod(x, y, axis=-1, act=None, name=None)
torch.Tensor.gatherfluid.layers.gather(input, index, overwrite=True)
torch.Tensor.gefluid.layers.greater_equal(x, y, cond=None)
torch.Tensor.ge_fluid.layers.greater_equal(x, y, cond=None)
torch.Tensor.gtfluid.layers.greater_than(x, y, cond=None)
torch.Tensor.gt_fluid.layers.greater_than(x, y, cond=None)
torch.Tensor.hardshrinkfluid.layers.hard_shrink(x, threshold=None)
torch.Tensor.index_selectfluid.layers.multiplex(inputs, index)
torch.Tensor.intfluid.layers.cast
torch.Tensor.lefluid.layers.less_equal(x, y, cond=None)
torch.Tensor.le_fluid.layers.less_equal(x, y, cond=None)
torch.Tensor.logfluid.layers.log(x, name=None)
torch.Tensor.log_fluid.layers.log(x, name=None)
torch.Tensor.logical_notfluid.layers.logical_not(x, out=None, name=None)
torch.Tensor.logical_not_fluid.layers.logical_not(x, out=None, name=None)
torch.Tensor.logical_xorfluid.layers.logical_xor(x, y, out=None, name=None)
torch.Tensor.logical_xor_fluid.layers.logical_xor(x, y, out=None, name=None)
torch.Tensor.ltfluid.layers.less_than(x, y, cond=None)
torch.Tensor.lt_fluid.layers.less_than(x, y, force_cpu=None, cond=None)
torch.Tensor.masked_selectfluid.layers.masked_select(input, mask)
torch.Tensor.matmulfluid.layers.matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None)
torch.Tensor.matrix_powerfluid.layers.pow(x, factor=1.0, name=None)
torch.Tensor.maxfluid.layers.reduce_max(input, dim=None, keep_dim=False, name=None)
torch.Tensor.meanfluid.layers.mean(x, name=None)
torch.Tensor.minfluid.layers.reduce_min(input, dim=None, keep_dim=False, name=None)
torch.Tensor.mulfluid.layers.mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None)
torch.Tensor.mul_fluid.layers.mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None)
torch.Tensor.multinomialfluid.layers.sampling_id(x, min=0.0, max=1.0, seed=0, dtype=‘float32’)
torch.Tensor.mvfluid.layers.mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None)
torch.Tensor.narrow_copyfluid.layers.slice(input, axes, starts, ends)
torch.Tensor.ndimensionfluid.layers.rank(input)
torch.Tensor.ne_fluid.layers.not_equal(x, y, cond=None)
torch.Tensor.normal_fluid.layers.Normal(loc, scale)
torch.Tensor.numelfluid.layers.size(input)
torch.Tensor.permutefluid.layers.transpose(x, perm, name=None)
torch.Tensor.powfluid.layers.pow(x, factor=1.0, name=None)
torch.Tensor.pow_fluid.layers.pow(x, factor=1.0, name=None)
torch.Tensor.prodfluid.layers.reduce_prod(input, dim=None, keep_dim=False, name=None)
torch.Tensor.put_fluid.layers.scatter(input, index, updates, name=None, overwrite=True)
torch.Tensor.random_fluid.layers.uniform_random(shape, dtype=‘float32’, min=-1.0, max=1.0, seed=0)
torch.Tensor.reciprocalfluid.layers.reciprocal(x, name=None)
torch.Tensor.reciprocal_fluid.layers.reciprocal(x, name=None)
torch.Tensor.remainder_fluid.layers.elementwise_mod(x, y, axis=-1, act=None, name=None)
torch.Tensor.requires_gradfluid.Variable.stop_gradient(属性)
torch.Tensor.requires_grad_fluid.Variable.stop_gradient(属性)
torch.Tensor.reshapefluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None)
torch.Tensor.reshape_asfluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None)
torch.Tensor.resize_as_fluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None)
torch.Tensor.roundfluid.layers.round(x, name=None)
torch.Tensor.round_fluid.layers.round(x, name=None)
torch.Tensor.rsqrtfluid.layers.rsqrt(x, name=None)
torch.Tensor.rsqrt_fluid.layers.rsqrt(x, name=None)
torch.Tensor.scatterfluid.layers.scatter(input, index, updates, name=None, overwrite=True)
torch.Tensor.scatter_fluid.layers.scatter(input, index, updates, name=None, overwrite=True)
torch.Tensor.scatter_addfluid.layers.scatter(input, index, updates, name=None, overwrite=True)
torch.Tensor.scatter_add_fluid.layers.scatter(input, index, updates, name=None, overwrite=True)
torch.Tensor.selectfluid.layers.slice
torch.Tensor.shortfluid.layers.cast(x, dtype)
torch.Tensor.sigmoidfluid.layers.sigmoid(x, name=None)
torch.Tensor.sigmoid_fluid.layers.sigmoid(x, name=None)
torch.Tensor.signfluid.layers.sign(x)
torch.Tensor.sign_fluid.layers.sign(x)
torch.Tensor.sinfluid.layers.sin(x, name=None)
torch.Tensor.sin_fluid.layers.sin(x, name=None)
torch.Tensor.splitfluid.layers.split(input, num_or_sections, dim=-1, name=None)
torch.Tensor.sqrtfluid.layers.sqrt(x, name=None)
torch.Tensor.sqrt_fluid.layers.sqrt(x, name=None)
torch.Tensor.squeezefluid.layers.squeeze(input, axes, name=None)
torch.Tensor.squeeze_fluid.layers.squeeze(input, axes, name=None)
torch.Tensor.subfluid.layers.elementwise_sub(x, y, axis=-1, act=None, name=None)
torch.Tensor.sub_fluid.layers.elementwise_sub(x, y, axis=-1, act=None, name=None)
torch.Tensor.sumfluid.layers.sum(x)
torch.Tensor.t_fluid.layers.transpose(x, perm, name=None)
torch.Tensor.tanhfluid.layers.tanh(x, name=None)
torch.Tensor.tanh_fluid.layers.tanh(x, name=None)
torch.Tensor.tofluid.layers.cast(x, dtype)
torch.Tensor.topkfluid.layers.topk(input, k, name=None)
torch.Tensor.transposefluid.layers.transpose(x, perm, name=None)
torch.Tensor.transpose_fluid.layers.transpose(x, perm, name=None)
torch.Tensor.truncfluid.layers.floor(x, name=None)
torch.Tensor.unbindfluid.layers.unstack(x, axis=0, num=None)
torch.Tensor.uniquefluid.layers.unique
torch.Tensor.unsqueezefluid.layers.unsqueeze
torch.Tensor.unsqueeze_fluid.layers.unsqueeze(input, axes, name=None)
torch.Tensor.viewfluid.layers.reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None)
torch.Tensor.wherefluid.layers.where
torch.Tensor.zero_fluid.layers.zeros(shape, dtype, force_cpu=False)
torch.topkfluid.layers.topk(input, k, name=None)
torch.transposefluid.layers.transpose(x, perm, name=None)
torch.trilfluid.layers.tril
torch.triufluid.layers.triu
torch.uniquefluid.layers.unique(x, dtype=‘int32’)
torch.unsqueezefluid.layers.unsqueeze(input, axes, name=None)
torch.var_meanfluid.layers.mean
torch.wherefluid.layers.where(condition)
torch.zerosfluid.layers.zeros(shape, dtype, force_cpu=False)
torch.zeros_likefluid.layers.zeros_like(x, out=None)
torch.utils.data.dataloader.default_collatefluid.io.default_collate_fn
torch.optim.lr_scheduler.StepLRfluid.dygraph.StepDecay
  • 21
    点赞
  • 66
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值