TensorFlow API: Neural Network

Activation Functions
tf.sigmoid(x, name=None)  # sigmoid: y = 1 / (1 + exp(-x)) ✨
tf.tanh(x, name=None)  # hyperbolic tangent activation
tf.nn.relu(features, name=None)  # rectified linear unit: max(features, 0) ✨
tf.nn.relu6(features, name=None)  # ReLU clipped at 6: min(max(features, 0), 6)
tf.nn.elu(features, name=None)  # exponential linear unit: exp(features) - 1 if features < 0, features otherwise
tf.nn.softplus(features, name=None)  # softplus: log(exp(features) + 1)
tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None)  # dropout; keep_prob is the probability each element is kept, noise_shape is the shape of the random keep/drop mask ✨
tf.nn.bias_add(value, bias, data_format=None, name=None)  # adds a bias to value; a special case of tf.add where bias is 1-D and is broadcast when summed with value; its data format may differ from value's, and the result has the same format as value
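
A minimal usage sketch tying these together (assuming the TF 1.x graph API; values are illustrative):

import tensorflow as tf

x = tf.constant([[-1.0, 0.0, 2.0]])
h = tf.nn.relu(x)                    # -> [[0., 0., 2.]]
h = tf.nn.dropout(h, keep_prob=0.5)  # zeroes elements with probability 0.5, scales survivors by 1/keep_prob
y = tf.nn.bias_add(h, tf.constant([0.1, 0.1, 0.1]))  # 1-D bias broadcast over the batch dimension
with tf.Session() as sess:
    print(sess.run(y))
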
Convolution
tf.nn.convolution(input, filter, padding, strides=None, dilation_rate=None, name=None, data_format=None)  # N-dimensional convolution
tf.nn.conv1d(input, filter, strides, padding, use_cudnn_on_gpu=None, data_format=None, name=None)  # 1-D convolution
tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, data_format=None, name=None)  # 2-D convolution ✨
tf.nn.conv3d(input, filter, strides, padding, use_cudnn_on_gpu=None, data_format=None, name=None)  # 3-D convolution ✨
tf.nn.conv2d_transpose(value, filter, output_shape, strides, padding='SAME', data_format='NHWC', name=None)  # transposed convolution / "deconvolution" ✨
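
A conv2d sketch (TF 1.x; shapes are illustrative, NHWC layout):

import tensorflow as tf

x = tf.random_normal([1, 5, 5, 1])  # NHWC: batch=1, 5x5 image, 1 channel
w = tf.random_normal([3, 3, 1, 4])  # filter: 3x3, 1 input channel, 4 output channels
y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')  # output shape [1, 5, 5, 4]
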
Pooling
tf.nn.avg_pool(value, ksize, strides, padding, data_format='NHWC', name=None)  # average pooling ✨
tf.nn.max_pool(value, ksize, strides, padding, data_format='NHWC', name=None)  # max pooling ✨
tf.nn.max_pool_with_argmax(input, ksize, strides, padding, Targmax=None, name=None)  # max pooling that returns the tuple (output, argmax): the max values and their flattened indices ✨
tf.nn.avg_pool3d(input, ksize, strides, padding, name=None)  # 3-D average pooling ✨
tf.nn.max_pool3d(input, ksize, strides, padding, name=None)  # 3-D max pooling ✨
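
A short pooling sketch (TF 1.x, NHWC; ksize and strides are per-dimension, [batch, height, width, channels]):

import tensorflow as tf

x = tf.random_normal([1, 4, 4, 3])
p = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')  # -> [1, 2, 2, 3]
a = tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')  # same shape, window averages
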
Batch Normalization
tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)  # L2 normalization along dim: output = x / sqrt(max(sum(x**2), epsilon))
tf.nn.batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None)  # batch normalization: scale * (x - mean) / sqrt(variance + variance_epsilon) + offset
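
batch_normalization takes precomputed statistics; a common pattern is to get them from tf.nn.moments. A minimal training-time sketch (TF 1.x; the moving-average bookkeeping needed for inference is omitted):

import tensorflow as tf

x = tf.random_normal([32, 64])               # batch of 32 feature vectors
mean, variance = tf.nn.moments(x, axes=[0])  # per-feature batch statistics
beta = tf.Variable(tf.zeros([64]))           # learnable offset
gamma = tf.Variable(tf.ones([64]))           # learnable scale
y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, variance_epsilon=1e-3)
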
Classification
tf.nn.sigmoid_cross_entropy_with_logits(logits, targets, name=None)  # element-wise sigmoid cross entropy between logits and targets (independent, non-exclusive classes) ✨
tf.nn.softmax(logits, name=None)  # softmax: softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) ✨
tf.nn.softmax_cross_entropy_with_logits(logits, labels, name=None)  # softmax cross entropy between logits and labels; logits and labels must have the same shape and dtype ✨
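
A small classification sketch (TF 1.x; keyword arguments are used since later 1.x releases require them):

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
labels = tf.constant([[1.0, 0.0, 0.0]])  # one-hot, same shape and dtype as logits
probs = tf.nn.softmax(logits)            # normalized class probabilities
loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)  # one loss per row
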
Loss
tf.nn.l2_loss(t, name=None)  # half the squared L2 norm: output = sum(t**2) / 2
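
l2_loss is typically used for weight decay; a hedged sketch (TF 1.x, illustrative coefficient):

import tensorflow as tf

w = tf.Variable(tf.random_normal([10, 5]))
weight_decay = 1e-4 * tf.nn.l2_loss(w)  # 1e-4 * sum(w**2) / 2, added to the task loss
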
Embeddings
tf.nn.embedding_lookup(params, ids, partition_strategy='mod', name=None, validate_indices=True, max_norm=None)  # parallel lookup of rows of params (a possibly partitioned embedding table) by ids
tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, partition_strategy='mod', name=None, combiner=None, max_norm=None)  # embedding lookup for sparse ids, combining the looked-up rows per row of sp_ids (e.g. combiner='mean')
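
A minimal embedding_lookup sketch (TF 1.x; vocabulary size and dimension are illustrative):

import tensorflow as tf

params = tf.random_normal([1000, 64])       # embedding table: 1000 ids, 64-dim vectors
ids = tf.constant([3, 17, 3])               # repeated ids are allowed
vecs = tf.nn.embedding_lookup(params, ids)  # -> shape [3, 64]: rows 3, 17, 3 of params
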
Recurrent Neural Networks
tf.nn.dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None)  # fully dynamic unrolling of cell over inputs
tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None, initial_state_fw=None, initial_state_bw=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None)  # runs independent forward and backward RNNs over inputs
tf.nn.raw_rnn(cell, loop_fn, parallel_iterations=None, swap_memory=False, scope=None)  # low-level RNN driven by a user-supplied loop_fn
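
A dynamic_rnn sketch (TF 1.x; the tf.nn.rnn_cell.LSTMCell choice and all shapes are illustrative):

import tensorflow as tf

inputs = tf.random_normal([4, 10, 8])  # batch-major: [batch, max_time, depth] (time_major=False)
lengths = tf.constant([10, 7, 10, 3])  # true length of each sequence
cell = tf.nn.rnn_cell.LSTMCell(num_units=16)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, sequence_length=lengths, dtype=tf.float32)
# outputs: [4, 10, 16]; steps beyond each sequence_length are zero-filled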