1.argparse基本用法
http://vra.github.io/2017/12/02/argparse-usage/
2.importlib.import_module导入模块函数
# Dynamically import the network module named by FLAGS.model (e.g. the 3dcnn
# file under the models/ folder) — presumably FLAGS comes from argparse; verify
# against the full script. NOTE(review): the package= argument of
# importlib.import_module only matters for relative names (leading dot) — for
# an absolute name like "3dcnn" it is ignored; confirm sys.path includes models/.
MODEL = importlib.import_module(FLAGS.model,package='models') # import network module (the 3dcnn file under the models folder)
# Build the graph-input placeholders via the module's own helper.
# Arguments appear to be (batch, depth, height, width, channels, num_classes)
# for a 30x30x30 single-channel voxel grid — TODO confirm against placeholder_inputs.
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, 30, 30, 30, 1, NUM_CLASSES)
3.tf.placeholder函数占位构建整个计算图,用于输入输出
# tf.placeholder builds a graph node with no value; a value MUST be supplied
# through feed_dict each time the graph is run.
tf.placeholder( # signature
dtype,
shape=None,
name=None
)
# Usage:
# 5-D input: (batch, depth, height, width, channels) voxel volume.
pointclouds_pl = tf.placeholder(tf.float32, [batch_size, depth, height, width,channels])
# Labels appear to be one-hot per class (shape has num_classes) — TODO confirm.
labels_pl = tf.placeholder(tf.int32, [batch_size, num_classes])
# Map each placeholder to the current mini-batch slice [start_idx, end_idx).
feed_dict = {ops['pointclouds_pl']: current_voxel[start_idx:end_idx, :, :, :, :],
ops['labels_pl']:current_label[start_idx:end_idx,:],
ops['is_training_pl']:is_training}
# Evaluate loss and predictions for this batch; feed_dict fills the placeholders.
loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
feed_dict=feed_dict)
4.tf.variable_scope和tf.get_variable
tf.get_variable(name, shape, initializer): name就是变量的名称,shape是变量的维度,initializer是变量初始化的方式,初始化的方式有以下几种:(tf.constant_initializer:常量初始化函数;tf.random_normal_initializer:正态分布;tf.truncated_normal_initializer:截取的正态分布;tf.random_uniform_initializer:均匀分布;tf.zeros_initializer:全部是0;tf.ones_initializer:全是1;tf.uniform_unit_scaling_initializer:满足均匀分布,但不影响输出数量级的随机值),该函数会根据变量是否存在决定重用变量或新建变量。
tf.variable_scope划分变量的命名作用域：在不同作用域下可以用相同的变量名创建互不冲突的变量（实际变量名会带上作用域前缀，如 conv1/weights）；配合 reuse 参数还可以在同一作用域内共享（重用）已创建的变量。（待深入理解后补充细节）
# Layer 1: 3-D convolution (7x7x7 kernel, in_filters -> 20 channels, stride 1,
# SAME padding) + bias + ReLU. Variables live under the "conv1" scope so their
# names become conv1/weights and conv1/biases.
# NOTE(review): the body lines below lost their indentation in this paste —
# in the original file they are indented under the `with` block.
with tf.variable_scope("conv1") as scope:
out_filters = 20
# _weight_variable / _bias_variable are project helpers (not shown here);
# presumably thin wrappers around tf.get_variable — confirm in the source file.
kernel = _weight_variable("weights", [7, 7, 7, in_filters, out_filters])
# strides = [1, 1, 1, 1, 1]: stride 1 over batch, depth, height, width, channel.
conv = tf.nn.conv3d(point_cloud, kernel, [1, 1, 1, 1, 1], padding="SAME")
biases = _bias_variable("biases", [out_filters])
bias = tf.nn.bias_add(conv, biases)
# scope.name gives the op the readable name "conv1".
conv1 = tf.nn.relu(bias, name=scope.name)
print_activations(conv1)
# Carry the output forward and update the channel count for the next layer.
prev_layer = conv1
in_filters = out_filters
# (2) 2x2x2 max pooling with stride 2 halves each spatial dimension.
pool1 = tf.nn.max_pool3d(prev_layer, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding="SAME")
# No normalization applied — norm1 is just an alias for pool1 here.
norm1 = pool1
print_activations(pool1)
# (3) Layer 2: second 3-D convolution (5x5x5 kernel, 20 -> 20 channels,
# stride 1, SAME padding) + bias + ReLU, scoped as "conv2" so its variables
# get distinct names from conv1's even though the local names repeat.
# NOTE(review): body indentation under `with` was lost in this paste.
prev_layer = norm1
with tf.variable_scope("conv2") as scope:
out_filters = 20
kernel = _weight_variable("weights", [5, 5, 5, in_filters, out_filters])
conv = tf.nn.conv3d(prev_layer, kernel, [1, 1, 1, 1, 1], padding="SAME")
biases = _bias_variable("biases", [out_filters])
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
print_activations(conv2)
# Hand off to the next stage, keeping the channel count in sync.
prev_layer = conv2
in_filters = out_filters