I tried three example repos and kept running into problems along the way: Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution-master, Spectral_Normalization-Tensorflow-master, and SNGAN-master. In the end I used the method from the first one,
applied inside TripleGAN-Tensorflow-SN.
The specific code is below (the modified parts were originally marked in bold):
1. In ops.py:
import tensorflow as tf
#import tflearn
from tflearn.layers.conv import global_avg_pool
from tensorflow.contrib.layers import variance_scaling_initializer
import numpy as np
import math

he_init = variance_scaling_initializer()
# he_init = tf.truncated_normal_initializer(stddev=0.02)
"""
The weight norm is not implemented at this time.
"""
def weight_norm(x, output_dim) :
input_dim = int(x.get_shape()[-1])
g = tf.get_variable('g_scalar', shape=[output_dim], dtype=tf.float32, initializer=tf.ones_initializer())
w = tf.get_variable('weight', shape=[input_dim, output_dim], dtype=tf.float32, initializer=he_init)
w_init = tf.nn.l2_normalize(w, dim=0) * g # SAME dim=1
return tf.variables_initializer(w_init)
weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
weight_regularizer = None
def conv_layer(x, filter_size, kernel, stride=1, padding='SAME', wn=False, sn=True, layer_name="conv"):
    with tf.name_scope(layer_name):
        '''if wn:
            w_init = weight_norm(x, filter_size)
            x = tf.layers.conv2d(inputs=x, filters=filter_size, kernel_size=kernel, kernel_initializer=w_init, strides=stride, padding=padding)
        else:
            x = tf.layers.conv2d(inputs=x, filters=filter_size, kernel_size=kernel, kernel_initializer=he_init, strides=stride, padding=padding)
        '''
        if sn:
            # Create the raw kernel, then convolve with its spectrally
            # normalized version. AUTO_REUSE lets the same variables be
            # fetched when the network is built more than once; the
            # layer_name suffix keeps kernels of different layers distinct.
            with tf.variable_scope('scope', reuse=tf.AUTO_REUSE):
                w = tf.get_variable("kernel" + layer_name,
                                    shape=[kernel[0], kernel[1], x.get_shape()[-1], filter_size],
                                    initializer=weight_init, regularizer=weight_regularizer)
                print("w", w)
                # bias = tf.get_variable("bias", [filter_size], initializer=tf.constant_initializer(0.0))
                x = tf.nn.conv2d(input=x, filter=spectral_norm(layer_name, w),
                                 strides=[1, stride, stride, 1], padding=padding)
                print("x", x)
        else:
            x = tf.layers.conv2d(inputs=x, filters=filter_size,
                                 kernel_size=kernel, kernel_initializer=weight_init,
                                 kernel_regularizer=weight_regularizer,
                                 strides=stride, padding=padding)
        return x
def deconv_layer(x, filter_size, kernel, stride=1, padding='SAME', wn=False, sn=False, layer_name='deconv'):
    with tf.name_scope(layer_name):
        if sn:
            # The output shape must be given explicitly for tf.nn.conv2d_transpose,
            # computed from the *input* before any deconvolution is applied.
            x_shape = x.get_shape().as_list()
            output_shape = [x_shape[0], x_shape[1] * stride, x_shape[2] * stride, filter_size]
            with tf.variable_scope('scope', reuse=tf.AUTO_REUSE):
                # For conv2d_transpose the kernel layout is [h, w, out_ch, in_ch].
                # The original snippet called an undefined spectral_normed_weight();
                # spectral_norm() defined below is used instead, mirroring conv_layer.
                w = tf.get_variable("kernel" + layer_name,
                                    shape=[kernel[0], kernel[1], filter_size, x.get_shape()[-1]],
                                    initializer=weight_init, regularizer=weight_regularizer)
                x = tf.nn.conv2d_transpose(x, filter=spectral_norm(layer_name, w),
                                           output_shape=output_shape,
                                           strides=[1, stride, stride, 1], padding=padding)
        elif wn:
            w_init = weight_norm(x, filter_size)  # see note above: wn is not usable
            x = tf.layers.conv2d_transpose(inputs=x, filters=filter_size, kernel_size=kernel,
                                           kernel_initializer=w_init, strides=stride, padding=padding)
        else:
            x = tf.layers.conv2d_transpose(inputs=x, filters=filter_size,
                                           kernel_size=kernel, kernel_initializer=weight_init,
                                           kernel_regularizer=weight_regularizer,
                                           strides=stride, padding=padding)
        return x
def linear(x, unit, wn=False, layer_name='linear'):
    with tf.name_scope(layer_name):
        if wn:
            w_init = weight_norm(x, unit)
            x = tf.layers.dense(inputs=x, units=unit, kernel_initializer=w_init)
        else:
            x = tf.layers.dense(inputs=x, units=unit, kernel_initializer=he_init)
        return x

def nin(x, unit, wn=False, layer_name='nin'):
    # https://github.com/openai/weightnorm/blob/master/tensorflow/nn.py
    with tf.name_scope(layer_name):
        s = list(map(int, x.get_shape()))
        x = tf.reshape(x, [np.prod(s[:-1]), s[-1]])
        x = linear(x, unit, wn, layer_name)
        x = tf.reshape(x, s[:-1] + [unit])
        return x
def gaussian_noise_layer(x, std=0.15):
    noise = tf.random_normal(shape=tf.shape(x), mean=0.0, stddev=std, dtype=tf.float32)
    return x + noise

def Global_Average_Pooling(x):
    return global_avg_pool(x, name='Global_avg_pooling')

def max_pooling(x, kernel, stride):
    return tf.layers.max_pooling2d(x, pool_size=kernel, strides=stride, padding='VALID')

def flatten(x):
    return tf.contrib.layers.flatten(x)

def lrelu(x, leak=0.2, name="lrelu"):
    return tf.maximum(x, leak * x)

def sigmoid(x):
    return tf.nn.sigmoid(x)

def relu(x):
    return tf.nn.relu(x)

def tanh(x):
    return tf.nn.tanh(x)

def conv_concat(x, y):
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return concat([x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], axis=3)

def concat(x, axis=1):
    return tf.concat(x, axis=axis)

def reshape(x, shape):
    return tf.reshape(x, shape=shape)
def batch_norm(x, is_training, scope):
    return tf.contrib.layers.batch_norm(x,
                                        decay=0.9,
                                        updates_collections=None,
                                        epsilon=1e-5,
                                        scale=True,
                                        is_training=is_training,
                                        scope=scope)

def instance_norm(x, is_training, scope):
    with tf.variable_scope(scope):
        epsilon = 1e-5
        mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
        scale = tf.get_variable('scale', [x.get_shape()[-1]],
                                initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
        offset = tf.get_variable('offset', [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
        out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
        return out

def dropout(x, rate, is_training):
    return tf.layers.dropout(inputs=x, rate=rate, training=is_training)
def rampup(epoch):
    if epoch < 80:
        p = max(0.0, float(epoch)) / float(80)
        p = 1.0 - p
        return math.exp(-p * p * 5.0)
    else:
        return 1.0

def rampdown(epoch):
    if epoch >= (300 - 50):
        ep = (epoch - (300 - 50)) * 0.5
        return math.exp(-(ep * ep) / 50)
    else:
        return 1.0
def _l2normalize(v, eps=1e-12):
    return v / tf.sqrt(tf.reduce_sum(tf.square(v)) + eps)

def max_singular_value(W, u, Ip=1):
    # Ip power-iteration step(s) to estimate the largest singular value
    # sigma of the 2-D matrix W, starting from the row vector u.
    _u = u
    _v = 0
    for _ in range(Ip):
        _v = _l2normalize(tf.matmul(_u, W), eps=1e-12)
        _u = _l2normalize(tf.matmul(_v, W, transpose_b=True), eps=1e-12)
    # Treat the estimates as constants so gradients do not flow through
    # the power iteration itself.
    _v = tf.stop_gradient(_v)
    _u = tf.stop_gradient(_u)
    sigma = tf.reduce_sum(tf.matmul(_u, W) * _v)
    return sigma, _u, _v
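Written out, these two helpers implement one step of power iteration, the standard spectral-normalization estimate from Miyato et al.; with u and v as row vectors, matching the code:

\[
v \leftarrow \frac{uW}{\lVert uW \rVert_2}, \qquad
u \leftarrow \frac{vW^{\top}}{\lVert vW^{\top} \rVert_2}, \qquad
\sigma(W) \approx (uW) \cdot v, \qquad
W_{\mathrm{SN}} = \frac{W}{\sigma(W)}
\]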
def spectral_norm(name, W, Ip=1):
    # Persistent estimate of the leading singular vector, one per layer
    # (hence the name prefix), carried across training steps.
    u = tf.get_variable(name + "_u", [1, W.shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)  # 1 x ch
    # Flatten the 4-D kernel into a 2-D matrix of shape [out_ch, h*w*in_ch].
    W_mat = tf.transpose(tf.reshape(W, [-1, W.shape[-1]]))
    sigma, _u, _ = max_singular_value(W_mat, u, Ip)
    # Update u for the next step, then divide W by its spectral norm.
    with tf.control_dependencies([tf.assign(u, _u)]):
        W_sn = W / sigma
    return W_sn
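To make the intended use clearer, here is a minimal sketch of how these ops would be called from a discriminator with spectral normalization enabled. The input shape and the layer names are invented for illustration; they are not taken from the TripleGAN code:

# Hypothetical usage sketch (shapes and layer names are illustrative only).
x = tf.placeholder(tf.float32, [64, 32, 32, 3])  # batch of 32x32 RGB images
h = conv_layer(x, filter_size=64, kernel=[3, 3], stride=2, sn=True, layer_name="d_conv1")
h = lrelu(h)
h = conv_layer(h, filter_size=128, kernel=[3, 3], stride=2, sn=True, layer_name="d_conv2")
h = lrelu(flatten(h))
logits = linear(h, 1, layer_name="d_logit")

Each layer_name must be unique, since it is baked into both the kernel variable name ("kernel" + layer_name) and the power-iteration variable (layer_name + "_u").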
Below are some related problems and their fixes:
Any ideas how I can solve the problem shown below? From what I found on the web it is associated with reusing a TensorFlow scope, however nothing works.
ValueError: Variable rnn/basic_rnn_cell/kernel already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:
File "/code/backend/management/commands/RNN.py", line 370, in predict
states_series, current_state = tf.nn.dynamic_rnn(cell=cell, inputs=batchX_placeholder, dtype=tf.float32)
File "/code/backend/management/commands/RNN.py", line 499, in Command
predict("string")
File "/code/backend/management/commands/RNN.py", line 12, in <module>
class Command(BaseCommand):
I tried, for instance, something like this:
with tf.variable_scope('scope'):
    states_series, current_state = tf.nn.dynamic_rnn(cell=cell, inputs=batchX_placeholder, dtype=tf.float32)
and this:
with tf.variable_scope('scope', reuse=True):
    states_series, current_state = tf.nn.dynamic_rnn(cell=cell, inputs=batchX_placeholder, dtype=tf.float32)
and this:
with tf.variable_scope('scope', reuse=tf.AUTO_REUSE):
    states_series, current_state = tf.nn.dynamic_rnn(cell=cell, inputs=batchX_placeholder, dtype=tf.float32)
Any ideas?
Answer:
Does this happen when you run the model for the first time (upon opening a new python console)?
If not, you need to clear your computational graph. You can do that by putting this line at the beginning of your script:
tf.reset_default_graph()
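For instance, a minimal sketch of the pattern (the cell size, placeholder shape, and function name are invented for illustration, not from the asker's code):

import tensorflow as tf

def build_and_run():
    # Clearing the default graph lets this function be called repeatedly
    # (e.g. from Spyder or a notebook) without "variable already exists" errors.
    tf.reset_default_graph()
    cell = tf.nn.rnn_cell.BasicRNNCell(num_units=16)
    batchX_placeholder = tf.placeholder(tf.float32, [None, 10, 8])  # batch x time x features
    states_series, current_state = tf.nn.dynamic_rnn(cell=cell, inputs=batchX_placeholder, dtype=tf.float32)
    return states_series, current_state

build_and_run()
build_and_run()  # would raise the ValueError without tf.reset_default_graph()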
TensorFlow error: ValueError: Trying to share variable CON/conv2/W, but specified shape (3, 3, 128, 256) and found shape (3, 3, 128, 128).
My situation was this:
In one convolutional layer I supplied a filter with shape (3, 3, 128, 256), and through tf.variable_scope and related calls the filter's name became CON/conv2/W. The error was raised when tf.get_variable tried to create the initial variable.
The relevant part of the code:
...
conv3 = conv_layer_new(relu2, [3, 3, 128, 256], strides=[1, 2, 2, 1], name='conv2')
...
def conv_layer_new(input, filter_shape, strides, name=None):
    with tf.variable_scope(name):
        W = tf.get_variable("W", filter_shape, initializer=tf.truncated_normal_initializer(0., 0.005))
        ...
Here is the key point:
Why did TensorFlow come up with the specific shape (3, 3, 128, 128) on its own? Because the error message says "found"!
So I wondered: did an earlier filter with the same name already exist, with shape (3, 3, 128, 128)?
That turned out to be exactly right: the failing convolutional layer used the same name as an earlier one, so TensorFlow pulled in the earlier variable's shape. After I renamed conv2 to conv3 (so the variable name became CON/conv3/W), everything worked.
In short, all of the above boils down to one thing: check your earlier variable names for duplicates, because if two variables share a name, TensorFlow will silently reuse the existing one. (This problem blocked me for quite a while, so I am writing it down to avoid making the same mistake again.)
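A minimal sketch that reproduces the collision and the fix. The scope name CON, the function, and the shapes follow the post; the blog does not show where variable reuse was enabled, so the AUTO_REUSE here is an assumption made to reproduce the exact "Trying to share" message:

import tensorflow as tf

def conv_layer_new(input, filter_shape, strides, name=None):
    with tf.variable_scope(name):
        W = tf.get_variable("W", filter_shape,
                            initializer=tf.truncated_normal_initializer(0., 0.005))
        return tf.nn.conv2d(input, W, strides=strides, padding='SAME')

x = tf.placeholder(tf.float32, [1, 32, 32, 128])
# AUTO_REUSE makes TensorFlow silently reuse an existing variable of the
# same name, which turns a name clash into a shape-mismatch error.
with tf.variable_scope('CON', reuse=tf.AUTO_REUSE):
    relu2 = conv_layer_new(x, [3, 3, 128, 128], [1, 1, 1, 1], name='conv2')  # creates CON/conv2/W
    # Same name, different shape -> ValueError: Trying to share variable
    # CON/conv2/W, but specified shape (3, 3, 128, 256) and found shape (3, 3, 128, 128).
    # conv3 = conv_layer_new(relu2, [3, 3, 128, 256], [1, 2, 2, 1], name='conv2')
    conv3 = conv_layer_new(relu2, [3, 3, 128, 256], [1, 2, 2, 1], name='conv3')  # fix: CON/conv3/W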
---------------------
Author: yyhhlancelot
Source: CSDN
Original: https://blog.csdn.net/yyhhlancelot/article/details/82979235