最近看了一篇 Correlational Neural Networks 的论文,这篇论文主要利用了迁移学习的思想。
以下是简单的实现代码
def Autoencoder(left_input, right_input, left_units=None, right_units=None, hidden_units=None):
    """Correlational autoencoder: two input views encoded into one shared hidden layer.

    Args:
        left_input: tensor/placeholder of shape [None, left_units] (left view).
        right_input: tensor/placeholder of shape [None, right_units] (right view).
        left_units: feature dimension of the left view.
        right_units: feature dimension of the right view.
        hidden_units: size of the shared (common) hidden representation.

    Returns:
        (left_decode, right_decode): reconstructions of both views, each decoded
        from the shared hidden representation.
    """
    # Encoder weights: both views project into the same hidden space and the
    # projections are summed with a single shared bias.
    # (Removed unused per-view encoder biases b_left/b_right — they were never
    # referenced by the graph.)
    w_left = tf.Variable(tf.truncated_normal([left_units, hidden_units], stddev=0.1))
    w_right = tf.Variable(tf.truncated_normal([right_units, hidden_units], stddev=0.1))
    b = tf.Variable(tf.random_normal([hidden_units]))

    # Decoder weights: map the shared representation back to each view.
    left_w = tf.Variable(tf.truncated_normal([hidden_units, left_units], stddev=0.1))
    left_b = tf.Variable(tf.random_normal([left_units]))
    right_w = tf.Variable(tf.truncated_normal([hidden_units, right_units], stddev=0.1))
    right_b = tf.Variable(tf.random_normal([right_units]))

    # Shared encoding: h = tanh(x_left @ W_left + x_right @ W_right + b)
    encoder = tf.nn.tanh(
        tf.add(tf.add(tf.matmul(left_input, w_left), tf.matmul(right_input, w_right)), b)
    )

    # Decode each view from the common representation.
    left_decode = tf.nn.tanh(tf.add(tf.matmul(encoder, left_w), left_b))
    right_decode = tf.nn.tanh(tf.add(tf.matmul(encoder, right_w), right_b))
    return left_decode, right_decode
def model():
    """Build and train the correlational autoencoder.

    Creates placeholders for the two views (41- and 21-dimensional), builds the
    autoencoder with a 10-unit shared hidden layer, and minimizes the summed
    squared reconstruction error of both views with Adam.
    """
    left_input = tf.placeholder(tf.float32, [None, 41])
    right_input = tf.placeholder(tf.float32, [None, 21])
    left_decode, right_decode = Autoencoder(left_input, right_input, 41, 21, 10)

    # Squared reconstruction error per view; the total loss is their sum.
    left_cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(left_input, left_decode), 2.0))
    right_cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(right_input, right_decode), 2.0))
    cost = left_cost + right_cost
    optimizer = tf.train.AdamOptimizer(0.05, 0.9, 0.999, 1e-5).minimize(cost)

    sess = tf.InteractiveSession()
    try:
        sess.run(tf.global_variables_initializer())
        data = read_data_sets()
        for i in range(50000):
            batch = data.train.next_batch(10)
            # BUG FIX: the training step must run on EVERY iteration; previously
            # sess.run([optimizer, ...]) was inside the `i % 100` check, so only
            # 1 in 100 iterations actually updated the weights.
            _, c = sess.run(
                [optimizer, cost],
                feed_dict={left_input: batch[0], right_input: batch[1]},
            )
            if i % 100 == 0:
                # Print the cost scalar (previously the raw [None, cost] list
                # returned by sess.run was printed).
                print("cost=", c)
    finally:
        # Ensure the session's resources are released even on error.
        sess.close()
# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    model()