Code for Learning Fully-Connected Layer Parameters with TensorFlow

The algorithm first builds triplet data: the placeholder X holds the anchor (original) feature points, Xp the matching (positive) feature points, and Xn the non-matching (negative) feature points.

The goal of training is that the Euclidean distance PDis between the anchor and the positive sample, plus a given margin threshold, becomes smaller than the Euclidean distance NDis between the anchor and the negative sample.
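Written out, with f(·) denoting the linear encoder defined in the code below, the objective is:

    PDis = ||f(X) - f(Xp)||^2,    NDis = ||f(X) - f(Xn)||^2
    constraint to satisfy:  PDis + threshold < NDis
    loss to minimize:       L = mean(PDis - NDis + threshold)

The loss goes negative exactly when the margin constraint holds on average, which is the stopping condition used in the training loop below.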

The full code follows:
from __future__ import division, print_function, absolute_import

import tensorflow as tf
import numpy as np

# Training Parameters
learning_rate = 0.005
num_steps = 100
n_epoch = 5
batch_size = 250

display_step = 5
examples_to_show = 10

# Network Parameters
num_hidden_1 = 20  # 1st layer num features
num_hidden_2 = 20  # 2nd layer num features (the latent dim)
num_input = 882  # length of each input feature vector

out_file = "D:/data/out.txt"
# tf Graph input: anchor, positive, and negative feature vectors
X = tf.placeholder("float", [None, num_input])
Xp = tf.placeholder("float", [None, num_input])
Xn = tf.placeholder("float", [None, num_input])
thresh = 2500000  # margin between positive-pair and negative-pair distances

weights = {
    'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_2]))
}
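# The model is a single shared linear projection: one weight matrix of shape
# (num_input, num_hidden_2) = (882, 20) applied identically to the anchor, the
# positive, and the negative input (a Siamese/triplet arrangement).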


# Building the encoder
def encoder(x, xp, xn):
    # One shared linear layer (no activation)
    layer_1 = tf.matmul(x, weights['encoder_h1'])     # anchor (original) features
    layer_1p = tf.matmul(xp, weights['encoder_h1'])   # matching (positive) features
    layer_1n = tf.matmul(xn, weights['encoder_h1'])   # non-matching (negative) features
    return layer_1, layer_1p, layer_1n


# Construct model: the three embeddings share the same weights
encoder_x, encoder_yp, encoder_yn = encoder(X, Xp, Xn)

# Define loss: mean over the batch of (PDis - NDis + thresh);
# driving it below zero means positives sit closer than negatives by the margin
loss = tf.reduce_mean(tf.pow(encoder_x - encoder_yp, 2) - tf.pow(encoder_x - encoder_yn, 2) + thresh)
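# A common variant of this triplet loss clips at zero, e.g.
#   tf.reduce_mean(tf.maximum(0., pdis - ndis + thresh))
# (pdis/ndis standing for the two squared-difference terms above), so that triplets
# already separated by the margin stop contributing gradient; the unclipped form
# matches the "loss < 0" stopping rule used in the training loop below.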
#optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# (the session is created, initialized, and used in the training block below)



data = []
datap = []
datan = []

f = open("D:/data.txt", "r", encoding='utf-8')


# Lines in the file repeat in groups of four: when i%4 == 0 the line is a label,
# i%4 == 1 the anchor feature points, i%4 == 2 the matching feature points,
# and i%4 == 3 the non-matching feature points.
i = 0
for line in f:
    if i % 4 == 1:
        data.append(list(map(int, line.split())))
    elif i % 4 == 2:
        datap.append(list(map(int, line.split())))
    elif i % 4 == 3:
        datan.append(list(map(int, line.split())))
    i += 1
f.close()
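# Expected layout of D:/data.txt (inferred from the parsing above): groups of four
# lines per triplet -- label, anchor features, positive features, negative features --
# where each feature line holds num_input (882) whitespace-separated integers.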


num_example = i // 4

ratio = 1  # fraction of the triplets used for training
num_steps = num_example
s = int(num_example * ratio)
o_train = data[:s]
p_train = datap[:s]
n_train = datan[:s]
o_val = data[s:]  # validation set (empty while ratio == 1)
p_val = datap[s:]
n_val = datan[s:]


# Generator that yields the data in aligned batches
def minibatches(inputs=None, inputsp=None, inputsn=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(inputsp) and len(inputs) == len(inputsn)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield np.array(inputs)[excerpt], np.array(inputsp)[excerpt], np.array(inputsn)[excerpt]
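# Example of what the generator yields, assuming batch_size = 250:
#   for a, p, n in minibatches(o_train, p_train, n_train, batch_size, shuffle=True):
#       ...  # a, p, n are parallel (250, 882) arrays, one aligned triplet per row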



breakflag = 0

with tf.Session() as sess:
    sess.run(init)
    for _ in range(n_epoch):
        n_batch = 0
        for o_train_a, p_train_a, n_train_a in minibatches(o_train, p_train, n_train, batch_size, shuffle=True):
            o_train_a = np.array(o_train_a).reshape([-1, num_input])
            p_train_a = np.array(p_train_a).reshape([-1, num_input])
            n_train_a = np.array(n_train_a).reshape([-1, num_input])

            _, l = sess.run([optimizer, loss], feed_dict={X: o_train_a, Xp: p_train_a, Xn: n_train_a})
            n_batch += 1
            print("batches:%d  loss:%f" % (n_batch, l))

            if n_batch % display_step == 0:
                print('Step %i: Minibatch Loss: %f' % (n_batch, l))
                # Once the loss goes negative, the margin is satisfied on average:
                # export the learned weights and stop training.
                if l < 0:
                    np.set_printoptions(suppress=True)  # disable scientific notation
                    result2 = np.array(weights['encoder_h1'].eval())
                    result2 = np.round(result2 * 100)   # scale to integers for export
                    result3 = result2.T                 # shape (num_hidden_2, num_input)

                    print(result3)
                    np.savetxt('D:/data/w11.txt', result3, fmt=['%d,'] * result3.shape[1], newline='\r\n')
                    print(weights['encoder_h1'].eval())
                    breakflag = 1
                    break

        if breakflag == 1:
            break
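
To reuse the exported parameters outside TensorFlow, w11.txt can be read back into a NumPy matrix. The following is a minimal sketch, assuming the file was produced by the np.savetxt call above (values written as "%d," after scaling by 100 and transposing); x, xp, xn stand for any 882-dimensional feature triplet and are hypothetical here:

import numpy as np

# Read the exported weights back; each token looks like "123," so strip the comma.
rows = []
with open('D:/data/w11.txt') as fh:
    for line in fh:
        vals = [int(tok.rstrip(',')) for tok in line.split()]
        if vals:
            rows.append(vals)

# Undo the transpose and the *100 integer scaling: W has shape (882, 20).
W = np.array(rows, dtype=np.float64).T / 100.0

def embed(v):
    # Apply the learned linear layer to one 882-dim feature vector.
    return np.asarray(v, dtype=np.float64) @ W

# Hypothetical check on one triplet (x, xp, xn):
# pdis = np.sum((embed(x) - embed(xp)) ** 2)
# ndis = np.sum((embed(x) - embed(xn)) ** 2)
# The triplet is correctly separated when pdis + thresh < ndis.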
