Training TensorFlow on Your Own Data (Selectively Restoring Weights) (26) --- Deep Learning

Fine-tune the inception_v3 model on the generated data batches: restore the pretrained weights, add a new 182-way classification layer for the target dataset, and train. Two ways of selectively restoring the checkpoint are shown below.

1) Restoring with slim.assign_from_checkpoint_fn

# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
#from tensorflow.contrib.slim.nets.inception import inception_v3, inception_v3_arg_scope
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes = 1001  # inception_v3's original ImageNet head

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])  # one-hot labels for the 182 target classes
with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    # Build the pretrained backbone. is_training=False keeps batch norm and
    # dropout in inference mode while the new head is trained.
    logits, end_points = nets.inception.inception_v3(X, num_classes=num_classes, is_training=False)

    # Flatten the 1001-way logits so they can feed a new fully connected layer.
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])

    # New 1001 -> 182 classification layer for the target dataset.
    fc0_weights = tf.get_variable(name="fc0_weights", shape=(1001, 182),
                                  initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(name="fc0_biases", shape=(182,),
                                 initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)

    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182  # number of classes in the target dataset (for the data loader)
    batch_size = 3
    epochs = 2
    batches = img_convert.data_lrn(img_convert.load_data(o_dir, num_classes, batch_size))

    # Build a restore function for every model variable found in the checkpoint.
    # ignore_missing_vars=True skips variables the checkpoint does not contain
    # (e.g. the new fc0_weights/fc0_biases), which keep their fresh initialization.
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join("E:/", 'inception_v3.ckpt'),
        slim.get_model_variables(),
        ignore_missing_vars=True)
    # Initialize everything first, then overwrite the backbone weights from the checkpoint.
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    init_fn(sess)

    for epoch in range(epochs):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
    # Evaluate on the first batch (images and labels must come from the same batch).
    acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
    print(acc)
    print("Done")

2) Restoring with saver.restore

Original version:

# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
#from tensorflow.contrib.slim.nets.inception import inception_v3, inception_v3_arg_scope
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes=1001

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])  # one-hot labels for the 182 target classes
with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    logits, end_points = nets.inception.inception_v3(X, num_classes=num_classes, is_training=False)
    # Intended to restore everything except the Mixed_7c block so it can be
    # retrained. Called before the new fc0 variables are created, so those are
    # not in the list. NOTE: exclude entries are matched as name-scope prefixes
    # and the model's variables live under 'InceptionV3/', so 'Mixed_7c' alone
    # excludes nothing here; see the note and the improved version below.
    variables_to_restore = slim.get_variables_to_restore(exclude=['Mixed_7c'])

    # Flatten the 1001-way logits so they can feed a new fully connected layer.
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])

    # New 1001 -> 182 classification layer for the target dataset.
    fc0_weights = tf.get_variable(name="fc0_weights", shape=(1001, 182),
                                  initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(name="fc0_biases", shape=(182,),
                                 initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)

    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182  # number of classes in the target dataset (for the data loader)
    batch_size = 3
    epochs = 2
    batches = img_convert.data_lrn(img_convert.load_data(o_dir, num_classes, batch_size))
    sess.run(tf.global_variables_initializer())
    # Restore only the selected weights; variables not in the list keep their
    # fresh initialization.
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, os.path.join("E:\\", "inception_v3.ckpt"))

    for epoch in range(epochs):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
    acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
    print(acc)
    print("Done")

Improved version:

# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
#from tensorflow.contrib.slim.nets.inception import inception_v3, inception_v3_arg_scope
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes=1001

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])  # one-hot labels for the 182 target classes
with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    logits, end_points = nets.inception.inception_v3(X, num_classes=num_classes, is_training=False)
    # Start from all restorable variables and drop every variable whose name
    # contains 'Mixed_7c'. Matching a substring of v.name sidesteps the
    # prefix-matching pitfall of get_variables_to_restore(exclude=...).
    variables_to_restore_t = []
    variables_to_restore = slim.get_variables_to_restore()
    for v in variables_to_restore:
        if 'Mixed_7c' not in v.name:
            variables_to_restore_t.append(v)
    variables_to_restore = variables_to_restore_t
    # Sanity check: list what will actually be restored.
    for v in variables_to_restore:
        print(v.name)

    # Flatten the 1001-way logits so they can feed a new fully connected layer.
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])

    # New 1001 -> 182 classification layer for the target dataset.
    fc0_weights = tf.get_variable(name="fc0_weights", shape=(1001, 182),
                                  initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(name="fc0_biases", shape=(182,),
                                 initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)

    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182  # number of classes in the target dataset (for the data loader)
    batch_size = 3
    epochs = 2
    batches = img_convert.data_lrn(img_convert.load_data(o_dir, num_classes, batch_size))
    sess.run(tf.global_variables_initializer())
    # Restore only the filtered list; Mixed_7c and the new fc0 layer keep
    # their fresh initialization.
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, os.path.join("E:\\", "inception_v3.ckpt"))
    for epoch in range(epochs):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
    acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
    print(acc)
    print("Done")