以上错误是在执行
tf.train.Saver(max_to_keep=None).save(sess, os.path.join('model','model.ckpt'))
时发生的:保存 model 时输入队列的线程尚未关闭,导致 save 报出 warning/错误。
解决方案:
def train():
    """Train the network on a TFRecords dataset and save a checkpoint.

    Builds the training and validation input pipelines, runs up to
    ``max_iter`` optimization steps, periodically prints the training loss
    and a held-out-batch accuracy, and finally saves the model — only AFTER
    the queue-runner threads have been stopped and joined (saving while the
    queue threads are still running is what triggered the original error).
    """
    # --- training pipeline ---
    # encode_to_tfrecords()
    image, label = read_and_decode('/home/system/Python/train.tfrecords')
    batch_image, batch_label = get_batch(image, label, batch_size=50, crop_size=39)  # batch generation
    net = network()
    inf = net.inference(batch_image)
    loss = net.sorfmax_loss(inf, batch_label)
    opti = net.optimer(loss)

    # --- validation pipeline ---
    # NOTE(review): validation reads the SAME file as training — presumably a
    # placeholder; point this at a real validation set. TODO confirm.
    # encode_to_tfrecords()
    test_image, test_label = read_and_decode('/home/system/Python/train.tfrecords')
    test_images, test_labels = get_test_batch(test_image, test_label, batch_size=120, crop_size=39)
    test_inf = net.inference_test(test_images)
    correct_prediction = tf.equal(tf.cast(tf.argmax(test_inf, 1), tf.int32),
                                  tf.cast(test_labels, tf.int32))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    checkpoint_path = os.path.join('model', 'model.ckpt')
    # Build the Saver once and reuse it for both restore and save
    # (original constructed a fresh Saver at each call site).
    saver = tf.train.Saver(max_to_keep=None)

    with tf.Session() as session:
        # initialize_all_variables() is deprecated in later TF releases
        # (tf.global_variables_initializer) — kept for this TF version.
        session.run(tf.initialize_all_variables())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        # NOTE(review): os.path.exists on the bare ckpt path misses newer
        # multi-file checkpoints (.index/.data-*); consider
        # tf.train.get_checkpoint_state instead. TODO confirm TF version.
        if os.path.exists(checkpoint_path):  # was `... is True` — anti-pattern
            saver.restore(session, checkpoint_path)

        max_iter = 1000
        step = 0  # renamed from `iter`, which shadows the builtin
        while step < max_iter:
            loss_np, _, label_np, image_np, inf_np = session.run(
                [loss, opti, batch_label, batch_image, inf])
            if step % 50 == 0:
                # print() with a single pre-formatted string works identically
                # under Python 2 and 3 (original used py2-only print statements).
                print('trainloss: %s' % loss_np)
            if step % 500 == 0:
                accuracy_np = session.run([accuracy])
                print('***************test accuracy: %s *******************' % accuracy_np)
            step += 1

        # The queue must be shut down BEFORE saving, otherwise save() errors.
        coord.request_stop()
        coord.join(threads)
        saver.save(session, checkpoint_path)
        print('ok')
应该先关闭队列再保存model!!!
应该先关闭队列再保存model!!!
应该先关闭队列再保存model!!!