TensorFlow Learning (3): Saving and Loading Models

Saving the model

import tensorflow as tf
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import math

np.random.seed(29)

def neural(x_train, y_train, batch_size, shuffle=False):
    # Yield mini-batches of (x, y); optionally shuffle the sample order first.
    indices = np.arange(x_train.shape[0])
    if shuffle:
        np.random.shuffle(indices)
    # math.ceil covers the final, smaller batch without ever yielding an empty one
    for step in range(math.ceil(x_train.shape[0] / batch_size)):
        batch_idx = indices[step * batch_size: step * batch_size + batch_size]
        yield x_train[batch_idx], y_train[batch_idx]

lr = load_iris()
x = lr.data
y = lr.target
y_ = np.reshape(y, [-1, 1])

# One-hot encode the labels and split into train/test sets
one = OneHotEncoder()
y_1 = one.fit_transform(y_).toarray()
x_train_1, x_test_2, y_train_1, y_test_2 = train_test_split(x, y_1, test_size=0.25, random_state=29)

x_data = tf.placeholder(dtype=tf.float32, shape=[None, 4], name="x_data")
y_target = tf.placeholder(dtype=tf.float32, shape=[None, 3], name="y_target")

with tf.variable_scope("net1"):
    # Hidden layer: 4 input features -> 10 ReLU units
    weights1 = tf.get_variable(name="w1", initializer=tf.random_uniform(shape=[4, 10], maxval=1 / 4, minval=-1 / 4, dtype=tf.float32))
    bias1 = tf.get_variable(name="b1", initializer=tf.truncated_normal(shape=[1, 10], stddev=0.1, dtype=tf.float32))
    out_1 = tf.matmul(x_data, weights1) + bias1
    op1 = tf.nn.relu(out_1)

with tf.variable_scope("net2"):
    # Output layer: 10 hidden units -> 3 class logits
    weights2 = tf.get_variable(name="w1", initializer=tf.random_uniform(shape=[10, 3], maxval=1 / 4, minval=-1 / 4, dtype=tf.float32))
    bias2 = tf.get_variable(name="b1", initializer=tf.truncated_normal(shape=[1, 3], stddev=0.1, dtype=tf.float32))
    out_2 = tf.matmul(op1, weights2) + bias2

with tf.variable_scope("train"):
    # Softmax cross-entropy loss; the explicit name makes the tensor easy to look up after restoring
    lost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_target, logits=out_2), name="lost")
    step_train = tf.train.AdamOptimizer(learning_rate=0.001).minimize(lost)

with tf.variable_scope("test"):
    predict = tf.argmax(out_2, axis=1, name="pre_")
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(out_2, axis=1), tf.argmax(y_target, axis=1)), dtype=tf.float32), name="acc_")

with tf.Session() as sess:
    x_train = x_train_1
    y_train = y_train_1
    sess.run(tf.global_variables_initializer())
    epoch = 300
    saver = tf.train.Saver()
    # Printing the tensor names here is what gives us the names needed for restore method 2 below:
    # print(x_data.name)    # x_data:0
    # print(y_target.name)  # y_target:0
    # print(lost.name)      # train/lost:0
    # print(predict.name)   # test/pre_:0

    for i in range(epoch):
        batch_onetime = 1
        acc = 0
        for x_train_, y_train_ in neural(x_train=x_train, y_train=y_train, batch_size=10):
            sess.run(step_train, feed_dict={x_data: x_train_, y_target: y_train_})
            accuracy_ = sess.run(accuracy, feed_dict={x_data: x_train_, y_target: y_train_})
            acc += accuracy_
            this_acc = acc / batch_onetime
            # Save a checkpoint whenever the running training accuracy exceeds 0.95
            if this_acc > 0.95:
                saver.save(sess, r"C:\Users\Public\Desktop\log\my_model.ckpt", global_step=i)
                print("Running accuracy after batch {}: {}".format(batch_onetime, this_acc))
            batch_onetime += 1
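
After saving, it is worth checking what actually landed in the checkpoint. Below is a minimal sketch, assuming the same log directory as above, that uses tf.train.NewCheckpointReader to list every saved variable and its shape (the Adam optimizer's slot variables are saved alongside the weights):

import tensorflow as tf

ckpt_path = tf.train.latest_checkpoint(r"C:\Users\Public\Desktop\log")
reader = tf.train.NewCheckpointReader(ckpt_path)
# Prints names such as net1/w1, net1/b1, net2/w1, net2/b1 together with their shapes
for name, shape in reader.get_variable_to_shape_map().items():
    print(name, shape)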

Restore method 1: first rebuild exactly the same graph structure that was saved, then use tf.train.Saver() to restore the model parameters into it; no further training is needed, prediction can be run directly.

import tensorflow as tf
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import math

np.random.seed(29)

lr = load_iris()
x = lr.data[50:160]   # iris has 150 samples, so this slice is rows 50..149
x_data = tf.placeholder(dtype=tf.float32, shape=[None, 4])

with tf.variable_scope("net1"):
    weights1 =  tf.get_variable(name="w1",initializer = tf.random_uniform(shape=[4,10], maxval=1 / 4, minval=-1 / 4,dtype = tf.float32))
    bais1 = tf.get_variable(name="b1",initializer=tf.truncated_normal(shape=[1,10],stddev=0.1,dtype=tf.float32))
    out_1 = tf.matmul(x_data,weights1)+bais1
    op1 = tf.nn.relu(out_1)

with tf.variable_scope("net2"):
    weights2 = tf.get_variable(name="w1", initializer=tf.random_uniform(shape=[10, 3], maxval=1 / 4, minval=-1 / 4,dtype=tf.float32))
    bais2 = tf.get_variable(name="b1", initializer=tf.truncated_normal(shape=[1, 3], stddev=0.1, dtype=tf.float32))
    out_2 = tf.matmul(op1, weights2) + bais2

with tf.variable_scope("test"):
    predict = tf.argmax(out_2,axis=1,name="pre_")
saver = tf.train.Saver()

with tf.Session() as sess:
    x_train = x
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(r"C:\Users\Public\Desktop\log")
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(sess.run(predict,feed_dict={x_data:x_train}))
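
Method 1 works because the rebuilt graph uses exactly the same scopes and variable names as the checkpoint. A related option, sketched below under the same assumption about the log directory: tf.train.Saver accepts a var_list, so only part of the checkpoint (here just the net1 layer) can be restored while everything else keeps its fresh initialization:

net1_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="net1")
partial_saver = tf.train.Saver(var_list=net1_vars)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())               # give every variable a value first
    ckpt_path = tf.train.latest_checkpoint(r"C:\Users\Public\Desktop\log")
    if ckpt_path:
        partial_saver.restore(sess, ckpt_path)                # then overwrite only net1/w1 and net1/b1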

Restore method 2: do not rebuild the graph at all; load both the graph structure and the parameters from the saved checkpoint and run prediction directly.

import tensorflow as tf
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import math

np.random.seed(29)

lr = load_iris()
# Pick some samples from each of the three iris classes for prediction
x1 = lr.data[10:20]
x2 = lr.data[50:70]
x3 = lr.data[140:150]
x = np.vstack([x1, x2, x3])
y1 = lr.target[10:20]
y2 = lr.target[50:70]
y3 = lr.target[140:150]
y = np.hstack([y1, y2, y3])
y_ = np.reshape(y, [-1, 1])
one = OneHotEncoder()
y_1 = one.fit_transform(y_).toarray()


# Tensor names recorded from the saving script (via .name):
#   x_data:0
#   y_target:0
#   train/lost:0
#   test/pre_:0

with tf.Session() as sess:
    x_train = x
    y_train = y_1
    sess.run(tf.global_variables_initializer())  # no-op here: the graph is still empty until import_meta_graph runs
    ckpt = tf.train.get_checkpoint_state(r"C:\Users\Public\Desktop\log")
    if ckpt and ckpt.model_checkpoint_path:
        # import_meta_graph rebuilds the saved graph structure; restore() then loads the variable values
        saver = tf.train.import_meta_graph(meta_graph_or_file="{}.meta".format(ckpt.model_checkpoint_path))
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Look up the tensors we need by the names recorded above
        lost = tf.get_default_graph().get_tensor_by_name("train/lost:0")
        predict = tf.get_default_graph().get_tensor_by_name("test/pre_:0")
        x_data = tf.get_default_graph().get_tensor_by_name("x_data:0")
        y_data = tf.get_default_graph().get_tensor_by_name("y_target:0")
        lost_, predict_y = sess.run([lost, predict], feed_dict={x_data: x_train, y_data: y_train})
        print(lost_, predict_y)
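
The saved graph also contains the accuracy op from the saving script (the reduce_mean named "acc_" inside the "test" scope), so method 2 can evaluate the restored model as well. Below is a minimal standalone sketch, assuming the tensor name test/acc_:0 follows from that scoping and the same checkpoint directory as above:

import tensorflow as tf
import numpy as np
from sklearn.datasets import load_iris
from sklearn.preprocessing import OneHotEncoder

lr = load_iris()
x = lr.data
y_1 = OneHotEncoder().fit_transform(np.reshape(lr.target, [-1, 1])).toarray()

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(r"C:\Users\Public\Desktop\log")
    if ckpt and ckpt.model_checkpoint_path:
        saver = tf.train.import_meta_graph("{}.meta".format(ckpt.model_checkpoint_path))
        saver.restore(sess, ckpt.model_checkpoint_path)
        graph = tf.get_default_graph()
        accuracy = graph.get_tensor_by_name("test/acc_:0")    # reduce_mean named "acc_" in scope "test"
        x_data = graph.get_tensor_by_name("x_data:0")
        y_data = graph.get_tensor_by_name("y_target:0")
        print(sess.run(accuracy, feed_dict={x_data: x, y_data: y_1}))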