Downloading the Fashion-MNIST dataset for local use

Importing the local Fashion-MNIST dataset

The loader below reads the four IDX gzip files from a local directory and returns the same (x_train, y_train), (x_test, y_test) tuples as keras.datasets.fashion_mnist.load_data():

```
from tensorflow.keras.utils import get_file
import gzip
import numpy as np


def load_data():
    # Local directory holding the four Fashion-MNIST IDX gzip files.
    base = "file:///D:/fasionmnist/"
    files = [
        'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
    ]

    # get_file copies each file into the Keras cache (~/.keras/datasets)
    # and returns the cached path; file:// origins work like any other URL.
    paths = []
    for fname in files:
        paths.append(get_file(fname, origin=base + fname))

    # IDX format: label files carry an 8-byte header, image files a
    # 16-byte header, followed by raw uint8 data.
    with gzip.open(paths[0], 'rb') as lbpath:
        y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(paths[1], 'rb') as imgpath:
        x_train = np.frombuffer(
            imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)

    with gzip.open(paths[2], 'rb') as lbpath:
        y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(paths[3], 'rb') as imgpath:
        x_test = np.frombuffer(
            imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)

    return (x_train, y_train), (x_test, y_test)
```
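
If you'd rather not go through get_file (which copies the files into the Keras cache under ~/.keras/datasets), here is a minimal sketch that reads the same four files straight from the local folder; the directory path and file names are assumed to match the ones above:

```
import os
import gzip
import numpy as np


def load_data_direct(data_dir="D:/fasionmnist"):
    """Read the four Fashion-MNIST gzip files directly, with no caching step."""
    def read_labels(fname):
        # Skip the 8-byte IDX header, then read raw uint8 labels.
        with gzip.open(os.path.join(data_dir, fname), 'rb') as f:
            return np.frombuffer(f.read(), np.uint8, offset=8)

    def read_images(fname, n):
        # Skip the 16-byte IDX header, then reshape to n 28x28 images.
        with gzip.open(os.path.join(data_dir, fname), 'rb') as f:
            return np.frombuffer(f.read(), np.uint8, offset=16).reshape(n, 28, 28)

    y_train = read_labels('train-labels-idx1-ubyte.gz')
    x_train = read_images('train-images-idx3-ubyte.gz', len(y_train))
    y_test = read_labels('t10k-labels-idx1-ubyte.gz')
    x_test = read_images('t10k-images-idx3-ubyte.gz', len(y_test))
    return (x_train, y_train), (x_test, y_test)
```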

Viewing images from the dataset

Save the loader above as Load_Data.py next to the script below, then:

```
import tensorflow as tf
import matplotlib.pyplot as plt

import Load_Data  # the loader defined above, saved as Load_Data.py

print(tf.__version__)

(train_images, train_labels), (test_images, test_labels) = Load_Data.load_data()

# Human-readable names for the 10 Fashion-MNIST label indices (0-9).
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

print(train_images.shape)   # (60000, 28, 28)
print(len(train_images))    # 60000
print(test_images.shape)    # (10000, 28, 28)
print(len(test_labels))     # 10000

# Show the first training image with a pixel-intensity colorbar.
plt.figure(figsize=(5, 5))
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
```
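
class_names is defined but not yet used above; a short follow-up that puts it to work, plotting the first 25 training images in a 5×5 grid with their label names (run in the same script, so train_images, train_labels, and class_names are in scope):

```
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])  # label index -> class name
plt.show()
```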

Q&A: What accuracy should fashion_mnist recognition reach?


What accuracy does fashion_mnist classification usually reach? Most results I see are around 92%, but one of my networks got to 94%, so I'd like to ask people who have tried this: what do you actually get?

```
# My results, for reference
x_shape: (60000, 28, 28)
y_shape: (60000,)
epoches: 0 val_acc: 0.4991 train_acc 0.50481665
epoches: 1 val_acc: 0.6765 train_acc 0.66735
epoches: 2 val_acc: 0.755 train_acc 0.7474
epoches: 3 val_acc: 0.7846 train_acc 0.77915
epoches: 4 val_acc: 0.798 train_acc 0.7936
epoches: 5 val_acc: 0.8082 train_acc 0.80365
epoches: 6 val_acc: 0.8146 train_acc 0.8107
epoches: 7 val_acc: 0.8872 train_acc 0.8872333
epoches: 8 val_acc: 0.896 train_acc 0.89348334
epoches: 9 val_acc: 0.9007 train_acc 0.8986
epoches: 10 val_acc: 0.9055 train_acc 0.90243334
epoches: 11 val_acc: 0.909 train_acc 0.9058833
epoches: 12 val_acc: 0.9112 train_acc 0.90868336
epoches: 13 val_acc: 0.9126 train_acc 0.91108334
epoches: 14 val_acc: 0.9151 train_acc 0.9139
epoches: 15 val_acc: 0.9172 train_acc 0.91595
epoches: 16 val_acc: 0.9191 train_acc 0.91798335
epoches: 17 val_acc: 0.9204 train_acc 0.91975
epoches: 18 val_acc: 0.9217 train_acc 0.9220333
epoches: 19 val_acc: 0.9252 train_acc 0.9234667
epoches: 20 val_acc: 0.9259 train_acc 0.92515
epoches: 21 val_acc: 0.9281 train_acc 0.9266667
epoches: 22 val_acc: 0.9289 train_acc 0.92826664
epoches: 23 val_acc: 0.9301 train_acc 0.93005
epoches: 24 val_acc: 0.9315 train_acc 0.93126667
epoches: 25 val_acc: 0.9322 train_acc 0.9328
epoches: 26 val_acc: 0.9331 train_acc 0.9339667
epoches: 27 val_acc: 0.9342 train_acc 0.93523335
epoches: 28 val_acc: 0.9353 train_acc 0.93665
epoches: 29 val_acc: 0.9365 train_acc 0.9379333
epoches: 30 val_acc: 0.9369 train_acc 0.93885
epoches: 31 val_acc: 0.9387 train_acc 0.9399
epoches: 32 val_acc: 0.9395 train_acc 0.9409
epoches: 33 val_acc: 0.94 train_acc 0.9417667
epoches: 34 val_acc: 0.9403 train_acc 0.94271666
epoches: 35 val_acc: 0.9409 train_acc 0.9435167
epoches: 36 val_acc: 0.9418 train_acc 0.94443333
epoches: 37 val_acc: 0.942 train_acc 0.94515
epoches: 38 val_acc: 0.9432 train_acc 0.9460667
epoches: 39 val_acc: 0.9443 train_acc 0.9468833
epoches: 40 val_acc: 0.9445 train_acc 0.94741666
epoches: 41 val_acc: 0.9462 train_acc 0.9482
epoches: 42 val_acc: 0.947 train_acc 0.94893336
epoches: 43 val_acc: 0.9472 train_acc 0.94946665
epoches: 44 val_acc: 0.948 train_acc 0.95028335
epoches: 45 val_acc: 0.9486 train_acc 0.95095
epoches: 46 val_acc: 0.9488 train_acc 0.9515833
epoches: 47 val_acc: 0.9492 train_acc 0.95213336
epoches: 48 val_acc: 0.9495 train_acc 0.9529833
epoches: 49 val_acc: 0.9498 train_acc 0.9537
val_acc: 0.9498
```

```
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt


def to_onehot(y, num):
    # Build a (num, len(y)) indicator matrix, then transpose to (len(y), num).
    labels = np.zeros([num, len(y)])
    for i in range(len(y)):
        labels[y[i], i] = 1
    return labels.T


# Preprocess the data
mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
print('x_shape:', train_images.shape)  # (60000, 28, 28)
print('y_shape:', train_labels.shape)  # (60000,)

# Flatten 28x28 images to 784-dim rows and scale pixels to [0, 1].
X_train = train_images.reshape((-1, train_images.shape[1] * train_images.shape[2])) / 255.0
Y_train = to_onehot(train_labels, 10)
X_test = test_images.reshape((-1, test_images.shape[1] * test_images.shape[2])) / 255.0
Y_test = to_onehot(test_labels, 10)

# Two-hidden-layer network hyperparameters
input_nodes = 784
output_nodes = 10
layer1_nodes = 100
layer2_nodes = 50
batch_size = 100
regularization_rate = 0.0000001
epochs = 50
learning_rate = 0.005


def train():
    X = tf.placeholder(tf.float32, [None, input_nodes], name="input_x")
    Y = tf.placeholder(tf.float32, [None, output_nodes], name="y_true")

    w1 = tf.Variable(tf.truncated_normal([input_nodes, layer1_nodes], stddev=0.1))
    b1 = tf.Variable(tf.constant(0.1, shape=[layer1_nodes]))
    w2 = tf.Variable(tf.truncated_normal([layer1_nodes, layer2_nodes], stddev=0.1))
    b2 = tf.Variable(tf.constant(0.1, shape=[layer2_nodes]))
    w3 = tf.Variable(tf.truncated_normal([layer2_nodes, output_nodes], stddev=0.1))
    b3 = tf.Variable(tf.constant(0.1, shape=[output_nodes]))

    layer1 = tf.nn.relu(tf.matmul(X, w1) + b1)
    A2 = tf.nn.relu(tf.matmul(layer1, w2) + b2)
    # Note: a ReLU on the output layer is unusual; logits are normally left linear.
    A3 = tf.nn.relu(tf.matmul(A2, w3) + b3)
    y_hat = tf.nn.softmax(A3)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=A3, labels=Y))
    # l2_regularizer already scales by regularization_rate.
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
    regularization = regularizer(w1) + regularizer(w2) + regularizer(w3)
    loss = cross_entropy + regularization

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

    correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    total_loss = []
    val_acc = []
    total_train_acc = []
    x_Xsis = []
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(epochs):
            # ceil(60000 / 100) mini-batches per epoch
            batches = (X_train.shape[0] + batch_size - 1) // batch_size
            loss_e = 0.
            for j in range(batches):
                # rows j*batch_size .. (j+1)*batch_size, end clamped to the dataset size
                start = j * batch_size
                end = min(X_train.shape[0], (j + 1) * batch_size)
                batch_x = X_train[start:end, :]
                batch_y = Y_train[start:end, :]
                sess.run(train_step, feed_dict={X: batch_x, Y: batch_y})
                loss_e += sess.run(loss, feed_dict={X: batch_x, Y: batch_y})
            validate_acc = sess.run(accuracy, feed_dict={X: X_test, Y: Y_test})
            train_acc = sess.run(accuracy, feed_dict={X: X_train, Y: Y_train})
            print("epoches: ", i, "val_acc: ", validate_acc, "train_acc", train_acc)
            total_loss.append(loss_e / batches)  # average per-batch loss this epoch
            val_acc.append(validate_acc)
            total_train_acc.append(train_acc)
            x_Xsis.append(i)
        validate_acc = sess.run(accuracy, feed_dict={X: X_test, Y: Y_test})
        print("val_acc: ", validate_acc)
    return (x_Xsis, total_loss, total_train_acc, val_acc)


def plot_acc(total_train_acc, val_acc, x):
    plt.figure()
    plt.plot(x, total_train_acc, '--', color="red", label="train_acc")
    plt.plot(x, val_acc, color="green", label="val_acc")
    plt.xlabel("Epochs")
    plt.ylabel("acc")
    plt.legend()
    plt.show()


x_axis, total_loss, total_train_acc, val_acc = train()
plot_acc(total_train_acc, val_acc, x_axis)
```
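
For comparison, here is a sketch of the same two-hidden-layer architecture written against the tf.keras API (assuming TF 2.x; this is not the asker's code). Layer sizes, SGD learning rate, batch size, and epoch count are taken from the script above, and sparse_categorical_crossentropy stands in for the manual one-hot encoding:

```
import tensorflow as tf
from tensorflow import keras

(train_images, train_labels), (test_images, test_labels) = \
    keras.datasets.fashion_mnist.load_data()

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),  # 784 inputs
    keras.layers.Dense(100, activation='relu'),  # first hidden layer
    keras.layers.Dense(50, activation='relu'),   # second hidden layer
    keras.layers.Dense(10)                       # linear logits, no output ReLU
])
model.compile(optimizer=keras.optimizers.SGD(learning_rate=0.005),
              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(train_images / 255.0, train_labels, batch_size=100, epochs=50,
          validation_data=(test_images / 255.0, test_labels))
```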
