Data Mining: Fashion Category Recognition

I. Problem Description

The fashion category recognition task is to predict the clothing category shown in an image.

Dataset: Fashion-MNIST

Training set: 60,000 clothing images; each image is a 28×28 grayscale matrix with a class label in {0, 1, ..., 9} indicating the clothing category (T-shirt/top, Trouser, Pullover, Dress, Coat, Sandal, Shirt, Sneaker, Bag, Ankle boot). Test set: 10,000 test images.

II. Experiment Objectives

Load the Fashion-MNIST data.

Design a neural network algorithm to solve the clothing category prediction task.

Note: loading the Fashion-MNIST dataset can run into some problems; try to resolve them on your own.
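If downloading or parsing the raw files proves troublesome, one common workaround (a sketch, assuming TensorFlow is installed; the assignment code below parses the raw IDX files instead) is to load the dataset through Keras:

from tensorflow.keras.datasets import fashion_mnist
(train_x, train_y), (test_x, test_y) = fashion_mnist.load_data()
print(train_x.shape, test_x.shape)  # (60000, 28, 28) (10000, 28, 28)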

III. Experiment Content

1. Data loading: parse the Fashion-MNIST IDX files (a header-reading sketch follows this list).

2. Data processing: scale the pixel values to [0, 1] and one-hot encode the labels.
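The IDX files store a big-endian binary header in front of the raw bytes. A minimal sketch of reading the image-file header (the file path is an assumption; adjust it to where the files live):

import struct
with open("train-images-idx3-ubyte", "rb") as f:  # path is an assumption
    magic, n_images, n_rows, n_cols = struct.unpack(">IIII", f.read(16))
print(magic, n_images, n_rows, n_cols)  # expected: 2051 60000 28 28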

IV. Results and Analysis

V. Complete Code

model.py

import numpy as np
import struct
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MinMaxScaler
import neural as nn  # neural.py below, kept in the same directory

def read_image(file_name):
    with open(file_name, "rb") as file_handle:  # open the file in binary mode
        file_content = file_handle.read()       # read everything into memory
    head = struct.unpack_from('>IIII', file_content, 0)  # first 4 big-endian ints: magic, count, rows, cols
    offset = struct.calcsize('>IIII')
    imgNum = head[1]  # number of images
    width = head[2]   # image width (rows field in the IDX spec; 28 either way)
    height = head[3]  # image height
    bits = imgNum * width * height  # total pixel count, e.g. 60000*28*28
    bitsString = '>' + str(bits) + 'B'  # fmt string, e.g. '>47040000B'
    imgs = struct.unpack_from(bitsString, file_content, offset)  # unpack the pixel data as a tuple
    imgs_array = np.array(imgs).reshape((imgNum, width*height))  # reshape to (num_images, num_pixels)
    return imgs_array
def out_image(img):
    plt.figure()
    plt.imshow(img.reshape(28, 28), cmap='gray')  # display one flattened 28x28 grayscale image
    plt.show()
def read_label(file_name):
    with open(file_name, "rb") as file_handle:  # open the file in binary mode
        file_content = file_handle.read()       # read everything into memory
    head = struct.unpack_from('>II', file_content, 0)  # first 2 big-endian ints: magic, count
    offset = struct.calcsize('>II')
    labelNum = head[1]  # number of labels
    bitsString = '>' + str(labelNum) + 'B'  # fmt string, e.g. '>60000B'
    label = struct.unpack_from(bitsString, file_content, offset)  # unpack the label bytes as a tuple
    return np.array(label)
def process_features(X):  # feature preprocessing: rescale each feature to [0, 1]
    scaler = MinMaxScaler(feature_range=(0, 1))
    X = scaler.fit_transform(1.0*X)
    return X
def get_data(m_train,m_test):
    # File paths (adjust to your local setup)
    train_image = "D:/PyCharm/Project/machine_learning/machine_learning/homework/week13/train-images-idx3-ubyte"
    test_image = "D:/PyCharm/Project/machine_learning/machine_learning/homework/week13/t10k-images-idx3-ubyte"
    train_label = "D:/PyCharm/Project/machine_learning/machine_learning/homework/week13/train-labels-idx1-ubyte"
    test_label = "D:/PyCharm/Project/machine_learning/machine_learning/homework/week13/t10k-labels-idx1-ubyte"
    # Read the data
    train_x = read_image(train_image)
    test_x = read_image(test_image)
    train_y = read_label(train_label)
    test_y = read_label(test_label)

    train_x = train_x.reshape(-1,784)
    test_x = test_x.reshape(-1,784)

    train_x = (train_x / 255)[:m_train]  # normalize pixels to [0, 1] and keep a subset of samples
    test_x = (test_x / 255)[:m_test]

    train_x = process_features(train_x)  # per-feature min-max scaling (largely redundant after /255)
    test_x = process_features(test_x)

    train_y = train_y[:m_train]
    test_y = test_y[:m_test]

    return train_x,test_x,train_y,test_y

def create_layers():            # build the network architecture
    n_features=28*28            # input dimension
    n_hidden1=300               # neurons in hidden layer 1
    n_hidden2=100               # neurons in hidden layer 2
    n_classes=10                # number of classes = output neurons
    layers=[]
    relu=nn.ReLUActivator()     # activation function
    layers.append(nn.Layer(n_features,n_hidden1,activator=relu))    # layer 1
    layers.append(nn.Layer(n_hidden1,n_hidden2,activator=relu))     # layer 2
    layers.append(nn.Layer(n_hidden2,n_classes))                    # output layer (identity; softmax is applied in the loss)
    return layers
def convert_to_vector(y):  # one-hot encode the class labels
    m=len(y)
    k=np.max(y)+1          # assumes every class appears in y; otherwise set k = 10 explicitly
    v=np.zeros(m*k).reshape(m,k)
    for i in range(m):
        v[i][y[i]]=1
    return v
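# Example: convert_to_vector(np.array([2, 0, 1])) returns
# [[0, 0, 1],
#  [1, 0, 0],
#  [0, 1, 0]]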

def run():
    m_train,m_test = 2000,200
    X_train,X_test,y_train,y_test = get_data(m_train,m_test)
    print(X_train.shape,X_test.shape,y_train.shape,y_test.shape)  # sanity-check the shapes

    layers = create_layers()
    loss = nn.SoftmaxCrossEntropy()
    model = nn.NeuralNetwork(layers,loss)
    iterations = 5000
    model.fit(X_train,convert_to_vector(y_train),iterations,0.01)
    v = model.predict(X_test)
    proba = nn.softmax(v.T).T  # softmax normalizes along axis 0 (classes), so transpose the row-wise scores
    y_pred = np.argmax(proba,axis=1)
    print(accuracy_score(y_test,y_pred))

if __name__ == "__main__":
    run()
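
As a quick visual check (a sketch using the helpers above), one can display a sample image together with its label:

# Visual spot check: show the first training image with its label.
X_train, X_test, y_train, y_test = get_data(10, 1)
out_image(X_train[0])        # out_image reshapes the 784-vector to 28x28
print("label:", y_train[0])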

neural.py

import numpy as np

class IdentityActivator:
    def value(self, s):
        return s

    def derivative(self, s):
        return 1

class ReLUActivator:
    def value(self, s):
        return np.maximum(0, s)

    def derivative(self, s):
        return (s > 0).astype(float)  # np.int was removed in NumPy 1.24; plain float works here


class Layer:
    def __init__(self, n_input, n_output, activator=IdentityActivator()):
        self.activator = activator
        r = np.sqrt(6.0 / (n_input + n_output))  # Xavier/Glorot uniform initialization
        self.W = np.random.uniform(-r, r, (n_output, n_input))
        self.b = np.zeros((n_output, 1))
        self.outputs = np.zeros((n_output, 1))

    def forward(self, inputs):
        self.inputs = inputs
        self.sums = self.W.dot(inputs) + self.b
        self.outputs = self.activator.value(self.sums)

    def back_propagation(self, delta_in, learning_rate):
        d = self.activator.derivative(self.sums) * delta_in  # delta at the pre-activation values
        self.delta_out = self.W.T.dot(d)  # delta for the previous layer (computed before W is updated)
        self.W_grad = d.dot(self.inputs.T)
        self.b_grad = d
        self.W -= learning_rate * self.W_grad
        self.b -= learning_rate * self.b_grad


class MSE:
    def value(self, y, v):
        return (v - y) ** 2

    def derivative(self, y, v):
        return 2 * (v - y)

def softmax(v):
    e = np.exp(v - v.max(axis=0, keepdims=True))  # subtract the max for numerical stability
    return e / e.sum(axis=0, keepdims=True)       # normalize along the class axis (axis 0)


class SoftmaxCrossEntropy:
    def value(self, y, v):
        p = softmax(v)
        return - (y * np.log(p)).sum()

    def derivative(self, y, v):
        p = softmax(v)
        return p - y
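# Note: pairing softmax with cross-entropy collapses the gradient to p - y:
# for one-hot y, d/dv_j of -sum_k y_k*log(softmax(v)_k) = softmax(v)_j - y_j,
# which is why derivative() above needs no explicit Jacobian.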


class NeuralNetwork:
    def __init__(self, layers, loss):
        self.layers = layers
        self.loss = loss

    def forward(self, x):
        layers = self.layers
        inputs = x
        for layer in layers:
            layer.forward(inputs)
            inputs = layer.outputs
        return inputs

    def back_propagation(self, y, outputs, learning_rate):
        delta_in = self.loss.derivative(y, outputs)
        for layer in self.layers[::-1]:
            layer.back_propagation(delta_in, learning_rate)
            delta_in = layer.delta_out

    def fit(self, X, y, N, learning_rate):
        # Stochastic gradient descent: one randomly chosen sample per iteration.
        for t in range(N):
            i = np.random.randint(0, len(X))
            outputs = self.forward(X[i].reshape(-1, 1))
            self.back_propagation(y[i].reshape(-1, 1), outputs, learning_rate)

    def predict(self, X):
        # Return the raw output scores, one row per sample.
        y = []
        for i in range(len(X)):
            p = self.forward(X[i].reshape(-1, 1)).reshape(-1)
            y.append(p)
        return np.array(y)
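
A minimal sanity check for neural.py (a sketch, not part of the original assignment; the synthetic task and all names here are illustrative): train the network on a tiny learnable problem and confirm the updates actually improve accuracy.

# sanity_check.py
import numpy as np
import neural as nn

np.random.seed(0)
X = np.random.rand(200, 4)             # 200 samples, 4 features
y_idx = np.argmax(X[:, :3], axis=1)    # class = index of the largest of the first 3 features
y = np.zeros((200, 3))
y[np.arange(200), y_idx] = 1           # one-hot encode

layers = [nn.Layer(4, 16, activator=nn.ReLUActivator()), nn.Layer(16, 3)]
model = nn.NeuralNetwork(layers, nn.SoftmaxCrossEntropy())
model.fit(X, y, 5000, 0.05)
y_pred = np.argmax(model.predict(X), axis=1)
print("training accuracy:", (y_pred == y_idx).mean())  # should be well above chance (~0.33)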

