A General Classifier Based on Mini-Batch Stochastic Gradient Descent

1. Method Source Code

'''
The General Classifier Based on Mini-Batch Stochastic Gradient Descent.
'''

import numpy as np
import random

'''
Data constraint: numpy.ndarray.
Test data scale: 2683 x 6603.
Timing: trains quickly at the scale above.
Design style: Scikit-Learn / PyTorch-like API.
Training mode: mini-batch stochastic gradient descent.
'''

class Classifier:
    def __init__(self, epochs: int=200, batch_size: int=16, lr: float=0.1):
        '''
        :param epochs: maximum number of training epochs, default: int=200.
        :param batch_size: batch size, default: int=16.
        :param lr: learning rate, default: float=0.1.

        '''
        self.epochs = epochs
        self.lr = lr
        self.batch_size = batch_size
    
    def __data_matrix(self, X):
        '''
        :param X: raw features to convert to an augmented matrix, numpy.ndarray.

        '''
        # prepend a column of ones so the bias is absorbed into the weights
        ones = np.ones(X.shape[0])
        return np.insert(X, 0, ones, axis=1)
    
    def __softmax(self, part):
        '''
        :param part: batch of pre-activation scores (logits), numpy.ndarray.

        '''
        # subtract the row-wise max for numerical stability
        part = part - np.max(part, axis=1).reshape(-1, 1)
        # normalize each row separately; summing over the whole batch would be wrong
        return np.exp(part) / np.sum(np.exp(part), axis=1, keepdims=True)

    def __data_iter(self, X, y):
        '''
        :param X: features, numpy.ndarray.
        :param y: labels, numpy.ndarray.

        '''
        num_examples = len(X)
        indices = list(range(num_examples))
        random.shuffle(indices)  # reshuffle once per epoch
        for index in range(0, num_examples, self.batch_size):
            batch_indices = np.array(indices[index: min(index + self.batch_size, num_examples)])
            yield X[batch_indices], y[batch_indices]
    
    def fit(self, X, y, console: int=100, decay: int=20) -> None:
        '''
        :param X: train data, numpy.ndarray.
        :param y: correct labels, numpy.ndarray.
        :param console: console output interval in steps, default: int=100.
        :param decay: learning rate decay interval in epochs, default: int=20.

        '''
        assert len(X.shape) == 2, 'X must be 2-D: (n_samples, n_features)'
        assert len(y.shape) == 1, 'y must be 1-D: (n_samples,)'
        augmented, unique = self.__data_matrix(X), np.unique(y)
        self.num_classes = len(unique)
        # map each original label value to a column index 0..num_classes-1
        indices = dict(zip(unique, range(self.num_classes)))
        self.weights = np.zeros((augmented.shape[1], self.num_classes), dtype=np.float64)
        for epoch in range(self.epochs):
            for step, (features, labels) in enumerate(self.__data_iter(augmented, y)):
                # predicted class probabilities for the batch
                res = self.__softmax(features @ self.weights)
                # one-hot targets for the batch
                obj = np.eye(self.num_classes)[[indices[value] for value in labels]]
                err = res - obj
                # gradient step for softmax regression with cross-entropy loss
                self.weights -= self.lr * (features.T @ err) / self.batch_size
                if (step + 1) % console == 0:
                    # the printed value is the batch error norm, a cheap loss proxy
                    print('Epoch [{}/{}], Step [{}/{}], Loss {:.4f}'.format(
                        epoch + 1, self.epochs, step + 1,
                        len(X) // self.batch_size, np.linalg.norm(err)))
            # learning rate decay
            if (epoch + 1) % decay == 0:
                self.lr /= 3  # simple step decay; swap in another schedule if desired
                
    def score(self, X, y) -> float:
        '''
        :param X: test data, numpy.ndarray.
        :param y: correct labels encoded as class indices (the np.unique order used in fit), numpy.ndarray.

        '''
        assert len(X.shape) == 2, 'X must be 2-D: (n_samples, n_features)'
        assert len(y.shape) == 1, 'y must be 1-D: (n_samples,)'
        X = self.__data_matrix(X)
        predicted = np.argmax(X @ self.weights, axis=1)
        return (predicted == y).sum() / len(X)
    
    def predict(self, X):
        '''
        :param X: predict data, numpy.ndarray.

        '''
        assert len(X.shape) == 2, 'X must be 2-D: (n_samples, n_features)'
        X = self.__data_matrix(X)
        prob = X @ self.weights
        return np.argmax(prob, axis=1)
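
For reference, the weight update in fit is the standard mini-batch gradient step for softmax regression with cross-entropy loss. Writing the augmented batch features as $X_B \in \mathbb{R}^{B \times (d+1)}$, the one-hot targets as $Y_B$, and the predictions as $P_B = \mathrm{softmax}(X_B W)$, the gradient of the batch-averaged loss is

$$
\nabla_W \mathcal{L} = \frac{1}{B} X_B^{\top} (P_B - Y_B), \qquad W \leftarrow W - \eta\, \nabla_W \mathcal{L},
$$

which is exactly the line self.weights -= self.lr * (features.T @ err) / self.batch_size with err = res - obj.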

2. Test Program

# import packages and model
import numpy as np
import scipy.io as sio
from ml import Classifier

# load data
def process_data(url: str) -> tuple:
    data = sio.loadmat(url)
    return data['X'], data['class']

# get features and labels
features, labels = process_data('textretrieval.mat')

# split data: convert the one-hot label matrix to class indices,
# then use the first 2000 samples for training and the rest for testing
def pretreat(features, labels) -> tuple:
    labels = np.argwhere(labels == 1)[:, 1]
    return features[:2000, :], features[2000:, :], labels[:2000], labels[2000:]

# get split data
X_train, X_test, y_train, y_test = pretreat(features, labels)

'''
hyper-parameters:
- epochs: 200
- batch_size: 1
- lr: 0.1
'''

# train model
model = Classifier(epochs=200, batch_size=1, lr=0.1)
model.fit(X_train, y_train)

# evaluate model
print(model.score(X_test, y_test))

# predict result
print((y_test == model.predict(X_test)).sum() / len(X_test))
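
As a quick sanity check without textretrieval.mat, the same pipeline can be smoke-tested on synthetic data. The sketch below is illustrative only: the shapes, the class count, and the seed are made up, and the labels are generated directly as the integer class indices that fit and score expect.

# minimal smoke test on synthetic data (hypothetical shapes and class count)
import numpy as np
from ml import Classifier

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 20))        # 500 samples, 20 features
y = rng.integers(0, 3, size=500)      # 3 classes, already encoded as 0..2

model = Classifier(epochs=50, batch_size=16, lr=0.1)
model.fit(X[:400], y[:400])
print(model.score(X[400:], y[400:]))  # held-out accuracy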