论文复现:Ordinal Regression by Extended Binary Classification

"""
By Daniel He
Date: 2021-8-20
"""
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import StratifiedKFold
from collections import OrderedDict
from sklearn.metrics.pairwise import rbf_kernel, linear_kernel
from sklearn.metrics import accuracy_score


class eSVM():
    """SVM for ordinal regression via Extended Binary Classification.

    Implements the reduction of Li & Lin, "Ordinal Regression by Extended
    Binary Classification" (NIPS 2006): every sample of a K-class ordinal
    problem is replicated K-1 times, replica k is extended with the k-th row
    of an identity matrix and labelled +1/-1 according to whether the
    sample's rank exceeds threshold k.  A single binary soft-margin SVM
    (trained here with a simplified SMO loop) answers all K-1 threshold
    questions at once; a test rank is recovered by counting the thresholds
    the sample is predicted to exceed.
    """

    def __init__(self):
        self.gamma = 1          # kept for API symmetry; rbf_kernel below uses its default gamma
        self.C = 1              # SMO box constraint: 0 <= alpha <= C
        self.eX = self.ey = None                      # extended training set (built in fit)
        self.alpha = self.w = self.prediction_cache = None
        self.tol = 1e-3         # KKT-violation tolerance used as the stopping test

    def fit(self, X, y, epoch=500):
        """Build the extended training set and run at most `epoch` SMO steps.

        Parameters
        ----------
        X : ndarray of shape (nSample, nDim)
        y : array of ordinal class labels
        epoch : maximum number of SMO pair updates

        Returns
        -------
        self
        """
        self.nSample, self.nDim = X.shape
        self.labels = list(np.sort(np.unique(y)))
        self.nClass = len(self.labels)
        # Identity block appended to each replica: it encodes which of the
        # K-1 binary threshold problems the replica belongs to.
        self.extend_part = np.eye(self.nClass - 1)
        self.label_dict = self.get_label_dict()
        self.eX, self.ey = self.train_set_construct(X, y)
        # Extended kernel: RBF on the original feature part plus a linear
        # kernel on the identity extension part.
        self.gram_1 = rbf_kernel(self.eX[:, :self.nDim])
        self.gram_2 = self.eX[:, self.nDim:] @ self.eX[:, self.nDim:].T
        self.gram = self.gram_1 + self.gram_2

        self.Nsample, self.Ndim = self.eX.shape
        self._alpha, self._w, self._prediction_cache = (
            np.zeros(self.Nsample), np.zeros(self.Nsample), np.zeros(self.Nsample))
        self._b = 0.

        for _ in range(epoch):
            idx1 = self._pick_alpha_1()
            if idx1 is None:
                # No sample violates the KKT conditions beyond tol: converged.
                # (The original returned the bare value True here; breaking
                # and returning self keeps the return type consistent.)
                break
            idx2 = self._pick_alpha_2(idx1)
            self._update_alpha(idx1, idx2)

        return self

    def _pick_alpha_1(self):
        """Pick the index with the largest KKT violation, or None if below tol."""
        con1 = self._alpha > 0
        con2 = self._alpha < self.C
        # err = y*f(x) - 1; the three masked copies measure violations of the
        # KKT conditions in the regimes alpha == 0, 0 < alpha < C, alpha == C.
        err1 = self.ey * self._prediction_cache - 1
        err2 = err1.copy()
        err3 = err1.copy()

        err1[(con1 & (err1 <= 0)) | (~con1 & (err1 > 0))] = 0
        err2[((~con1 | ~con2) & (err2 != 0)) | ((con1 & con2) & (err2 == 0))] = 0
        err3[(con2 & (err3 >= 0)) | (~con2 & (err3 < 0))] = 0
        err = err1 ** 2 + err2 ** 2 + err3 ** 2
        idx = np.argmax(err)
        if err[idx] < self.tol:
            return None
        return idx

    def _pick_alpha_2(self, idx1):
        """Pick the second working-set index uniformly at random (!= idx1)."""
        idx = np.random.randint(self.Nsample)
        while idx == idx1:
            idx = np.random.randint(self.Nsample)
        return idx

    def _get_lower_bound(self, idx1, idx2):
        """Lower clip bound L for alpha[idx2] along the equality constraint."""
        if self.ey[idx1] != self.ey[idx2]:
            return max(0., self._alpha[idx2] - self._alpha[idx1])
        return max(0., self._alpha[idx2] + self._alpha[idx1] - self.C)

    def _get_upper_bound(self, idx1, idx2):
        """Upper clip bound H for alpha[idx2] along the equality constraint."""
        if self.ey[idx1] != self.ey[idx2]:
            return min(self.C, self.C + self._alpha[idx2] - self._alpha[idx1])
        return min(self.C, self._alpha[idx2] + self._alpha[idx1])

    def _update_alpha(self, idx1, idx2):
        """Perform one SMO step on the working pair (idx1, idx2)."""
        L, H = self._get_lower_bound(idx1, idx2), self._get_upper_bound(idx1, idx2)
        y1, y2 = self.ey[idx1], self.ey[idx2]
        e1 = self._prediction_cache[idx1] - self.ey[idx1]
        e2 = self._prediction_cache[idx2] - self.ey[idx2]
        # Second derivative of the objective along the constraint line.
        eta = self.gram[idx1][idx1] + self.gram[idx2][idx2] - 2 * self.gram[idx1][idx2]
        if eta <= 0:
            # Degenerate pair (e.g. duplicated kernel rows): the original code
            # divided by zero here.  Skip the update instead.
            return
        a2_new = self._alpha[idx2] + (y2 * (e1 - e2)) / eta
        if a2_new > H:
            a2_new = H
        elif a2_new < L:
            a2_new = L

        a1_old, a2_old = self._alpha[idx1], self._alpha[idx2]
        da2 = a2_new - a2_old
        # Preserve the linear constraint sum(alpha_i * y_i) == const.
        da1 = -y1 * y2 * da2
        self._alpha[idx1] += da1
        self._alpha[idx2] = a2_new
        self._update_dw_cache(idx1, idx2, da1, da2, y1, y2)
        self._update_db_cache(idx1, idx2, da1, da2, y1, y2, e1, e2)
        self._update_pred_cache(idx1, idx2)

    def _update_dw_cache(self, idx1, idx2, da1, da2, y1, y2):
        """Update the dual weight vector w_i = alpha_i * y_i for the pair."""
        self._dw_cache = np.array([da1 * y1, da2 * y2])
        self._w[idx1] += self._dw_cache[0]
        self._w[idx2] += self._dw_cache[1]

    def _update_db_cache(self, idx1, idx2, da1, da2, y1, y2, e1, e2):
        """Update the bias as the average of the two per-sample estimates."""
        gram_12 = self.gram[idx1][idx2]
        b1 = -e1 - y1 * self.gram[idx1][idx1] * da1 - y2 * gram_12 * da2
        b2 = -e2 - y1 * gram_12 * da1 - y2 * self.gram[idx2][idx2] * da2
        self._db_cache = (b1 + b2) * 0.5
        self._b += self._db_cache

    def _update_pred_cache(self, *args):
        """Incrementally refresh cached decision values f(x_i) for all samples."""
        self._prediction_cache += self._db_cache
        if len(args) == 1:
            self._prediction_cache += self._dw_cache * self.gram[args[0]]
        elif len(args) == len(self.gram):
            self._prediction_cache = self._dw_cache.dot(self.gram)
        else:
            # Usual path: two changed alphas contribute via their gram rows.
            self._prediction_cache += self._dw_cache.dot(self.gram[args, ...])

    def predict(self, X_test, get_raw_result=False):
        """Predict ordinal labels for X_test.

        Each test sample yields K-1 binary answers; its label index is the
        number of positive ("rank exceeds threshold k") answers.
        """
        nTest = X_test.shape[0]
        eX_test = self.test_set_construct(X=X_test)
        test_gram_1 = rbf_kernel(X=self.eX[:, :self.nDim], Y=eX_test[:, :self.nDim])
        test_gram_2 = self.eX[:, self.nDim:] @ eX_test[:, self.nDim:].T
        test_gram = test_gram_1 + test_gram_2
        y_tmp = np.sign(self._w.dot(test_gram) + self._b)

        y_pred = []
        for i in range(nTest):
            tmp_label = y_tmp[(self.nClass - 1) * i: (self.nClass - 1) * (i + 1)]
            # np.int was removed in NumPy 1.24; the builtin int works everywhere.
            label_idx = int(np.sum(tmp_label > 0))
            y_pred.append(self.labels[label_idx])

        return y_pred

    def get_label_dict(self):
        """Map each ordinal label to its K-1 vector of +/-1 threshold targets.

        Entry k is +1 when the label exceeds the k-th smallest label
        ("rank > threshold k"), otherwise -1.
        """
        label_dict = OrderedDict()
        for lab in self.labels:
            tmp_label = np.ones(self.nClass - 1)
            for k, threshold in enumerate(self.labels[:-1]):
                tmp_label[k] = -1 if lab <= threshold else 1
            label_dict[lab] = tmp_label
        return label_dict

    def train_set_construct(self, X, y):
        """Replicate each sample K-1 times with the identity extension and
        expand its ordinal label into K-1 binary targets."""
        K1 = self.nClass - 1
        eX = np.zeros((self.nSample * K1, self.nDim + K1))
        ey = np.ones(self.nSample * K1)
        for i in range(self.nSample):
            eX[K1 * i: K1 * (i + 1)] = np.hstack(
                (np.tile(X[i], (K1, 1)), self.extend_part))
            ey[K1 * i: K1 * (i + 1)] = self.label_dict[y[i]]
        return eX, ey

    def test_set_construct(self, X):
        """Extend test samples the same way as the training set (features only)."""
        nTest = X.shape[0]
        K1 = self.nClass - 1
        eX = np.zeros((nTest * K1, self.nDim + K1))
        for i in range(nTest):
            eX[K1 * i: K1 * (i + 1)] = np.hstack(
                (np.tile(X[i], (K1, 1)), self.extend_part))
        return eX















if __name__ == '__main__':
    # Evaluate the from-scratch ordinal SVM on one fold of one round.
    data = np.array(pd.read_csv(r"D:\OCdata\HDI2.csv"))
    X = data[:, :-1]
    y = data[:, -1]
    y = y - y.min()          # shift labels so the smallest rank is 0
    print(np.unique(y))
    Rounds = 1

    Acc_list = []
    for r in range(Rounds):
        SKF = StratifiedKFold(n_splits=5, shuffle=True)
        for train_idx, test_idx in SKF.split(X, y):
            X_train = X[train_idx]
            # np.int was removed in NumPy 1.24; use the builtin int dtype.
            y_train = y[train_idx].astype(int)
            X_test = X[test_idx]
            y_test = y[test_idx]
            model = eSVM()
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test=X_test)
            print("预测精度=", accuracy_score(y_true=y_test, y_pred=y_pred))
            break    # only the first fold is evaluated
        break        # only one round



调包实现:

SVM部分使用了sklearn工具包

原理上与上面不同的是 kernel 矩阵的计算结果,但对预测结果并无大碍。

测试结果显示,序分类方法比OVR多分类SVM方法的预测结果要准确一些。

"""
By Daniel He
Date: 2021-8-20
"""
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import StratifiedKFold
from collections import OrderedDict
from sklearn.metrics.pairwise import rbf_kernel, linear_kernel
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC


class eSVM():
    """Extended-binary-classification ordinal SVM backed by sklearn's SVC.

    Same reduction as the from-scratch SMO version: each K-class sample is
    replicated K-1 times with an identity extension and +/-1 threshold
    labels; the resulting single binary problem is solved by sklearn.svm.SVC
    with an RBF kernel.
    """

    def __init__(self):
        self.gamma = 1          # kept for API symmetry; the SVC below uses gamma="auto"
        self.C = 1              # kept for API symmetry; the SVC below uses C=200
        self.eX = self.ey = None
        self.alpha = self.w = self.prediction_cache = None
        self.tol = 1e-3

    def fit(self, X, y, epoch=500):
        """Construct the extended training set and fit one RBF SVC on it.

        `epoch` is accepted for interface compatibility but unused here
        (the SVC solver handles its own convergence).  Returns self.
        """
        self.nSample, self.nDim = X.shape
        self.labels = list(np.sort(np.unique(y)))
        self.nClass = len(self.labels)
        # Identity block appended to each replica; it encodes which of the
        # K-1 binary threshold problems the replica belongs to.
        self.extend_part = np.eye(self.nClass - 1)
        self.label_dict = self.get_label_dict()
        self.eX, self.ey = self.train_set_construct(X, y)
        self.model = SVC(kernel="rbf", C=200, gamma="auto", probability=True)
        self.model.fit(X=self.eX, y=self.ey)
        return self

    def predict(self, X_test, get_raw_result=False):
        """Predict ordinal labels for X_test.

        Each test sample yields K-1 binary answers; its label index is the
        number of positive ("rank exceeds threshold k") answers.
        """
        nTest = X_test.shape[0]
        eX_test = self.test_set_construct(X=X_test)
        y_tmp = self.model.predict(X=eX_test)

        y_pred = []
        for i in range(nTest):
            tmp_label = y_tmp[(self.nClass - 1) * i: (self.nClass - 1) * (i + 1)]
            # np.int was removed in NumPy 1.24; the builtin int works everywhere.
            label_idx = int(np.sum(tmp_label > 0))
            y_pred.append(self.labels[label_idx])

        return y_pred

    def get_label_dict(self):
        """Map each ordinal label to its K-1 vector of +/-1 threshold targets.

        Entry k is +1 when the label exceeds the k-th smallest label
        ("rank > threshold k"), otherwise -1.
        """
        label_dict = OrderedDict()
        for lab in self.labels:
            tmp_label = np.ones(self.nClass - 1)
            for k, threshold in enumerate(self.labels[:-1]):
                tmp_label[k] = -1 if lab <= threshold else 1
            label_dict[lab] = tmp_label
        return label_dict

    def train_set_construct(self, X, y):
        """Replicate each sample K-1 times with the identity extension and
        expand its ordinal label into K-1 binary targets."""
        K1 = self.nClass - 1
        eX = np.zeros((self.nSample * K1, self.nDim + K1))
        ey = np.ones(self.nSample * K1)
        for i in range(self.nSample):
            eX[K1 * i: K1 * (i + 1)] = np.hstack(
                (np.tile(X[i], (K1, 1)), self.extend_part))
            ey[K1 * i: K1 * (i + 1)] = self.label_dict[y[i]]
        return eX, ey

    def test_set_construct(self, X):
        """Extend test samples the same way as the training set (features only)."""
        nTest = X.shape[0]
        K1 = self.nClass - 1
        eX = np.zeros((nTest * K1, self.nDim + K1))
        for i in range(nTest):
            eX[K1 * i: K1 * (i + 1)] = np.hstack(
                (np.tile(X[i], (K1, 1)), self.extend_part))
        return eX



if __name__ == '__main__':
    # Compare the ordinal eSVM against a plain OVR multi-class SVC over
    # 2 rounds of 5-fold stratified cross-validation.
    data = np.array(pd.read_csv(r"D:\OCdata\HDI2.csv"))
    # data = np.array(pd.read_csv(r"D:\OCdata\toy.csv"))

    X = data[:, :-1]
    y = data[:, -1]
    y = y - y.min()          # shift labels so the smallest rank is 0
    print(np.unique(y))
    Rounds = 2

    Acc_list = []
    acc_list = []
    for r in range(Rounds):
        SKF = StratifiedKFold(n_splits=5, shuffle=True)
        for train_idx, test_idx in SKF.split(X, y):
            X_train = X[train_idx]
            # np.int was removed in NumPy 1.24; use the builtin int dtype.
            y_train = y[train_idx].astype(int)
            X_test = X[test_idx]
            y_test = y[test_idx]
            model = eSVM()
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test=X_test)
            Acc = accuracy_score(y_true=y_test, y_pred=y_pred)
            print("预测精度=", Acc)
            Acc_list.append(Acc)

            # Baseline: direct one-vs-rest multi-class SVC on the raw labels.
            svmmodel = SVC(kernel="rbf", C=100, gamma="auto", probability=True,
                           decision_function_shape='ovr')
            svmmodel.fit(X=X_train, y=y_train)
            y_pred = svmmodel.predict(X=X_test)
            acc = accuracy_score(y_true=y_test, y_pred=y_pred)
            acc_list.append(acc)

    print("平均精度==", np.mean(Acc_list))
    print("直接掉包==", np.mean(acc_list))



终版: 

"""
By Daniel He
Date: 2021-8-20
"""
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import StratifiedKFold
from collections import OrderedDict
from sklearn.metrics.pairwise import rbf_kernel, linear_kernel
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from scipy.special import expit


class eSVM():
    """Final extended-binary-classification ordinal SVM (sklearn SVC backend).

    Each K-class sample is replicated K-1 times with an identity extension;
    replica k is labelled +1 when the sample's rank is <= the k-th threshold
    and -1 otherwise (note: the sign convention is inverted relative to the
    earlier versions, and predict counts negative answers accordingly).
    Adds predict_proba, which converts the K-1 signed distances into a
    per-class probability matrix.
    """

    def __init__(self):
        self.gamma = 1          # kept for API symmetry; the SVC below uses gamma="auto"
        self.C = 1              # kept for API symmetry; the SVC below uses C=200
        self.eX = self.ey = None
        self.alpha = self.w = self.prediction_cache = None
        self.tol = 1e-3

    def fit(self, X, y, epoch=500):
        """Construct the extended training set and fit one RBF SVC on it.

        `epoch` is accepted for interface compatibility but unused here.
        Returns self.
        """
        self.nSample, self.nDim = X.shape
        self.labels = list(np.sort(np.unique(y)))
        self.nClass = len(self.labels)
        # Identity block appended to each replica; it encodes which of the
        # K-1 binary threshold problems the replica belongs to.
        self.extend_part = np.eye(self.nClass - 1)
        self.label_dict = self.get_label_dict()
        self.eX, self.ey = self.train_set_construct(X, y)
        self.model = SVC(kernel="rbf", C=200, gamma="auto", probability=True)
        self.model.fit(X=self.eX, y=self.ey)
        return self

    def predict(self, X_test, get_raw_result=False):
        """Predict ordinal labels for X_test.

        With this version's inverted encoding (+1 means "rank <= threshold"),
        the label index is the number of NEGATIVE binary answers.
        """
        nTest = X_test.shape[0]
        eX_test = self.test_set_construct(X=X_test)
        y_tmp = self.model.predict(X=eX_test)

        y_pred = []
        for i in range(nTest):
            tmp_label = y_tmp[(self.nClass - 1) * i: (self.nClass - 1) * (i + 1)]
            # np.int was removed in NumPy 1.24; the builtin int works everywhere.
            label_idx = int(np.sum(tmp_label < 0))
            y_pred.append(self.labels[label_idx])

        return y_pred

    def predict_proba(self, X_test):
        """Return an (nTest, nClass) matrix of class probabilities.

        The K-1 signed distances are squashed with the logistic sigmoid into
        estimates of the cumulative probabilities P(rank <= k); padding with
        0 on the left and 1 on the right and differencing converts them into
        per-class probabilities.
        NOTE(review): the sigmoids are not constrained to be monotone across
        thresholds, so individual entries can come out negative — confirm
        this is acceptable downstream (argmax-based use is typically fine).
        """
        nTest = X_test.shape[0]
        eX_test = self.test_set_construct(X=X_test)
        dist_tmp = self.model.decision_function(X=eX_test)

        # One row of K-1 threshold distances per test sample.
        dist_matrix = dist_tmp.reshape(nTest, self.nClass - 1)
        accumulative_proba = expit(dist_matrix)

        prob = np.pad(
            accumulative_proba,
            pad_width=((0, 0), (1, 1)),
            mode='constant',
            constant_values=(0, 1))
        prob = np.diff(prob)
        return prob

    def get_label_dict(self):
        """Map each ordinal label to its K-1 vector of +/-1 threshold targets.

        Inverted convention of this version: entry k is +1 when the label is
        <= the k-th smallest label, otherwise -1.
        """
        label_dict = OrderedDict()
        for lab in self.labels:
            tmp_label = np.ones(self.nClass - 1)
            for k, threshold in enumerate(self.labels[:-1]):
                tmp_label[k] = 1 if lab <= threshold else -1
            label_dict[lab] = tmp_label
        return label_dict

    def train_set_construct(self, X, y):
        """Replicate each sample K-1 times with the identity extension and
        expand its ordinal label into K-1 binary targets."""
        K1 = self.nClass - 1
        eX = np.zeros((self.nSample * K1, self.nDim + K1))
        ey = np.ones(self.nSample * K1)
        for i in range(self.nSample):
            eX[K1 * i: K1 * (i + 1)] = np.hstack(
                (np.tile(X[i], (K1, 1)), self.extend_part))
            ey[K1 * i: K1 * (i + 1)] = self.label_dict[y[i]]
        return eX, ey

    def test_set_construct(self, X):
        """Extend test samples the same way as the training set (features only)."""
        nTest = X.shape[0]
        K1 = self.nClass - 1
        eX = np.zeros((nTest * K1, self.nDim + K1))
        for i in range(nTest):
            eX[K1 * i: K1 * (i + 1)] = np.hstack(
                (np.tile(X[i], (K1, 1)), self.extend_part))
        return eX



if __name__ == '__main__':
    # Compare three predictors over 2 rounds of stratified 5-fold CV:
    # ordinal eSVM (sign-count), ordinal eSVM via predict_proba/argmax,
    # and a plain OVR multi-class SVC baseline.
    data = np.array(pd.read_csv(r"D:\OCdata\HDI2.csv"))
    # data = np.array(pd.read_csv(r"D:\OCdata\toy.csv"))

    X = data[:, :-1]
    y = data[:, -1]
    y = y - y.min()          # shift labels so the smallest rank is 0
    print(np.unique(y))
    Rounds = 2

    Acc_list = []
    acc_list = []
    accpro_list = []
    for r in range(Rounds):
        SKF = StratifiedKFold(n_splits=5, shuffle=True)
        for train_idx, test_idx in SKF.split(X, y):
            X_train = X[train_idx]
            # np.int was removed in NumPy 1.24; use the builtin int dtype.
            y_train = y[train_idx].astype(int)
            X_test = X[test_idx]
            y_test = y[test_idx]
            model = eSVM()
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test=X_test)
            Acc = accuracy_score(y_true=y_test, y_pred=y_pred)
            print("预测精度=", Acc)
            Acc_list.append(Acc)
            # Probability route: argmax over the per-class probability matrix.
            prob = model.predict_proba(X_test=X_test)
            y_prob = np.argmax(prob, axis=1)
            accpro_list.append(accuracy_score(y_true=y_test, y_pred=y_prob))
            print("Acc_prob:::", accuracy_score(y_true=y_test, y_pred=y_prob))

            # Baseline: direct one-vs-rest multi-class SVC on the raw labels.
            svmmodel = SVC(kernel="rbf", C=100, gamma="auto", probability=True,
                           decision_function_shape='ovr')
            svmmodel.fit(X=X_train, y=y_train)
            y_pred = svmmodel.predict(X=X_test)
            acc = accuracy_score(y_true=y_test, y_pred=y_pred)
            acc_list.append(acc)

    print("平均精度==", np.mean(Acc_list))
    print("概率精度==", np.mean(accpro_list))
    print("直接掉包==", np.mean(acc_list))



评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

DeniuHe

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值