# Deprecated code storage (废弃代码存储)

'''
         if self.alph < 0:
                Mn = self.M - self.learning_rate * g
                if np.isnan(Mn.sum()):
                    print('nan')
                [sigma, U] = la.eig(Mn)

                sigma = np.real(sigma)
                U = np.real(U)
                sigma[sigma < 1e-8] = 0
                Mn = U.dot(np.diag(sigma)).dot(U.T)
                An = (U.dot(np.diag(np.sqrt(sigma)))).T
                f, g = mann_cost(An, X, self.S_m, self.D_m,self.Y, self.m, self.alph, self.reg,self.s_type)
                err = f - prev_error
                if f < prev_error:
                    self.A = An
                    self.M = Mn
                    self.learning_rate = 1.01 * self.learning_rate
                    prev_error = f
                    f1.append(f)
                else:
                    self.learning_rate = 0.5 * self.learning_rate
                    
            else:
                An = self.A - self.learning_rate * g.dot(self.A)
                if np.isnan(An.sum()):
                    print('nan')

                f, g = mann_cost(An, X, self.S_m, self.D_m, self.Y, self.m, self.alph, self.reg, self.s_type)
                err = f - prev_error
                if f < prev_error:
                    self.A = An
                    self.M = self.A.dot(self.A.T)
                    self.learning_rate = 1.01 * self.learning_rate
                    prev_error = f
                    f1.append(f)
                else:
                    self.learning_rate = 0.5 * self.learning_rate
'''

 

def mann_cost1(A, xx, S_m, D_m, label, m, alph, reg, mu = 1,s_type = 1):
    """Margin-based metric-learning cost and gradient (hinge variant).

    NOTE(review): a second function with this exact name is defined later
    in this file, so this first definition is shadowed (dead code).

    For each anchor sample i the cost compares a soft-min distance to its
    similar set S_m[i] (log-sum-exp with temperature `alph`, shifted by
    margin `m`) against a soft-min distance to its dissimilar set D_m[i];
    only positive margins contribute (hinge). A same-class scatter
    regulariser (weight `reg`) and an L2 penalty on A (weight `mu`) are
    added.

    Inputs:
        A      KxD linear transformation.
        xx     NxD input data.
        S_m    per-sample index collections of similar neighbours.
        D_m    per-sample index collections of dissimilar neighbours.
        label  Nx1 class labels.
        m      margin added to the similar-side soft-min distance.
        alph   temperature of the similar-side soft-min.
        reg    weight of the same-class scatter regulariser.
        mu     weight of the ||A||_F^2 penalty.
        s_type 1 -> mean inside the log-sum-exp; otherwise plain sum.

    Outputs:
        yy  scalar cost.
        gg  gradient w.r.t. A, normalised to unit Frobenius norm.
    """

    N, D = xx.shape
    assert(A.shape[1] == D)


    # Projected data: zz is N x K (each row is a projected sample).
    zz = np.dot(A, xx.T).T

    # TODO Subsample part of data to compute loss on.
    # kk = np.exp(-square_dist(zz.T, zz.T[idxs]))  # Nxn
    # kk[idxs, np.arange(len(idxs))] = 0
    gg = np.zeros((D, D))   # accumulated hinge gradient (input space)
    yy = 0                  # accumulated hinge cost
    yy1 = 0                 # same-class scatter cost (regulariser)
    gg1 = np.zeros((D, D))  # same-class scatter gradient
    s_yy1 = 1               # same-class pair count; starts at 1 to avoid /0
    # NOTE(review): zz is N x K, so this loop runs over the K output
    # dimensions while indexing zz[i] as if it were sample i -- looks like
    # it should be range(zz.shape[0]); confirm against callers.
    for i in range(zz.shape[1]):
        S_m_i = S_m[i]
        D_m_i = D_m[i]
        # Squared distances from anchor i to its similar / dissimilar sets.
        dist_s = square_dist(zz[i:i+1, :], zz[S_m_i, :])
        dist_d = square_dist(zz[i:i+1, :], zz[D_m_i, :])
        # 1e-8 keeps the subsequent log away from exp underflow.
        dist_s_exp = np.exp(-alph * dist_s) + 1e-8
        dist_d_exp = np.exp(-dist_d) + 1e-8
        if s_type == 1:
            # Soft-min distances (mean inside the log); margin on similar side.
            t_s = -np.log(np.sum(dist_s_exp)/dist_s_exp.shape[1])/alph + m
            t_d = -np.log(np.sum(dist_d_exp)/dist_d_exp.shape[1])
        else:
            # Same, but with a plain sum inside the log.
            t_s = -np.log(np.sum(dist_s_exp)) / alph + m
            t_d = -np.log(np.sum(dist_d_exp))

        t = t_s - t_d
        if t > 0:
            # Hinge active: accumulate cost and the softmax-weighted
            # second-moment gradient terms.
            yy = yy + t
            t_s_rate = dist_s_exp / np.sum(dist_s_exp)
            t_d_rate = dist_d_exp / np.sum(dist_d_exp)
            s_temp = np.dot((t_s_rate.T * xx[S_m_i, :]).T, xx[S_m_i, :])
            d_temp = np.dot((t_d_rate.T * xx[D_m_i, :]).T, xx[D_m_i, :])
            gg = gg + s_temp - d_temp
        # Same-class scatter regulariser terms for anchor i.
        gg1 = gg1 + np.dot(xx[(label[i] == label)[:, 0], :].T, xx[(label[i] == label)[:, 0], :])
        yy1 = yy1 + np.sum(zz[(label[i] == label)[:, 0], :] ** 2)
        s_yy1 = s_yy1 + np.sum(label[i] == label)
    gg = gg / zz.shape[1] + reg * gg1 / s_yy1
    yy = yy / zz.shape[1] + reg * yy1 / s_yy1 + mu * np.dot(A.ravel(), A.ravel())
    # Chain rule through zz = x A^T, plus the L2 penalty gradient.
    gg = np.dot(gg,A.T).T + 2 * mu * A
    # Normalise the gradient to unit Frobenius norm.
    # NOTE(review): divides by zero if the gradient vanishes -- confirm.
    gg = gg / np.sqrt(np.trace(np.dot(gg , gg.T)))
    return yy, gg

def mann_cost1(A, xx, S_m, D_m, label, m, alph, reg, mu = 1,s_type = 1):
    """Margin-based metric-learning cost and gradient (smooth variant).

    NOTE(review): this redefines `mann_cost1` above and therefore shadows
    the earlier hinge variant.

    Like the hinge version, but the per-sample margin t is scaled by `m`
    and fed through the softplus surrogate log(1 + exp(-t)), so every
    sample contributes a smooth, differentiable loss term instead of a
    hard hinge. Adds the same same-class scatter regulariser (`reg`) and
    L2 penalty on A (`mu`).

    Inputs:
        A      KxD linear transformation.
        xx     NxD input data.
        S_m    per-sample index collections of similar neighbours.
        D_m    per-sample index collections of dissimilar neighbours.
        label  Nx1 class labels.
        m      scale divisor applied to the margin t.
        alph   temperature of the similar-side soft-min.
        reg    weight of the same-class scatter regulariser.
        mu     weight of the ||A||_F^2 penalty.
        s_type 1 -> mean inside the log-sum-exp; otherwise plain sum.

    Outputs:
        yy  scalar cost.
        gg  gradient w.r.t. A (not normalised; see commented-out line).
    """

    N, D = xx.shape
    assert(A.shape[1] == D)


    # Projected data: zz is N x K (each row is a projected sample).
    zz = np.dot(A, xx.T).T

    # TODO Subsample part of data to compute loss on.
    # kk = np.exp(-square_dist(zz.T, zz.T[idxs]))  # Nxn
    # kk[idxs, np.arange(len(idxs))] = 0
    gg = np.zeros((D, D))   # accumulated loss gradient (input space)
    yy = 0                  # accumulated softplus cost
    yy1 = 0                 # same-class scatter cost (regulariser)
    gg1 = np.zeros((D, D))  # same-class scatter gradient
    s_yy1 = 1               # same-class pair count; starts at 1 to avoid /0
    # NOTE(review): zz is N x K -- as in the variant above, this looks like
    # it should iterate range(zz.shape[0]); confirm against callers.
    for i in range(zz.shape[1]):
        S_m_i = S_m[i]
        D_m_i = D_m[i]
        # Squared distances from anchor i to its similar / dissimilar sets.
        dist_s = square_dist(zz[i:i+1, :], zz[S_m_i, :])
        dist_d = square_dist(zz[i:i+1, :], zz[D_m_i, :])
        # 1e-8 keeps the subsequent log away from exp underflow.
        dist_s_exp = np.exp(-alph * dist_s) + 1e-8
        dist_d_exp = np.exp(-dist_d) + 1e-8
        if s_type == 1:
            # Soft-min distances (mean inside the log); no additive margin here.
            t_s = -np.log(np.sum(dist_s_exp)/dist_s_exp.shape[1])/alph
            t_d = -np.log(np.sum(dist_d_exp)/dist_d_exp.shape[1])
        else:   # AAAI17 parameter free large margin nearest neighbor
            t_s = -np.log(np.sum(dist_s_exp)) / alph
            t_d = -np.log(np.sum(dist_d_exp))

        # Scaled margin fed through the softplus surrogate loss.
        t = (t_s - t_d)/m
        t1 = np.log(1 + np.exp(-t))
        yy = yy + t1
        t_s_rate = dist_s_exp / np.sum(dist_s_exp)
        t_d_rate = dist_d_exp / np.sum(dist_d_exp)
        s_temp = np.dot((t_s_rate.T * xx[S_m_i, :]).T, xx[S_m_i, :])
        d_temp = np.dot((t_d_rate.T * xx[D_m_i, :]).T, xx[D_m_i, :])
        # Sigmoid weight = d(softplus)/dt.
        # NOTE(review): np.exp(-t) can overflow for strongly negative t --
        # confirm the margin scale keeps t in a safe range.
        gg = gg - (np.exp(-t))/(1+np.exp(-t)) * (s_temp - d_temp)/m
        # Same-class scatter regulariser terms for anchor i.
        gg1 = gg1 + np.dot(xx[(label[i] == label)[:, 0], :].T, xx[(label[i] == label)[:, 0], :])
        yy1 = yy1 + np.sum(zz[(label[i] == label)[:, 0], :] ** 2)
        s_yy1 = s_yy1 + np.sum(label[i] == label)
    gg = gg / zz.shape[1] + reg * gg1 / s_yy1
    # Debug trap left in by the author: flags a NaN gradient on stdout.
    if np.isnan(gg.sum()):
        print('nan')
    yy = yy / zz.shape[1] + reg * yy1 / s_yy1 + mu * np.dot(A.ravel(), A.ravel())
    # Chain rule through zz = x A^T, plus the L2 penalty gradient.
    gg = np.dot(gg, A.T).T + 2 * mu * A
    #gg = gg / np.sqrt(np.trace(np.dot(gg , gg.T)))
    return yy, gg

 

 

def main():
    """Sweep the MANN `mu` parameter on a dataset loaded from a .mat file
    and plot/save the resulting kNN accuracy.

    Relies on module-level names defined elsewhere in this file:
    DATA_LOADERS, SEED, TEST_SIZE, N_NEIGHBORS, MANN, find_SD, np, plt,
    argparse, and the sklearn helpers.
    """
    parser = argparse.ArgumentParser(
        description='Apply the kNN classifier using different metrics.',
    )

    parser.add_argument(
        '-d', '--data',
        choices=DATA_LOADERS,
        default='wine',
        help='on which data to run the model',
    )
    parser.add_argument(
        '--to-plot',
        action='store_true',
        help='plot the projected data',
    )
    parser.add_argument(
        '--seed',
        default=SEED,
        type=int,
        help='seed to fix the randomness',
    )
    parser.add_argument(
        '-v', '--verbose',
        default=0,
        action='count',
        help='how much information to output',
    )

    args = parser.parse_args()
    np.random.seed(args.seed)

    # NOTE(review): X/y loaded here are immediately replaced by the .mat
    # file below, so the sklearn loader is effectively unused -- confirm.
    data = DATA_LOADERS[args.data]()
    X, y = data.data, data.target

    from scipy.io import loadmat, savemat
    m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\german_uni')
    X = m['X']
    y = m['Y']
    print(X.shape)
    print(y.shape)
    n_iter = 1
    al = np.linspace(0.01, 0.99, 30)   # candidate mu values
    b1 = np.power(2, np.ones((1,)))    # single dummy value for the second grid axis
    accuracy = np.zeros((len(al), len(b1), n_iter))
    knn = KNeighborsClassifier(n_neighbors=N_NEIGHBORS)
    A = np.eye(X.shape[1])             # identity initialisation of the projection
    for kk in range(n_iter):
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y, test_size=TEST_SIZE)

        K1 = 25        # similar neighbours per sample
        K2 = K1 * 4    # dissimilar neighbours per sample
        print(kk)

        # Apply metric model
        S_m, D_m = find_SD(X_tr, y_tr, K1, K2)
        for i in range(len(al)):
            for j in range(len(b1)):
                m1 = al[i]
                model = MANN(m = 1, alph = -1,reg = 1, mu = m1, S_m = S_m, D_m = D_m, Y = y_tr)
                # BUG FIX: the original assigned the transformed data back
                # to X_tr/X_te, so every later (i, j) iteration fit on
                # already-projected inputs. Keep the raw split intact and
                # project into separate variables.
                X_tr1 = model.fit_transform(X_tr, A)
                X_te1 = model.transform(X_te)
                knn.fit(X_tr1, y_tr)
                y_pr = knn.predict(X_te1)
                accuracy[i, j, kk] = 100 * accuracy_score(y_te, y_pr)
    print(accuracy)
    ac = accuracy[:, 0, 0]
    plt.plot(ac)
    plt.show()
    # BUG FIX: extension was '.amt' -- save as a regular MATLAB '.mat' file.
    savemat('D:\\accuracy.mat', {'acc': accuracy})

def main1():
    """Grid-search MANN over (al, b1) scale parameters on a .mat dataset
    and plot kNN accuracy versus neighbour count against a plain-kNN
    baseline.

    Relies on module-level names defined elsewhere in this file:
    DATA_LOADERS, SEED, TEST_SIZE, N_NEIGHBORS, MANN, find_SD, np, plt,
    argparse, and the sklearn helpers.
    """
    parser = argparse.ArgumentParser(
        description='Apply the kNN classifier using different metrics.',
    )

    parser.add_argument(
        '-d', '--data',
        choices=DATA_LOADERS,
        default='wine',
        help='on which data to run the model',
    )
    parser.add_argument(
        '--to-plot',
        action='store_true',
        help='plot the projected data',
    )
    parser.add_argument(
        '--seed',
        default=SEED,
        type=int,
        help='seed to fix the randomness',
    )
    parser.add_argument(
        '-v', '--verbose',
        default=0,
        action='count',
        help='how much information to output',
    )
    from sklearn.decomposition import PCA

    args = parser.parse_args()
    np.random.seed(args.seed)


    from scipy.io import loadmat, savemat
    m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\german_uni')
    X = m['X']
    y = m['Y']
    L1 = loadmat('G:\\songkun\\matlab_project\\mlcircus-lmnn-5b49cafaeb9a\\lmnn2\\L')
    n_iter = 1
    # Candidate scale parameters for the (alph, m) grid.
    # BUG FIX: the original defined `a1` (never read; the loops use `al`,
    # raising NameError) and wrote `np.array(0[0.1, ...])`, which raises
    # TypeError ("'int' object is not subscriptable") at runtime.
    al = np.array([0.1, 1, 2, 4, 8, 16, 32, 64])
    b1 = np.array([0.1, 1, 2, 4, 8, 16, 32, 64])
    accuracy = np.zeros((len(al), 150))
    ac_knn = np.zeros((1, 150))
    # BUG FIX: `s` was assigned per repeat below but never initialised,
    # raising NameError on first use.
    s = np.zeros(n_iter)
    knn = KNeighborsClassifier(n_neighbors=N_NEIGHBORS)
    for kk in range(n_iter):
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y, test_size=TEST_SIZE)
        # Initialise the projection with the PCA basis.
        pca = PCA(n_components=X_tr.shape[1])
        pca.fit(X_tr)
        A = pca.components_
        K1 = 50        # similar neighbours per sample
        K2 = K1 * 4    # dissimilar neighbours per sample

        # Apply metric model
        S_m, D_m = find_SD(X_tr, y_tr, K1, K2)
        for i in range(len(al)):
            for j in range(len(b1)):
                alph = -al[i] / b1[j]
                m1 = 1
                print(m1)
                # NOTE(review): m=b1[i] (not b1[j]) and `accuracy` is
                # indexed only by i, so later j iterations overwrite
                # earlier results -- confirm this is intended.
                model = MANN(m=b1[i], alph=alph, reg=0.01, mu=0, S_m=S_m, D_m=D_m, Y=y_tr)
                X_tr1 = model.fit_transform(X_tr, m1 * A)
                X_te1 = model.transform(X_te)
                A1 = model.A_return()
                print(A1[0, 0:5])
                # Accuracy for k = 1..50 neighbours in the learned space.
                for kk1 in range(50):
                    knn = KNeighborsClassifier(n_neighbors=kk1 + 1)
                    knn.fit(X_tr1, y_tr)
                    y_pr = knn.predict(X_te1)
                    accuracy[i, kk1] = 100 * accuracy_score(y_te, y_pr)
        # Baseline: plain kNN in the original feature space.
        for kk1 in range(50):
            knn = KNeighborsClassifier(n_neighbors=kk1 + 1)
            knn.fit(X_tr, y_tr)
            y_pr1 = knn.predict(X_te)
            ac_knn[0, kk1] = 100 * accuracy_score(y_te, y_pr1)
        plt.plot(accuracy[0, 0:50],'-*',markersize = 10,label = '1' )
        plt.plot(accuracy[1, 0:50],'->',markersize = 10, label = '2' )
        plt.plot(accuracy[2, 0:50],'-<', markersize =10, label = '3')
        plt.plot(accuracy[3, 0:50],'-s', markersize =10, label = '4' )
        plt.plot(accuracy[4, 0:50],'-d', markersize =10, label = '5')
        plt.plot(ac_knn[0, 0:50],'-p', markersize = 10, label = 'kkn')
        plt.legend()
        plt.show()
        print('knn accuracy:')
        print(ac_knn)
        s[kk] = np.max(accuracy)
    print('the accuracy is :')
    print(s)

def main2():
    """Full 2-D grid-search of MANN over (alph, m) on the Segment dataset,
    summarising the best accuracy per m value and plotting smoothed
    accuracy-vs-k curves against a plain-kNN baseline.

    Relies on module-level names defined elsewhere in this file:
    DATA_LOADERS, SEED, TEST_SIZE, MANN, find_SD, np, plt, argparse,
    and the sklearn helpers.
    """
    parser = argparse.ArgumentParser(
        description='Apply the kNN classifier using different metrics.',
    )

    parser.add_argument(
        '-d', '--data',
        choices=DATA_LOADERS,
        default='wine',
        help='on which data to run the model',
    )
    parser.add_argument(
        '--to-plot',
        action='store_true',
        help='plot the projected data',
    )
    parser.add_argument(
        '--seed',
        default=SEED,
        type=int,
        help='seed to fix the randomness',
    )
    parser.add_argument(
        '-v', '--verbose',
        default=0,
        action='count',
        help='how much information to output',
    )
    from sklearn.decomposition import PCA

    args = parser.parse_args()
    np.random.seed(args.seed)


    from scipy.io import loadmat, savemat
    # NOTE(review): the first two loads are discarded; only Segment_uni is
    # used -- confirm which dataset was intended.
    m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\german_uni')
    m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\cars_uni')
    m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\Segment_uni')
    X = m['X']
    y = m['Y']
    L1 = loadmat('G:\\songkun\\matlab_project\\mlcircus-lmnn-5b49cafaeb9a\\lmnn2\\L')
    n_iter = 1
    # Log-spaced grids for the alph and m hyper-parameters.
    al = np.power(2, np.linspace(-8, 8, 20))
    b1 = np.power(2, np.linspace(-8, 8, 20))
    accuracy = np.zeros((len(al), len(al), 150))
    ac_knn = np.zeros((len(al) + 1, 150))   # row 0: plain-kNN baseline
    for kk in range(n_iter):
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y, test_size=TEST_SIZE)
        # Initialise the projection with the PCA basis.
        pca = PCA(n_components=X_tr.shape[1])
        pca.fit(X_tr)
        A = pca.components_
        K1 = 20        # similar neighbours per sample
        K2 = K1 * 4    # dissimilar neighbours per sample

        # Apply metric model
        S_m, D_m = find_SD(X_tr, y_tr, K1, K2)
        for i in range(len(al)):
            for j in range(len(b1)):
                m1 = 1
                model = MANN(m=b1[j], alph=al[i], reg=0.01, mu=0, S_m=S_m, D_m=D_m, Y=y_tr)
                X_tr1 = model.fit_transform(X_tr, m1 * A)
                X_te1 = model.transform(X_te)
                A1 = model.A_return()
                print(A1[0, 0:5])
                # Accuracy for k = 1..50 neighbours in the learned space.
                for kk1 in range(50):
                    knn = KNeighborsClassifier(n_neighbors=kk1 + 1)
                    knn.fit(X_tr1, y_tr)
                    y_pr = knn.predict(X_te1)
                    accuracy[i, j, kk1] = 100 * accuracy_score(y_te, y_pr)
        for kk1 in range(50):
            # Baseline: plain kNN in the original feature space.
            knn = KNeighborsClassifier(n_neighbors=kk1 + 1)
            knn.fit(X_tr, y_tr)
            y_pr1 = knn.predict(X_te)
            ac_knn[0, kk1] = 100 * accuracy_score(y_te, y_pr1)
            for i in range(len(b1)):
                # Best accuracy over alph for this m value at this k.
                # BUG FIX: was accuracy[:, i, kk1 + 1] -- off by one: the
                # k=1 column was never summarised and the last column read
                # the never-written slice 50 (always zero).
                ac_knn[i + 1, kk1] = np.max(accuracy[:, i, kk1])
        # 5-point moving average to smooth the accuracy-vs-k curves.
        for kk1 in range(50 - 5):
            for i in range(len(b1) + 1):
                ac_knn[i, kk1] = (ac_knn[i, kk1] + ac_knn[i, kk1 + 1] + ac_knn[i, kk1 + 2] + ac_knn[i, kk1 + 3] + ac_knn[i, kk1 + 4]) / 5
        mark = {0: '-*', 1: '->', 2: '-p', 3: '-s', 4: '-d'}
        for i in range(5):
            plt.plot(ac_knn[i + 4, 0:50 - 5:2], mark[i], markersize=10, label=str(i))
        plt.legend()
        plt.show()
        print('knn accuracy:')
        print(ac_knn)
    print('the accuracy is :')

def main3():
    """Full 2-D grid-search of MANN over (alph, m) on the German dataset,
    summarising the best accuracy per m value and plotting smoothed
    accuracy-vs-k curves against a plain-kNN baseline.

    Relies on module-level names defined elsewhere in this file:
    DATA_LOADERS, SEED, TEST_SIZE, MANN, find_SD, np, plt, argparse,
    and the sklearn helpers.
    """
    parser = argparse.ArgumentParser(
        description='Apply the kNN classifier using different metrics.',
    )

    parser.add_argument(
        '-d', '--data',
        choices=DATA_LOADERS,
        default='wine',
        help='on which data to run the model',
    )
    parser.add_argument(
        '--to-plot',
        action='store_true',
        help='plot the projected data',
    )
    parser.add_argument(
        '--seed',
        default=SEED,
        type=int,
        help='seed to fix the randomness',
    )
    parser.add_argument(
        '-v', '--verbose',
        default=0,
        action='count',
        help='how much information to output',
    )
    from sklearn.decomposition import PCA

    args = parser.parse_args()
    np.random.seed(args.seed)

    from scipy.io import loadmat, savemat
    m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\german_uni')
  #  m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\cars_uni')
  #  m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\Segment_uni')
    X = m['X']
    y = m['Y']
    L1 = loadmat('G:\\songkun\\matlab_project\\mlcircus-lmnn-5b49cafaeb9a\\lmnn2\\L')
    n_iter = 1
    # Log-spaced grids for the alph and m hyper-parameters.
    al = np.power(2, np.linspace(-8, 12, 20))
    b1 = np.power(2, np.linspace(-8, 12, 20))
    accuracy = np.zeros((len(al), len(al), 150))
    ac_knn = np.zeros((len(al) + 1, 150))   # row 0: plain-kNN baseline
    # BUG FIX: `s` was assigned per repeat below but never initialised,
    # raising NameError on first use.
    s = np.zeros(n_iter)
    for kk in range(n_iter):
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y, test_size=TEST_SIZE)
        # Initialise the projection with the PCA basis.
        pca = PCA(n_components=X_tr.shape[1])
        pca.fit(X_tr)
        A = pca.components_
        K1 = 70        # similar neighbours per sample
        K2 = K1 * 3    # dissimilar neighbours per sample

        # Apply metric model
        S_m, D_m = find_SD(X_tr, y_tr, K1, K2)
        for i in range(len(al)):
            for j in range(len(b1)):
                m1 = 1
                model = MANN(m=b1[j], alph=al[i], reg=0.01, mu=0, S_m=S_m, D_m=D_m, Y=y_tr)
                X_tr1 = model.fit_transform(X_tr, m1 * A)
                X_te1 = model.transform(X_te)
                A1 = model.A_return()
                print(A1[0, 0:5])
                # Accuracy for k = 1..50 neighbours in the learned space.
                for kk1 in range(50):
                    knn = KNeighborsClassifier(n_neighbors=kk1 + 1)
                    knn.fit(X_tr1, y_tr)
                    y_pr = knn.predict(X_te1)
                    accuracy[i, j, kk1] = 100 * accuracy_score(y_te, y_pr)
        for kk1 in range(50):
            # Baseline: plain kNN in the original feature space.
            knn = KNeighborsClassifier(n_neighbors=kk1 + 1)
            knn.fit(X_tr, y_tr)
            y_pr1 = knn.predict(X_te)
            ac_knn[0, kk1] = 100 * accuracy_score(y_te, y_pr1)
            for i in range(len(b1)):
                # Best accuracy over alph for this m value at this k.
                # BUG FIX: was accuracy[:, i, kk1 + 1] -- off by one: the
                # k=1 column was never summarised and the last column read
                # the never-written slice 50 (always zero).
                ac_knn[i + 1, kk1] = np.max(accuracy[:, i, kk1])
        # 5-point moving average to smooth the accuracy-vs-k curves.
        for kk1 in range(50 - 5):
            for i in range(len(b1) + 1):
                ac_knn[i, kk1] = (ac_knn[i, kk1] + ac_knn[i, kk1 + 1] + ac_knn[i, kk1 + 2] + ac_knn[i, kk1 + 3] +
                                  ac_knn[i, kk1 + 4]) / 5
        mark = {0: '-*', 1: '->', 2: '-p', 3: '-s', 4: '-d'}
        for i in range(5):
            plt.plot(ac_knn[i + 4, 0:50 - 5:2], mark[i], markersize=10, label=str(i))
        plt.legend()
        plt.show()
        print('knn accuracy:')
        print(ac_knn)
        s[kk] = np.max(accuracy)
    print('the accuracy is :')
def main4():
    """Full 2-D grid-search of MANN over (alph, m) on the MSRA25 dataset
    (5x5 grid), summarising the best accuracy per m value and plotting
    smoothed accuracy-vs-k curves against a plain-kNN baseline.

    Relies on module-level names defined elsewhere in this file:
    DATA_LOADERS, SEED, TEST_SIZE, MANN, find_SD1, np, plt, argparse,
    and the sklearn helpers.
    """
    parser = argparse.ArgumentParser(
        description='Apply the kNN classifier using different metrics.',
    )

    parser.add_argument(
        '-d', '--data',
        choices=DATA_LOADERS,
        default='wine',
        help='on which data to run the model',
    )
    parser.add_argument(
        '--to-plot',
        action='store_true',
        help='plot the projected data',
    )
    parser.add_argument(
        '--seed',
        default=SEED,
        type=int,
        help='seed to fix the randomness',
    )
    parser.add_argument(
        '-v', '--verbose',
        default=0,
        action='count',
        help='how much information to output',
    )
    from sklearn.decomposition import PCA

    args = parser.parse_args()
    np.random.seed(args.seed)

    from scipy.io import loadmat, savemat
    m = loadmat('C:\\Users\\songkun\\Downloads\\Benchmark Datasets\\MSRA25_uni.mat')
  #  m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\cars_uni')
  #  m = loadmat('G:\\songkun\\matlab_database\\Benchmark Datasets\\UCI\\Segment_uni')
    X = m['X']
    y = m['Y']
    L1 = loadmat('G:\\songkun\\matlab_project\\mlcircus-lmnn-5b49cafaeb9a\\lmnn2\\L')
    n_iter = 1
    # Log-spaced 5-point grids for the alph and m hyper-parameters.
    al = np.power(2, np.linspace(-8, 12, 5))
    b1 = np.power(2, np.linspace(-8, 12, 5))
    accuracy = np.zeros((len(al), len(al), 150))
    ac_knn = np.zeros((len(al) + 1, 150))   # row 0: plain-kNN baseline
    # BUG FIX: `s` was assigned per repeat below but never initialised,
    # raising NameError on first use.
    s = np.zeros(n_iter)
    for kk in range(n_iter):
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y, test_size=TEST_SIZE)
        # Initialise the projection with the PCA basis.
        pca = PCA(n_components=X_tr.shape[1])
        pca.fit(X_tr)
        A = pca.components_
        K1 = 30        # similar neighbours per sample
        K2 = K1 * 3    # dissimilar neighbours per sample

        # Apply metric model
        S_m, D_m = find_SD1(X_tr, y_tr, K1, K2)
        for i in range(len(al)):
            for j in range(len(b1)):
                m1 = 1
                model = MANN(m=b1[j], alph=al[i], reg=0.01, mu=0, S_m=S_m, D_m=D_m, Y=y_tr)
                X_tr1 = model.fit_transform(X_tr, m1 * A)
                X_te1 = model.transform(X_te)
                A1 = model.A_return()
                print(A1[0, 0:5])
                # Accuracy for k = 1..50 neighbours in the learned space.
                for kk1 in range(50):
                    knn = KNeighborsClassifier(n_neighbors=kk1 + 1)
                    knn.fit(X_tr1, y_tr)
                    y_pr = knn.predict(X_te1)
                    accuracy[i, j, kk1] = 100 * accuracy_score(y_te, y_pr)
        for kk1 in range(50):
            # Baseline: plain kNN in the original feature space.
            knn = KNeighborsClassifier(n_neighbors=kk1 + 1)
            knn.fit(X_tr, y_tr)
            y_pr1 = knn.predict(X_te)
            ac_knn[0, kk1] = 100 * accuracy_score(y_te, y_pr1)
            for i in range(len(b1)):
                # Best accuracy over alph for this m value at this k.
                # BUG FIX: was accuracy[:, i, kk1 + 1] -- off by one: the
                # k=1 column was never summarised and the last column read
                # the never-written slice 50 (always zero).
                ac_knn[i + 1, kk1] = np.max(accuracy[:, i, kk1])
        # 5-point moving average to smooth the accuracy-vs-k curves.
        for kk1 in range(50 - 5):
            for i in range(len(b1) + 1):
                ac_knn[i, kk1] = (ac_knn[i, kk1] + ac_knn[i, kk1 + 1] + ac_knn[i, kk1 + 2] + ac_knn[i, kk1 + 3] +
                                  ac_knn[i, kk1 + 4]) / 5
        mark = {0: '-*', 1: '->', 2: '-p', 3: '-s', 4: '-d'}
        for i in range(5):
            # BUG FIX: was ac_knn[i + 4] -- with only len(b1)+1 == 6 rows
            # that indexed rows 6-8 and raised IndexError; plot the five
            # per-m summary rows (1..5) instead.
            plt.plot(ac_knn[i + 1, 0:50 - 5:2], mark[i], markersize=10, label=str(i))
        plt.legend()
        plt.show()
        print('knn accuracy:')
        print(ac_knn)
        s[kk] = np.max(accuracy)
    print('the accuracy is :')

def time_test(file_name = 'G://songkun//matlab_database//time_uci_test'):
    """Time MANN training (alph=+1 vs alph=-1) on every .mat dataset in a
    directory and save the per-dataset timings to <dir>/test.mat.

    Parameters:
        file_name  directory containing the .mat dataset files.

    Relies on module-level names defined elsewhere in this file:
    DATA_LOADERS, SEED, MANN, find_SD1, np, argparse, and the sklearn
    helpers.
    """
    parser = argparse.ArgumentParser(
        description='Apply the kNN classifier using different metrics.',
    )

    parser.add_argument(
        '-d', '--data',
        choices=DATA_LOADERS,
        default='wine',
        help='on which data to run the model',
    )
    parser.add_argument(
        '--to-plot',
        action='store_true',
        help='plot the projected data',
    )
    parser.add_argument(
        '--seed',
        default=SEED,
        type=int,
        help='seed to fix the randomness',
    )
    parser.add_argument(
        '-v', '--verbose',
        default=0,
        action='count',
        help='how much information to output',
    )
    from sklearn.decomposition import PCA
    from timeit import default_timer as timer

    args = parser.parse_args()
    np.random.seed(args.seed)

    from scipy.io import loadmat, savemat
    import os
    fileList = os.listdir(file_name)
    t1 = []   # per-dataset fit times with alph = +1
    t2 = []   # per-dataset fit times with alph = -1
    for file_i in range(len(fileList)):
        m = loadmat(file_name+'//'+fileList[file_i])
        X = m['X']
        y = m['Y']
        # NOTE(review): test_size=0 -- recent scikit-learn versions reject
        # an empty test split; confirm the intended sklearn version.
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y, test_size=0)
        # Initialise the projection with the PCA basis.
        pca = PCA(n_components=X_tr.shape[1])
        pca.fit(X_tr)
        A = pca.components_
        #        A = L1['L']+np.eye()
        # NOTE(review): K1/K2 are computed but unused here, and find_SD1 is
        # called with a single argument while other call sites pass
        # (X_tr, y_tr, K1, K2) -- confirm find_SD1's signature.
        K1 = 30
        K2 = K1 * 3
        # Apply metric model
        S_m, D_m = find_SD1(y_tr)
        # Time training with alph = +1.
        start = timer()
        model = MANN(m=0, alph=1, reg=0.01, mu=0, S_m=S_m, D_m=D_m, Y=y_tr)
        X_tr1 = model.fit_transform(X_tr, A)
        end = timer()
        t1.append( end - start)

        # Time training with alph = -1.
        start = timer()
        model = MANN(m=0, alph=-1, reg=0.01, mu=0, S_m=S_m, D_m=D_m, Y=y_tr)
        X_tr1 = model.fit_transform(X_tr, A)
        end = timer()

        t2.append(end - start)

        print(t1)
        print(t2)
        print(fileList)

    savemat(file_name+'//test.mat',{'t1':t1,'t2':t2, 'file_name':fileList})

# --- Non-code residue scraped from the hosting blog page; commented out so
# --- the file parses. Original text preserved below.
#   • 0 点赞
#   • 0 收藏  觉得还不错? 一键收藏
#   • 0 评论
# 前台: (1)注册登录模块:按照学校的相关规定进行注册和登录。 (2)招聘信息查看:高校毕业生们可以网站首页上查看所有的招聘信息,除此之外还可以输入公司名称或岗位名称进行搜索。 (3)用人单位模块:此模块为宣传用人单位的主要功能模块,具体包括用人单位简介、岗位需求及职责及公司介绍等功能。 (4)就业指导:学生朋友们在就业前可以通过此模块获取指导。 (5)新闻信息:为了让用户们可以了解到最新的新闻动态,本系统可以通过新闻信息查看功能阅读近期的新闻动态。 (6)在线论坛:毕业季的同学们可以通过此模块相互交流。 后台: (1)系统用户管理模块:可以查看系统内的管理员信息并进行维护。 (2)学生管理模块:通过此功能可以添加学生用户,还可以对学生信息进行修改和删除。 (3)用人单位管理模块:管理员用户通过此模块可以管理用人单位的信息,还可以对用人单位信息进行查看和维护。 (4)招聘管理模块:管理员通过此功能发布和维护系统内的照片信息。 (5)就业指导管理模块:通过此模块可以编辑和发布就业指导信息,从而更好的帮助就业季的同学们。 (6)论坛管理:通过论坛管理可以查看论坛中的主题帖及里面的回复信息,除此之外还可以对论坛中的信息进行维护和管理。
# 评论
# 添加红包
#
# 请填写红包祝福语或标题
#
# 红包个数最小为10个
#
# 红包金额最低5元
#
# 当前余额3.43前往充值 >
# 需支付:10.00
# 成就一亿技术人!
# 领取后你会自动成为博主和红包主的粉丝 规则
# hope_wisdom
# 发出的红包
# 实付
# 使用余额支付
# 点击重新获取
# 扫码支付
# 钱包余额 0
#
# 抵扣说明:
#
# 1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
# 2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。
#
# 余额充值