PyTorch basic syntax

Some commonly used PyTorch code snippets.

1. PCA

import torch

class PCA:
    # input shape:  [data_nums, data_dimension]
    # output shape: [data_nums, output_dim]
    def __init__(self, output_dim) -> None:
        self.output_dim = output_dim

    def fit(self, X_data):
        N = len(X_data)
        # centering matrix H = I - (1/N) * ones(N, N)
        H = torch.eye(n=N) - 1 / N * torch.matmul(torch.ones(size=(N, 1)), torch.ones(size=(1, N)))
        X_data = torch.matmul(H, X_data)  # subtract the column means
        _, _, v = torch.svd(X_data)
        self.base = v[:, :self.output_dim]  # top output_dim right singular vectors

    def fit_transform(self, X_data):
        self.fit(X_data)
        return self.transform(X_data)

    def transform(self, X_data):
        return torch.matmul(X_data, self.base)

    def inverse_transform(self, X_data):
        return torch.matmul(X_data, self.base.T)

# The test set must be reduced with the projection matrix learned on the training set.
X_train = torch.tensor(np.array(X_train)).float()  # convert to a Tensor
X_train = torch.reshape(X_train, [X_train.shape[0], X_train.shape[2] * X_train.shape[3]])  # flatten, e.g. [2000, 784]

pca = PCA(4)  # target dimensionality
X_train = pca.fit_transform(X_train)  # fit on the training set and reduce it

X_test = pca.transform(X_test)  # X_test must be converted and reshaped the same way first
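
A minimal, self-contained sketch of the class above on random data (the sample count, feature size, and tensor names are illustrative assumptions):

import torch

torch.manual_seed(0)
X = torch.randn(200, 784)           # 200 samples with 784 features

pca = PCA(4)
Z = pca.fit_transform(X)            # reduced representation, shape [200, 4]
X_proj = pca.inverse_transform(Z)   # map back to the original feature space, shape [200, 784]

print(Z.shape, X_proj.shape)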

2. Loading the data

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

def load_mnist():
    # path to mnist.npz (use a raw string or escape backslashes on Windows)
    path = r'.//dataset//mnist.npz'
    f = np.load(path)
    x_train, y_train = f['x_train'],f['y_train']
    x_test, y_test = f['x_test'],f['y_test']
    f.close()
    return (x_train, y_train), (x_test, y_test)

def filter_36(x, y):  # keep only digits 3 and 6; label 3 as True, 6 as False
    keep = (y == 3) | (y == 6)
    x, y = x[keep], y[keep]
    y = y == 3
    return x,y

def get_mnist():

    (x_train, y_train), (x_test, y_test) = load_mnist()

    x_train, y_train = filter_36(x_train, y_train)
    x_test, y_test = filter_36(x_test, y_test)

    # flatten the images to [num_samples, 28*28]
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1] * x_train.shape[2]))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1] * x_test.shape[2]))

    # standardization: fit the scaler on the training set only,
    # then apply the same mean/variance to the test set
    sc = StandardScaler().fit(x_train)
    X_std_train = sc.transform(x_train)
    X_std_test = sc.transform(x_test)

    # dimensionality reduction: fit PCA on the training set and reuse it for the test set
    pca = PCA(6).fit(X_std_train)
    pca_x_train = pca.transform(X_std_train)
    pca_x_test = pca.transform(X_std_test)

    # squash the features into (-pi/2, pi/2)
    x_train = np.arctan(pca_x_train)
    x_test = np.arctan(pca_x_test)

    # convert the labels from True/False to 1/0  (3 -> 1, 6 -> 0)
    y_train = y_train + 0
    y_test = y_test + 0

    return x_train, y_train, x_test, y_test
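
Since the rest of these notes feed such arrays into PyTorch models, here is a minimal sketch of wrapping the returned arrays in a DataLoader (the batch size and variable names are illustrative assumptions):

import torch
from torch.utils.data import TensorDataset, DataLoader

x_train, y_train, x_test, y_test = get_mnist()

# float features; float labels of shape [batch, 1] as required by BCELoss (see below)
train_ds = TensorDataset(torch.tensor(x_train).float(),
                         torch.tensor(y_train).float().reshape(-1, 1))
train_loader = DataLoader(train_ds, batch_size=32, shuffle=True)

for inputs, labels in train_loader:
    print(inputs.shape, labels.shape)  # e.g. torch.Size([32, 6]) torch.Size([32, 1])
    break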

3. Binary classification loss

(1) BCELoss()
The network output has dimension 1, and the last layer usually needs a Sigmoid().
The targets take values in {0, 1}; the training labels y have shape [batch, 1] and dtype float, e.g. y_train = torch.tensor(np.array(y_train)).float()

criterion = nn.BCELoss()

# the outputs (after Sigmoid) lie in [0, 1]; threshold at 0.5 for class predictions
preds = outputs >= 0.5

batch_corrects = torch.sum(preds == labels.data).item()
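
A minimal sketch of one training step with BCELoss (the model architecture, optimizer, and dummy data are illustrative assumptions; the Sigmoid() on the last layer is the point):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(6, 16), nn.ReLU(),
                      nn.Linear(16, 1), nn.Sigmoid())   # Sigmoid on the last layer
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

inputs = torch.randn(32, 6)                       # dummy batch
labels = torch.randint(0, 2, (32, 1)).float()     # targets in {0, 1}, shape [batch, 1], float

outputs = model(inputs)                           # values in [0, 1]
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()

preds = outputs >= 0.5
batch_corrects = torch.sum(preds == labels.data).item()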

(2) BCEWithLogitsLoss()
Applies Sigmoid() to the network output internally before computing the binary cross-entropy, so the last layer should output raw logits (no Sigmoid()).

criterion = nn.BCEWithLogitsLoss() 

# the raw outputs are logits; predict with outputs >= 0 (equivalently torch.sigmoid(outputs) >= 0.5)
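
A minimal sketch (model and dummy data are illustrative assumptions) showing that the last layer outputs logits and how predictions are recovered:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(6, 16), nn.ReLU(),
                      nn.Linear(16, 1))           # no Sigmoid: the outputs are raw logits
criterion = nn.BCEWithLogitsLoss()

inputs = torch.randn(32, 6)
labels = torch.randint(0, 2, (32, 1)).float()

logits = model(inputs)
loss = criterion(logits, labels)                  # Sigmoid is applied internally

preds = logits >= 0                               # same as torch.sigmoid(logits) >= 0.5
batch_corrects = torch.sum(preds == labels.data).item()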

4. Multi-class classification loss

(1) CrossEntropyLoss()
The output dimension equals the number of classes: for n classes the network outputs n scores. The loss applies softmax to the outputs internally (log-softmax + NLLLoss), so no softmax layer is needed. The training labels y have shape [batch,] and dtype Long, e.g. y_train = torch.tensor(np.array(y_train)).long()

criterion = nn.CrossEntropyLoss()

_, preds = torch.max(outputs, 1)
batch_corrects = torch.sum(preds == labels.data).item()
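
A minimal sketch for a 10-class problem (the class count, model, and dummy data are illustrative assumptions):

import torch
import torch.nn as nn

num_classes = 10
model = nn.Sequential(nn.Linear(6, 32), nn.ReLU(),
                      nn.Linear(32, num_classes))   # raw scores, no softmax layer
criterion = nn.CrossEntropyLoss()

inputs = torch.randn(32, 6)
labels = torch.randint(0, num_classes, (32,))       # shape [batch,], dtype long

outputs = model(inputs)                             # shape [batch, num_classes]
loss = criterion(outputs, labels)                   # log-softmax + NLL computed internally

_, preds = torch.max(outputs, 1)
batch_corrects = torch.sum(preds == labels.data).item()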