Everyday Code Tools (Quick Index)

1. Machine Learning

1.1 PyTorch

1.1.1 Classifier Training Function

from tqdm import tqdm

def train(model, device, epoch, train_loader, optimizer, loss_func):
    # Switch to training mode and move the model to the target device.
    model.train().to(device)
    train_loss = 0
    for batch_idx, (data, target) in enumerate(tqdm(train_loader, ascii=True)):
        data, target = data.to(device), target.to(device).long()
        optimizer.zero_grad()
        output = model(data)
        loss = loss_func(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()

    # Report the average loss per batch for this epoch.
    print("Train epoch {}, loss: {:.4f}".format(epoch, train_loss / len(train_loader)))

1.1.2 Classifier Test Function

import torch
from tqdm import tqdm

def test(model, device, test_loader):
    # Switch to evaluation mode and move the model to the target device.
    model.eval().to(device)
    count = 0
    with torch.no_grad():  # gradients are not needed during evaluation
        for batch_idx, (data, target) in enumerate(tqdm(test_loader, ascii=True)):
            data, target = data.to(device), target.to(device)
            output = model(data)
            _, label = torch.max(output, dim=-1)
            count += (label == target).sum().item()

    print("Acc: {:.4f}".format(count * 100.0 / len(test_loader.dataset)))

1.1.3 General Training Components

import os

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

def main():
    batch_size = 70
    learning_rate = 0.001
    epoch_num = 100
    labels = ['A', 'B', 'C', 'D']
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Mydataset and MODEL are user-defined Dataset / nn.Module classes.
    dataset = Mydataset(is_train=True)
    test_dataset = Mydataset(is_train=False)

    train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    model = MODEL()

    # Resume from a saved checkpoint if one exists.
    if os.path.exists('./model.pkl'):
        model.load_state_dict(torch.load('./model.pkl', map_location=device)['weights'])
        print("load model successfully")

    adam = torch.optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(adam, step_size=2, gamma=0.9)

    loss_func = nn.CrossEntropyLoss()

    for epoch in range(1, epoch_num + 1):
        test(model, device, test_loader)
        train(model, device, epoch, train_loader, adam, loss_func)
        scheduler.step()
        model_dict = {
            "labels": labels,
            'weights': model.state_dict(),
        }
        torch.save(model_dict, "model.pkl")
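
main() above assumes two user-defined pieces: Mydataset (a torch Dataset) and MODEL (an nn.Module). A minimal sketch of what those placeholders might look like, with hypothetical tensor shapes and a 4-class head to match the labels list (these details are assumptions, not part of the original snippet):

import torch
import torch.nn as nn
from torch.utils.data import Dataset

class Mydataset(Dataset):
    """Hypothetical placeholder dataset: random 32x32 RGB images, 4 classes."""
    def __init__(self, is_train=True):
        n = 1000 if is_train else 200
        self.data = torch.randn(n, 3, 32, 32)
        self.targets = torch.randint(0, 4, (n,))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.targets[idx]

class MODEL(nn.Module):
    """Hypothetical placeholder classifier with a 4-class output."""
    def __init__(self, num_classes=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(16, num_classes),
        )

    def forward(self, x):
        return self.net(x)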

1.2 NumPy

1.2.1 Object Tracking with the Hungarian Algorithm (bbox center distance as the feature)

import numpy as np
from scipy.optimize import linear_sum_assignment
from collections import deque

class IDcomput(object):
    def __init__(self, lastframe = 10):
        # lastframe: number of frames a disappeared target is kept; after that, its feature is cleared
        self.features = []
        self.ids_deque = deque(maxlen=lastframe)

    def checkids(self):
        # Once the deque holds `lastframe` frames, any feature id that did not
        # appear in any of those frames is reset to the sentinel value [-1, -1].
        if len(self.ids_deque) == self.ids_deque.maxlen:
            set_temp = set()
            for ids_set in list(self.ids_deque):
                set_temp = ids_set | set_temp
            for id in range(len(self.features)):
                if id not in set_temp:
                    self.features[id] = [-1, -1]

    def getdistance(self, x, y):
        '''
        input: 
            x: array(n, 2)
            y: array(m, 2)

        output:
            array(n, m)
        '''
        x = x[:, np.newaxis, :]   # (n, 1, 2)
        y = y[np.newaxis, :, :]   # (1, m, 2)

        # Broadcasting produces all pairwise differences with shape (n, m, 2);
        # explicit np.repeat calls are not needed here.
        dis = (x - y) ** 2
        dis = np.sqrt(dis[:, :, 0] + dis[:, :, 1])
        return dis

    def __call__(self, X):
        # X: array(k, 2) of bbox center points detected in the current frame.
        # Returns an integer ID for each row of X, obtained by matching X
        # against the stored features with the Hungarian algorithm.
        if len(self.features) == 0:
            # First frame: adopt the detections as the initial features.
            self.features = X
            return np.array([i for i in range(len(X))])

        elif len(self.features) == len(X):
            dis = self.getdistance(self.features, X)
            feature_ids = linear_sum_assignment(dis)
            feature_ids = feature_ids[1]

        elif len(self.features) < len(X):
            dis = self.getdistance(self.features, X)
            feature_ids = linear_sum_assignment(dis)
            feature_ids = feature_ids[1]
            feature_ids_rest = set([i for i in range(len(X))]).difference(set(feature_ids))
            feature_ids = np.array(list(feature_ids) + list(feature_ids_rest))
            # temp_feat = X
            # temp_feat[0:len(self.features)] = self.features
            # self.features = temp_feat
            self.features = X
        else:
            dis = self.getdistance(X, self.features)
            ids = linear_sum_assignment(dis)
            ids = ids[1]
            self.features[ids] = X

            self.ids_deque.append(set(ids))
            self.checkids()
            return ids

        feature_ids = feature_ids.argsort()
        ids = np.array([i for i in range(len(X))])
        ids = ids[feature_ids]
        self.features[ids] = X

        self.ids_deque.append(set(ids))
        return ids

if __name__ == '__main__':
    idcomput = IDcomput()
    x = np.array([[1,2],[0,1]])
    y = np.array([[0,1], [3,2], [1,2]])
    z = np.array([[0,1], [3,2]])

    # i = np.array([[1,2],[0,1],[3,2]])
    # z = np.array([[1,3],[0,2],[3,3]])
    # w = np.array([[0,1]])
    # h = np.array([[1,2],[0,1]])
    # i = np.array([[0,0],[2,3], [0,1]])

    print('x', idcomput(x))
    print('y',idcomput(y))
    # print('i',idcomput(i))
    print('z',idcomput(z))
    # print('w',idcomput(w))
    # print('h',idcomput(h))
    # print('i',idcomput(i))
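
For reference, the matching step itself can be isolated: build a pairwise center-distance matrix between the previous frame's tracks and the current detections, and let linear_sum_assignment pick the minimum-cost pairing. A minimal standalone sketch (the coordinates below are made up for illustration):

import numpy as np
from scipy.optimize import linear_sum_assignment

# Hypothetical bbox centers: previous frame (tracks) vs. current frame (detections).
tracks = np.array([[10.0, 12.0], [40.0, 41.0]])
detections = np.array([[41.0, 40.0], [11.0, 11.0]])

# Pairwise Euclidean distance matrix, shape (num_tracks, num_detections).
cost = np.linalg.norm(tracks[:, np.newaxis, :] - detections[np.newaxis, :, :], axis=-1)

row_ind, col_ind = linear_sum_assignment(cost)
for r, c in zip(row_ind, col_ind):
    print("track {} -> detection {} (distance {:.2f})".format(r, c, cost[r, c]))
# Expected pairing: track 0 -> detection 1, track 1 -> detection 0.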

1.3 Result Evaluation

1.3.1 Confusion Matrix

from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np

def plot_confusion_matrix(cm, labels_name, title, rotation=45, cmap=plt.cm.Wistia, fontsize=11):
    # Normalize each row so cells show the fraction of each true class.
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # plt.figure(figsize=(8, 8.5))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)  # e.g. plt.cm.Oranges, plt.cm.Wistia
    plt.title(title)
    plt.colorbar()
    num_local = np.array(range(len(labels_name)))
    plt.xticks(num_local, labels_name, rotation=rotation)
    plt.yticks(num_local, labels_name)

    plt.ylabel('True label')
    plt.xlabel("Predicted label")
    plt.subplots_adjust(bottom=0.15)

    # Annotate each cell with its value (skip near-zero cells).
    x, y = np.meshgrid(num_local, num_local)
    for x_val, y_val in zip(x.flatten(), y.flatten()):
        c = cm[y_val][x_val]
        if c > 0.01:
            plt.text(x_val, y_val, "{:.2f}".format(c), fontsize=fontsize, va='center', ha='center')


def autodraw_cm(y_true, y_pred, labels_name, save_path="./ResNet18_cm.png", title="ResNet18_cm", **kwargs):
    # Pass labels explicitly so the matrix size matches labels_name
    # even when a class is missing from y_true / y_pred.
    cm = confusion_matrix(y_true, y_pred, labels=list(range(len(labels_name))))
    plot_confusion_matrix(cm, labels_name, title, **kwargs)
    plt.savefig(save_path)
    plt.cla()
    plt.clf()

if __name__ == '__main__':
    y_true = [1, 0]
    y_pred = [1, 1]
    labels_name = ['a', 'b']
    autodraw_cm(y_true, y_pred, labels_name)
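
To plot a confusion matrix for the classifier from section 1.1, collect per-sample predictions during evaluation and pass them to autodraw_cm. A minimal sketch, assuming the same model / device / test_loader objects as above and the four labels from main() (the helper name collect_predictions is introduced here for illustration):

import torch

def collect_predictions(model, device, test_loader):
    # Run the model over test_loader and return flat lists of true / predicted labels.
    model.eval().to(device)
    y_true, y_pred = [], []
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data.to(device))
            pred = output.argmax(dim=-1).cpu()
            y_true.extend(target.tolist())
            y_pred.extend(pred.tolist())
    return y_true, y_pred

# Usage (labels_name must follow the integer class order used in training):
# y_true, y_pred = collect_predictions(model, device, test_loader)
# autodraw_cm(y_true, y_pred, labels_name=['A', 'B', 'C', 'D'], save_path="./cm.png", title="cm")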