Evaluation metrics for binary classification: F1 score, sensitivity, specificity, ROC curve, AUC

Note: this post is a record of my own learning process and is not meant as an authoritative reference. While learning binary classification I needed some of the common evaluation metrics. The code on the big Chinese-language sites was hard to follow and largely copied from one post to another, so I worked through examples on English-language sites bit by bit until I could get results, and I am writing this post to record that process.

Since I am also a beginner, please refer to other articles for the theory:
Sensitivity and specificity
ROC curve, AUC, F1 score, etc.
(1) Computing the ROC curve, the area under it (AUC), and the F1 score
These three metrics are computed with functions from the official scikit-learn library: f1_score, roc_curve, and auc. I recommend checking the official roc_curve example in the scikit-learn documentation.
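To see what these three functions take and return, here is a minimal standalone sketch with made-up labels and scores (the array values below are invented purely for illustration):

import numpy as np
from sklearn.metrics import roc_curve, f1_score, auc

y_true = np.array([0, 0, 1, 1])            # made-up ground-truth labels
y_pred = np.array([0, 1, 1, 1])            # made-up hard predictions (after argmax)
y_score = np.array([0.1, 0.6, 0.35, 0.8])  # made-up probability of class 1 for each sample

fpr, tpr, thresholds = roc_curve(y_true, y_score)  # false/true positive rate at each threshold
roc_auc = auc(fpr, tpr)                            # area under the ROC curve
f1 = f1_score(y_true, y_pred)                      # F1 computed from the hard predictions
print(f'auc:{roc_auc:.3f}, f1:{f1:.3f}')

In the PyTorch evaluation pipeline below, the same three calls are applied to arrays collected over the validation loop: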

from sklearn.metrics import roc_curve, f1_score, auc

fpr, tpr, thresholds = roc_curve(all_labele, all_prescore)
roc_auc = auc(fpr, tpr)
f1score = f1_score(all_labele, all_pre)

Here, all_labele holds the ground-truth labels of the dataset and all_pre holds the predicted labels, i.e. the class indices extracted with torch.argmax(outputs, 1). all_prescore is the score passed to roc_curve; it should be the predicted probability of the positive class (class 1) after softmax, i.e. torch.softmax(outputs, 1)[:, 1]. The maximum score torch.max(outputs.data, 1)[0] only tells you how confident the model is in whichever class it predicted, so it is not a suitable ranking score for the ROC curve.
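To make the difference concrete, here is a small sketch with made-up logits showing which tensor operation produces which quantity (the logit values are invented):

import torch

outputs = torch.tensor([[ 2.0, -1.0],   # made-up logits for a batch of 3 samples, 2 classes
                        [ 0.5,  1.5],
                        [-0.3,  0.1]])

predicted = torch.argmax(outputs, 1)    # hard labels: tensor([0, 1, 1])
probs = torch.softmax(outputs, 1)       # per-class probabilities, each row sums to 1
pos_score = probs[:, 1]                 # probability of class 1, the score to pass to roc_curve
max_score = torch.max(outputs, 1)[0]    # score of the predicted class, not a ranking score for ROC
print(predicted, pos_score, max_score)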
Then plot the ROC curve:

    plt.plot(fpr, tpr, 'r', label='ROC curve (area = %0.2f)' % roc_auc)

    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'g')
    print(f'loss:{val_loss / len(val_loader):.4f}, acc:{correct / total}, auc:{roc_auc:.3f}, f1score:{f1score:.3f}')
    # plt.savefig('./result/1.png')
    plt.show()

The complete code is given below:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve, f1_score, auc, confusion_matrix
import torch
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from torchvision import models
import torch.nn as nn
def prediction():
    device = torch.device("cuda:0")
    transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    val_dataset = ImageFolder('data', transform=transform)
    val_loader = DataLoader(val_dataset,
                            batch_size=40,
                            shuffle=False,
                            num_workers=0)
    net = models.resnet18(pretrained=False)
    in_channel = net.fc.in_features
    net.fc = nn.Linear(in_channel, 2)
    net.load_state_dict(torch.load('yourweight.pth', map_location=device))
    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    net.eval()
    val_loss, correct, total =0, 0, 0
    all_labele, all_prescore, all_pre = np.array([]), np.array([]), np.array([])
    for step, batch in enumerate(val_loader):
        imgs, lables = batch
        imgs, lables = imgs.to(device), lables.to(device)
        with torch.no_grad():
            outputs = net(imgs)
        loss = loss_function(outputs, lables)
        val_loss += loss.item()
        predicted = torch.argmax(outputs, 1)
        total += lables.size(0)
        # probability of the positive class (class 1) after softmax, used as the score for the ROC curve
        predicted_score = torch.softmax(outputs, 1)[:, 1]
        correct += (predicted == lables).sum().item()
        # accumulate predictions, ground-truth labels and positive-class scores over the whole validation set
        all_pre = np.append(all_pre, predicted.cpu().numpy())
        all_labele = np.append(all_labele, lables.cpu().numpy())
        all_prescore = np.append(all_prescore, predicted_score.cpu().numpy())

    fpr, tpr, thresholds = roc_curve(all_labele, all_prescore)
    roc_auc = auc(fpr, tpr)
    f1score = f1_score(all_labele, all_pre)
    plt.plot(fpr, tpr, 'r', label='ROC curve (area = %0.2f)' % roc_auc)

    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'g')
    print(f'loss:{val_loss / len(val_loader):.4f}, acc:{correct / total}, auc:{roc_auc:.3f}, f1score:{f1score:.3f}')
    # plt.savefig('./result/1.png')
    plt.show()

  

if __name__=='__main__':
    prediction()

(2) Computing sensitivity and specificity. By definition:
sensitivity = TP / (TP + FN)
specificity = TN / (TN + FP)
We can obtain TP, FP, TN and FN from the confusion matrix; see the official scikit-learn confusion_matrix example. A small sanity check with toy labels follows the snippet below.
all_labele and all_pre are the same arrays as above.

from sklearn.metrics import  confusion_matrix

# with labels=[0, 1], ravel() returns the counts in the order tn, fp, fn, tp (class 1 is the positive class)
cfmetric = confusion_matrix(all_labele, all_pre, labels=[0, 1])
tn, fp, fn, tp = cfmetric.ravel()
sensitivity = tp/(tp+fn)
specificity = tn/(tn+fp)
print(f'sensitivity:{sensitivity:.3f}, specificity:{specificity:.3f}')
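As a quick sanity check of the ordering returned by ravel(), here is a toy example with made-up labels, treating class 1 as the positive class:

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 0, 0, 1, 1, 1])   # made-up ground truth
y_pred = np.array([0, 1, 0, 1, 1, 0])   # made-up predictions

# with labels=[0, 1], ravel() returns the counts in the order tn, fp, fn, tp
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
print(tn, fp, fn, tp)                    # 2 1 1 2
print('sensitivity:', tp / (tp + fn))    # 2/3: two of the three positives were found
print('specificity:', tn / (tn + fp))    # 2/3: two of the three negatives were kept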

The full code:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve, f1_score, auc, confusion_matrix
import torch
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from torchvision import models
import torch.nn as nn
def prediction():
    device = torch.device("cuda:0")
    transform = transforms.Compose([
           # transforms.Grayscale(1),
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    val_dataset = ImageFolder('./dataset/MMQ/Axial/val', transform=transform)
    val_loader = DataLoader(val_dataset,
                            batch_size=40,
                            shuffle=False,
                            num_workers=0)
    net = models.resnet18(pretrained=False)
    # net.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
    in_channel = net.fc.in_features
    net.fc = nn.Linear(in_channel, 2)
    net.load_state_dict(torch.load('./model_weight/HCC_MMQ_Axial.pth', map_location=device))
    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    net.eval()
    val_loss, correct, total =0, 0, 0
    all_labele, all_prescore, all_pre = np.array([]), np.array([]), np.array([])
    for step, batch in enumerate(val_loader):
        imgs, lables = batch
        imgs, lables = imgs.to(device), lables.to(device)
        with torch.no_grad():
            outputs = net(imgs)
        loss = loss_function(outputs, lables)
        val_loss += loss.item()
        predicted = torch.argmax(outputs, 1)
        total += lables.size(0)
        # probability of the positive class (class 1) after softmax (kept for consistency with the ROC code above)
        predicted_score = torch.softmax(outputs, 1)[:, 1]
        correct += (predicted == lables).sum().item()
        # accumulate predictions, ground-truth labels and positive-class scores over the whole validation set
        all_pre = np.append(all_pre, predicted.cpu().numpy())
        all_labele = np.append(all_labele, lables.cpu().numpy())
        all_prescore = np.append(all_prescore, predicted_score.cpu().numpy())

    # with labels=[0, 1], ravel() returns the counts in the order tn, fp, fn, tp (class 1 is the positive class)
    cfmetric = confusion_matrix(all_labele, all_pre, labels=[0, 1])
    tn, fp, fn, tp = cfmetric.ravel()
    sensitivity = tp/(tp+fn)
    specificity = tn/(tn+fp)
    print(f'sensitivity:{sensitivity:.3f}, specificity:{specificity:.3f}')
    

if __name__=='__main__':
    prediction()

