Computing Model Evaluation Metrics (Python)

# Evaluation metrics for classification algorithms
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, log_loss, roc_curve
import matplotlib.pyplot as plt

# Compute the confusion matrix
def calculate_confusion_matrix(y_true, y_pred):
    return confusion_matrix(y_true, y_pred)

# Compute accuracy
def calculate_accuracy(y_true, y_pred):
    return accuracy_score(y_true, y_pred)

# Compute precision
def calculate_precision(y_true, y_pred):
    return precision_score(y_true, y_pred)

# Compute recall
def calculate_recall(y_true, y_pred):
    return recall_score(y_true, y_pred)

# Compute the F1 score
def calculate_f1_score(y_true, y_pred):
    return f1_score(y_true, y_pred)

# Compute log loss (y_pred_prob: predicted probabilities)
def calculate_log_loss(y_true, y_pred_prob):
    return log_loss(y_true, y_pred_prob)
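
A minimal usage sketch for the wrappers above, using made-up binary labels and positive-class probabilities (the arrays below are illustrative, not from any real model):

# Illustrative binary labels, hard predictions, and positive-class probabilities
y_true_cls = [0, 1, 1, 0, 1, 0, 1, 1]
y_pred_cls = [0, 1, 0, 0, 1, 1, 1, 1]
y_prob_cls = [0.2, 0.9, 0.4, 0.1, 0.8, 0.7, 0.6, 0.9]

print('Confusion matrix:\n', calculate_confusion_matrix(y_true_cls, y_pred_cls))
print('Accuracy:', calculate_accuracy(y_true_cls, y_pred_cls))
print('Precision:', calculate_precision(y_true_cls, y_pred_cls))
print('Recall:', calculate_recall(y_true_cls, y_pred_cls))
print('F1:', calculate_f1_score(y_true_cls, y_pred_cls))
print('Log loss:', calculate_log_loss(y_true_cls, y_prob_cls))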

# Plot the ROC curve
def plot_roc_curve(y_true, y_scores):
    # Compute the false positive and true positive rates with roc_curve
    fpr, tpr, _ = roc_curve(y_true, y_scores)

    # Compute the AUC (area under the curve)
    roc_auc = roc_auc_score(y_true, y_scores)
    # Plot the ROC curve
    plt.plot(fpr, tpr, label='ROC curve (AUC = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')  # diagonal reference line (random classifier)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC)')
    plt.legend(loc='lower right')
    plt.show()

# Compute the AUC value
def calculate_auc(y_true, y_pred_prob):
    return roc_auc_score(y_true, y_pred_prob)
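
A quick sketch of how the ROC helpers might be called, again with illustrative labels and scores (higher score means more likely positive):

# Illustrative labels and predicted scores for the positive class
y_true_roc = [0, 1, 1, 0, 1, 0, 1, 1]
y_scores = [0.1, 0.8, 0.35, 0.2, 0.75, 0.6, 0.9, 0.65]

print('AUC:', calculate_auc(y_true_roc, y_scores))
plot_roc_curve(y_true_roc, y_scores)  # opens a matplotlib window with the ROC curve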

# Macro- and micro-averaged metrics (multiclass classification)
from sklearn.metrics import precision_score, recall_score, f1_score

def compute_average_scores(y_true, y_pred):
    # Macro-averaging: compute each metric per class, then take the unweighted mean
    precision_macro = precision_score(y_true, y_pred, average='macro')  # macro-averaged precision
    recall_macro = recall_score(y_true, y_pred, average='macro')  # macro-averaged recall
    f1_macro = f1_score(y_true, y_pred, average='macro')  # macro-averaged F1 score

    # Micro-averaging: pool all samples' counts across classes, then compute each metric once
    precision_micro = precision_score(y_true, y_pred, average='micro')  # micro-averaged precision
    recall_micro = recall_score(y_true, y_pred, average='micro')  # micro-averaged recall
    f1_micro = f1_score(y_true, y_pred, average='micro')  # micro-averaged F1 score
    # Collect the results in a dictionary
    result = {
        "Macro-average Precision": precision_macro,
        "Macro-average Recall": recall_macro,
        "Macro-average F1-score": f1_macro,
        "Micro-average Precision": precision_micro,
        "Micro-average Recall": recall_micro,
        "Micro-average F1-score": f1_micro
    }
    return result
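
A usage sketch for the macro/micro averages with made-up three-class labels (illustrative values only):

# Illustrative multiclass labels and predictions (classes 0, 1, 2)
y_true_mc = [0, 1, 2, 2, 1, 0, 2, 1]
y_pred_mc = [0, 2, 2, 2, 1, 0, 1, 1]

for name, value in compute_average_scores(y_true_mc, y_pred_mc).items():
    print(f'{name}: {value:.3f}')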


# Evaluation metrics for regression algorithms
import numpy as np

def mean_absolute_error(y_true, y_pred):  # Mean absolute error (MAE)
    return np.mean(np.abs(y_true - y_pred))

def mean_bias_error(y_true, y_pred):  # Mean bias error (MBE)
    return np.mean(y_pred - y_true)

def mean_squared_error(y_true, y_pred):  # Mean squared error (MSE)
    return np.mean((y_true - y_pred) ** 2)

def root_mean_squared_error(y_true, y_pred):  # Root mean squared error (RMSE)
    return np.sqrt(mean_squared_error(y_true, y_pred))

def mean_absolute_percentage_error(y_true, y_pred):  # Mean absolute percentage error (MAPE)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

def relative_absolute_error(y_true, y_pred):  # Relative absolute error (RAE): total absolute error relative to a mean-only predictor
    return np.sum(np.abs(y_true - y_pred)) / np.sum(np.abs(y_true - np.mean(y_true)))

def residual_standard_error(y_true, y_pred, p):  # Residual standard error (RSE); p = number of predictors
    rss = np.sum((y_true - y_pred) ** 2)
    rse = np.sqrt(rss / (len(y_true) - p - 1))
    return rse

def r_squared(y_true, y_pred):  # Coefficient of determination (R^2)
    ssr = np.sum((y_true - y_pred) ** 2)
    sst = np.sum((y_true - np.mean(y_true)) ** 2)
    r2 = 1 - (ssr / sst)
    return r2

# Example: evaluate a set of regression predictions (the arrays and p below are illustrative)
y_true = np.array([3.0, -0.5, 2.0, 7.0, 4.2])
y_pred = np.array([2.5, 0.0, 2.1, 7.8, 4.0])
p = 1  # number of predictors used by the model (illustrative)

print('MAE:', mean_absolute_error(y_true, y_pred))  # mean absolute error
print('MBE:', mean_bias_error(y_true, y_pred))  # mean bias error
print('MSE:', mean_squared_error(y_true, y_pred))  # mean squared error
print('RMSE:', root_mean_squared_error(y_true, y_pred))  # root mean squared error
print('MAPE:', mean_absolute_percentage_error(y_true, y_pred))  # mean absolute percentage error
print('RAE:', relative_absolute_error(y_true, y_pred))  # relative absolute error
print('RSE:', residual_standard_error(y_true, y_pred, p))  # residual standard error
print('R^2:', r_squared(y_true, y_pred))  # coefficient of determination