import numpy as np
def compute_evaluate(W_p, W_true):
    """Score a predicted binary adjacency matrix against the ground truth.

    Parameters
    ----------
    W_p : np.ndarray
        Predicted square 0/1 adjacency matrix.
    W_true : np.ndarray
        Ground-truth 0/1 adjacency matrix, same shape as ``W_p``.

    Returns
    -------
    dict
        Keys ``'accuracy'``, ``'precision'``, ``'recall'``, ``'F1'``,
        ``'shd'``; every value rounded to 4 decimal places. Any ratio
        with a zero denominator is reported as 0.0 instead of NaN/inf.
    """
    assert W_p.shape == W_true.shape and W_p.shape[0] == W_p.shape[1]
    total = W_p.shape[0] * W_p.shape[0]
    # For 0/1 matrices, an entry of the sum is 2 only where both predict an
    # edge (true positive) and 0 only where both agree there is none (true
    # negative).
    combined = W_p + W_true
    TP = int((combined == 2).sum())
    TN = int((combined == 0).sum())
    TP_FP = int(W_p.sum())    # all predicted edges = TP + FP
    TP_FN = int(W_true.sum())  # all true edges = TP + FN
    accuracy = (TP + TN) / total
    # Guard zero denominators (empty prediction / empty ground truth /
    # both ratios zero) instead of emitting NaN or inf.
    precision = TP / TP_FP if TP_FP else 0.0
    recall = TP / TP_FN if TP_FN else 0.0
    denom = precision + recall
    F1 = 2 * precision * recall / denom if denom else 0.0
    # NOTE(review): this "shd" counts every disagreeing entry (FP + FN); the
    # standard SHD counts a reversed edge once — confirm which is intended.
    shd = total - TP - TN
    mt = {'accuracy': accuracy, 'precision': precision,
          'recall': recall, 'F1': F1, 'shd': shd}
    return {key: round(val, 4) for key, val in mt.items()}
# Collect per-run metrics, keeping only runs whose F1 is strictly positive,
# then report mean +/- std for each metric.
accuracy_all = []
precision_all = []
recall_all = []
f1_all = []
shd_all = []

# Table of (destination list, metric key) pairs, in reporting order.
_metric_sinks = (
    (accuracy_all, 'accuracy'),
    (precision_all, 'precision'),
    (recall_all, 'recall'),
    (f1_all, 'F1'),
    (shd_all, 'shd'),
)

# NOTE(review): `name` is never used and every iteration scores the same
# matrix pair — presumably the matrices should be loaded per `name`; confirm.
for name in names:
    mt = compute_evaluate(pre_causal_matrix, true_causal_matrix)
    if mt['F1'] > 0:
        for sink, key in _metric_sinks:
            sink.append(mt[key])

mean_accuracy, std_accuracy = np.mean(accuracy_all), np.std(accuracy_all)
mean_precision, std_precision = np.mean(precision_all), np.std(precision_all)
mean_recall, std_recall = np.mean(recall_all), np.std(recall_all)
mean_F1, std_F1 = np.mean(f1_all), np.std(f1_all)
mean_shd, std_shd = np.mean(shd_all), np.std(shd_all)

print("{} mean+std--accuracy: {:.2f} + {:.2f}".format(n_nodes, mean_accuracy, std_accuracy))
print("{} mean+std--precision: {:.2f} + {:.2f}".format(n_nodes, mean_precision, std_precision))
print("{} mean+std--recall: {:.2f} + {:.2f}".format(n_nodes, mean_recall, std_recall))
print("{} mean+std--F1: {:.2f} + {:.2f}".format(n_nodes, mean_F1, std_F1))
print("{} mean+std--shd: {:.2f} + {:.2f}".format(n_nodes, mean_shd, std_shd))
# Figure from "A survey of Bayesian Network structure learning"