[1] Wu D . Pool-Based Sequential Active Learning for Regression[J]. IEEE Transactions on Neural Networks and Learning Systems, 2018, PP(99):1-12.
In practice, some clusters may contain multiple labeled samples, so usually there is more than one cluster that does not contain any labeled sample. We then identify the largest cluster that does not contain any labeled sample as the current most representative cluster, and select the sample closest to its centroid for labeling.
"""
Author: Daniel
Date: 2021-04-16
"""
import numpy as np
import pandas as pd
import xlwt
from pathlib import Path
from copy import deepcopy
from collections import OrderedDict
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score,mean_absolute_error,f1_score,recall_score
from sklearn.cluster import KMeans
from time import time
from sklearn import datasets
import matplotlib.pyplot as plt
class ALRD():
    """Pool-based active learning by representative cluster sampling.

    Follows the RD (representativeness) query strategy of Wu (2018),
    "Pool-Based Sequential Active Learning for Regression", IEEE TNNLS:
    each round, cluster the pool into len(labeled)+1 groups with K-means,
    take the largest cluster that contains no labeled sample as the most
    representative unexplored region, and query the pool point nearest
    that cluster's centroid.
    """

    def __init__(self, X_pool, y_pool, labeled, budget, X_test, y_test):
        """Store pool/test data and initialize bookkeeping.

        :param X_pool: (nSample, nAtt) feature matrix of the sampling pool.
        :param y_pool: labels for X_pool (acts as the labeling oracle).
        :param labeled: iterable of pool indices that start out labeled.
        :param budget: number of queries select() may spend.
        :param X_test: held-out features scored by evaluation().
        :param y_test: held-out labels scored by evaluation().
        """
        self.X_pool = X_pool
        self.y_pool = y_pool
        self.X_test = X_test
        self.y_test = y_test
        self.nSample, self.nAtt = self.X_pool.shape
        self.labeled = list(deepcopy(labeled))
        self.unlabeled = self.init_unlabeled()
        self.labels = np.sort(np.unique(y_pool))
        self.nClass = len(self.labels)
        self.budgetLeft = deepcopy(budget)
        self.LATmodel = LogisticRegression()
        ## ----------------
        ## Evaluation criteria: per-round metric lists and their running
        ## sums (area-under-the-learning-curve accumulators).
        self.AccList = []
        self.MAEList = []
        self.RecallList = []
        self.FscoreList = []
        self.ALC_ACC = 0
        self.ALC_MAE = 0
        self.ALC_F1 = 0
        self.ALC_Recall = 0

    def init_unlabeled(self):
        """Return the pool indices not initially labeled, in ascending order."""
        # Set membership makes this O(n) rather than the O(n * len(labeled))
        # cost of repeated list.remove(); the resulting list is identical.
        labeled_set = set(self.labeled)
        return [i for i in range(self.nSample) if i not in labeled_set]

    def evaluation(self):
        """Refit on the current labeled set and record test-set metrics."""
        self.LATmodel.fit(X=self.X_pool[self.labeled], y=self.y_pool[self.labeled])
        y_pred = self.LATmodel.predict(X=self.X_test)
        Acc = accuracy_score(y_pred=y_pred, y_true=self.y_test)
        MAE = mean_absolute_error(y_pred=y_pred, y_true=self.y_test)
        F1 = f1_score(y_pred=y_pred, y_true=self.y_test, average='macro')
        Recall = recall_score(y_pred=y_pred, y_true=self.y_test, average='macro')
        self.AccList.append(Acc)
        self.MAEList.append(MAE)
        self.FscoreList.append(F1)
        self.RecallList.append(Recall)
        self.ALC_ACC += Acc
        self.ALC_MAE += MAE
        self.ALC_F1 += F1
        self.ALC_Recall += Recall

    def select(self):
        """Spend the whole budget, querying one representative sample per round.

        NOTE(review): evaluation() is never called here; callers that want
        learning curves must interleave it with the querying themselves.
        """
        while self.budgetLeft > 0:
            # One more cluster than labeled samples guarantees, by the
            # pigeonhole principle, at least one cluster with no labeled
            # sample — so empty_ids below is never empty.
            nCluster = len(self.labeled) + 1
            y_pred = KMeans(n_clusters=nCluster).fit_predict(self.X_pool)
            cluster_labels, count = np.unique(y_pred, return_counts=True)
            # Map each cluster label to the labeled indices it contains.
            # (np.unique returns sorted labels 0..nCluster-1, so label i
            # lines up with count[i].)
            cluster_dict = OrderedDict((i, []) for i in cluster_labels)
            for idx in self.labeled:
                cluster_dict[y_pred[idx]].append(idx)
            # Sizes of all clusters holding no labeled sample.
            empty_ids = OrderedDict()
            for i in cluster_labels:
                if len(cluster_dict[i]) == 0:
                    empty_ids[i] = count[i]
            # The largest unlabeled cluster is the most representative region.
            tar_label = max(empty_ids, key=empty_ids.get)
            # Query the member nearest the cluster centroid. Vectorized:
            # the original loop computed each norm twice; argmin keeps the
            # same tie-breaking (first minimal index wins).
            tar_cluster_ids = np.flatnonzero(y_pred == tar_label)
            members = self.X_pool[tar_cluster_ids]
            centroid = members.mean(axis=0)
            dists = np.linalg.norm(members - centroid, axis=1)
            tar_idx = int(tar_cluster_ids[np.argmin(dists)])
            self.labeled.append(tar_idx)
            self.unlabeled.remove(tar_idx)
            self.budgetLeft -= 1
if __name__ == '__main__':
    # Demo: build a 4-class toy dataset — the same isotropic blob (fixed
    # random_state) shifted diagonally by increasing offsets, 200 points
    # per class — then run ALRD and plot the queried samples in red.
    offsets = [0, 6, 12, 19]
    blobs = []
    for off in offsets:
        X_blob, _ = datasets.make_blobs(n_samples=200, n_features=2,
                                        center_box=(0, 0), cluster_std=3,
                                        random_state=45)
        blobs.append(X_blob + off)
    # Stack once instead of growing the array inside the loop.
    X = np.vstack(blobs)
    # Class label = which 200-sample block a point came from (float dtype,
    # matching the original np.ones-based construction).
    y = np.repeat(np.arange(4.0), 200)
    labeled = [6, 206, 406, 606]  # one seed sample per class
    budget = 40
    # Demo evaluates on the training pool itself (X_test=X).
    model = ALRD(X_pool=X, y_pool=y, labeled=labeled, budget=budget,
                 X_test=X, y_test=y)
    model.select()
    plt.scatter(X[:, 0], X[:, 1], c=y)
    plt.scatter(X[model.labeled][:, 0], X[model.labeled][:, 1],
                c=y[model.labeled], edgecolors='r', linewidths=1)
    plt.show()