流程
- 就是先在有label的train data上进行训练,得到一个模型。
- 然后用第一步中训练得到的模型进行inference test data对应的标签类别。
- 然后对test data进行筛选,选出超过概率阈值的样本与原来的train data进行拼接组合成新的训练数据,然后再重新训练出最终的模型。
代码(只需要对原代码中inference部分进行修改就可以了,其他部分不动)
class TestDataset(Dataset):
    """Inference dataset: joins title/assignee/abstract with the tokenizer's
    SEP token, tokenizes to a fixed length of 400, and yields tensors.

    Expects `df` to have string columns 'title', 'assignee', 'abstract'.
    """

    def __init__(self, df, tokenizer):
        # Pull the three text columns out of the dataframe once, up front.
        self.title = df['title'].values
        self.assignee = df['assignee'].values
        self.abstract = df['abstract'].values
        self.tokenizer = tokenizer
        self.sep_token = tokenizer.sep_token

    def __len__(self):
        return len(self.title)

    def __getitem__(self, item):
        # title [SEP] assignee [SEP] abstract — same layout as training.
        parts = (self.title[item], self.assignee[item], self.abstract[item])
        text = self.sep_token.join(parts)
        encoded = self.tokenizer(text, truncation=True, max_length=400, padding='max_length')
        input_ids = torch.as_tensor(encoded['input_ids'], dtype=torch.long)
        attention_mask = torch.as_tensor(encoded['attention_mask'], dtype=torch.long)
        return input_ids, attention_mask
def infer(test_loader, model, device):
    """Run the model over `test_loader` and return softmax probabilities.

    Parameters
    ----------
    test_loader : DataLoader yielding (input_ids, attention_mask) batches.
    model : classifier whose output object exposes `.logits`.
    device : torch device (or device string) to run inference on.

    Returns
    -------
    np.ndarray of shape (num_samples, num_classes) with per-class
    probabilities; empty array if the loader yields no batches.
    """
    model.to(device)
    model.eval()
    total_probs = []
    # One no_grad context around the whole loop: no autograd bookkeeping
    # for any batch (previously re-entered per batch; unused `step` dropped).
    with torch.no_grad():
        for input_ids, attention_mask in tqdm(test_loader):
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            output = model(input_ids=input_ids, attention_mask=attention_mask)
            probs = F.softmax(output.logits, dim=-1)
            total_probs.append(probs.cpu().numpy())
    if not total_probs:
        # np.concatenate([]) raises; return an empty array instead.
        return np.empty((0,), dtype=np.float32)
    return np.concatenate(total_probs)
# Ensemble the 5 fold checkpoints over the test set, then keep only
# very confident predictions as pseudo-labelled training data.
res = []
for fold in range(5):
    saved_path = CFG.OUTPUT_DIR + "{}_best{}.pth".format(CFG.model_path.replace('/', '_'), fold)
    # map_location keeps loading working on CPU-only hosts regardless of
    # which device the checkpoint was saved from.
    model.load_state_dict(torch.load(saved_path, map_location=CFG.device)['model'])
    test_dataset = TestDataset(test, tokenizer)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=CFG.batch_size * 2,
                                 shuffle=False,
                                 num_workers=CFG.num_workers, pin_memory=True, drop_last=False)
    total_logits = infer(test_dataloader, model, CFG.device)
    res.append(total_logits)
# BUG FIX: `res` is a list of (num_samples, num_classes) arrays, one per fold.
# Fold-averaging must be over axis=0; the previous axis=1 averaged over the
# sample axis and produced a (5, num_classes) array misaligned with test rows.
total_logits = np.mean(res, axis=0)
pl_prob = np.max(total_logits, axis=-1)   # confidence of the predicted class
pl = np.argmax(total_logits, axis=-1)     # pseudo label
test['label'] = pl
test['prob'] = pl_prob
# Only predictions above the 0.99 confidence threshold become pseudo labels.
test = test[test['prob'] > 0.99]
test.to_csv('add_data.csv', index=False)