from tqdm import tqdm
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader, Dataset
import os
import json
import random
import torch
import pandas as pd
from transformers import BertTokenizer, BertModel, BertConfig
from torch.nn.utils.rnn import pad_sequence
import torch.nn.functional as F
from transformers.modeling_outputs import SequenceClassifierOutput
from sklearn.metrics import f1_score, accuracy_score, recall_score


class CFG:
    model_path = "D:\\Users\\stkj\\PycharmProjects\\pythonProject\\nlp\\self_text_classfication\\model_weight\\roberta_data"
    data_path = os.path.join("..", "data", "ants")
    test_data_path = os.path.join("..", "data", "ants", "test.json")
    learn_rate = 1e-5
    epochs = 5
    max_len = 510
    batch_size = 32
    device = "cuda"
    print_step = 50
    save_path = os.path.join("..", "model_weith", "roberta.bin")
    threshold = 0.6
    data_save_path = os.path.join("..", "data", "ants", "output.csv")


def seed_everything(seed=42):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


class SimcseDataSet(Dataset):
    def __init__(self, sentence_a, sentence_b, labels, tokenizer, mode="train"):
        self.sentence_a = sentence_a
        self.sentence_b = sentence_b
        self.labels = labels
        self.tokenizer = tokenizer
        self.pad_token = tokenizer.pad_token
        self.mode = mode
        self.pad_token_id = tokenizer.pad_token_id
        # For unsupervised training every sentence from both sides of the pair
        # is treated as an independent training example.
        self.sentence = np.concatenate((sentence_a, sentence_b), axis=0)

    def __len__(self):
        if self.mode == "train":
            return len(self.sentence)
        assert len(self.sentence_a) == len(self.sentence_b) == len(self.labels), \
            "sentence_a, sentence_b and labels must have the same length"
        return len(self.sentence_b)

    def __getitem__(self, index):
        if self.mode == "dev":
            sentence_a = self.sentence_a[index]
            sentence_b = self.sentence_b[index]
            input_a = self.tokenizer(sentence_a, truncation=True, max_length=CFG.max_len)
            input_b = self.tokenizer(sentence_b, truncation=True, max_length=CFG.max_len)
            return {
                "input_ids_a": torch.as_tensor(input_a["input_ids"], dtype=torch.long),
                "input_ids_b": torch.as_tensor(input_b["input_ids"], dtype=torch.long),
                "attention_mask_a": torch.as_tensor(input_a["attention_mask"], dtype=torch.long),
                "attention_mask_b": torch.as_tensor(input_b["attention_mask"], dtype=torch.long),
                "label": torch.as_tensor(int(self.labels[index]), dtype=torch.long),
            }
        sentence = self.sentence[index]
        items = self.tokenizer(sentence, truncation=True, max_length=CFG.max_len)
        return {
            "input_ids": torch.as_tensor(items["input_ids"], dtype=torch.long),
            "attention_mask": torch.as_tensor(items["attention_mask"], dtype=torch.long),
        }


class TestDataSet(Dataset):
    def __init__(self, sentence_a, sentence_b, tokenizer):
        self.sentence_a = sentence_a
        self.sentence_b = sentence_b
        self.tokenizer = tokenizer
        self.pad_token = tokenizer.pad_token
        self.pad_token_id = tokenizer.pad_token_id

    def __len__(self):
        assert len(self.sentence_a) == len(self.sentence_b), \
            "sentence_a and sentence_b must have the same length"
        return len(self.sentence_b)
    def __getitem__(self, index):
        sentence_a = self.sentence_a[index]
        sentence_b = self.sentence_b[index]
        input_a = self.tokenizer(sentence_a, truncation=True, max_length=CFG.max_len)
        input_b = self.tokenizer(sentence_b, truncation=True, max_length=CFG.max_len)
        return {
            "input_ids_a": torch.as_tensor(input_a["input_ids"], dtype=torch.long),
            "input_ids_b": torch.as_tensor(input_b["input_ids"], dtype=torch.long),
            "attention_mask_a": torch.as_tensor(input_a["attention_mask"], dtype=torch.long),
            "attention_mask_b": torch.as_tensor(input_b["attention_mask"], dtype=torch.long),
        }

    def collate_test_fn(self, batch):
        input_ids_a = [item["input_ids_a"] for item in batch]
        input_ids_b = [item["input_ids_b"] for item in batch]
        attention_mask_a = [item["attention_mask_a"] for item in batch]
        attention_mask_b = [item["attention_mask_b"] for item in batch]
        input_ids_a = pad_sequence(input_ids_a, batch_first=True, padding_value=0)
        input_ids_b = pad_sequence(input_ids_b, batch_first=True, padding_value=0)
        attention_mask_a = pad_sequence(attention_mask_a, batch_first=True, padding_value=0)
        attention_mask_b = pad_sequence(attention_mask_b, batch_first=True, padding_value=0)
        return {
            "input_ids_a": input_ids_a,
            "input_ids_b": input_ids_b,
            "attention_mask_a": attention_mask_a,
            "attention_mask_b": attention_mask_b,
        }


def collate_train_fn(batch):
    # Unsupervised SimCSE: [x1, x2, x3] -> rows (0,1), (2,3), (4,5); example i
    # is copied to rows 2*i and 2*i+1 so that dropout inside BERT produces two
    # different embeddings of the same sentence, which form the positive pair.
    max_len = max(len(item["input_ids"]) for item in batch)
    input_ids = torch.zeros((len(batch) * 2, max_len), dtype=torch.long)
    attention_mask = torch.zeros((len(batch) * 2, max_len), dtype=torch.long)
    for i in range(len(batch)):
        seq_len = len(batch[i]["input_ids"])
        input_ids[2 * i, :seq_len] = batch[i]["input_ids"]
        input_ids[2 * i + 1, :seq_len] = batch[i]["input_ids"]
        attention_mask[2 * i, :seq_len] = batch[i]["attention_mask"]
        attention_mask[2 * i + 1, :seq_len] = batch[i]["attention_mask"]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
    }


def collate_dev_fn(batch):
    input_ids_a = [item["input_ids_a"] for item in batch]
    input_ids_b = [item["input_ids_b"] for item in batch]
    attention_mask_a = [item["attention_mask_a"] for item in batch]
    attention_mask_b = [item["attention_mask_b"] for item in batch]
    labels = [item["label"] for item in batch]
    input_ids_a = pad_sequence(input_ids_a, batch_first=True, padding_value=0)
    input_ids_b = pad_sequence(input_ids_b, batch_first=True, padding_value=0)
    attention_mask_a = pad_sequence(attention_mask_a, batch_first=True, padding_value=0)
    attention_mask_b = pad_sequence(attention_mask_b, batch_first=True, padding_value=0)
    labels = torch.as_tensor(labels, dtype=torch.long)
    return {
        "input_ids_a": input_ids_a,
        "input_ids_b": input_ids_b,
        "attention_mask_a": attention_mask_a,
        "attention_mask_b": attention_mask_b,
        "labels": labels,
    }


def get_loader():
    tokenizer = BertTokenizer.from_pretrained(CFG.model_path)
    train_sentence_a, train_sentence_b, train_labels = read_data(os.path.join(CFG.data_path, "train.json"))
    dev_sentence_a, dev_sentence_b, dev_labels = read_data(os.path.join(CFG.data_path, "dev.json"))
    train_set = SimcseDataSet(train_sentence_a, train_sentence_b, train_labels, tokenizer, mode="train")
    dev_set = SimcseDataSet(dev_sentence_a, dev_sentence_b, dev_labels, tokenizer, mode="dev")
    train_loader = DataLoader(train_set, batch_size=CFG.batch_size, collate_fn=collate_train_fn, shuffle=True)
    dev_loader = DataLoader(dev_set, batch_size=CFG.batch_size, collate_fn=collate_dev_fn, shuffle=False)
    return train_loader, dev_loader
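
# --- Illustrative sketch, not part of the original training pipeline. ---
# A tiny manual check of the pairing layout that collate_train_fn produces:
# example i ends up in rows 2*i and 2*i+1, which is exactly what cal_loss
# below assumes. The token ids here are made up; call the function by hand
# if you want to verify the behaviour.
def _demo_collate_pairs():
    fake_batch = [
        {"input_ids": torch.tensor([101, 7, 8, 102]), "attention_mask": torch.tensor([1, 1, 1, 1])},
        {"input_ids": torch.tensor([101, 9, 102]), "attention_mask": torch.tensor([1, 1, 1])},
    ]
    out = collate_train_fn(fake_batch)
    # 2 examples -> 4 rows, padded to the longest sequence (length 4).
    assert out["input_ids"].shape == (4, 4)
    # Rows 2*i and 2*i+1 hold identical copies of example i.
    assert torch.equal(out["input_ids"][0], out["input_ids"][1])
    assert torch.equal(out["input_ids"][2], out["input_ids"][3])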
class MeanPooling(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, embed, attention_mask):
        # Average the token embeddings over the sequence dimension, ignoring padding.
        attention_mask_copy = attention_mask.unsqueeze(-1).expand(embed.size()).float()
        embed = embed * attention_mask_copy
        sum_seq_emb = torch.sum(embed, dim=-2)
        sum_seq_attention = torch.sum(attention_mask_copy, dim=-2)
        sum_seq_attention = torch.clamp(sum_seq_attention, min=1e-8)
        return sum_seq_emb / sum_seq_attention


def cal_loss(sentence_embed, tao=0.05):
    # Rows 2*i and 2*i+1 are two dropout views of the same sentence, so the
    # target of an even row is the row after it and the target of an odd row
    # is the row before it: i -> i + 1 - (i % 2) * 2.
    i = torch.arange(0, len(sentence_embed), device=CFG.device)
    y_true = i + 1 - (i % 2) * 2
    # Pairwise cosine similarities; mask the diagonal so a sentence is never
    # treated as its own positive, then scale by the temperature tao.
    out = F.cosine_similarity(sentence_embed.unsqueeze(1), sentence_embed.unsqueeze(0), dim=-1)
    out = out - torch.eye(len(sentence_embed), device=CFG.device) * 1e8
    out = out / tao
    return F.cross_entropy(out, y_true)


class Simcse(nn.Module):
    def __init__(self):
        super().__init__()
        config = BertConfig.from_pretrained(CFG.model_path)
        config.output_hidden_states = True
        self.bert = BertModel.from_pretrained(CFG.model_path, config=config)
        self.pooling = MeanPooling()

    def forward(self, input_ids, attention_mask, mode="dev"):
        last_hidden_state = self.bert(input_ids, attention_mask).hidden_states[-1]
        sentence_embed = self.pooling(last_hidden_state, attention_mask)
        if mode == "dev":
            # Return the pooled sentence embedding so the caller can compare pairs.
            return SequenceClassifierOutput(logits=sentence_embed, loss=None)
        return SequenceClassifierOutput(logits=None, loss=cal_loss(sentence_embed))


def get_metric(pres, labels):
    return f1_score(labels, pres), accuracy_score(labels, pres), recall_score(labels, pres)


def train_fn(model, train_loader, optimizer, epoch):
    model.train()
    for index, item in tqdm(enumerate(train_loader), total=len(train_loader), desc="epoch progress:"):
        input_ids = item["input_ids"].to(CFG.device)
        attention_mask = item["attention_mask"].to(CFG.device)
        out = model(input_ids, attention_mask, mode="train")
        loss = out.loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        if (index + 1) % CFG.print_step == 0 or index == len(train_loader) - 1:
            print(f"epoch:{epoch} step:{index} loss:{loss.item():.6f}")


def dev_fn(model, dev_loader, epoch):
    model.eval()
    prediction, labels = [], []
    with torch.no_grad():
        for index, item in tqdm(enumerate(dev_loader), total=len(dev_loader), desc="validation progress:"):
            input_ids_a = item["input_ids_a"].to(CFG.device)
            input_ids_b = item["input_ids_b"].to(CFG.device)
            attention_mask_a = item["attention_mask_a"].to(CFG.device)
            attention_mask_b = item["attention_mask_b"].to(CFG.device)
            label = item["labels"]
            emb_a = model(input_ids_a, attention_mask_a, mode="dev").logits
            emb_b = model(input_ids_b, attention_mask_b, mode="dev").logits
            pre = F.cosine_similarity(emb_a, emb_b, dim=-1)
            pre = (pre > CFG.threshold).long().detach().cpu().numpy()
            label = label.detach().cpu().numpy()
            prediction.extend(pre)
            labels.extend(label)
    f1, acc, rec = get_metric(prediction, labels)
    print(f"epoch:{epoch} f1:{f1:.4f} acc:{acc:.6f} rec:{rec:.4f}")
    return f1, acc, rec


def read_data(path, num=None):
    datas = []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            datas.append(json.loads(line))
    datas = pd.DataFrame(datas)
    sentence_a, sentence_b, labels = datas["sentence1"].values, datas["sentence2"].values, datas["label"].values
    assert len(sentence_a) == len(sentence_b) == len(labels)
    return (sentence_a[:num], sentence_b[:num], labels[:num]) if num else (sentence_a, sentence_b, labels)
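
# --- Illustrative sketch, not part of the original training pipeline. ---
# A small check of the target construction used by cal_loss: for rows
# [0, 1, 2, 3, 4, 5] the positive of each row is its duplicate, i.e. the
# targets are [1, 0, 3, 2, 5, 4]. The random 768-dim "embeddings" are only
# placeholders, and running the loss itself needs a CUDA device because
# cal_loss builds its tensors on CFG.device.
def _demo_cal_loss_targets():
    i = torch.arange(0, 6)
    assert (i + 1 - (i % 2) * 2).tolist() == [1, 0, 3, 2, 5, 4]
    if torch.cuda.is_available():
        fake_embed = torch.randn(6, 768, device=CFG.device)
        print(f"toy cal_loss value: {cal_loss(fake_embed).item():.4f}")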
def train_loop():
    train_loader, dev_loader = get_loader()
    model = Simcse().to(CFG.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=CFG.learn_rate)
    best_acc = 0
    for epoch in range(CFG.epochs):
        print(f"overall training progress:{epoch + 1}/{CFG.epochs}")
        train_fn(model, train_loader, optimizer, epoch)
        f1, acc, rec = dev_fn(model, dev_loader, epoch)
        if acc > best_acc:
            best_acc = acc
            torch.save(model.state_dict(), CFG.save_path)
    print("training finished!!!")


def read_test_data(test_path):
    datas = []
    with open(test_path, "r", encoding="utf-8") as f:
        for line in f:
            datas.append(json.loads(line))
    datas = pd.DataFrame(datas)
    sentence_a, sentence_b = datas["sentence1"].values, datas["sentence2"].values
    assert len(sentence_a) == len(sentence_b)
    return datas, sentence_a, sentence_b


def infer_fn():
    tokenizer = BertTokenizer.from_pretrained(CFG.model_path)
    pd_data, test_sentence_a, test_sentence_b = read_test_data(CFG.test_data_path)
    test_set = TestDataSet(test_sentence_a, test_sentence_b, tokenizer)
    # shuffle must stay False so the predictions line up with the rows of pd_data.
    test_loader = DataLoader(test_set, batch_size=CFG.batch_size, collate_fn=test_set.collate_test_fn, shuffle=False)
    model = Simcse().to(CFG.device)
    model.load_state_dict(torch.load(CFG.save_path, map_location=CFG.device))
    model.eval()
    prediction = []
    with torch.no_grad():
        for index, item in tqdm(enumerate(test_loader), total=len(test_loader), desc="inference progress:"):
            input_ids_a = item["input_ids_a"].to(CFG.device)
            input_ids_b = item["input_ids_b"].to(CFG.device)
            attention_mask_a = item["attention_mask_a"].to(CFG.device)
            attention_mask_b = item["attention_mask_b"].to(CFG.device)
            emb_a = model(input_ids_a, attention_mask_a, mode="dev").logits
            emb_b = model(input_ids_b, attention_mask_b, mode="dev").logits
            pre = F.cosine_similarity(emb_a, emb_b, dim=-1)
            pre = (pre > CFG.threshold).long().detach().cpu().numpy()
            prediction.extend(pre)
    pd_data["label"] = prediction
    pd_data.to_csv(CFG.data_save_path, index=False)


if __name__ == '__main__':
    seed_everything()
    train_loop()
    infer_fn()