Hung-yi Lee Machine Learning: Homework 4 (hw4)

The full code is below. It loads the labeled, unlabeled and test data, trains a word2vec embedding, trains an LSTM sentiment classifier, and finishes with a self-training pass on the unlabeled data:

import os

import pandas as pd
import torch
from gensim.models import word2vec
from torch import nn
from torch.utils import data

def load_training_data(path='training_label.txt'):
    # 'training_label.txt' comes with labels; 'training_nolabel.txt' does not
    if 'training_label' in path:
        with open(path, 'r', encoding='UTF-8') as f:
            lines = f.readlines()
            # each line looks like "<label> <separator> <word> <word> ...";
            # strip the newline and split into tokens
            lines = [line.strip('\n').split(' ') for line in lines]
        x = [line[2:] for line in lines]  # the sentence tokens after the separator
        y = [line[0] for line in lines]   # the label is the first token
        return x, y
    else:
        with open(path, 'r', encoding='UTF-8') as f:
            lines = f.readlines()
            x = [line.strip('\n').split(' ') for line in lines]
        return x
def load_testing_data(path='testing_data'):
    # read in the data needed at testing time
    with open(path, 'r', encoding='UTF-8') as f:
        lines = f.readlines()
        # skip the header row; each remaining line is "id,text", and the text may
        # itself contain commas, so re-join everything after the first comma
        X = ["".join(line.strip('\n').split(",")[1:]).strip() for line in lines[1:]]
        X = [sen.split(' ') for sen in X]
    return X
def load_no_label(path_x, path_y):
    # self-training: load the sentences from training_nolabel.txt (path_x) together with
    # the pseudo-labels previously written to predict_nolable.txt (path_y),
    # keeping only the confident predictions
    y = []
    x = []
    Index = []
    with open(path_y, 'r', encoding='UTF-8') as f:
        lines = f.readlines()
        for line in lines[1:]:  # skip the "id,label" header
            sen = line.split(",")
            # testing_nolable() writes "1.0" for confident positives, "0.0" for
            # confident negatives, and the raw probability otherwise
            if (sen[1][0] == '0' and sen[1][2] == '0'):
                y.append('0')
                Index.append(sen[0])
            elif (sen[1][0] == '1'):
                y.append('1')
                Index.append(sen[0])
    with open(path_x, 'r', encoding='UTF-8') as f:
        lines = f.readlines()
        lines = [line.strip('\n').split(' ') for line in lines]
        for index in Index:
            x.append(lines[int(index)])  # the id column is the row index into path_x
    return x, y
def evaluation(outputs, labels):
    # outputs => predicted probabilities (float)
    # labels  => ground-truth labels
    outputs[outputs >= 0.5] = 1  # >= 0.5 counts as positive
    outputs[outputs < 0.5] = 0   # < 0.5 counts as negative
    correct = torch.sum(torch.eq(outputs, labels)).item()  # number of correct predictions
    return correct
def train_word2vec(x):
    # train a word2vec model that maps each word to a 256-dimensional vector
    # (gensim 3.x API: in gensim 4.x, `size` and `iter` are renamed to `vector_size` and `epochs`)
    model = word2vec.Word2Vec(x, size=256, window=15, min_count=5, workers=12, iter=10, sg=1)
    return model

class Preprocess():  # data preprocessing: build the embedding matrix and convert sentences to index sequences
    def __init__(self, sentences, sen_len, w2v_path="./w2v.model"):
        self.w2v_path = w2v_path
        self.sentences = sentences
        self.sen_len = sen_len
        self.idx2word = []
        self.word2idx = {}
        self.embedding_matrix = []
    def get_w2v_model(self):
        # load the word2vec model trained earlier
        self.embedding = word2vec.Word2Vec.load(self.w2v_path)
        self.embedding_dim = self.embedding.vector_size  # vector length stored in the word2vec model, used when defining the network
        print(f"embedding_dim is {self.embedding_dim}")
    def add_embedding(self, word):
        # add `word` to the embedding and give it a randomly initialised representation vector;
        # `word` will only ever be "<PAD>" or "<UNK>"
        vector = torch.empty(1, self.embedding_dim)
        torch.nn.init.uniform_(vector)
        self.word2idx[word] = len(self.word2idx)
        self.idx2word.append(word)
        self.embedding_matrix = torch.cat([self.embedding_matrix, vector], 0)
    def make_embedding(self, load=True):
        print("Get embedding ...")
        # fetch the trained word2vec word embedding
        if load:
            print("loading word to vec model ...")
            self.get_w2v_model()
        else:
            raise NotImplementedError
        # build a word2idx dictionary, an idx2word list and a word-to-vector matrix
        # (gensim 3.x API: `wv.vocab` becomes `wv.key_to_index` in gensim 4.x)
        for i, word in enumerate(self.embedding.wv.vocab):
            print('get words #{}'.format(i+1), end='\r')
            # e.g. self.word2idx['he'] = 1
            # e.g. self.idx2word[1] = 'he'
            # e.g. self.embedding_matrix[1] = the vector of 'he'
            self.word2idx[word] = len(self.word2idx)
            self.idx2word.append(word)
            self.embedding_matrix.append(self.embedding.wv[word])
        print(' ')
        self.embedding_matrix = torch.tensor(self.embedding_matrix)
        # append "<PAD>" and "<UNK>" to the embedding
        self.add_embedding("<PAD>")
        self.add_embedding("<UNK>")
        print("total words: {}".format(len(self.embedding_matrix)))
        return self.embedding_matrix
    def pad_sequence(self, sentence):
        # pad or truncate every sentence to the same length
        if len(sentence) > self.sen_len:
            sentence = sentence[:self.sen_len]
        else:
            pad_len = self.sen_len - len(sentence)
            for _ in range(pad_len):
                sentence.append(self.word2idx["<PAD>"])
        assert len(sentence) == self.sen_len
        return sentence
    def sentence_word2idx(self):
        # convert the words in each sentence to their corresponding indices
        sentence_list = []
        for i, sen in enumerate(self.sentences):
            print('sentence count #{}'.format(i+1), end='\r')
            sentence_idx = []
            for word in sen:
                if (word in self.word2idx.keys()):
                    sentence_idx.append(self.word2idx[word])
                else:
                    sentence_idx.append(self.word2idx["<UNK>"])
            # pad or truncate every sentence to the same length
            sentence_idx = self.pad_sequence(sentence_idx)
            sentence_list.append(sentence_idx)
        return torch.LongTensor(sentence_list)
    def labels_to_tensor(self, y):
        # convert labels to a tensor
        y = [int(label) for label in y]
        return torch.LongTensor(y)
       
class TwitterDataset(data.Dataset):
    """
    Expected data shape like:(data_num, data_len)
    Data can be a list of numpy array or a list of lists
    input data shape : (data_num, seq_len, feature_dim)
    
    __len__ will return the number of data
    """
    def __init__(self, X, y):
        self.data = X
        self.label = y
    def __getitem__(self, idx):
        if self.label is None: return self.data[idx]
        return self.data[idx], self.label[idx]
    def __len__(self):
        return len(self.data)

def load_train_val(train_x, y, sen_len, path_prefix, w2v_path, if_add_nolable=False):
    # two cases: when if_add_nolable is True, the confident pseudo-labels produced for
    # the no-label data are appended to the labeled data before training
    if (if_add_nolable):
        train_x_no_label, train_y_no_label = load_no_label(os.path.join(path_prefix,'training_nolabel.txt'),
                                                          os.path.join(path_prefix,'predict_nolable.txt'))
        preprocess = Preprocess(train_x+train_x_no_label, sen_len, w2v_path=w2v_path)
        embedding = preprocess.make_embedding(load=True)
        train_x = preprocess.sentence_word2idx()
        z = y+train_y_no_label
        z = preprocess.labels_to_tensor(z)
        # the labeled data occupies the first 200,000 rows: 180,000 for training and the
        # rows from 180,000 to 200,000 for validation; everything after index 200,000 is
        # the appended pseudo-labeled data
        X_train = torch.cat((train_x[:180000], train_x[200000:]), dim=0)
        X_val = train_x[180000:200000]
        y_train = torch.cat((z[:180000], z[200000:]), dim=0)
        y_val = z[180000:200000]
    else:
        preprocess = Preprocess(train_x, sen_len, w2v_path=w2v_path)
        embedding = preprocess.make_embedding(load=True)
        train_x = preprocess.sentence_word2idx()
        y = preprocess.labels_to_tensor(y)
        X_train = train_x[:180000]
        X_val = train_x[180000:]
        y_train = y[:180000]
        y_val = y[180000:]
    train_dataset = TwitterDataset(X=X_train, y=y_train)
    val_dataset = TwitterDataset(X=X_val, y=y_val)
    # note: batch_size here is the module-level variable defined in the main script below
    train_loader = torch.utils.data.DataLoader(dataset = train_dataset,
                                            batch_size = batch_size,
                                            shuffle = True,
                                            num_workers = 0)
    val_loader = torch.utils.data.DataLoader(dataset = val_dataset,
                                            batch_size = batch_size,
                                            shuffle = False,
                                            num_workers = 0)
    return train_loader, val_loader, embedding

class LSTM_Net(nn.Module):
    def __init__(self, embedding, embedding_dim, hidden_dim, num_layers, dropout=0.5, fix_embedding=True):
        super(LSTM_Net, self).__init__()
        # build the embedding layer from the pretrained word2vec matrix
        self.embedding = torch.nn.Embedding(embedding.size(0), embedding.size(1))
        self.embedding.weight = torch.nn.Parameter(embedding)
        # whether to freeze the embedding; if fix_embedding is False, the embedding
        # is updated together with the rest of the model during training
        self.embedding.weight.requires_grad = False if fix_embedding else True
        self.embedding_dim = embedding.size(1)
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=False)
        for name, param in self.lstm.named_parameters():
            if name.startswith("weight"):
                nn.init.orthogonal_(param, gain=1)
            else:
                nn.init.zeros_(param)
        self.classifier = nn.Sequential( nn.Dropout(dropout),
                                         nn.Linear(hidden_dim, 1),
                                         nn.Sigmoid() )
    def forward(self, inputs):
        inputs = self.embedding(inputs)
        x, _ = self.lstm(inputs, None)
        # x has dimension (batch, seq_len, hidden_size);
        # take the hidden state of the last time step as the sentence representation
        x = x[:, -1, :]
        x = self.classifier(x)
        return x
        
def training(batch_size, n_epoch, lr, model_dir, train, valid, model, device):
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('\nstart training, parameter total:{}, trainable:{}\n'.format(total, trainable))
    criterion = nn.BCELoss()  # loss function: binary cross entropy
    t_batch = len(train)
    v_batch = len(valid)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)  # hand the model parameters to the optimizer with an appropriate learning rate
    total_loss, total_acc, best_acc = 0, 0, 0
    for epoch in range(n_epoch):
        total_loss, total_acc = 0, 0
        model.train()  # set the model to training mode so the optimizer can update its parameters
        # training loop
        for i, (inputs, labels) in enumerate(train):
            inputs = inputs.to(device, dtype=torch.long)  # device is "cuda": convert inputs to torch.cuda.LongTensor
            labels = labels.to(device, dtype=torch.float)  # labels must be float because they are fed to criterion
            optimizer.zero_grad()  # gradients accumulate across loss.backward() calls, so zero them after every batch
            outputs = model(inputs)  # feed the inputs to the model
            outputs = outputs.squeeze()  # drop the trailing dimension so outputs can be fed to criterion()
            loss = criterion(outputs, labels)  # training loss for this batch
            loss.backward()  # compute the gradients of the loss
            optimizer.step()  # update the model parameters
            correct = evaluation(outputs, labels)  # training accuracy for this batch
            total_acc += (correct / batch_size)
            total_loss += loss.item()
            print('[ Epoch{}: {}/{} ] loss:{:.3f} acc:{:.3f} '.format(
            	epoch+1, i+1, t_batch, loss.item(), correct*100/batch_size), end='\r')
        print('\nTrain | Loss:{:.5f} Acc: {:.3f}'.format(total_loss/t_batch, total_acc/t_batch*100))

        # validation loop
        model.eval()  # set the model to eval mode so its parameters are frozen
        with torch.no_grad():
            total_loss, total_acc = 0, 0
            for i, (inputs, labels) in enumerate(valid):
                inputs = inputs.to(device, dtype=torch.long)  # device is "cuda": convert inputs to torch.cuda.LongTensor
                labels = labels.to(device, dtype=torch.float)  # labels must be float because they are fed to criterion
                outputs = model(inputs)  # feed the inputs to the model
                outputs = outputs.squeeze()  # drop the trailing dimension so outputs can be fed to criterion()
                loss = criterion(outputs, labels)  # validation loss for this batch
                correct = evaluation(outputs, labels)  # validation accuracy for this batch
                total_acc += (correct / batch_size)
                total_loss += loss.item()

            print("Valid | Loss:{:.5f} Acc: {:.3f} ".format(total_loss/v_batch, total_acc/v_batch*100))
            if total_acc > best_acc:
                # if this validation result beats every previous one, save the current model for later prediction
                best_acc = total_acc
                torch.save(model, "{}/ckpt.model".format(model_dir))
                print('saving model with acc {:.3f}'.format(total_acc/v_batch*100))
        print('-----------------------------------------------')

def testing(batch_size, test_loader, model, device):
    model.eval()
    ret_output = []
    with torch.no_grad():
        for i, inputs in enumerate(test_loader):
            inputs = inputs.to(device, dtype=torch.long)
            outputs = model(inputs)
            outputs = outputs.squeeze()
            outputs[outputs>=0.5] = 1 # >= 0.5 counts as positive
            outputs[outputs<0.5] = 0 # < 0.5 counts as negative
            ret_output += outputs.int().tolist()
    
    return ret_output

def prepare(test_x, sen_len, batch_size, w2v_path):
    # preprocess a set of unlabeled sentences (e.g. train_x_no_label) and wrap them in a DataLoader
    print("loading data ...")
    preprocess = Preprocess(test_x, sen_len, w2v_path=w2v_path)
    embedding = preprocess.make_embedding(load=True)
    test_x = preprocess.sentence_word2idx()
    test_dataset = TwitterDataset(X=test_x, y=None)
    test_loader = torch.utils.data.DataLoader(dataset = test_dataset,
                                            batch_size = batch_size,
                                            shuffle = False,
                                            num_workers = 0)
    return test_loader

def testing_nolable(batch_size, test_loader, model, device):
    # predict pseudo-labels for train_x_no_label
    model.eval()
    ret_output = []
    with torch.no_grad():
        for i, inputs in enumerate(test_loader):
            inputs = inputs.to(device, dtype=torch.long)
            outputs = model(inputs)
            outputs = outputs.squeeze()
            outputs[outputs>0.9] = 1 # above 0.9 counts as positive
            outputs[outputs<0.1] = 0 # below 0.1 counts as negative; values in between keep their raw probability
            ret_output += outputs.tolist()

    return ret_output


# Main script:
path_prefix = 'C:\\Users\\13554\\jupyter practice\\lihongyi\\hw4'
print("loading training data ...")
train_x, y = load_training_data(os.path.join(path_prefix,'training_label.txt'))
train_x_no_label = load_training_data(os.path.join(path_prefix,'training_nolabel.txt'))
test_x = load_testing_data(os.path.join(path_prefix, 'testing_data.txt'))

# train word2vec on all sentences (labeled, test and unlabeled) and save the model
model = train_word2vec(train_x + test_x + train_x_no_label)
print("saving model ...")
model.save(os.path.join(path_prefix, 'w2v_all.model'))
w2v_path = os.path.join(path_prefix, 'w2v_all.model')

# use the GPU if torch.cuda.is_available() returns True, otherwise fall back to the CPU
if torch.cuda.is_available():
    print('torch.cuda.is_available')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_dir = path_prefix  # directory for the checkpointed model

# define the sentence length, whether to fix the embedding, the batch size,
# the number of training epochs and the learning rate
sen_len = 20
fix_embedding = True  # keep the embedding fixed during training
batch_size = 256
epoch = 15
lr = 0.001
# preprocess the inputs and labels
train_loader, val_loader, embedding = load_train_val(train_x, y, sen_len, path_prefix, w2v_path, if_add_nolable=False)

# create a model instance
model2 = LSTM_Net(embedding, embedding_dim=256, hidden_dim=128, num_layers=4, dropout=0.5, fix_embedding=True)
model2 = model2.to(device)  # with device set to "cuda" the model trains on the GPU (the inputs fed to it must be cuda tensors as well)

# the data has already been wrapped into batched DataLoaders above
# start training
training(batch_size, epoch, lr, model_dir, train_loader, val_loader, model2, device)

# The block below is the self-training part and can be run repeatedly (it does not predict
# test_data, it predicts train_no_label). In my experiments, self-training had essentially
# no effect on the validation accuracy; the final accuracy was about 80%.
test_loader = prepare(train_x_no_label, sen_len, batch_size, w2v_path)
epoch = 5
model3 = torch.load(os.path.join(path_prefix,'ckpt.model'))
# run the trained model on the unlabeled data to generate pseudo-labels
outputs = testing_nolable(batch_size, test_loader, model3, device)
# write the pseudo-labels to a csv-formatted file that load_no_label() above reads back
tmp = pd.DataFrame({"id":[str(i) for i in range(len(train_x_no_label))],"label":outputs})
print("save txt ...")
tmp.to_csv(os.path.join(path_prefix, 'predict_nolable.txt'), index=False)
print("Finish Predicting")
# rebuild the loaders with the confident pseudo-labels added, then train a new model
train_loader, val_loader, embedding = load_train_val(train_x, y, sen_len, path_prefix, w2v_path, if_add_nolable=True)
model2 = LSTM_Net(embedding, embedding_dim=256, hidden_dim=128, num_layers=1, dropout=0.5, fix_embedding=True)
model2 = model2.to(device)  # with device set to "cuda" the model trains on the GPU (the inputs fed to it must be cuda tensors as well)
training(batch_size, epoch, lr, model_dir, train_loader, val_loader, model2, device)
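
The helper functions load_testing_data() and testing() defined above are never called in the main script. For completeness, here is a minimal sketch of how they could be used to produce the Kaggle submission; it assumes the script above has already been run so that ckpt.model exists under path_prefix, and the output name predict.csv is only an illustrative choice.

# Optional: predict testing_data.txt (already loaded as test_x above) with the saved
# checkpoint and write a submission file; 'predict.csv' is a hypothetical output name.
test_loader = prepare(test_x, sen_len, batch_size, w2v_path)
model4 = torch.load(os.path.join(path_prefix, 'ckpt.model'))
outputs = testing(batch_size, test_loader, model4, device)
tmp = pd.DataFrame({"id": [str(i) for i in range(len(test_x))], "label": outputs})
print("save csv ...")
tmp.to_csv(os.path.join(path_prefix, 'predict.csv'), index=False)
print("Finish Predicting test data")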