As of early February, here is a summary of what I studied during the first semester of grad school and the winter break. I have finished the Transformer model and will move on to BERT next.
First, watch Hung-yi Lee's machine learning course once through and take notes with XMind.
Then go through this introductory machine-learning guide: https://github.com/leerumor/nlp_tutorial
The prerequisites (basic Python and math) should not be a problem. After that, study the classic machine-learning algorithms and papers, interleaving Hung-yi Lee's lectures along the way.
I watched Hung-yi Lee's course twice and feel I understand it now. The first pass left only a rough impression, which is fine; learning while watching works well. On the second pass I focused on the parts I did not understand. Of the several beginner roadmaps I looked at, leerumor's ordering is the best.
Li Hang's Statistical Learning Methods: read for the basic principles.
Xipeng Qiu's Neural Networks and Deep Learning: skim it to get a first impression.
This repo implements the classic algorithms with detailed explanations; read it once through: https://github.com/Dod-o/Statistical-Learning-Method_Code
Liu Jianping's blog also helped, especially its code walkthroughs: https://www.cnblogs.com/pinard/category/894692.html?page=4
Companion code: https://github.com/ljpzzz/machinelearning
On Bilibili: the complete 《PyTorch深度学习实践》 (PyTorch Deep Learning Practice) playlist.
Full NLP tutorial: https://www.bilibili.com/video/BV17A411e7qL?p=12
Then start on text-classification papers. Read a few Chinese ones first for orientation (search "text classification" on CNKI; 7-8 papers in a morning give a decent overview), then move on to the English surveys.
The models that matter are essentially TextCNN, FastText, Transformer, and BERT.
Running the models yourself is the fastest way to understand them.
Reference implementation: https://github.com/649453932/Chinese-Text-Classification-Pytorch
It covers FastText, TextCNN, Transformer, BERT, and more.
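For orientation: each model file in that repo pairs a Config class (paths and hyperparameters) with a Model class, and a top-level run.py script wires them into the shared training loop. If I remember the README correctly, training is launched with something like python run.py --model TextCNN (check the repo for the exact flags). Here is the TextCNN file: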
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Config(object):
    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextCNN'
        self.train_path = dataset + '/data/train.txt'    # training set
        self.dev_path = dataset + '/data/dev.txt'        # validation set
        self.test_path = dataset + '/data/test.txt'      # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # class names
        self.vocab_path = dataset + '/data/vocab.pkl'    # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained-model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None           # pretrained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5                               # dropout rate
        self.require_improvement = 1000                  # stop early if no dev improvement for 1000 batches
        self.num_classes = len(self.class_list)          # number of classes
        self.n_vocab = 0                                 # vocabulary size, set at runtime
        self.num_epochs = 20                             # number of epochs
        self.batch_size = 256                            # mini-batch size (changed from 128)
        self.pad_size = 32                               # fixed sentence length (pad short, truncate long)
        self.learning_rate = 2e-3                        # learning rate (changed from 1e-3)
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension
        self.filter_sizes = (2, 3, 4)                    # convolution kernel sizes
        self.num_filters = 256                           # number of kernels (output channels)
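# Aside on the pretrained-embedding file above: np.load(...)["embeddings"] expects an
# .npz archive holding one [n_vocab, embed_dim] float array under the key "embeddings".
# A file in that shape could be produced like this (illustrative only; 'vectors' and the
# filename are hypothetical, not from the repo):
#   np.savez_compressed(dataset + '/data/my_embedding.npz',
#                       embeddings=vectors.astype('float32'))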
'''Convolutional Neural Networks for Sentence Classification'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            # freeze=False lets the pretrained embeddings keep training with the model
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, config.num_filters, (k, config.embed)) for k in config.filter_sizes])
        self.dropout = nn.Dropout(config.dropout)
        self.fc = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)              # drop the size-1 width dimension left by the (k, embed) kernel
        x = F.max_pool1d(x, x.size(2)).squeeze(2)   # max-pool over the entire time dimension, then drop it
        return x

    def forward(self, x):
        out = self.embedding(x[0])    # x is (token_ids, seq_len); embed the token ids
        out = out.unsqueeze(1)        # add a size-1 channel dimension for Conv2d
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)
        return out
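To make the squeeze/unsqueeze bookkeeping concrete, here is a minimal shape trace. It reuses the Model class above; DummyConfig is my own stand-in for the repo's Config, with the default hyperparameters filled in:

# Hypothetical minimal config, just enough for Model.__init__ (random embeddings).
class DummyConfig:
    embedding_pretrained = None
    n_vocab = 5000
    embed = 300
    num_filters = 256
    filter_sizes = (2, 3, 4)
    dropout = 0.5
    num_classes = 10

model = Model(DummyConfig())
ids = torch.randint(0, 4999, (128, 32))    # [batch_size, pad_size]
lengths = torch.full((128,), 32)           # unused by TextCNN's forward
out = model((ids, lengths))
# embedding: [128, 32, 300] -> unsqueeze(1): [128, 1, 32, 300]
# conv (k=2): [128, 256, 31, 1] -> squeeze(3): [128, 256, 31]
# max_pool1d + squeeze(2): [128, 256]; cat over 3 kernel sizes: [128, 768]
print(out.shape)                           # torch.Size([128, 10])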
With the learning rate at 2e-3, accuracy reaches 91.11%.
TextRNN
# coding: UTF-8
import torch
import torch.nn as nn
import numpy as np
class Config(object):
    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN'
        self.train_path = dataset + '/data/train.txt'    # training set
        self.dev_path = dataset + '/data/dev.txt'        # validation set
        self.test_path = dataset + '/data/test.txt'      # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # class names
        self.vocab_path = dataset + '/data/vocab.pkl'    # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained-model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None           # pretrained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5                               # dropout rate
        self.require_improvement = 1000                  # stop early if no dev improvement for 1000 batches
        self.num_classes = len(self.class_list)          # number of classes
        self.n_vocab = 0                                 # vocabulary size, set at runtime
        self.num_epochs = 10                             # number of epochs
        self.batch_size = 128                            # mini-batch size
        self.pad_size = 32                               # fixed sentence length (pad short, truncate long)
        self.learning_rate = 1e-3                        # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension, taken from the pretrained vectors if used
        self.hidden_size = 256                           # LSTM hidden size
        self.num_layers = 3                              # number of LSTM layers
'''Recurrent Neural Network for Text Classification with Multi-Task Learning'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)

    def forward(self, x):
        x, _ = x                       # x is (token_ids, seq_len); keep the ids
        out = self.embedding(x)        # [batch_size, seq_len, embed] = [128, 32, 300]
        out, _ = self.lstm(out)        # [batch_size, seq_len, hidden_size * 2]
        out = self.fc(out[:, -1, :])   # hidden state at the last time step
        return out
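The same kind of sanity check for TextRNN, reusing the hypothetical DummyConfig from the TextCNN sketch. One caveat worth noting: forward keeps only out[:, -1, :], the output at the last time step, so for right-padded short sentences that position is mostly padding; this simple baseline does not correct for that.

config = DummyConfig()           # hypothetical config from the TextCNN sketch
config.hidden_size = 256         # extra fields TextRNN's Model expects
config.num_layers = 3

model = Model(config)            # the TextRNN Model defined above
ids = torch.randint(0, 4999, (128, 32))
out = model((ids, torch.full((128,), 32)))
# embedding: [128, 32, 300] -> lstm out: [128, 32, 512] (bidirectional, 2 * 256)
print(out.shape)                 # torch.Size([128, 10])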
Test Loss: 0.28, Test Acc: 91.30%