import torch
import time
import pandas as pd
from datetime import timedelta
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
from sklearn import metrics
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# ----------------------- Data loading and splitting -----------------------
# The CSVs are tab-separated: three text columns followed by a label column.
train_valid = pd.read_csv('train.csv', sep='\t')
test_df = pd.read_csv('test.csv', sep='\t')
train_size = 0.8  # the remaining 20% becomes the validation split
class_list = ['苹果', '香蕉', '榴莲']  # apple, banana, durian
train_valid = train_valid[train_valid.label.isin(class_list)]
test_df = test_df[test_df.label.isin(class_list)]
train_df = train_valid.sample(frac=train_size, random_state=7)
valid_df = train_valid.drop(train_df.index)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
test_df = test_df.reset_index(drop=True)
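# For example, a hypothetical 1,000-row train.csv would split into roughly
# 800 training rows and 200 validation rows with train_size = 0.8.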
# ----------------------- Vocabulary and dataset -----------------------
UNK, PAD = '<UNK>', '<PAD>'

def build_vocab(df, tokenizer, max_size, min_freq):
    """Build a {token: id} vocabulary from the text columns of df."""
    vocab_dic = {}
    for i in range(len(df)):
        content = df.iloc[i, [0, 1, 2]].sum()  # concatenate the three text fields
        if not content:
            continue
        for word in tokenizer(content):
            vocab_dic[word] = vocab_dic.get(word, 0) + 1
    # Keep tokens seen at least min_freq times, most frequent first, capped at max_size.
    vocab_list = sorted([_ for _ in vocab_dic.items() if _[1] >= min_freq],
                        key=lambda x: x[1], reverse=True)[:max_size]
    vocab_dic = {word_count[0]: idx for idx, word_count in enumerate(vocab_list)}
    # The two special tokens always take the last two ids.
    vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
    return vocab_dic
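# A hypothetical illustration: with the character-level tokenizer defined in
# Config below, build_vocab might return something like
#   {'果': 0, '蕉': 1, ..., '<UNK>': n, '<PAD>': n + 1}
# with ids assigned in descending frequency order.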
def get_time_dif(start_time):
    end_time = time.time()
    time_dif = end_time - start_time
    return timedelta(seconds=int(round(time_dif)))
class My_Dataset(Dataset):
    def __init__(self, config, df, vocab):
        self.config = config
        self.contents = []
        self.labels = []
        for i in range(len(df)):
            content = df.iloc[i, [0, 1, 2]].sum()  # concatenate the three text fields
            label = class_list.index(df.iloc[i, 3])
            self.contents.append(content)
            self.labels.append(int(label))
        self.pad_size = config.pad_size
        self.tokenizer = config.tokenizer
        self.vocab = vocab

    def __len__(self):
        return len(self.contents)

    def __getitem__(self, idx):
        content, label = self.contents[idx], self.labels[idx]
        token = self.tokenizer(content)
        seq_len = len(token)
        words_line = []
        # Pad or truncate every sample to exactly pad_size tokens.
        if len(token) < self.pad_size:
            token.extend([PAD] * (self.pad_size - len(token)))
        else:
            token = token[:self.pad_size]
            seq_len = self.pad_size
        # Map tokens to ids, falling back to <UNK> for out-of-vocabulary tokens.
        for word in token:
            words_line.append(self.vocab.get(word, self.vocab.get(UNK)))
        data = torch.tensor(words_line, dtype=torch.long)
        return (data, seq_len), label
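# Quick sanity-check sketch (assumes vocab and train_df built as above):
#   ds = My_Dataset(config, train_df, vocab)
#   (ids, seq_len), label = ds[0]
#   ids is a LongTensor of shape (config.pad_size,); label is an int in
#   [0, len(class_list)).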
# ----------------- Model configuration and architecture -----------------
class Config(object):
    def __init__(self):
        self.save_path = 'TextCNN.ckpt'
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dropout = 0.5
        self.require_improvement = 1000  # early-stop after this many batches without improvement
        self.num_classes = len(class_list)
        self.n_vocab = 0                 # filled in after the vocabulary is built
        self.num_epochs = 20
        self.batch_size = 128
        self.pad_size = 32               # every sample is padded/truncated to 32 tokens
        self.learning_rate = 1e-3
        self.embed = 300                 # embedding dimension
        self.filter_sizes = (2, 3, 4)    # convolution kernel heights
        self.num_filters = 256           # output channels per kernel size
        self.tokenizer = lambda x: [y for y in x]  # character-level tokenization
        self.max_size = 10000
        self.min_freq = 1
        self.UNK, self.PAD = '<UNK>', '<PAD>'
class Model(nn.Module):
    """TextCNN: parallel convolutions over the embedded sequence, max-pooled and concatenated."""
    def __init__(self, config):
        super(Model, self).__init__()
        # The last vocabulary id is <PAD>, so use it as the padding index.
        self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, config.num_filters, (k, config.embed)) for k in config.filter_sizes])
        self.dropout = nn.Dropout(config.dropout)
        self.fc = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)             # (B, num_filters, L - k + 1)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # (B, num_filters)
        return x

    def forward(self, x):
        out = self.embedding(x[0])  # x is (token_ids, seq_len); ids: (B, L) -> (B, L, E)
        out = out.unsqueeze(1)      # add a channel dim for Conv2d: (B, 1, L, E)
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)
        return out
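# Shape trace for one batch (B = batch_size, L = pad_size = 32, E = embed = 300):
#   embedding: (B, L) -> (B, L, E); unsqueeze -> (B, 1, L, E)
#   each conv k in (2, 3, 4): -> (B, 256, L - k + 1, 1) -> pool -> (B, 256)
#   concat -> (B, 768) -> fc -> (B, num_classes)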
# ----------------- Training, testing, and evaluation -----------------
def init_network(model, method='xavier', exclude='embedding', seed=123):
    for name, w in model.named_parameters():
        if exclude not in name:
            if 'weight' in name:
                if method == 'xavier':
                    nn.init.xavier_normal_(w)
                elif method == 'kaiming':
                    nn.init.kaiming_normal_(w)
                else:
                    nn.init.normal_(w)
            elif 'bias' in name:
                nn.init.constant_(w, 0)
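# Note: init_network is defined but never invoked in the driver code below;
# to apply Xavier initialization, call init_network(TextCNN_model) before train(...).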
def train(config, model, train_iter, dev_iter, test_iter):
    start_time = time.time()
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
    total_batch = 0               # number of batches seen so far
    dev_best_loss = float('inf')
    last_improve = 0              # batch index of the last validation-loss improvement
    flag = False                  # set when early stopping triggers
    for epoch in range(config.num_epochs):
        print('Epoch [{}/{}]'.format(epoch + 1, config.num_epochs))
        for i, (trains, labels) in enumerate(train_iter):
            trains = (trains[0].to(config.device), trains[1].to(config.device))
            labels = labels.to(config.device)
            optimizer.zero_grad()
            outputs = model(trains)
            loss = F.cross_entropy(outputs, labels)
            loss.backward()
            optimizer.step()
            if total_batch % 100 == 0:
                # Report train/validation metrics every 100 batches.
                true = labels.data.cpu()
                predic = torch.max(outputs.data, 1)[1].cpu()
                train_acc = metrics.accuracy_score(true, predic)
                dev_acc, dev_loss = evaluate(config, model, dev_iter)
                if dev_loss < dev_best_loss:
                    dev_best_loss = dev_loss
                    torch.save(model.state_dict(), config.save_path)
                    improve = '*'
                    last_improve = total_batch
                else:
                    improve = ''
                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>5.2}, Train Acc: {2:>6.2%}, Val Loss: {3:>5.2}, Val Acc: {4:>6.2%}, Time: {5} {6}'
                print(msg.format(total_batch, loss.item(), train_acc, dev_loss, dev_acc, time_dif, improve))
                model.train()
            total_batch += 1
            if total_batch - last_improve > config.require_improvement:
                # Early stop: validation loss has not improved for too long.
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break
        if flag:
            break
    test(config, model, test_iter)
def test(config, model, test_iter):
    # Reload the best checkpoint saved during training.
    model.load_state_dict(torch.load(config.save_path, map_location=config.device))
    model.eval()
    start_time = time.time()
    test_acc, test_loss, test_report, test_confusion = evaluate(config, model, test_iter, test=True)
    msg = 'Test Loss: {0:>5.2}, Test Acc: {1:>6.2%}'
    print(msg.format(test_loss, test_acc))
    print("Precision, Recall and F1-Score...")
    print(test_report)
    print("Confusion Matrix...")
    print(test_confusion)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
def evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0.0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for texts, labels in data_iter:
            texts = (texts[0].to(config.device), texts[1].to(config.device))
            labels = labels.to(config.device)
            outputs = model(texts)
            loss = F.cross_entropy(outputs, labels)
            loss_total += loss.item()
            labels = labels.data.cpu().numpy()
            predic = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)
    acc = metrics.accuracy_score(labels_all, predict_all)
    if test:
        report = metrics.classification_report(labels_all, predict_all, target_names=class_list, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(data_iter), report, confusion
    # Return accuracy and the average loss per batch.
    return acc, loss_total / len(data_iter)
# ----------------------------- Driver code -----------------------------
config = Config()
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
torch.backends.cudnn.deterministic = True
print('Loading data...')
vocab = build_vocab(train_df, config.tokenizer, config.max_size, config.min_freq)
config.n_vocab = len(vocab)
train_data = My_Dataset(config, train_df, vocab)
dev_data = My_Dataset(config, valid_df, vocab)
test_data = My_Dataset(config, test_df, vocab)
train_iter = DataLoader(train_data, batch_size=config.batch_size, shuffle=True)
dev_iter = DataLoader(dev_data, batch_size=config.batch_size)
test_iter = DataLoader(test_data, batch_size=config.batch_size)
TextCNN_model = Model(config).to(config.device)
print(TextCNN_model)
train(config, TextCNN_model, train_iter, dev_iter, test_iter)