def get_analogy(token_a, token_b, token_c, embed):
    """Solve the analogy "token_a : token_b :: token_c : ?".

    Computes the query vector x = vec(token_b) - vec(token_a) + vec(token_c)
    and returns the vocabulary word whose embedding is its nearest neighbour
    (found via the module-level `knn` helper).

    Args:
        token_a, token_b, token_c: query words; each must be in `embed.stoi`.
        embed: pretrained embedding object exposing `vectors`, `stoi`, `itos`
            (e.g. a torchtext GloVe vocab).

    Returns:
        str: the word completing the analogy.
    """
    vecs = [embed.vectors[embed.stoi[t]] for t in [token_a, token_b, token_c]]
    x = vecs[1] - vecs[0] + vecs[2]
    # k=1: only the single nearest neighbour is needed.
    topk, cos = knn(embed.vectors, x, 1)
    return embed.itos[topk[0]]
# REPL transcript: analogy queries against the pretrained GloVe vectors.
# The bare quoted string after each call is the value the call returned.
get_analogy('man','woman','son', glove)
'daughter'
get_analogy('beijing','china','tokyo', glove)
'japan'
# Superlative analogy: bad -> worst, so big -> biggest.
get_analogy('bad','worst','big', glove)
'biggest'
# Past-tense analogy: do -> did, so go -> went.
get_analogy('do','did','go', glove)
'went'
文本情感分类:使用卷积神经网络(textCNN)
import os
import sys

import torch
from torch import nn
import torchtext.vocab as Vocab
import torch.utils.data as Data
import torch.nn.functional as F

sys.path.append("..")  # make the local d2lzh_pytorch helper package importable
import d2lzh_pytorch as d2l

# Restrict CUDA to the first GPU; fall back to CPU when CUDA is unavailable.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

DATA_ROOT = "./Datasets"
print(torch.__version__, device)
1.4.0 cuda
一维卷积层
def corr1d(X, K):
    """One-dimensional cross-correlation of input X with kernel K.

    Args:
        X: 1-D tensor of length n.
        K: 1-D kernel tensor of length w (w <= n).

    Returns:
        1-D float tensor Y of length n - w + 1 where
        Y[i] = (X[i:i+w] * K).sum().
    """
    w = K.shape[0]
    # Valid cross-correlation: output shrinks by w - 1.
    Y = torch.zeros((X.shape[0] - w + 1))
    for i in range(Y.shape[0]):
        Y[i] = (X[i: i + w] * K).sum()
    return Y
# Sanity check: correlate [0..6] with the kernel [1, 2].
X, K = torch.tensor([0,1,2,3,4,5,6]), torch.tensor([1,2])
corr1d(X, K)
# REPL output of the call above:
tensor([ 2., 5., 8., 11., 14., 17.])
defcorr1d_multi_in(X, K):# 首先沿着X和K的第0维(通道维)遍历并计算一维互相关结果。然后将所有结果堆叠起来沿第0维累加return torch.stack([corr1d(x, k)for x, k inzip(X, K)]).sum(dim=0)
# 3-channel example: X has shape (3, 7) and K shape (3, 2),
# so the result is a 1-D tensor of length 7 - 2 + 1 = 6.
X = torch.tensor([[0,1,2,3,4,5,6],[1,2,3,4,5,6,7],[2,3,4,5,6,7,8]])
K = torch.tensor([[1,2],[3,4],[-1,-3]])
corr1d_multi_in(X, K)
training on cuda
epoch 1, loss 0.4830, train acc 0.761, test acc 0.842, time 158.9 sec
epoch 2, loss 0.1625, train acc 0.860, test acc 0.869, time 156.0 sec
epoch 3, loss 0.0685, train acc 0.919, test acc 0.876, time 156.4 sec
epoch 4, loss 0.0299, train acc 0.958, test acc 0.874, time 156.3 sec
epoch 5, loss 0.0128, train acc 0.979, test acc 0.867, time 156.9 sec