Word Similarity and Analogy Tasks

from d2l import torch as d2l
import os, torch
from torch import nn
torch.cuda.is_available()
True
#@save
d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',
                                '0b8703943ccdb6eb788e6f091b8946e82231bc4d')
#@save
d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',
                                 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')
#@save
d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',
                                  'b5116e234e9eb9076672cfeabf5469f3eec904fa')
#@save
d2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip',
                           'c1816da3821ae9f43899be655002f6c723e91b88')
d2l.DATA_URL
'http://d2l-data.s3-accelerate.amazonaws.com/'
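With these entries registered, d2l.download_extract can fetch an archive by key, unpack it, and return the local directory. A minimal check (assuming network access; the key must be one registered above):
# Download (or reuse the cached copy of) the 50-dimensional GloVe archive and list its contents
data_dir = d2l.download_extract('glove.6b.50d')
print(data_dir, os.listdir(data_dir))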
# To load pretrained GloVe and fastText embeddings, define the following TokenEmbedding class
class TokenEmbedding:
    """Map tokens to pretrained embedding vectors."""
    def __init__(self, embedding_name):
        self.idx_to_token, self.idx_to_vec = self._load_embedding(
            embedding_name)
        self.unknown_idx = 0
        self.token_to_idx = {token: idx
                             for idx, token in enumerate(self.idx_to_token)}

    def _load_embedding(self, embedding_name):
        idx_to_token, idx_to_vec = ['<unk>'], []
        data_dir = d2l.download_extract(embedding_name)
        # Alternatively, read a locally downloaded file such as 'glove.6B.50d.txt'
        # GloVe website: https://nlp.stanford.edu/projects/glove/
        # fastText website: https://fasttext.cc/
        with open(os.path.join(data_dir, 'vec.txt'), 'r', encoding='utf-8') as f:
            for line in f:
                elems = line.rstrip().split(' ')
                token, elems = elems[0], [float(elem) for elem in elems[1:]]
                # Skip header lines, e.g., the first line of a fastText file
                if len(elems) > 1:
                    idx_to_token.append(token)
                    idx_to_vec.append(elems)
        # Index 0 is reserved for <unk>, whose vector is all zeros
        idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec
        return idx_to_token, torch.tensor(idx_to_vec)

    def __getitem__(self, tokens):
        # Unknown tokens fall back to the <unk> index (0)
        indices = [self.token_to_idx.get(token, self.unknown_idx)
                   for token in tokens]
        vecs = self.idx_to_vec[torch.tensor(indices)]
        return vecs

    def __len__(self):
        return len(self.idx_to_token)

# Load the 50-dimensional GloVe embedding
glove_6b50d = TokenEmbedding('glove.6b.50d')
# Vocabulary size (400000 words plus the <unk> token)
len(glove_6b50d)
400001
# Get a word's index in the vocabulary, and vice versa
glove_6b50d.token_to_idx['hello'], glove_6b50d.idx_to_token[13076]
(13076, 'hello')
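As a quick sanity check of the lookup logic (a minimal sketch; 'notaword12345' is just a made-up out-of-vocabulary string), indexing the embedding with a list of tokens returns one 50-dimensional row per token, and unknown tokens fall back to the all-zero <unk> vector:
# Look up vectors for two in-vocabulary tokens: shape (2, 50)
print(glove_6b50d[['hello', 'world']].shape)
# An out-of-vocabulary token maps to index 0, i.e. the all-zero <unk> vector
print(torch.all(glove_6b50d[['notaword12345']] == 0))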

14.7.2 Applying Pretrained Word Vectors

Word Similarity

# Find the k nearest neighbors by cosine similarity with the following knn function
def knn(W, x, k):
    # Add 1e-9 for numerical stability
    cos = torch.mv(W, x.reshape(-1,)) / (
        torch.sqrt(torch.sum(W * W, axis=1) + 1e-9) *
        torch.sqrt((x * x).sum()))
    _, topk = torch.topk(cos, k=k)
    return topk, [cos[int(i)] for i in topk]
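To see what knn computes, here is a tiny hand-checkable toy example (the tensors are illustrative only): with an identity matrix as the "embedding matrix" and a query aligned with the first axis, row 0 comes back first with cosine similarity close to 1.
W_toy = torch.eye(3)                   # three orthogonal "word vectors"
x_toy = torch.tensor([1.0, 0.0, 0.0])  # query aligned with the first vector
topk, cos = knn(W_toy, x_toy, 2)
print(topk, [float(c) for c in cos])   # index 0 first, cosine ≈ 1.0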
# Then, search the pretrained embedding for words similar to a query word
def get_similar_tokens(query_token, k, embed):
    topk, cos = knn(embed.idx_to_vec, embed[[query_token]], k + 1)
    # Skip the query word itself and print the rest
    for i, c in zip(topk[1:], cos[1:]):
        print(f'{embed.idx_to_token[int(i)]}: cosine similarity={float(c):.3f}')
# The six words most similar to 'chip'
get_similar_tokens('chip', 6, glove_6b50d)
chips: cosine similarity=0.856
intel: cosine similarity=0.749
electronics: cosine similarity=0.749
semiconductor: cosine similarity=0.731
maker: cosine similarity=0.716
computer: cosine similarity=0.708
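The top score can be reproduced directly from the raw vectors; a minimal cross-check using only torch and the embedding loaded above:
# Recompute the cosine similarity between 'chip' and 'chips' by hand
a = glove_6b50d[['chip']].reshape(-1)
b = glove_6b50d[['chips']].reshape(-1)
print(f'{float(torch.dot(a, b) / (a.norm() * b.norm())):.3f}')  # close to 0.856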
# Next, output the words most similar to 'baby' and 'beautiful'
get_similar_tokens('baby', 8, glove_6b50d)
babies: cosine similarity=0.839
boy: cosine similarity=0.800
girl: cosine similarity=0.792
newborn: cosine similarity=0.778
pregnant: cosine similarity=0.765
mom: cosine similarity=0.762
child: cosine similarity=0.759
toddler: cosine similarity=0.756
get_similar_tokens('beautiful', 8, glove_6b50d)
lovely: cosine similarity=0.921
gorgeous: cosine similarity=0.893
wonderful: cosine similarity=0.830
charming: cosine similarity=0.825
beauty: cosine similarity=0.801
elegant: cosine similarity=0.774
looks: cosine similarity=0.758
love: cosine similarity=0.736

Word Analogy

def get_analogy(token_a, token_b, token_c, embed):
    vecs = embed[[token_a, token_b, token_c]]
    x = vecs[1] - vecs[0] + vecs[2]
    # Return the single word whose vector is most similar to x
    topk, cos = knn(embed.idx_to_vec, x, 1)
    return embed.idx_to_token[int(topk[0])]
# Gender analogy: man is to woman as son is to ...
get_analogy('man', 'woman', 'son', glove_6b50d)
'daughter'
# Capital-country analogy
get_analogy('beijing', 'china', 'taipei', glove_6b50d)
'taiwan'
# Capital-country analogy
get_analogy('beijing', 'china', 'tokyo', glove_6b50d)
'japan'
# Adjective-superlative analogy
get_analogy('bad', 'worst', 'big', glove_6b50d)
'biggest'
# Present-past tense analogy
get_analogy('do', 'did', 'go', glove_6b50d)
'went'
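These analogies rest on the vector-offset assumption: vec('woman') - vec('man') + vec('son') should point roughly in the direction of vec('daughter'). A minimal sketch that checks this directly with the objects defined above:
# Check the offset vec(woman) - vec(man) + vec(son) against vec(daughter)
vecs = glove_6b50d[['man', 'woman', 'son', 'daughter']]
x = vecs[1] - vecs[0] + vecs[2]
print(f'{float(torch.dot(x, vecs[3]) / (x.norm() * vecs[3].norm())):.3f}')  # high value supports the analogy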