import os
import torch
from torch import nn
from d2l import torch as d2l
torch.cuda.is_available()
True
d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',
                                '0b8703943ccdb6eb788e6f091b8946e82231bc4d')
d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',
                                 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')
d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',
                                  'b5116e234e9eb9076672cfeabf5469f3eec904fa')
d2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip',
                           'c1816da3821ae9f43899be655002f6c723e91b88')
d2l.DATA_URL
'http://d2l-data.s3-accelerate.amazonaws.com/'
class TokenEmbedding:
    """Load pretrained token embeddings (e.g., GloVe) by name."""
    def __init__(self, embedding_name):
        self.idx_to_token, self.idx_to_vec = self._load_embedding(
            embedding_name)
        self.unknown_idx = 0
        self.token_to_idx = {token: idx for idx, token in
                             enumerate(self.idx_to_token)}

    def _load_embedding(self, embedding_name):
        idx_to_token, idx_to_vec = ['<unk>'], []
        data_dir = d2l.download_extract(embedding_name)
        # GloVe website: https://nlp.stanford.edu/projects/glove/
        # fastText website: https://fasttext.cc/
        with open(os.path.join(data_dir, 'vec.txt'), 'r',
                  encoding='utf-8') as f:
            for line in f:
                elems = line.rstrip().split(' ')
                token, elems = elems[0], [float(elem) for elem in elems[1:]]
                # Skip header lines, such as the top row in fastText files
                if len(elems) > 1:
                    idx_to_token.append(token)
                    idx_to_vec.append(elems)
        # <unk> at index 0 maps to the all-zero vector
        idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec
        return idx_to_token, torch.tensor(idx_to_vec)

    def __getitem__(self, tokens):
        # Out-of-vocabulary tokens fall back to the <unk> index
        indices = [self.token_to_idx.get(token, self.unknown_idx)
                   for token in tokens]
        vecs = self.idx_to_vec[torch.tensor(indices)]
        return vecs

    def __len__(self):
        return len(self.idx_to_token)
glove_6b50d = TokenEmbedding('glove.6b.50d')
len(glove_6b50d)
400001
glove_6b50d.token_to_idx['hello'], glove_6b50d.idx_to_token[13076]
(13076, 'hello')
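As a quick check of __getitem__, indexing the instance with a list of tokens returns one 50-dimensional row per token (a minimal sketch; the token list is arbitrary, and any out-of-vocabulary token would simply map to the zero vector at index 0):

# Each queried token yields one row of the 50-dimensional GloVe matrix
glove_6b50d[['hello', 'world']].shape
torch.Size([2, 50])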
14.7.2 Applying Pretrained Word Vectors
Word Similarity
def knn(W, x, k):
    # Add 1e-9 to the norm for numerical stability
    cos = torch.mv(W, x.reshape(-1,)) / (
        torch.sqrt(torch.sum(W * W, axis=1) + 1e-9) *
        torch.sqrt((x * x).sum()))
    _, topk = torch.topk(cos, k=k)
    return topk, [cos[int(i)] for i in topk]

def get_similar_tokens(query_token, k, embed):
    # Query k+1 neighbors because the nearest one is the query token itself
    topk, cos = knn(embed.idx_to_vec, embed[[query_token]], k + 1)
    for i, c in zip(topk[1:], cos[1:]):  # Skip the query token
        print(f'{embed.idx_to_token[int(i)]}: cosine similarity={float(c):.3f}')
get_similar_tokens('chip', 6, glove_6b50d)
chips: cosine similarity=0.856
intel: cosine similarity=0.749
electronics: cosine similarity=0.749
semiconductor: cosine similarity=0.731
maker: cosine similarity=0.716
computer: cosine similarity=0.708
get_similar_tokens('baby', 8, glove_6b50d)
babies: cosine similarity=0.839
boy: cosine similarity=0.800
girl: cosine similarity=0.792
newborn: cosine similarity=0.778
pregnant: cosine similarity=0.765
mom: cosine similarity=0.762
child: cosine similarity=0.759
toddler: cosine similarity=0.756
get_similar_tokens('beautiful', 8, glove_6b50d)
lovely: cosine similarity=0.921
gorgeous: cosine similarity=0.893
wonderful: cosine similarity=0.830
charming: cosine similarity=0.825
beauty: cosine similarity=0.801
elegant: cosine similarity=0.774
looks: cosine similarity=0.758
love: cosine similarity=0.736
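To sanity-check a single pair without ranking the entire vocabulary, the same quantity can be computed directly with torch.nn.functional.cosine_similarity (a minimal sketch; the helper name pair_similarity and the choice of token pair are illustrative, not part of this section):

import torch.nn.functional as F

def pair_similarity(token_a, token_b, embed):
    # Cosine similarity between two (1, d)-shaped token vectors
    va, vb = embed[[token_a]], embed[[token_b]]
    return F.cosine_similarity(va, vb).item()

pair_similarity('chip', 'chips', glove_6b50d)  # ~0.856, matching the ranking above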
Word Analogy
def get_analogy(token_a, token_b, token_c, embed):
    # Complete the analogy a : b :: c : ? via vec(b) - vec(a) + vec(c)
    vecs = embed[[token_a, token_b, token_c]]
    x = vecs[1] - vecs[0] + vecs[2]
    topk, cos = knn(embed.idx_to_vec, x, 1)
    return embed.idx_to_token[int(topk[0])]
get_analogy('man', 'woman', 'son', glove_6b50d)
'daughter'
get_analogy('beijing', 'china', 'taipei', glove_6b50d)
'taiwan'
get_analogy('beijing', 'china', 'tokyo', glove_6b50d)
'japan'
get_analogy('bad', 'worst', 'big', glove_6b50d)
'biggest'
get_analogy('do', 'did', 'go', glove_6b50d)
'went'
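Beyond similarity and analogy queries, pretrained vectors like these are typically copied into an embedding layer of a downstream model. A minimal sketch using nn.Embedding.from_pretrained (the freeze setting and the token-to-index lookup here are assumptions for illustration, not part of this section):

# Initialize an embedding layer with the GloVe matrix; freeze=True keeps
# the vectors fixed during training (use freeze=False to fine-tune them)
embedding_layer = nn.Embedding.from_pretrained(glove_6b50d.idx_to_vec,
                                               freeze=True)
tokens = ['hello', 'world']
indices = torch.tensor([glove_6b50d.token_to_idx.get(t, glove_6b50d.unknown_idx)
                        for t in tokens])
embedding_layer(indices).shape
torch.Size([2, 50])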