记录一下,如何在 torch 中对单个 batch 构造掩码并计算
# Build per-token 0/1 masks that separate the emotion-inducing (aspect) span
# from its surrounding context, padding excluded from both.
# Assumes x is (batch, seq, ...), emo_induce[i] = (start, end) of the aspect
# span, and x_len[i] = true unpadded length of sample i -- TODO confirm shapes.
batch_size = x.shape[0]
seq_len = x.shape[1]
emo_induce = emo_induce.cpu().numpy() #aspect开始和结束的位置
text_len = x_len.cpu().numpy() #传入子句的真实长度
emo_weight = [[] for _ in range(batch_size)]
context_weight = [[] for _ in range(batch_size)]
for i in range(batch_size):
    span_start = emo_induce[i, 0]
    span_end = emo_induce[i, 1]
    true_len = text_len[i]
    n_pad = seq_len - true_len
    # before-span and after-span tokens are context (1); span tokens are
    # emotion (1 in emo_weight); padding positions get 0 in both masks
    context_weight[i] = (
        [1] * span_start
        + [0] * (span_end - span_start)
        + [1] * (true_len - span_end)
        + [0] * n_pad
    )
    emo_weight[i] = (
        [0] * span_start
        + [1] * (span_end - span_start)
        + [0] * (true_len - span_end)
        + [0] * n_pad
    )
# NOTE(review): only context_weight is converted to a CUDA tensor here;
# emo_weight stays a plain list -- verify that is what downstream code expects.
context_weight = torch.tensor(context_weight).cuda()
**判断列表中某些数字是否符合条件**
import torch

# torch.masked_select keeps only the entries whose mask value is True and
# returns them as a new 1-D tensor.
a = torch.Tensor([1, 2, 4, 4, 5])
mask = a < 4
print(torch.masked_select(a, mask))
**在 Linux 终端下载 GitHub 代码**
git clone <仓库网址>
**numpy 数组 `.flat` 属性的用法**
import numpy as np

# `ndarray.flat` yields a 1-D flat iterator (numpy.flatiter) over the array,
# in row-major order.
a = np.arange(4).reshape(2, 2)
print(a)
b = a.flat
print('b = ', b)
for i in b:
    print(i)
# The iterator can be materialized into a list
print(list(a.flat))
print(type(a.flat))  # type is numpy.flatiter
# Unlike a plain iterator, flatiter also supports indexing.
# Fix: the bare expression `a.flat[3]` only echoes at a REPL; in a script
# its value was silently discarded, so print it explicitly.
print(a.flat[3])