# Scratch benchmark (original note in Chinese: "Guo's optimized code"):
# times the torch.stack-based batching + normalization path that replaced
# the older per-image unsqueeze/cat approach.
a = np.array([
    [[1, 1, 1],
     [1, 1, 1]],
    [[1, 1, 1],
     [1, 1, 1]],
])
b = np.array([[[0, 0, 0], [0, 0, 0]], [[1, 1, 1], [1, 1, 1]]])
c = np.array([[[2, 2, 2], [2, 2, 2]], [[1, 1, 1], [1, 1, 1]]])
a = torch.from_numpy(a)
b = torch.from_numpy(b)
c = torch.from_numpy(c)

t0 = time.time()
# Batch the three (2, 2, 3) tensors along a new leading dim -> (3, 2, 2, 3).
cc = torch.stack((a, b, c), 0)
print(cc.shape)
# ImageNet-style normalization; the mean/std tensors broadcast over the
# trailing channel dimension (channels-last layout).
ccc = (cc / 255.0 - torch.tensor([0.485, 0.456, 0.406])) / torch.tensor([0.229, 0.224, 0.225])
print(ccc.shape)
# Swap the first two dims: (3, 2, 2, 3) -> (2, 3, 2, 3).
ccc = torch.transpose(ccc, 0, 1)
t1 = time.time()
print(t1 - t0, ccc.shape)
def _preprocess(self, im_crops):
    """
    Convert a list of image crops into one normalized batch tensor.

    Steps:
        1. scale pixel values to float in [0, 1]
        2. resize each crop to ``self.size`` (as Market1501 dataset did,
           e.g. (64, 128))
        3. stack the crops into a single torch Tensor
        4. normalize channel-wise (ImageNet mean; ``self.B`` holds the std)
        5. reorder from NHWC to NCHW

    Args:
        im_crops: iterable of HxWxC image arrays (presumably uint8 pixel
            data in [0, 255] — values are divided by 255 before use).

    Returns:
        torch.Tensor of shape (N, C, H, W), normalized.
    """
    def _resize(im, size):
        # cv2.resize takes (width, height); scale to [0, 1] before resizing.
        return cv2.resize(im.astype(np.float32) / 255., size)

    crops = [torch.from_numpy(_resize(im, self.size)) for im in im_crops]
    batch = torch.stack(crops, 0)
    # Broadcasting works here because the tensor is still channels-last
    # (NHWC); self.B is the per-channel std tensor.
    batch = (batch - torch.tensor([0.485, 0.456, 0.406])) / self.B
    # NHWC -> NCHW: transpose(1, 3) yields NCWH, transpose(2, 3) fixes HW.
    im_batch = torch.transpose(torch.transpose(batch, 1, 3), 2, 3)
    return im_batch