# --- Build a deterministic test input tensor for the network -----------------
# Toggle for running this smoke test on the GPU; requires a CUDA build of torch.
useCuda = True
if useCuda:
    assert torch.cuda.is_available(), "useCuda=True but no CUDA device is available"

# A single 96x96 ramp image: values 0.00, 0.01, 0.02, ... laid out row-major,
# shaped (1, 96, 96). arange is the idiomatic/fast spelling of array(range(...)).
inputs = numpy.arange(96 * 96, dtype=numpy.float32).reshape(1, 96, 96) * 0.01
# Replicate the ramp across 3 channels -> (3, 96, 96), matching an RGB input.
inputs = numpy.concatenate([inputs] * 3, axis=0)
# Add a leading batch dimension -> (1, 3, 96, 96).
data = torch.from_numpy(inputs).unsqueeze(0)
if useCuda:
    data = data.cuda()
# NOTE(review): Variable is a no-op since PyTorch 0.4 (tensors carry autograd
# state directly); kept only for compatibility with older torch versions.
data = Variable(data)
# Code borrowed from: thnkim/OpenFacePytorch