1 输入指令:(PS:模式test 显著性数据集是ECSSD)
python3 run.py --mode test --sal_mode e
2 模型&数据集放置如下:(PS:请问选中的这两个的放置位置是否正确?对的,另外这个ECSSD是自己下载的,里面内容改名如下)
3 报错如下:(PS:报错核心部分,原因是图片没有读取到)
ValueError: non-broadcastable output operand with shape () doesn't match the broadcast shape (3,)
File Not Exists
4 备注:(PS:我把你的绝对路径改成了相对路径,对应如下)
run.py
if __name__ == '__main__':
# vgg_path = '/home/liuj/code/Messal/weights/vgg16_20M.pth'
vgg_path = './weights/vgg16_20M.pth'
# vgg_path = '/home/liuj/code/Messal/weights/resnet50_caffe.pth'
resnet_path = './weights/resnet50_caffe.pth'
dataset.py (PS:这部分我只改了 test_mode == 1 的 e 数据集,因为用的这个,
self.image_root = './ECSSD/' # todo 去掉这个Imgs不然会重复 Imgs/Imgs)
class ImageDataTest(data.Dataset):
def __init__(self, test_mode=1, sal_mode='e'):
if test_mode == 0:
# self.image_root = '/home/liuj/dataset/saliency_test/ECSSD/Imgs/'
# self.image_source = '/home/liuj/dataset/saliency_test/ECSSD/test.lst'
self.image_root = '/home/liuj/dataset/HED-BSDS_PASCAL/HED-BSDS/test/'
self.image_source = '/home/liuj/dataset/HED-BSDS_PASCAL/HED-BSDS/test.lst'
elif test_mode == 1:
if sal_mode == 'e':
# self.image_root = '/home/liuj/dataset/saliency_test/ECSSD/Imgs/' # todo
# self.image_root = './ECSSD/Imgs/'
self.image_root = './ECSSD/'
# self.image_source = '/home/liuj/dataset/saliency_test/ECSSD/test.lst'
self.image_source = './ECSSD/test.lst'
# self.test_fold = '/media/ubuntu/disk/Result/saliency/ECSSD/'
self.test_fold = './Result/saliency/ECSSD/'
./ECSSD/test.lst 因为 GitHub 仓库没有提供,是我自己写的,内容如下左边。
目录放置结构如右
这样才对,上面是用来训练的
然后输出图片要注意,name[:-4]要改成name[3:-4],这样会生成一个s文件夹
最终路径如下 ./Result/saliency/ECSSD/EGNet_ResNet50/s/806.png
5 下面是完整报错内容:(下述问题,通过上面办法已经解决)
(env_py36) hp@hp-X599:~/zjc/nk_PyCharm/PyCharm_project/EGNet-master$ python3 run.py --mode test --sal_mode e
trueUnify bone part
TUN_bone(
(convert): ConvertLayer(
(convert0): ModuleList(
(0): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(1): Sequential(
(0): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(2): Sequential(
(0): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(3): Sequential(
(0): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(4): Sequential(
(0): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
)
)
(base): ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=True)
(layer1): Sequential(
(0): Bottleneck(
(conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(2): Bottleneck(
(conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer2): Sequential(
(0): Bottleneck(
(conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(2): Bottleneck(
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(3): Bottleneck(
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer3): Sequential(
(0): Bottleneck(
(conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(2): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(3): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(4): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(5): Bottleneck(
(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer4): Sequential(
(0): Bottleneck(
(conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Bottleneck(
(conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(2): Bottleneck(
(conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
)
(merge1): MergeLayer1(
(trans): ModuleList(
(0): Sequential(
(0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(1): Sequential(
(0): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(2): Sequential(
(0): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
)
(up): ModuleList(
(0): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace)
(4): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(5): ReLU(inplace)
)
(1): Sequential(
(0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace)
(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace)
(4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(5): ReLU(inplace)
)
(2): Sequential(
(0): Conv2d(512, 512, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): ReLU(inplace)
(2): Conv2d(512, 512, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(3): ReLU(inplace)
(4): Conv2d(512, 512, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(5): ReLU(inplace)
)
(3): Sequential(
(0): Conv2d(512, 512, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): ReLU(inplace)
(2): Conv2d(512, 512, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(3): ReLU(inplace)
(4): Conv2d(512, 512, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(5): ReLU(inplace)
)
(4): Sequential(
(0): Conv2d(512, 512, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
(1): ReLU(inplace)
(2): Conv2d(512, 512, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
(3): ReLU(inplace)
(4): Conv2d(512, 512, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
(5): ReLU(inplace)
)
)
(score): ModuleList(
(0): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): Conv2d(512, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): Conv2d(512, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): Conv2d(512, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(relu): ReLU()
)
(merge2): MergeLayer2(
(trans): ModuleList(
(0): ModuleList(
(0): Sequential(
(0): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(1): Sequential(
(0): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(2): Sequential(
(0): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
(3): Sequential(
(0): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): ReLU(inplace)
)
)
)
(up): ModuleList(
(0): ModuleList(
(0): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace)
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace)
(4): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(5): ReLU(inplace)
)
(1): Sequential(
(0): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): ReLU(inplace)
(2): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(3): ReLU(inplace)
(4): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(5): ReLU(inplace)
)
(2): Sequential(
(0): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): ReLU(inplace)
(2): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(3): ReLU(inplace)
(4): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(5): ReLU(inplace)
)
(3): Sequential(
(0): Conv2d(128, 128, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
(1): ReLU(inplace)
(2): Conv2d(128, 128, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
(3): ReLU(inplace)
(4): Conv2d(128, 128, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
(5): ReLU(inplace)
)
)
)
(score): ModuleList(
(0): ModuleList(
(0): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
)
(final_score): Sequential(
(0): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): ReLU(inplace)
(2): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(relu): ReLU()
)
)
The number of parameters: 111692618
Loading pre-trained model from ./epoch_resnet.pth...
File Not Exists
File Not Exists
File Not Exists
File Not Exists
File Not Exists
File Not Exists
Traceback (most recent call last):
File "run.py", line 69, in <module>
File Not Exists
File Not Exists
main(config)
File "run.py", line 23, in main
test.test(test_mode=config.test_mode)
File "/home/hp/zjc/nk_PyCharm/PyCharm_project/EGNet-master/solver.py", line 109, in test
for i, data_batch in enumerate(self.test_loader):
File "/home/hp/miniconda3/envs/env_py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 336, in __next__
return self._process_next_batch(batch)
File "/home/hp/miniconda3/envs/env_py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 357, in _process_next_batch
raise batch.exc_type(batch.exc_msg)
ValueError: Traceback (most recent call last):
File "/home/hp/miniconda3/envs/env_py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 106, in _worker_loop
samples = collate_fn([dataset[i] for i in batch_indices])
File "/home/hp/miniconda3/envs/env_py36/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 106, in <listcomp>
samples = collate_fn([dataset[i] for i in batch_indices])
File "/home/hp/zjc/nk_PyCharm/PyCharm_project/EGNet-master/dataset.py", line 99, in __getitem__
image, im_size = load_image_test(os.path.join(self.image_root, self.image_list[item]))
File "/home/hp/zjc/nk_PyCharm/PyCharm_project/EGNet-master/dataset.py", line 145, in load_image_test
in_ -= np.array((104.00699, 116.66877, 122.67892))
ValueError: non-broadcastable output operand with shape () doesn't match the broadcast shape (3,)
File Not Exists