LogoCap training on a custom mouse keypoint dataset: error log and notes

Traceback (most recent call last):
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 358, in <module>
    main()
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 191, in main
    mp.spawn(
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 230, in spawn
    return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 188, in start_processes
    while not context.join():
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 150, in join
    raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException: 

-- Process 0 terminated with the following error:
Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
    fn(i, *args)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 331, in main_worker
    do_train(cfg,model,train_loader,targets_encoder,optimizer,lr_scheduler, epoch,final_output_dir, writer_dict, logger,
  File "/media/wagnchogn/data/logocap-main/logocap/trainer.py", line 42, in do_train
    for i, (images_batch, targets_batch, joints_batch, areas_batch, meta_batch) in enumerate(data_loader):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 517, in __next__
    data = self._next_data()
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 557, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/media/wagnchogn/data/logocap-main/logocap/dataset/COCOKeypoints.py", line 95, in __getitem__
    img, mask_list, joints_list, area, meta = self.transforms(
  File "/media/wagnchogn/data/logocap-main/logocap/dataset/transforms/transforms.py", line 28, in __call__
    image, mask, joints, area, meta = t(image, mask, joints, area,meta)
  File "/media/wagnchogn/data/logocap-main/logocap/dataset/transforms/transforms.py", line 77, in __call__
    raise ValueError(
ValueError: Some indices in self.flip_index are out of bounds for joints with shape (18, 6, 3)


Process finished with exit code 1
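
The joints array here has 6 keypoint slots per instance (possibly the custom mouse keypoints plus an appended center joint), but the flip augmentation is still using COCO's 17-keypoint flip index, so some indices fall outside the valid range. Below is a minimal sketch of a flip index that matches a 5-keypoint skeleton; the keypoint names and ordering are assumptions, not taken from the repo.

# Sketch only: define a flip index whose length and values match the custom skeleton.
# Hypothetical ordering: nose, left_ear, right_ear, tail_base, tail_tip.
NUM_KEYPOINTS = 5
FLIP_INDEX = [0, 2, 1, 3, 4]   # left/right pairs swap under a horizontal flip; unpaired joints map to themselves

def check_flip_index(flip_index, num_keypoints):
    # Every entry must be a valid keypoint index, otherwise the flip transform
    # raises the "out of bounds" ValueError seen above.
    assert len(flip_index) == num_keypoints
    assert all(0 <= i < num_keypoints for i in flip_index)

check_flip_index(FLIP_INDEX, NUM_KEYPOINTS)

The same keypoint count has to be reflected everywhere the config lists per-keypoint quantities (flip index, heatmap channels, OKS sigmas), which is the thread running through the rest of these errors.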
/home/wagnchogn/anaconda3/envs/logocap-temp/bin/python /home/wagnchogn/Downloads/pycharm-professional-2021.2/pycharm-2021.2/plugins/python/helpers/pydev/pydevd.py --multiproc --qt-support=auto --client 127.0.0.1 --port 35961 --file /media/wagnchogn/data/logocap-main/tools/train_net.py
Connected to pydev debugger (build 212.4746.96)
Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/multiprocessing/spawn.py", line 126, in _main
    self = reduction.pickle.load(from_parent)
AttributeError: Can't get attribute 'main_worker' on <module '__main__' (built-in)>
python-BaseException
Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/multiprocessing/spawn.py", line 126, in _main
    self = reduction.pickle.load(from_parent)
AttributeError: Can't get attribute 'main_worker' on <module '__main__' (built-in)>
python-BaseException
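
The "Can't get attribute 'main_worker'" error only shows up under the PyCharm debugger: mp.spawn uses the 'spawn' start method, which re-imports the main module in every child process, and under pydevd the script is executed through execfile, so __main__ is not the training script and the children cannot find main_worker by name. The general pattern spawn expects (this is standard multiprocessing usage, not a repo-specific fix) looks like this:

# Sketch only: 'spawn' requires a top-level, importable worker and a __main__ guard.
import torch.multiprocessing as mp

def main_worker(rank, world_size):
    # must live at module top level so child processes can re-import it by name
    print(f'worker {rank}/{world_size}')

def main():
    world_size = 2
    mp.spawn(main_worker, args=(world_size,), nprocs=world_size)

if __name__ == '__main__':
    main()

When stepping through the code in the debugger, it is easier to bypass mp.spawn entirely and call main_worker for a single GPU in the current process, or launch the script normally from a terminal.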
/home/wagnchogn/anaconda3/envs/logocap-temp/bin/python /media/wagnchogn/data/logocap-main/tools/train_net.py
Use GPU: 1 for training
Init process group: dist_url: tcp://127.0.0.1:23456, world_size: 2, rank: 1
Use GPU: 0 for training
Init process group: dist_url: tcp://127.0.0.1:23456, world_size: 2, rank: 0
INFO:logocap.models.backbone:=> init weights from normal distribution
INFO:logocap.models.backbone:=> init weights from normal distribution
INFO:logocap.models.backbone:=> loading pretrained model /media/wagnchogn/data/logocap-main/weights/imagenet/hrnet_w32-36af842e.pth
INFO:logocap.models.backbone:=> loading pretrained model /media/wagnchogn/data/logocap-main/weights/imagenet/hrnet_w32-36af842e.pth
loading annotations into memory...
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
10
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
Done (t=0.00s)
creating index...
index created!
10
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
INFO:logocap.dataset.COCODataset:=> classes: ['__background__', 'mouse']
INFO:logocap:=> loading checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar'
INFO:logocap.dataset.COCODataset:=> classes: ['__background__', 'mouse']
INFO:logocap:=> loading checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar'
INFO:logocap:=> loaded checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar' (epoch 140)
INFO:logocap:=> loaded checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar' (epoch 140)
1

Process finished with exit code 0
-- Process 1 terminated with the following error:
Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
    fn(i, *args)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 333, in main_worker
    do_train(cfg,model,train_loader,targets_encoder,optimizer,lr_scheduler, epoch,final_output_dir, writer_dict, logger,
  File "/media/wagnchogn/data/logocap-main/logocap/trainer.py", line 46, in do_train
    outputs_dict, loss_dict = model(images_batch, targets_batch)
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/nn/parallel/distributed.py", line 705, in forward
    output = self.module(*inputs[0], **kwargs[0])
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/media/wagnchogn/data/logocap-main/logocap/models/model.py", line 798, in forward
    return self.forward_training(images_batch, targets_batch)
  File "/media/wagnchogn/data/logocap-main/logocap/models/model.py", line 389, in forward_training
    decoder_output = self.decoder(images_batch, {'centermaps':heatmaps_hr[:,-1:].detach(),'offsets':offsets.detach()})
  File "/media/wagnchogn/data/logocap-main/logocap/models/decoder.py", line 136, in __call__
    allposes = self.pose_excitation(center_poses_xy)
  File "/media/wagnchogn/data/logocap-main/logocap/models/decoder.py", line 100, in pose_excitation
    allposes = center_poses_xy[:,:,:,None,None] + dxy
RuntimeError: The size of tensor a (5) must match the size of tensor b (9) at non-singleton dimension 2
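
In this broadcast, one operand is sized by the custom keypoint count (5) while the other still carries 9 entries along the same dimension, which suggests some part of the decoder (a config field or a buffer created when the model was built) was not updated for the new skeleton; the traceback alone does not show where the 9 comes from. A quick shape audit at the failing line narrows it down (names mirror the traceback; the real fix depends on how dxy is constructed):

# Sketch only: assert both operands agree on the keypoint dimension before broadcasting.
def pose_excitation_checked(center_poses_xy, dxy, num_keypoints):
    assert center_poses_xy.shape[2] == num_keypoints, center_poses_xy.shape
    assert dxy.shape[2] == num_keypoints, dxy.shape   # fails if dxy was built for a different skeleton
    return center_poses_xy[:, :, :, None, None] + dxy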
Traceback (most recent call last):
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 360, in <module>
    main()
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 191, in main
    mp.spawn(
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 230, in spawn
    return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 188, in start_processes
    while not context.join():
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 150, in join
    raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException: 

-- Process 0 terminated with the following error:
Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
    fn(i, *args)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 337, in main_worker
    perf_indicator = run_test(cfg, InferenceShell, model, test_loader, final_output_dir, logger, use_wandb = args.wandb)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 92, in run_test
    for i, (images, rgb, meta) in enumerate(test_loader):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 517, in __next__
    data = self._next_data()
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1199, in _next_data
    return self._process_data(data)
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1225, in _process_data
    data.reraise()
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/_utils.py", line 429, in reraise
    raise self.exc_type(msg)
RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 202, in _worker_loop
    data = fetcher.fetch(index)
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/media/wagnchogn/data/logocap-main/logocap/dataset/COCOTest.py", line 182, in __getitem__
    keypoints = torch.tensor(ann['keypoints'],dtype=torch.float32).reshape(-1,3)
RuntimeError: shape '[-1, 3]' is invalid for input of size 10



Process finished with exit code 1
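
COCO-format keypoints are a flat list of (x, y, v) triplets, so their length must be a multiple of 3; an annotation of length 10 suggests the custom JSON stores only (x, y) pairs for 5 keypoints. One way to fix it is at annotation-export time, adding a visibility flag per keypoint; the field layout follows the COCO spec, and the helper below is hypothetical.

# Sketch only: convert (x, y) pairs to COCO's flat [x1, y1, v1, x2, y2, v2, ...] layout.
def to_coco_keypoints(xy_pairs, visible=2):
    flat = []
    for x, y in xy_pairs:
        v = visible if (x > 0 or y > 0) else 0   # 0 = not labeled, 2 = labeled and visible
        flat.extend([float(x), float(y), v])
    return flat

Each annotation's num_keypoints field and the category's keypoints/skeleton lists should describe the same 5 keypoints.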
Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
    fn(i, *args)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 337, in main_worker
    perf_indicator = run_test(cfg, InferenceShell, model, test_loader, final_output_dir, logger, use_wandb = args.wandb)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 97, in run_test
    final_poses, scores = infshell(images, rgb, outputs, meta[0])
  File "/media/wagnchogn/data/logocap-main/logocap/inference.py", line 65, in __call__
    final_poses = final_poses[inds]
IndexError: arrays used as indices must be of integer (or boolean) type


Process finished with exit code 1
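
NumPy only accepts integer or boolean arrays for fancy indexing; a common cause of this error is that inds ends up as a float array, for example an empty np.array([]) (float64 by default) when no poses were kept on an image, or indices produced by float arithmetic. A defensive version of the selection (variable names mirror the traceback, the max_num threshold is a placeholder):

# Sketch only: guard against empty detections and keep index arrays integer-typed.
import numpy as np

def select_poses(final_poses, scores, max_num=30):
    scores = np.asarray(scores)
    if scores.size == 0:
        return final_poses[:0], scores           # nothing detected on this image
    inds = np.argsort(scores)[::-1][:max_num]    # argsort returns an integer array
    return final_poses[inds], scores[inds]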
Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
    fn(i, *args)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 337, in main_worker
    perf_indicator = run_test(cfg, InferenceShell, model, test_loader, final_output_dir, logger, use_wandb = args.wandb)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 136, in run_test
    coco_eval = coco.loadRes(results_path_gathered)
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/pycocotools/coco.py", line 328, in loadRes
    if 'caption' in anns[0]:
IndexError: list index out of range


Process finished with exit code 1
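
coco.loadRes fails with "list index out of range" when the gathered results file contains an empty list, i.e. no detections were written for any image, which is consistent with the inference failures above. Guarding the evaluation avoids the crash (the helper and path handling below are illustrative):

# Sketch only: skip COCO evaluation when the results file has no detections.
import json

def safe_load_res(coco, results_path):
    with open(results_path) as f:
        results = json.load(f)
    if len(results) == 0:
        print(f'No detections in {results_path}; skipping evaluation.')
        return None
    return coco.loadRes(results_path)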
Traceback (most recent call last):
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 360, in <module>
    main()
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 200, in main
    main_worker(
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 337, in main_worker
    perf_indicator = run_test(cfg, InferenceShell, model, test_loader, final_output_dir, logger, use_wandb = args.wandb)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 142, in run_test
    evaluator.evaluate()
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/pycocotools/cocoeval.py", line 148, in evaluate
    self.ious = {(imgId, catId): computeIoU(imgId, catId) \
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/pycocotools/cocoeval.py", line 148, in <dictcomp>
    self.ious = {(imgId, catId): computeIoU(imgId, catId) \
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/pycocotools/cocoeval.py", line 229, in computeOks
    e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
ValueError: operands could not be broadcast together with shapes (5,) (17,) 
self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
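
computeOks broadcasts the per-keypoint sigmas against the predicted keypoints, so 5 predicted keypoints cannot be evaluated with the 17 COCO sigmas shown in the line above; COCOeval needs one sigma per keypoint of the custom skeleton. With a pycocotools version where kpt_oks_sigmas lives on Params (as in the pasted line), the sigmas can be overridden from the evaluation script; the sigma values and file paths below are placeholders.

# Sketch only: run keypoint evaluation with sigmas sized for the 5-keypoint skeleton.
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO('path/to/mouse_val_annotations.json')   # hypothetical ground-truth file
coco_dt = coco_gt.loadRes('path/to/results.json')      # hypothetical detections file
coco_eval = COCOeval(coco_gt, coco_dt, iouType='keypoints')
coco_eval.params.kpt_oks_sigmas = np.array([0.5, 0.5, 0.5, 0.5, 0.5]) / 10.0  # one sigma per keypoint, placeholder values
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()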
/home/wagnchogn/anaconda3/envs/logocap-temp/bin/python /media/wagnchogn/data/logocap-main/tools/train_net.py
Use GPU: 0 for training
Init process group: dist_url: tcp://127.0.0.1:23456, world_size: 2, rank: 0
Use GPU: 1 for training
Init process group: dist_url: tcp://127.0.0.1:23456, world_size: 2, rank: 1
INFO:logocap.models.backbone:=> init weights from normal distribution
INFO:logocap.models.backbone:=> init weights from normal distribution
INFO:logocap.models.backbone:=> loading pretrained model /media/wagnchogn/data/logocap-main/weights/imagenet/hrnet_w32-36af842e.pth
INFO:logocap.models.backbone:=> loading pretrained model /media/wagnchogn/data/logocap-main/weights/imagenet/hrnet_w32-36af842e.pth
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
10
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
loading annotations into memory...
INFO:logocap.dataset.COCODataset:=> classes: ['__background__', 'mouse']
INFO:logocap:=> loading checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar'
INFO:logocap.dataset.COCODataset:=> classes: ['__background__', 'mouse']
INFO:logocap:=> loading checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar'
Done (t=0.01s)
creating index...
index created!
10
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
INFO:logocap:=> loaded checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar' (epoch 140)
INFO:logocap:=> loaded checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar' (epoch 140)

Process finished with exit code 0
/home/wagnchogn/anaconda3/envs/logocap-temp/bin/python /home/wagnchogn/Downloads/pycharm-professional-2021.2/pycharm-2021.2/plugins/python/helpers/pydev/pydevd.py --multiproc --qt-support=auto --client 127.0.0.1 --port 35345 --file /media/wagnchogn/data/logocap-main/tools/train_net.py
Connected to pydev debugger (build 212.4746.96)
Use GPU: 0 for training
2024-06-27 14:52:05,543 logocap.models.backbone INFO: => init weights from normal distribution
2024-06-27 14:52:05,996 logocap.models.backbone INFO: => loading pretrained model /media/wagnchogn/data/logocap-main/weights/imagenet/hrnet_w32-36af842e.pth
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
2024-06-27 14:52:09,176 logocap.dataset.COCODataset INFO: => classes: ['__background__', 'mouse']
10
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
2024-06-27 14:52:09,178 logocap INFO: => loading checkpoint 'output/logocap-hrnet-w32-mouse/checkpoint.pth.tar'
Traceback (most recent call last):
  File "/home/wagnchogn/Downloads/pycharm-professional-2021.2/pycharm-2021.2/plugins/python/helpers/pydev/pydevd.py", line 1483, in _exec
    pydev_imports.execfile(file, globals, locals)  # execute the script
  File "/home/wagnchogn/Downloads/pycharm-professional-2021.2/pycharm-2021.2/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 360, in <module>
    main()
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 200, in main
    main_worker(
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 320, in main_worker
    model.load_state_dict(checkpoint['state_dict'])
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1223, in load_state_dict
    raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
RuntimeError: Error(s) in loading state_dict for Model:
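
The load_state_dict error (its key/shape details are cut off in this log) means the checkpoint in output/logocap-hrnet-w32-mouse was saved with layers sized for a different keypoint count than the current model, so auto-resume cannot restore it strictly. The simplest option is to delete or move the stale checkpoint.pth.tar and start again from the ImageNet backbone; if the old weights are worth keeping, a shape-filtered load is possible (a generic PyTorch pattern, not the repo's own resume logic, and the re-sized heads still need retraining):

# Sketch only: load only parameters whose names and shapes match the current model.
import torch

checkpoint = torch.load('output/logocap-hrnet-w32-mouse/checkpoint.pth.tar', map_location='cpu')
state_dict = checkpoint['state_dict']
model_dict = model.state_dict()          # `model` is the freshly built Model instance
filtered = {k: v for k, v in state_dict.items()
            if k in model_dict and v.shape == model_dict[k].shape}
print(f'keeping {len(filtered)} / {len(model_dict)} tensors from the checkpoint')
model_dict.update(filtered)
model.load_state_dict(model_dict)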
/home/wagnchogn/anaconda3/envs/logocap-temp/bin/python /media/wagnchogn/data/logocap-main/tools/train_net.py
Use GPU: 1 for training
Init process group: dist_url: tcp://127.0.0.1:23456, world_size: 2, rank: 1
Use GPU: 0 for training
Init process group: dist_url: tcp://127.0.0.1:23456, world_size: 2, rank: 0
INFO:logocap.models.backbone:=> init weights from normal distribution
INFO:logocap.models.backbone:=> init weights from normal distribution
INFO:logocap.models.backbone:=> loading pretrained model /media/wagnchogn/data/logocap-main/weights/imagenet/hrnet_w32-36af842e.pth
INFO:logocap.models.backbone:=> loading pretrained model /media/wagnchogn/data/logocap-main/weights/imagenet/hrnet_w32-36af842e.pth
loading annotations into memory...
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
10
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
Done (t=0.00s)
creating index...
index created!
10
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
INFO:logocap.dataset.COCODataset:=> classes: ['__background__', 'mouse']
INFO:logocap.dataset.COCODataset:=> classes: ['__background__', 'mouse']
Traceback (most recent call last):
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 360, in <module>
    main()
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 191, in main
    mp.spawn(
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 230, in spawn
    return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 188, in start_processes
    while not context.join():
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 150, in join
    raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException: 

-- Process 0 terminated with the following error:
Traceback (most recent call last):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
    fn(i, *args)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 333, in main_worker
    do_train(cfg,model,train_loader,targets_encoder,optimizer,lr_scheduler, epoch,final_output_dir, writer_dict, logger,
  File "/media/wagnchogn/data/logocap-main/logocap/trainer.py", line 42, in do_train
    for i, (images_batch, targets_batch, joints_batch, areas_batch, meta_batch) in enumerate(data_loader):
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 517, in __next__
    data = self._next_data()
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 557, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "/media/wagnchogn/data/logocap-main/logocap/dataset/COCOKeypoints.py", line 131, in __getitem__
    target_t, ignored_t = self.heatmap_generator[0](joints_list[0],self.sigma[0][0],self.center_sigma,self.bg_weight[0][0])
  File "/media/wagnchogn/data/logocap-main/logocap/dataset/target_generators/target_generators.py", line 26, in __call__
    assert self.num_joints == joints.shape[1], \
AssertionError: the number of joints should be 10
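
This last assertion comes from the heatmap target generator: it was constructed with num_joints = 10 while the joints array produced by the dataset pipeline has a different per-instance joint count, so the keypoint count is still inconsistent across the config. In the end every place that encodes the skeleton has to agree: the dataset's joint count, the flip index, the heatmap/offset head sizes, the decoder, and the OKS sigmas. A small startup check (the helper below is hypothetical) makes such mismatches fail fast:

# Sketch only: verify the keypoint count is consistent before training starts.
def check_keypoint_config(num_joints_cfg, flip_index, oks_sigmas, sample_joints):
    # sample_joints: one joints array from the dataset, shape (num_people, num_joints, 3)
    n = sample_joints.shape[1]
    assert num_joints_cfg == n, (num_joints_cfg, n)
    assert len(flip_index) == n, (len(flip_index), n)
    assert len(oks_sigmas) == n, (len(oks_sigmas), n)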
