def _benchmark_paf_graphs(
    config,
    inference_cfg,
    data,
    paf_inds,
    greedy=False,
    add_discarded=True,
    identity_only=False,
    calibration_file="",
    oks_sigma=0.1,
    margin=0,
    symmetric_kpts=None,
    split_inds=None,
):
    """Benchmark animal-assembly quality for several candidate PAF graphs.

    For each candidate set of part-affinity-field (PAF) edges, detections
    are assembled into individuals and compared against ground truth via
    (a) OKS metrics from ``evaluate_assembly`` and (b) per-frame "miss"
    (fraction of ground-truth detections left unassembled) and "purity"
    (identity agreement, from a contingency matrix) scores, computed only
    on frames that contain two or more annotated animals.

    Parameters
    ----------
    config :
        Project configuration; only used to look up the multi-animal
        bodyparts via ``auxfun_multianimal.extractindividualsandbodyparts``.
    inference_cfg : dict
        Read for "topktoretain", "pcutoff" (default 0.1), "pafthreshold"
        (default 0.1), and "greedy_oks" (default False).
    data : dict
        Maps image names to dicts with "prediction" and "groundtruth"
        entries, plus a top-level "metadata" key.
        NOTE(review): "metadata" is popped below, so the caller's dict is
        mutated in place.
    paf_inds :
        Candidate collections of PAF edge indices; benchmarked in order of
        increasing size.
    greedy, add_discarded, identity_only : bool
        Forwarded to ``Assembler``.
    calibration_file : str
        If non-empty, passed to ``Assembler.calibrate``.
    oks_sigma, margin, symmetric_kpts :
        Forwarded to ``evaluate_assembly``.
    split_inds : optional
        If given, a list of frame-index collections (e.g. train/test
        splits); OKS metrics are then computed separately per split
        instead of once over all frames.

    Returns
    -------
    tuple
        ``(all_scores, summary, all_metrics, all_assemblies)`` where
        ``all_scores`` is a list of ``(scores, paf)`` pairs (``scores`` is
        an ``(n_frames, 2)`` array of miss/purity values, NaN for skipped
        frames), ``summary`` is the transposed per-graph-size mean/std
        DataFrame, ``all_metrics`` holds the OKS results per graph, and
        ``all_assemblies`` holds ``(assemblies, unique, imnames)`` per
        graph.
    """
    metadata = data.pop("metadata")
    # Keep only multi-animal bodyparts, preserving the order in which they
    # appear in the data's joint list.
    multi_bpts_orig = auxfun_multianimal.extractindividualsandbodyparts(config)[2]
    multi_bpts = [j for j in metadata["all_joints_names"] if j in multi_bpts_orig]
    n_multi = len(multi_bpts)
    # The Assembler consumes only the predictions (plus metadata).
    data_ = {"metadata": metadata}
    for k, v in data.items():
        data_[k] = v["prediction"]
    ass = Assembler(
        data_,
        max_n_individuals=inference_cfg["topktoretain"],
        n_multibodyparts=n_multi,
        greedy=greedy,
        pcutoff=inference_cfg.get("pcutoff", 0.1),
        min_affinity=inference_cfg.get("pafthreshold", 0.1),
        add_discarded=add_discarded,
        identity_only=identity_only,
    )
    if calibration_file:
        ass.calibrate(calibration_file)
    params = ass.metadata
    image_paths = params["imnames"]
    bodyparts = params["joint_names"]
    # Recover the (individuals, bodyparts) index from the first frame's
    # ground truth; element [2] is presumably a pandas Series of annotated
    # coordinates with a (individuals, bodyparts, coords) MultiIndex —
    # TODO confirm against the caller.
    idx = (
        data[image_paths[0]]["groundtruth"][2]
        .unstack("coords")
        .reindex(bodyparts, level="bodyparts")
        .index
    )
    # Drop "single"-animal (unique) bodyparts: only multi-animal parts are
    # scored here.
    mask_multi = idx.get_level_values("individuals") != "single"
    if not mask_multi.all():
        idx = idx.drop("single", level="individuals")
    individuals = idx.get_level_values("individuals").unique()
    n_individuals = len(individuals)
    map_ = dict(zip(individuals, range(n_individuals)))  # name -> integer id
    # Form ground truth beforehand
    ground_truth = []
    for i, imname in enumerate(image_paths):
        temp = data[imname]["groundtruth"][2].reindex(multi_bpts, level="bodyparts")
        ground_truth.append(temp.to_numpy().reshape((-1, 2)))
    ground_truth = np.stack(ground_truth)  # (n_frames, n_detections, 2)
    # Append a unit confidence column and reshape per individual so the
    # ground truth can be parsed like model predictions.
    temp = np.ones((*ground_truth.shape[:2], 3))
    temp[..., :2] = ground_truth
    temp = temp.reshape((temp.shape[0], n_individuals, -1, 3))
    ass_true_dict = _parse_ground_truth_data(temp)
    # Insert the integer identity as the third column of each detection row,
    # giving (x, y, id) per keypoint.
    ids = np.vectorize(map_.get)(idx.get_level_values("individuals").to_numpy())
    ground_truth = np.insert(ground_truth, 2, ids, axis=2)
    # Assemble animals on the full set of detections
    paf_inds = sorted(paf_inds, key=len)  # benchmark smaller graphs first
    n_graphs = len(paf_inds)
    all_scores = []
    all_metrics = []
    all_assemblies = []
    for j, paf in enumerate(paf_inds, start=1):
        print(f"Graph {j}|{n_graphs}")
        # Re-run assembly with this candidate subset of PAF edges; the
        # Assembler is reused statefully across iterations.
        ass.paf_inds = paf
        ass.assemble()
        all_assemblies.append((ass.assemblies, ass.unique, ass.metadata["imnames"]))
        # OKS-based evaluation, optionally restricted to each data split.
        if split_inds is not None:
            oks = []
            for inds in split_inds:
                ass_gt = {k: v for k, v in ass_true_dict.items() if k in inds}
                oks.append(
                    evaluate_assembly(
                        ass.assemblies,
                        ass_gt,
                        oks_sigma,
                        margin=margin,
                        symmetric_kpts=symmetric_kpts,
                        greedy_matching=inference_cfg.get("greedy_oks", False),
                    )
                )
        else:
            oks = evaluate_assembly(
                ass.assemblies,
                ass_true_dict,
                oks_sigma,
                margin=margin,
                symmetric_kpts=symmetric_kpts,
                greedy_matching=inference_cfg.get("greedy_oks", False),
            )
        all_metrics.append(oks)
        # Per-frame scores: column 0 = miss rate, column 1 = identity purity.
        scores = np.full((len(image_paths), 2), np.nan)
        for i, imname in enumerate(tqdm(image_paths)):
            gt = ground_truth[i]
            gt = gt[~np.isnan(gt).any(axis=1)]  # drop unannotated keypoints
            if len(np.unique(gt[:, 2])) < 2:  # Only consider frames with 2+ animals
                continue
            # Count the number of unassembled bodyparts
            n_dets = len(gt)
            animals = ass.assemblies.get(i)
            if animals is None:
                # Nothing assembled: every annotated detection was missed.
                if n_dets:
                    scores[i, 0] = 1
            else:
                # Tag each assembled keypoint row with its assembly index
                # so identities can be compared after matching.
                animals = [
                    np.c_[animal.data, np.ones(animal.data.shape[0]) * n]
                    for n, animal in enumerate(animals)
                ]
                hyp = np.concatenate(animals)
                hyp = hyp[~np.isnan(hyp).any(axis=1)]
                scores[i, 0] = max(0, (n_dets - hyp.shape[0]) / n_dets)
                # Match ground-truth keypoints to their closest predicted
                # keypoints; purity is the fraction of matches whose
                # predicted identity agrees with the dominant true identity.
                neighbors = _find_closest_neighbors(gt[:, :2], hyp[:, :2])
                valid = neighbors != -1
                id_gt = gt[valid, 2]
                id_hyp = hyp[neighbors[valid], -1]
                mat = contingency_matrix(id_gt, id_hyp)
                purity = mat.max(axis=0).sum() / mat.sum()
                scores[i, 1] = purity
        all_scores.append((scores, paf))
    # Aggregate miss/purity per graph size (mean and std over frames).
    dfs = []
    for score, inds in all_scores:
        df = pd.DataFrame(score, columns=["miss", "purity"])
        df["ngraph"] = len(inds)
        dfs.append(df)
    big_df = pd.concat(dfs)
    group = big_df.groupby("ngraph")
    return (all_scores, group.agg(["mean", "std"]).T, all_metrics, all_assemblies)
# NOTE(review): removed stray web-scrape artifacts that followed the function
# (a bare identifier "paf_graphs" and a CSDN blog publish-date footer); they
# were not part of the original module and would break it at import time.