evaluate

from pathlib import Path

import numpy as np
import pandas as pd
from tqdm import tqdm

# NOTE: return_train_network_path is used below; the module that provides it differs
# across DeepLabCut versions (here assumed to be pose_estimation_tensorflow.training).
from deeplabcut.pose_estimation_tensorflow.training import return_train_network_path

def pairwisedistances(DataCombined, scorer1, scorer2, pcutoff=-1, bodyparts=None):
    """Calculates the pairwise Euclidean distance metric over body parts vs. images"""
    mask = DataCombined[scorer2].xs("likelihood", level=1, axis=1) >= pcutoff
    if bodyparts is None:
        Pointwisesquareddistance = (DataCombined[scorer1] - DataCombined[scorer2]) ** 2
        RMSE = np.sqrt(
            Pointwisesquareddistance.xs("x", level=1, axis=1)
            + Pointwisesquareddistance.xs("y", level=1, axis=1)
        )  # Euclidean distance (proportional to RMSE)
        return RMSE, RMSE[mask]
    else:
        Pointwisesquareddistance = (
            DataCombined[scorer1][bodyparts] - DataCombined[scorer2][bodyparts]
        ) ** 2
        RMSE = np.sqrt(
            Pointwisesquareddistance.xs("x", level=1, axis=1)
            + Pointwisesquareddistance.xs("y", level=1, axis=1)
        )  # Euclidean distance (proportional to RMSE)
        return RMSE, RMSE[mask]
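

# A minimal usage sketch (illustrative, not part of the original module). The file name
# and the two scorer names below are hypothetical; the DataFrame is expected to hold both
# the manual labels and the network predictions as top-level column keys.
def _example_pairwisedistances():
    combined = pd.read_hdf("combined_labels_and_predictions.h5")  # hypothetical file
    rmse, rmse_pcutoff = pairwisedistances(combined, "human", "DLC_model", pcutoff=0.6)
    # Mean error in pixels over all frames and body parts (NaNs from missing labels are ignored).
    print("mean error:", np.nanmean(rmse.values.flatten()))
    print("mean error (p >= 0.6):", np.nanmean(rmse_pcutoff.values.flatten()))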


def distance(v, w):
    """Euclidean distance between two points given as coordinate arrays."""
    return np.sqrt(np.sum((v - w) ** 2))


def calculatepafdistancebounds(
    config, shuffle=0, trainingsetindex=0, modelprefix="", numdigits=0, onlytrain=False
):
    """Computes, for every edge of the part affinity field graph, the distances (in
    stride units) between the connected body parts within and across individuals, and
    writes the resulting bounds to the model's test/inferencebounds.yaml file
    (multi-animal projects only)."""
    import os

    from deeplabcut.utils import auxiliaryfunctions, auxfun_multianimal
    from deeplabcut.pose_estimation_tensorflow.config import load_config

    # Read the project config file.
    cfg = auxiliaryfunctions.read_config(config)

    if cfg["multianimalproject"]:
        (
            individuals,
            uniquebodyparts,
            multianimalbodyparts,
        ) = auxfun_multianimal.extractindividualsandbodyparts(cfg)

        # Load the human-annotated data
        trainingsetfolder = auxiliaryfunctions.get_training_set_folder(cfg)
        trainFraction = cfg["TrainingFraction"][trainingsetindex]
        modelfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.get_model_folder(
                    trainFraction, shuffle, cfg, modelprefix=modelprefix
                )
            ),
        )

        # Load meta data & annotations
        Data = pd.read_hdf(
            os.path.join(
                cfg["project_path"],
                str(trainingsetfolder),
                "CollectedData_" + cfg["scorer"] + ".h5",
            )
        )[cfg["scorer"]]

        path_train_config, path_test_config, _ = return_train_network_path(
            config=config,
            shuffle=shuffle,
            trainingsetindex=trainingsetindex,
            modelprefix=modelprefix,
        )
        train_pose_cfg = load_config(str(path_train_config))
        test_pose_cfg = load_config(str(path_test_config))

        _, trainIndices, _, _ = auxiliaryfunctions.load_metadata(
            Path(cfg["project_path"]) / train_pose_cfg["metadataset"]
        )

        # get the graph!
        partaffinityfield_graph = test_pose_cfg["partaffinityfield_graph"]
        jointnames = [
            test_pose_cfg["all_joints_names"][i]
            for i in range(len(test_pose_cfg["all_joints"]))
        ]
        path_inferencebounds_config = (
            Path(modelfolder) / "test" / "inferencebounds.yaml"
        )
        inferenceboundscfg = {}
        for pi, edge in enumerate(partaffinityfield_graph):
            j1, j2 = jointnames[edge[0]], jointnames[edge[1]]
            ds_within = []
            ds_across = []
            for ind in individuals:
                for ind2 in individuals:
                    if ind != "single" and ind2 != "single":
                        if (ind, j1, "x") in Data.keys() and (
                            ind2,
                            j2,
                            "y",
                        ) in Data.keys():
                            distances = (
                                np.sqrt(
                                    (Data[ind, j1, "x"] - Data[ind2, j2, "x"]) ** 2
                                    + (Data[ind, j1, "y"] - Data[ind2, j2, "y"]) ** 2
                                )
                                / test_pose_cfg["stride"]
                            )
                        else:
                            distances = None

                        if distances is not None:
                            if onlytrain:
                                distances = distances.iloc[trainIndices]
                            if ind == ind2:
                                ds_within.extend(distances.values.flatten())
                            else:
                                ds_across.extend(distances.values.flatten())

            edgeencoding = str(edge[0]) + "_" + str(edge[1])
            inferenceboundscfg[edgeencoding] = {}
            if len(ds_within) > 0:
                inferenceboundscfg[edgeencoding]["intra_max"] = str(
                    round(np.nanmax(ds_within), numdigits)
                )
                inferenceboundscfg[edgeencoding]["intra_min"] = str(
                    round(np.nanmin(ds_within), numdigits)
                )
            else:
                inferenceboundscfg[edgeencoding]["intra_max"] = str(
                    1e5
                )  # large number (larger than any image diameter)
                inferenceboundscfg[edgeencoding]["intra_min"] = str(0)

            # NOTE: the inter-animal distances are currently not used, but are interesting to compare to intra_*
            if len(ds_across) > 0:
                inferenceboundscfg[edgeencoding]["inter_max"] = str(
                    round(np.nanmax(ds_across), numdigits)
                )
                inferenceboundscfg[edgeencoding]["inter_min"] = str(
                    round(np.nanmin(ds_across), numdigits)
                )
            else:
                inferenceboundscfg[edgeencoding]["inter_max"] = str(
                    1e5
                )  # large number (larger than image diameters in typical experiments)
                inferenceboundscfg[edgeencoding]["inter_min"] = str(0)

        auxiliaryfunctions.write_plainconfig(
            str(path_inferencebounds_config), dict(inferenceboundscfg)
        )
        return inferenceboundscfg
    else:
        # PAF distance bounds only apply to multi-animal projects.
        print("You might as well bring owls to Athens.")
        return {}
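

# A small worked sketch (illustrative data only) of the per-edge bookkeeping above: for a
# single PAF edge, intra-animal distances come from the same individual and inter-animal
# distances from different individuals; the recorded bounds are simply their min/max
# (already expressed in stride units), rounded to `numdigits`.
def _example_edge_bounds(numdigits=0):
    ds_within = [1.2, 2.5, 1.8]  # hypothetical same-animal distances
    ds_across = [6.4, 9.1, 7.7]  # hypothetical cross-animal distances
    bounds = {
        "intra_min": str(round(np.nanmin(ds_within), numdigits)),
        "intra_max": str(round(np.nanmax(ds_within), numdigits)),
        "inter_min": str(round(np.nanmin(ds_across), numdigits)),
        "inter_max": str(round(np.nanmax(ds_across), numdigits)),
    }
    print(bounds)  # e.g. {'intra_min': '1.0', 'intra_max': '2.0', 'inter_min': '6.0', 'inter_max': '9.0'}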


def Plotting(
    cfg, comparisonbodyparts, DLCscorer, trainIndices, DataCombined, foldername
):
    """Plots ground-truth labels and model predictions for every frame and saves the
    labeled images to `foldername`."""
    from deeplabcut.utils import visualization

    colors = visualization.get_cmap(len(comparisonbodyparts), name=cfg["colormap"])
    NumFrames = np.size(DataCombined.index)
    fig, ax = visualization.create_minimal_figure()
    for ind in tqdm(np.arange(NumFrames)):
        ax = visualization.plot_and_save_labeled_frame(
            DataCombined,
            ind,
            trainIndices,
            cfg,
            colors,
            comparisonbodyparts,
            DLCscorer,
            foldername,
            fig,
            ax,
        )
        visualization.erase_artists(ax)
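

# A hedged usage sketch (all file names, body parts, and the scorer name are hypothetical):
# Plotting expects the combined ground-truth/prediction DataFrame built as in
# return_evaluate_network_data below, and writes one labeled image per frame into foldername.
def _example_plotting(cfg, train_indices):
    ground_truth = pd.read_hdf("CollectedData_human.h5")       # hypothetical file
    predictions = pd.read_hdf("DLC_model-snapshot-100000.h5")  # hypothetical file
    combined = pd.concat([ground_truth.T, predictions.T], axis=0).T
    Plotting(
        cfg, ["nose", "tailbase"], "DLC_model", train_indices, combined, "evaluation-plots"
    )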


def return_evaluate_network_data(
    config,
    shuffle=0,
    trainingsetindex=0,
    comparisonbodyparts="all",
    Snapindex=None,
    rescale=False,
    fulldata=False,
    show_errors=True,
    modelprefix="",
    returnjustfns=True,
):
    import os

    from deeplabcut.pose_estimation_tensorflow.config import load_config
    from deeplabcut.utils import auxiliaryfunctions

    start_path = os.getcwd()
    # Read the project config file.
    cfg = auxiliaryfunctions.read_config(config)

    # Load the human-annotated data
    trainingsetfolder = auxiliaryfunctions.get_training_set_folder(cfg)
    # Data=pd.read_hdf(os.path.join(cfg["project_path"],str(trainingsetfolder),'CollectedData_' + cfg["scorer"] + '.h5'),'df_with_missing')

    # Get list of body parts to evaluate network for
    comparisonbodyparts = (
        auxiliaryfunctions.intersection_of_body_parts_and_ones_given_by_user(
            cfg, comparisonbodyparts
        )
    )
    ##################################################
    # Load data...
    ##################################################
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    modelfolder = os.path.join(
        cfg["project_path"],
        str(
            auxiliaryfunctions.get_model_folder(
                trainFraction, shuffle, cfg, modelprefix=modelprefix
            )
        ),
    )
    path_train_config, path_test_config, _ = return_train_network_path(
        config=config,
        shuffle=shuffle,
        trainingsetindex=trainingsetindex,
        modelprefix=modelprefix,
    )

    try:
        test_pose_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
        raise FileNotFoundError(
            "It seems the model for shuffle %s and trainFraction %s does not exist."
            % (shuffle, trainFraction)
        )

    train_pose_cfg = load_config(str(path_train_config))
    # Load meta data
    data, trainIndices, testIndices, _ = auxiliaryfunctions.load_metadata(
        Path(cfg["project_path"]) / train_pose_cfg["metadataset"],
    )

    ########################### RESCALING (to global scale)
    if rescale == True:
        scale = test_pose_cfg["global_scale"]
        print("Rescaling Data to ", scale)
        Data = (
            pd.read_hdf(
                os.path.join(
                    cfg["project_path"],
                    str(trainingsetfolder),
                    "CollectedData_" + cfg["scorer"] + ".h5",
                )
            )
            * scale
        )
    else:
        scale = 1
        Data = pd.read_hdf(
            os.path.join(
                cfg["project_path"],
                str(trainingsetfolder),
                "CollectedData_" + cfg["scorer"] + ".h5",
            )
        )

    evaluationfolder = os.path.join(
        cfg["project_path"],
        str(
            auxiliaryfunctions.get_evaluation_folder(
                trainFraction, shuffle, cfg, modelprefix=modelprefix
            )
        ),
    )
    # Check which snapshots are available and sort them by # iterations
    Snapshots = np.array(
        [
            fn.split(".")[0]
            for fn in os.listdir(os.path.join(str(modelfolder), "train"))
            if "index" in fn
        ]
    )

    if len(Snapshots) == 0:
        print(
            "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
            % (shuffle, trainFraction)
        )
        snapindices = []
    else:
        increasing_indices = np.argsort([int(m.split("-")[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]
        if Snapindex is None:
            Snapindex = cfg["snapshotindex"]

        if Snapindex == -1:
            snapindices = [-1]
        elif Snapindex == "all":
            snapindices = range(len(Snapshots))
        elif Snapindex < len(Snapshots):
            snapindices = [Snapindex]
        else:
            print(
                "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
            )
            snapindices = []  # avoid a NameError below when the choice is invalid

    DATA = []
    results = []
    resultsfns = []
    for snapindex in snapindices:
        test_pose_cfg["init_weights"] = os.path.join(
            str(modelfolder), "train", Snapshots[snapindex]
        )  # setting weights to corresponding snapshot.
        trainingsiterations = (
            test_pose_cfg["init_weights"].split(os.sep)[-1]
        ).split("-")[-1]  # read how many training iterations that corresponds to.

        # name for deeplabcut net (based on its parameters)
        DLCscorer, DLCscorerlegacy = auxiliaryfunctions.get_scorer_name(
            cfg, shuffle, trainFraction, trainingsiterations, modelprefix=modelprefix
        )
        if not returnjustfns:
            print(
                "Retrieving ",
                DLCscorer,
                " with # of trainingiterations:",
                trainingsiterations,
            )

        (
            notanalyzed,
            resultsfilename,
            DLCscorer,
        ) = auxiliaryfunctions.check_if_not_evaluated(
            str(evaluationfolder), DLCscorer, DLCscorerlegacy, Snapshots[snapindex]
        )
        # resultsfilename=os.path.join(str(evaluationfolder),DLCscorer + '-' + str(Snapshots[snapindex])+  '.h5') # + '-' + str(snapshot)+  ' #'-' + Snapshots[snapindex]+  '.h5')
        print(resultsfilename)
        resultsfns.append(resultsfilename)
        if not returnjustfns:
            if not notanalyzed and os.path.isfile(resultsfilename):  # data exists..
                DataMachine = pd.read_hdf(resultsfilename)
                DataCombined = pd.concat([Data.T, DataMachine.T], axis=0).T
                RMSE, RMSEpcutoff = pairwisedistances(
                    DataCombined,
                    cfg["scorer"],
                    DLCscorer,
                    cfg["pcutoff"],
                    comparisonbodyparts,
                )

                testerror = np.nanmean(RMSE.iloc[testIndices].values.flatten())
                trainerror = np.nanmean(RMSE.iloc[trainIndices].values.flatten())
                testerrorpcutoff = np.nanmean(
                    RMSEpcutoff.iloc[testIndices].values.flatten()
                )
                trainerrorpcutoff = np.nanmean(
                    RMSEpcutoff.iloc[trainIndices].values.flatten()
                )
                if show_errors == True:
                    print(
                        "Results for",
                        trainingsiterations,
                        " training iterations:",
                        int(100 * trainFraction),
                        shuffle,
                        "train error:",
                        np.round(trainerror, 2),
                        "pixels. Test error:",
                        np.round(testerror, 2),
                        " pixels.",
                    )
                    print(
                        "With pcutoff of",
                        cfg["pcutoff"],
                        " train error:",
                        np.round(trainerrorpcutoff, 2),
                        "pixels. Test error:",
                        np.round(testerrorpcutoff, 2),
                        "pixels",
                    )
                    print("Snapshot", Snapshots[snapindex])

                r = [
                    trainingsiterations,
                    int(100 * trainFraction),
                    shuffle,
                    np.round(trainerror, 2),
                    np.round(testerror, 2),
                    cfg["pcutoff"],
                    np.round(trainerrorpcutoff, 2),
                    np.round(testerrorpcutoff, 2),
                    Snapshots[snapindex],
                    scale,
                    test_pose_cfg["net_type"],
                ]
                results.append(r)
            else:
                print("Model not trained/evaluated!")
                continue  # no results for this snapshot, so skip the full-data collection below
            if fulldata == True:
                DATA.append(
                    [
                        DataMachine,
                        Data,
                        data,
                        trainIndices,
                        testIndices,
                        trainFraction,
                        DLCscorer,
                        comparisonbodyparts,
                        cfg,
                        evaluationfolder,
                        Snapshots[snapindex],
                    ]
                )

    os.chdir(start_path)
    if returnjustfns:
        return resultsfns
    else:
        if fulldata == True:
            return DATA, results
        else:
            return results
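

# A hedged usage sketch (the config path is hypothetical). With returnjustfns=False and a
# trained shuffle, one result row is returned per evaluated snapshot, in the order
# assembled above: [iterations, train %, shuffle, train error, test error, pcutoff,
# train error (pcutoff), test error (pcutoff), snapshot, scale, net_type].
def _example_return_evaluate_network_data():
    results = return_evaluate_network_data(
        "/path/to/project/config.yaml",  # hypothetical project config
        shuffle=1,
        Snapindex="all",
        returnjustfns=False,
    )
    for row in results:
        print("snapshot:", row[8], "train/test error (px):", row[3], "/", row[4])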
