
"""
    File name         : ModifiedFunction.py
    Description       : 改写 stonesoup 中的函数
                        1:修改 PassiveElevationBearing 增加检测率和杂波,保持类名不变
                        2:用于增加杂波的仿真

    Compile command   : python ModifiedFunction.py
    Author            : Haoger
    Date created      : 10/05/2022
    code Version      : v1.0
"""
import datetime
import random
from collections.abc import Sized  # moved out of 'collections' in Python 3.10

import numpy as np
from typing import Sequence, Set, Union, Optional, Tuple, Callable, Collection
from ordered_set import OrderedSet
from scipy.linalg import inv
from scipy.spatial import distance
from stonesoup.base import Property
from stonesoup.buffered_generator import BufferedGenerator
from stonesoup.dataassociator import DataAssociator
from stonesoup.deleter import Deleter
from stonesoup.functions import pol2cart, cart2pol, cart2sphere, sphere2cart, cart2angles
from stonesoup.hypothesiser import Hypothesiser
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.initiator import Initiator
from stonesoup.measures import Measure
from stonesoup.models.base import ReversibleModel, Model
from stonesoup.models.clutter import ClutterModel
from stonesoup.models.measurement.nonlinear import CartesianToElevationBearing, CartesianToBearingRange, \
    NonLinearGaussianMeasurement
from stonesoup.models.transition import TransitionModel
from stonesoup.platform import Platform
from stonesoup.predictor import Predictor
from stonesoup.reader import GroundTruthReader
from stonesoup.sensor.sensor import Sensor
from stonesoup.simulator import DetectionSimulator
from stonesoup.simulator.simple import SingleTargetGroundTruthSimulator
from stonesoup.types import Type
from stonesoup.types.angle import Bearing, Elevation
from stonesoup.types.array import CovarianceMatrix, StateVector, StateVectors
from stonesoup.types.detection import TrueDetection, MissedDetection, Clutter, Detection
from stonesoup.types.groundtruth import GroundTruthState, GroundTruthPath
from stonesoup.types.hypothesis import SingleHypothesis, CompositeHypothesis, SingleProbabilityHypothesis
# from stonesoup.types.multihypothesis import MultipleHypothesis
from Modifiedmultihypothesis import MultipleHypothesis
from stonesoup.types.numeric import Probability
from stonesoup.types.prediction import Prediction
from stonesoup.types.state import CompositeState, State, GaussianState
from stonesoup.types.track import Track
from stonesoup.types.update import Update, CompositeUpdate
import warnings
from scipy.stats import poisson
from abc import ABC

from stonesoup.updater import Updater
from sympy import Range
from PassiveInitiators import PassiveSimpleMeasurementInitiator

"""
    计算向量的相似度
"""


def similarityCalculation(state_vector_1, state_vector_2, weights):  # An index argument could be added to select which components to compare; just adjust the ranges of i and j
    # First check that the two vectors have equal length
    if len(state_vector_1) != len(state_vector_2):
        raise ValueError("The lengths of the state vectors are not equal.")
    else:
        # Per-component difference scores
        difference_scores = []
        similarity = 0
        for i in range(0, len(state_vector_1)):
            temp = 0
            difference = 1 - abs(state_vector_1[i] - state_vector_2[i]) / \
                         (state_vector_1[i] + state_vector_2[i])
            temp += weights[i] * difference  # weight the similarity of the two corresponding components
            difference_scores.append(temp)
            similarity += difference_scores[i]
    return similarity


# print(similarityCalculation([1, 2, 3], [2, 3, 4], [0.5, 0.4, 0.1]))
# print(similarityCalculation([1, 2, 3], [2, 3], [0.5, 0.5]))
# vector_1 = [1, 2, 3, 4, 5]
# vector_2 = [2, 3, 4, 5, 6]
# print(similarityCalculation(vector_1[2:4], vector_2[3:5], [0.2, 0.8]))
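# Worked example (hand-checked): the function computes
#     similarity = sum_i weights[i] * (1 - |a_i - b_i| / (a_i + b_i)),
# so similarityCalculation([1, 2, 3], [2, 3, 4], [0.5, 0.4, 0.1])
#     = 0.5 * (1 - 1/3) + 0.4 * (1 - 1/5) + 0.1 * (1 - 1/7) ≈ 0.739.
# Note the score is undefined when a_i + b_i == 0, so the inputs are assumed positive.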

"""
    A simple passive sensor that generates measurements of targets
    给 PassiveElevationBearing 增加检测概率
    2022-10-17已经修改
"""
class PassiveElevationBearing(Sensor):  # 有杂波
    ndim_state: int = Property()
    mapping: np.ndarray = Property()
    noise_covar: CovarianceMatrix = Property()
    detection_probability: Probability = Property(default=1)
    clutter_model: ClutterModel = Property(default=None)  # added clutter model

    # def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True,
    #             **kwargs) -> Set[TrueDetection]:
    #
    #     measurement_model = CartesianToElevationBearing(
    # The stonesoup library has changed, so this personal version needs the matching update:
    @property
    def measurement_model(self):
        return CartesianToElevationBearing(
            ndim_state=self.ndim_state,
            mapping=self.mapping,
            noise_covar=self.noise_covar,
            translation_offset=self.position,
            rotation_offset=self.orientation)

    def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True,
                **kwargs) -> Set[TrueDetection]:

        measurement_model = self.measurement_model

        detections = set()
        for truth in ground_truths:
            measurement_vector = measurement_model.function(truth, noise=noise, **kwargs)
            if np.random.rand() <= self.detection_probability:
                detection = TrueDetection(measurement_vector,
                                          measurement_model=measurement_model,
                                          timestamp=truth.timestamp,
                                          groundtruth_path=truth)
                detections.add(detection)
        # Placed inside the for loop, clutter would be generated per true track (use a smaller Poisson rate); placed after the loop, it is generated per time step (use a larger Poisson rate)
        if self.clutter_model is not None:
            self.clutter_model.measurement_model = measurement_model
            clutter = self.clutter_model.function(ground_truths)
            detections = set.union(detections, clutter)

        return detections
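
# Minimal usage sketch (the parameter values below are illustrative assumptions, not
# from the original): a 6D constant-velocity state with position mapping (0, 2, 4),
# a 90% per-scan detection probability, and a uniform ClutterModel averaging two
# false alarms per scan.
# passive_sensor = PassiveElevationBearing(
#     ndim_state=6,
#     mapping=(0, 2, 4),
#     noise_covar=CovarianceMatrix(np.diag([np.radians(0.2) ** 2,
#                                           np.radians(0.2) ** 2])),
#     detection_probability=Probability(0.9),
#     clutter_model=ClutterModel(clutter_rate=2.0,
#                                dist_params=((-200, 200), (-200, 200), (-200, 200))),
#     position=StateVector([0., 0., 0.]))
# detections = passive_sensor.measure(ground_truths)  # ground_truths: Set[GroundTruthState]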


"""
    A simple passive sensor that generates measurements of targets
    增加检测概率
    修改 measurement_model,增加目标特征信息
    2022-10-17已经修改
"""
class PassiveElevationBearingFeature(Sensor):  # 有杂波
    ndim_state: int = Property()
    mapping: np.ndarray = Property()
    noise_covar: CovarianceMatrix = Property()
    detection_probability: Probability = Property(default=1)
    clutter_model: ClutterModel = Property(default=None)  # added clutter model

    # def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True,
    #             **kwargs) -> Set[TrueDetection]:
    #
    #     measurement_model = ModifiedCartesianToElevationBearing(
    @property
    def measurement_model(self):
        return CartesianToElevationBearing(
            ndim_state=self.ndim_state,
            mapping=self.mapping,
            noise_covar=self.noise_covar,
            translation_offset=self.position,
            rotation_offset=self.orientation)

    def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True,
                **kwargs) -> Set[TrueDetection]:

        measurement_model = self.measurement_model
        detections = set()
        for truth in ground_truths:
            measurement_vector = measurement_model.function(truth, noise=noise, **kwargs)
            if np.random.rand() <= self.detection_probability:
                detection = TrueDetection(measurement_vector,
                                          measurement_model=measurement_model,
                                          timestamp=truth.timestamp,
                                          groundtruth_path=truth)
                detections.add(detection)
        # Placed inside the for loop, clutter would be generated per true track (use a smaller Poisson rate); placed after the loop, it is generated per time step (use a larger Poisson rate)
        if self.clutter_model is not None:
            self.clutter_model.measurement_model = measurement_model
            clutter = self.clutter_model.function(ground_truths)
            detections = set.union(detections, clutter)

        return detections


"""
    增加检测率
    增加雷达能够测量的最远距离限制
"""
class RadarBearingRange(Sensor):
    """A simple radar sensor that generates measurements of targets, using a
    :class:`~.CartesianToBearingRange` model, relative to its position.

    Note
    ----
    The current implementation of this class assumes a 3D Cartesian plane.

    """

    ndim_state: int = Property(
        default=2,
        doc="Number of state dimensions. This is utilised by (and follows in format) "
            "the underlying :class:`~.CartesianToBearingRange` model")
    position_mapping: Tuple[int, int] = Property(
        doc="Mapping between the targets state space and the sensors "
            "measurement capability")
    noise_covar: CovarianceMatrix = Property(
        doc="The sensor noise covariance matrix. This is utilised by "
            "(and follows in format) the underlying "
            ":class:`~.CartesianToBearingRange` model")
    clutter_model: ClutterModel = Property(
        default=None,
        doc="An optional clutter generator that adds a set of simulated "
            ":class:`Clutter` objects to the measurements at each time step. "
            "The clutter is simulated according to the provided distribution.")
    detection_probability: Probability = Property(default=1)
    max_range: float = Property(
        default=np.inf,
        doc="The maximum detection range of the radar (in meters)")

    @property
    def measurement_model(self):
        return CartesianToBearingRange(
            ndim_state=self.ndim_state,
            mapping=self.position_mapping,
            noise_covar=self.noise_covar,
            translation_offset=self.position,
            rotation_offset=self.orientation)

    def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True,
                **kwargs) -> Set[TrueDetection]:
        measurement_model = self.measurement_model

        detections = set()
        for truth in ground_truths:
            if np.random.rand() <= self.detection_probability:
                measurement_vector = measurement_model.function(truth, noise=False, **kwargs)

                if noise is True:
                    measurement_noise = measurement_model.rvs()
                else:
                    measurement_noise = noise

                range_t = measurement_vector[1, 0]  # Bearing(0), Range(1)
                # Do not measure if state is out of range
                if range_t > self.max_range:
                    continue

                # Add in measurement noise to the measurement vector
                measurement_vector += measurement_noise

                detection = TrueDetection(measurement_vector,
                                          measurement_model=measurement_model,
                                          timestamp=truth.timestamp,
                                          groundtruth_path=truth)
                detections.add(detection)

        # Generate clutter at this time step
        if self.clutter_model is not None:
            self.clutter_model.measurement_model = measurement_model
            clutter = self.clutter_model.function(ground_truths)
            detections = set.union(detections, clutter)

        return detections
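
# Minimal usage sketch (illustrative assumptions): a 2D radar on a 4D constant-velocity
# state space with a 100 km range limit. Note the gate is applied to the noise-free
# range, so a noisy measured range may still slightly exceed max_range.
# radar = RadarBearingRange(
#     ndim_state=4,
#     position_mapping=(0, 2),
#     noise_covar=CovarianceMatrix(np.diag([np.radians(0.5) ** 2, 10. ** 2])),
#     detection_probability=Probability(0.9),
#     max_range=100_000,
#     position=StateVector([0., 0.]))
# detections = radar.measure(ground_truths)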


"""
    增加检测率
    增加雷达能够测量的最远距离限制
    增加雷达能够检测到的其他特征:亮度值、像素值
"""


class RadarBearingRangeFeature(Sensor):
    """
        A simple radar sensor that generates measurements of targets, using a `CartesianToBearingRange` model
        Note
        ----
        The current implementation of this class assumes a 3D Cartesian plane.
    """

    ndim_state: int = Property(
        default=2,
        doc="Number of state dimensions. This is utilised by (and follows in format) "
            "the underlying :class:`~.CartesianToBearingRange` model")
    position_mapping: Tuple[int, int, int, int] = Property(
        doc="Mapping between the targets state space and the sensors "
            "measurement capability")
    noise_covar: CovarianceMatrix = Property(
        doc="The sensor noise covariance matrix. This is utilised by "
            "(and follows in format) the underlying "
            ":class:`~.CartesianToBearingRange` model")
    clutter_model: ClutterModel = Property(
        default=None,
        doc="An optional clutter generator that adds a set of simulated "
            ":class:`Clutter` objects to the measurements at each time step. "
            "The clutter is simulated according to the provided distribution.")
    detection_probability: Probability = Property(default=1)
    max_range: float = Property(
        default=np.inf,
        doc="The maximum detection range of the radar (in meters)")

    @property
    def measurement_model(self):
        return ModifiedCartesianToBearingRange(
            ndim_state=self.ndim_state,
            mapping=self.position_mapping,
            noise_covar=self.noise_covar,
            translation_offset=self.position,
            rotation_offset=self.orientation)

    # def measurement_model(self):
    #     return CartesianToBearingRange(
    #         ndim_state=self.ndim_state,
    #         mapping=self.position_mapping[0:2],
    #         noise_covar=self.noise_covar[0:2, 0:2],
    #         translation_offset=self.position,
    #         rotation_offset=self.orientation)

    def measure(self, ground_truths: Set[GroundTruthState], noise: Union[np.ndarray, bool] = True,
                **kwargs) -> Set[TrueDetection]:
        measurement_model = self.measurement_model

        detections = set()
        for truth in ground_truths:
            if np.random.rand() <= self.detection_probability:
                measurement_vector = measurement_model.function(truth, noise=False, **kwargs)

                if noise is True:
                    measurement_noise = measurement_model.rvs()
                else:
                    measurement_noise = noise

                range_t = measurement_vector[1, 0]  # Bearing(0), Range(1)
                # Do not measure if state is out of range
                if range_t > self.max_range:
                    continue

                # Add in measurement noise to the measurement vector
                measurement_vector += measurement_noise

                detection = TrueDetection(measurement_vector,
                                          measurement_model=measurement_model,
                                          timestamp=truth.timestamp,
                                          groundtruth_path=truth)
                detections.add(detection)

        # Generate clutter at this time step
        if self.clutter_model is not None:
            self.clutter_model.measurement_model = measurement_model
            clutter = self.clutter_model.function(ground_truths)
            detections = set.union(detections, clutter)

        return detections


"""
    修改 measurement_model,增加目标特征属性
"""


class ModifiedCartesianToElevationBearing(NonLinearGaussianMeasurement):
    translation_offset: StateVector = Property(
        default=None,
        doc="A 5x1 array . (x, y, z, lumen, pixel)")

    def __init__(self, *args, **kwargs):
        """
        Ensure that the translation offset is initiated
        """
        super().__init__(*args, **kwargs)
        # Set values to defaults if not provided
        if self.translation_offset is None:
            self.translation_offset = StateVector([0] * len(self.mapping))  # effectively [0] * 5

    @property
    def ndim_meas(self) -> int:
        return 4

    def function(self, state, noise=False, **kwargs) -> StateVector:

        if isinstance(noise, bool) or noise is None:
            if noise:
                noise = self.rvs()
            else:
                noise = 0

        # Account for origin offset
        # xyz = state.state_vector[self.mapping, :] - self.translation_offset
        xyz = np.array([state.state_vector[self.mapping[0], :] - self.translation_offset[0, 0],
                        state.state_vector[self.mapping[1], :] - self.translation_offset[1, 0],
                        state.state_vector[self.mapping[2], :] - self.translation_offset[2, 0]
                        ])
        # Rotate coordinates
        xyz_rot = self._rotation_matrix @ xyz

        # Convert to Angles
        phi, theta = cart2angles(*xyz_rot[:3, :])

        bearings = [Bearing(i) for i in np.atleast_1d(phi)]
        elevations = [Elevation(i) for i in np.atleast_1d(theta)]
        # Pass through the luminance and pixel values
        lumen = state.state_vector[self.mapping[3], :]
        pixel = state.state_vector[self.mapping[4], :]
        return StateVectors([elevations, bearings, lumen, pixel]) + noise

    def rvs(self, num_samples=1, **kwargs) -> Union[StateVector, StateVectors]:
        out = super().rvs(num_samples, **kwargs)
        out = np.array([[Elevation(0.)], [Bearing(0.)], [0.], [0.]]) + out
        return out


"""
    限制需要从笛卡尔转换到极坐标系的量测信息的位数
    将亮度值、像素值直接传递    
"""


class ModifiedCartesianToBearingRange(NonLinearGaussianMeasurement, ReversibleModel):
    translation_offset: StateVector = Property(
        default=None,  # default to None, not a concrete number
        doc="A 4x1 array . (x, y, lumen, pixel)")

    def __init__(self, *args, **kwargs):
        """
            Ensure that the translation offset is initiated
        """
        super().__init__(*args, **kwargs)
        # Set values to defaults if not provided
        if self.translation_offset is None:
            self.translation_offset = StateVector([0] * len(self.mapping))  # effectively [0] * 4
            # print(self.translation_offset)
            # [[0]
            # [0]
            # [0]
            # [0]]
            # print(self.translation_offset[:2, :])
            # [[0]
            # [0]]
            # print(self.translation_offset[0, 0])  # 0

    @property
    def ndim_meas(self) -> int:
        """
            Returns: int, The number of measurement dimensions
        """
        return 4

    def inverse_function(self, detection, **kwargs) -> StateVector:  # the annotation indicates this returns a StateVector
        if not ((self.rotation_offset[0] == 0)
                and (self.rotation_offset[1] == 0)):
            raise RuntimeError(
                "Measurement model assumes 2D space. Rotation in 3D space is unsupported at this time.")

        phi, rho = detection.state_vector[:2]  # the expected measurement has 4 components
        xy = StateVector(pol2cart(rho, phi))

        xyz = np.concatenate((xy, StateVector([0])), axis=0)
        inv_rotation_matrix = inv(self._rotation_matrix)
        xyz = inv_rotation_matrix @ xyz
        xy = xyz[0:2]

        res = np.zeros((self.ndim_state, 1)).view(StateVector)
        res[self.mapping[:2], :] = xy + self.translation_offset[:2, :]
        # Use the detected feature values directly as output
        res[self.mapping[2:4], :] = detection.state_vector[2:4] + self.translation_offset[2:4, :]
        return res

    def function(self, state, noise=False, **kwargs) -> StateVector:
        """

        """
        if isinstance(noise, bool) or noise is None:
            if noise:
                noise = self.rvs()
            else:
                noise = 0

        # Account for origin offset
        # print(self.translation_offset.shape)  # (4, 1)
        # print(type(self.translation_offset))  # why does it end with a plain int?
        # print(state.state_vector[self.mapping[0], :])
        # print([self.translation_offset[0]])
        xyz = np.array([state.state_vector[self.mapping[0], :] - self.translation_offset[0, 0],
                        state.state_vector[self.mapping[1], :] - self.translation_offset[1, 0],
                        [0] * state.state_vector.shape[1]  # shape[1] is 1
                        ])
        # Rotate coordinates
        xyz_rot = self._rotation_matrix @ xyz

        # Convert to polar
        rho, phi = cart2pol(*xyz_rot[:2, :])
        bearings = [Bearing(i) for i in phi]
        # Pass through the luminance and pixel values
        lumen = state.state_vector[self.mapping[2], :]
        pixel = state.state_vector[self.mapping[3], :]
        return StateVectors([bearings, rho, lumen, pixel]) + noise

    def rvs(self, num_samples=1, **kwargs) -> Union[StateVector, StateVectors]:
        out = super().rvs(num_samples, **kwargs)
        out = np.array([[Bearing(0)], [0.], [0.], [0.]]) + out
        return out
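
# Round-trip sketch (illustrative assumption: an 8D state whose indices 0, 2, 4, 6
# hold x, y, luminance and pixel value). function() maps state space to
# [bearing, range, lumen, pixel]; inverse_function() maps a detection back,
# passing the two feature components straight through.
# model = ModifiedCartesianToBearingRange(
#     ndim_state=8,
#     mapping=(0, 2, 4, 6),
#     noise_covar=CovarianceMatrix(np.diag([np.radians(0.5) ** 2, 5. ** 2,
#                                           4. ** 2, 1. ** 2])))
# state = State(StateVector([100., 1., 50., 1., 200., 0., 30., 0.]))
# z = model.function(state)                      # [bearing, range, lumen, pixel]
# x_back = model.inverse_function(Detection(z))  # x, y, lumen, pixel restored in state space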


"""
    限制需要从笛卡尔转换到极坐标系的量测信息的位数
    将亮度值、像素值直接传递
    3D    
"""
class ModifiedCartesianToElevationBearingRange(NonLinearGaussianMeasurement, ReversibleModel):
    translation_offset: StateVector = Property(
        default=None,
        doc="A 5x1 array . (x, y, z, lumen, pixel)")

    def __init__(self, *args, **kwargs):
        """
        Ensure that the translation offset is initiated
        """
        super().__init__(*args, **kwargs)
        # Set values to defaults if not provided
        if self.translation_offset is None:
            self.translation_offset = StateVector([0] * len(self.mapping))  # effectively [0] * 5

    @property
    def ndim_meas(self) -> int:
        """
            Returns: int, The number of measurement dimensions
        """
        return 5

    def function(self, state, noise=False, **kwargs) -> StateVector:

        if isinstance(noise, bool) or noise is None:
            if noise:
                noise = self.rvs()
            else:
                noise = 0

        # Account for origin offset
        # xyz = state.state_vector[self.mapping[0:3], :] - self.translation_offset
        xyz = np.array([state.state_vector[self.mapping[0], :] - self.translation_offset[0, 0],
                        state.state_vector[self.mapping[1], :] - self.translation_offset[1, 0],
                        state.state_vector[self.mapping[2], :] - self.translation_offset[2, 0]
                        ])

        # Rotate coordinates
        xyz_rot = self._rotation_matrix @ xyz

        # Convert to spherical coordinates
        rho, phi, theta = cart2sphere(*xyz_rot[:3, :])
        elevations = [Elevation(i) for i in np.atleast_1d(theta)]  # elevation
        bearings = [Bearing(i) for i in np.atleast_1d(phi)]  # bearing
        rhos = np.atleast_1d(rho)
        # Pass through the detected luminance and pixel values
        lumen = state.state_vector[self.mapping[3], :]
        pixel = state.state_vector[self.mapping[4], :]
        return StateVectors([elevations, bearings, rhos, lumen, pixel]) + noise

    def inverse_function(self, detection, **kwargs) -> StateVector:

        theta, phi, rho = detection.state_vector[:3]  # the expected measurement has 5 components
        xyz = StateVector(sphere2cart(rho, phi, theta))

        inv_rotation_matrix = inv(self._rotation_matrix)
        xyz = inv_rotation_matrix @ xyz

        res = np.zeros((self.ndim_state, 1)).view(StateVector)
        res[self.mapping[:3], :] = xyz + self.translation_offset[:3, :]
        # Use the detected feature values directly as output
        res[self.mapping[3:5], :] = detection.state_vector[3:5] + self.translation_offset[3:5, :]

        return res

    def rvs(self, num_samples=1, **kwargs) -> Union[StateVector, StateVectors]:
        out = super().rvs(num_samples, **kwargs)
        out = np.array([[Elevation(0.)], [Bearing(0.)], [0], [0.], [0.]]) + out
        return out


"""
    给最邻近数据关联算法中增加一个判断,根据亮度值和像素值做相似度计算,高于某一个阈值才可以
    否则删除该匹配,继续使用最邻近数据关联   
"""


class ModifiedNearestNeighbour(DataAssociator):
    hypothesiser: Hypothesiser = Property(
        doc="Generate a set of hypotheses for each prediction-detection pair")

    def associate(self, tracks, detections, timestamp, **kwargs):

        # Generate a set of hypotheses for each track on each detection
        hypotheses = self.generate_hypotheses(tracks, detections, timestamp, **kwargs)

        # Only associate tracks with one or more hypotheses
        associate_tracks = {track for track, track_hypotheses in hypotheses.items() if track_hypotheses}

        associations = {}
        associated_measurements = set()
        cnt = 0
        while len(associate_tracks) > (len(associations.keys()) + cnt):
            # Define a 'greedy' association
            best_hypothesis = None
            # best_hypothesis_track = Track()
            for track in associate_tracks - associations.keys():
                for hypothesis in hypotheses[track]:
                    # A measurement may only be associated with a single track
                    if hypothesis.measurement in associated_measurements:
                        continue
                    # best_hypothesis is 'greater than' other
                    if best_hypothesis is None or hypothesis > best_hypothesis:
                        # # Add a similarity check
                        # if hypothesis.measurement.state_vector is not None \
                        #         and hypothesis.measurement.state_vector[2] > 150:
                        if hypothesis.measurement.state_vector is not None:
                            # Compute the similarity of the luminance and pixel values between the predicted point and the measurement
                            # print(len(hypothesis.measurement.state_vector))  # 4
                            # print(len(track.state_vector))  # 6
                            # print(similarityCalculation(hypothesis.measurement.state_vector[2: 4],
                            #                             track.state_vector[4: 6],
                            #                             [0.5, 0.5]))
                            if similarityCalculation(hypothesis.measurement.state_vector[2: 4],
                                                     track.state_vector[6: 8],
                                                     [0.6, 0.4]) < 0.1:  # similarity threshold (0.1 here)
                                cnt += 1
                            # # For a 2D radar and 3D infrared, only check the value ranges of the pixel and luminance values
                            # if hypothesis.measurement.state_vector[2] < 100 or \
                            #         hypothesis.measurement.state_vector[3] < 10:
                            #     cnt += 1
                            else:
                                best_hypothesis = hypothesis
                                # print(type(track))
                                best_hypothesis_track = track

                                # If low-similarity pairs were simply filtered out, len(associate_tracks) > len(associations) would always hold and the loop could never exit
                                associations[best_hypothesis_track] = best_hypothesis
                        # 2022/07/20: added this else branch; without it, thresholded nearest-neighbour association with ModifiedNearestNeighbour hangs
                        else:
                            best_hypothesis = hypothesis
                            # print(type(track))
                            best_hypothesis_track = track

                            # If low-similarity pairs were simply filtered out, len(associate_tracks) > len(associations) would always hold and the loop could never exit
                            associations[best_hypothesis_track] = best_hypothesis

            if best_hypothesis:
                associated_measurements.add(best_hypothesis.measurement)
            # print("Running")
            # break
        return associations


"""
    遍历式关联算法:前两周期,返回所有符合距离假设条件的关联对
    hypothesiser 选择距离假设器,先通过设置距离,对量测对做一个筛选,即将符合距离条件的第一个周期与第二个周期的量测关联
"""


class ErgodicAssociation(DataAssociator):
    hypothesiser: Hypothesiser = Property(
        doc="Generate a set of hypotheses for each prediction-detection pair")

    def associate(self, tracks, detections, timestamp, **kwargs):

        # Generate a set of hypotheses for each track on each detection
        hypotheses = self.generate_hypotheses(tracks, detections, timestamp, **kwargs)

        # Associate each track with multiple hypotheses
        associate_tracks = {track for track, track_hypotheses in hypotheses.items() if any(track_hypotheses)}

        associations = {}  # dictionary
        associated_measurements = set()  # measurements that have already been associated

        # Exhaustive association
        for track in associate_tracks:
            i = 0
            for hypothesis in hypotheses[track]:
                associations[track, i] = hypothesis  # composite key; otherwise the dict would keep overwriting the same entry
                i += 1
                associated_measurements.add(hypothesis.measurement)
        # Once return executes, StopIteration is raised and iteration ends, even from inside a loop
        return associations  # track/hypothesis key-value pairs


"""
    给杂波增加属性
"""
class ClutterFeatureModel(Model, ABC):
    """
        根据特定分布产生传感器噪声(虚警)
    """
    clutter_rate: float = Property(
        default=1.0,
        doc="The average number of clutter points per time step. The actual "
            "number is Poisson distributed")
    distribution: Callable = Property(
        default=np.random.default_rng().uniform,
        doc="A function which represents the distribution of the clutter over the "
            "measurement space. The function should return a single value (ie, do "
            "not use multivariate distributions).")
    dist_params: Tuple = Property(
        default=((-200, 200), (-200, 200), (-200, 200)),
        doc="The required parameters for the clutter distribution function. The "
            "length of the list must be equal to the number of state dimensions "
            "and should be defined for use in Cartesian space."
            "The default defines the space for a uniform distribution in 3D. The call "
            "`np.array([self.distribution(*arg) for arg in self.dist_params])` "
            "must return a numpy array of length equal to the number of dimensions.")
    seed: Optional[Union[int, np.random.RandomState]] = Property(
        default=None,
        doc="Seed or state for random number generation. If defined as an integer, "
            "it will be used to create a numpy RandomState. Or it can be defined directly "
            "as a RandomState (useful if you want to pass one of the random state's "
            "functions as the :attr:`distribution`).")
    feature_params: Tuple = Property(
        default=((0, 255), (0, 100)),  # luminance value, pixel value
        doc="Feature")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if isinstance(self.seed, int):
            self.random_state = np.random.RandomState(self.seed)
        elif isinstance(self.seed, np.random.RandomState):
            self.random_state = self.seed
        else:
            self.random_state = None

    def function(self, ground_truths: Set[GroundTruthState], **kwargs) -> Set[Clutter]:
        # Extract the timestamp from the ground_truths. Groundtruth is
        # necessary to get the proper timestamp. If there is no groundtruth return a set of no Clutter.
        if not ground_truths:
            return set()
        timestamp = next(iter(ground_truths)).timestamp

        # Generate the clutter for this time step
        clutter = set()
        for _ in range(poisson.rvs(self.clutter_rate, random_state=self.random_state)):
            # Call the distribution function to generate a random vector in the space
            random_vector = np.array([self.distribution(*arg) for arg in self.dist_params])
            random_feature = np.array([self.distribution(*arg) for arg in self.feature_params])  # randomly generate the clutter feature values
            # Make a State object with the random vector
            # print(self.measurement_model.ndim_state)  # 8
            # print(len(random_vector))  # 3
            # print(self.measurement_model.mapping)  # [0, 2, 4, 6, 7]
            state = State([0.0] * self.measurement_model.ndim_state, timestamp=timestamp)
            state.state_vector[self.measurement_model.mapping[:3], 0] += random_vector
            state.state_vector[self.measurement_model.mapping[3:5], 0] += random_feature

            # Use the sensor's measurement model to incorporate the
            # translation offset and sensor rotation. This will also
            # convert the vector to the proper measurement space
            # (polar or spherical coordinates)
            clutter_vector = self.measurement_model.function(state)

            # Create a clutter object.
            clutter.add(Clutter(state_vector=clutter_vector,
                                timestamp=timestamp,
                                measurement_model=self.measurement_model))

        return clutter

    @property
    def ndim(self) -> int:
        """
        Return the number of measurement dimensions or, if a measurement model has
        not yet been assigned, the number of state dimensions.
        """
        if hasattr(self, 'measurement_model'):
            return self.measurement_model.ndim_meas
            # return 5
        else:
            return len(self.dist_params)

    def rvs(self, num_samples: int = 1, **kwargs) -> Union[StateVector, StateVectors]:
        """
        Must be implemented to properly inherit the parent Model.
        """
        return None

    def pdf(self, state1: State, state2: State, **kwargs) -> Union[Probability, np.ndarray]:
        """
        Must be implemented to properly inherit the parent Model.
        """
        return None
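
# Minimal usage sketch (illustrative values): on average two uniformly distributed
# false alarms per scan, with luminance drawn from (0, 255) and pixel value from
# (0, 100). The owning sensor assigns clutter_model.measurement_model before
# function() is called, as the sensors above do.
# clutter_model = ClutterFeatureModel(
#     clutter_rate=2.0,
#     dist_params=((-200, 200), (-200, 200), (-200, 200)),
#     feature_params=((0, 255), (0, 100)))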


"""
    修改MultiTargetGroundTruthSimulator,在起始时刻生成指定数量的真实航迹
"""


class ModifiedMultiTargetGroundTruthSimulator(SingleTargetGroundTruthSimulator):
    transition_model: TransitionModel = Property(doc="Transition Model used as propagator for track.")
    initial_state: GaussianState = Property(doc="Initial state to use to generate states")
    birth_num: int = Property(default=1, doc="The number of true tracks to generate at the initial time")

    seed: Optional[int] = Property(default=None, doc="Seed for random number generation. Default None")

    preexisting_states: Collection[StateVector] = Property(
        default=list(), doc="State vectors at time 0 for "
                            "groundtruths which should exist at the start of simulation.")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.random_state = np.random.RandomState(self.seed)
        self.index = 0  # metadata index used by _new_target; initialised here to avoid an AttributeError on first use

    def _new_target(self, time, random_state, state_vector=None):
        # 'state_vector or ...' raises on multi-element arrays, so compare against None instead
        vector = state_vector if state_vector is not None else \
                 self.initial_state.state_vector + \
                 self.initial_state.covar @ \
                 random_state.randn(self.initial_state.ndim, 1)

        gt_track = GroundTruthPath()

        gt_track.append(GroundTruthState(
            state_vector=vector,
            timestamp=time,
            metadata={"index": self.index})
        )
        return gt_track

    @BufferedGenerator.generator_method
    def groundtruth_paths_gen(self, random_state=None):
        time = self.initial_state.timestamp or datetime.datetime.now()
        random_state = random_state if random_state is not None else self.random_state
        number_steps_remaining = self.number_steps

        if self.preexisting_states:
            # Use preexisting_states to initiate ground-truth paths; self.birth_num controls how many
            groundtruth_paths = OrderedSet(
                self._new_target(time, random_state, state) for state in self.preexisting_states[:self.birth_num])

            number_steps_remaining -= 1
            yield time, groundtruth_paths
            # The original code was 'self.time += self.timestep', which raised
            # AttributeError: 'MultiTargetGroundTruthSimulator' object has no attribute 'time'
            time += self.timestep
        else:
            groundtruth_paths = OrderedSet()

        for _ in range(number_steps_remaining):  # the first point has already been generated, so one step fewer remains
            # Move tracks forward
            for gt_track in groundtruth_paths:
                self.index = gt_track[-1].metadata.get("index")
                trans_state_vector = self.transition_model.function(
                    gt_track[-1], noise=True, time_interval=self.timestep)
                gt_track.append(GroundTruthState(
                    trans_state_vector, timestamp=time,
                    metadata={"index": self.index}))

            yield time, groundtruth_paths
            time += self.timestep
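
# Minimal usage sketch (the transition model, initial state and state vectors are
# illustrative assumptions): spawn three tracks at the initial time from
# preexisting_states and propagate them with a constant-velocity model.
# from stonesoup.models.transition.linear import (
#     CombinedLinearGaussianTransitionModel, ConstantVelocity)
# gt_simulator = ModifiedMultiTargetGroundTruthSimulator(
#     transition_model=CombinedLinearGaussianTransitionModel(
#         [ConstantVelocity(0.05), ConstantVelocity(0.05)]),
#     initial_state=GaussianState(StateVector([0., 0., 0., 0.]),
#                                 CovarianceMatrix(np.diag([10., 1., 10., 1.])),
#                                 timestamp=datetime.datetime(2022, 10, 17)),
#     birth_num=3,
#     number_steps=20,
#     preexisting_states=[StateVector([0., 1., 0., 1.]),
#                         StateVector([50., -1., 50., 0.]),
#                         StateVector([-50., 0., 25., 1.])])
# for time, truth_paths in gt_simulator:
#     pass  # consume (timestamp, OrderedSet[GroundTruthPath]) pairs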


"""
    基于运动属性的联合航迹起始:
    1:能否使用BoundingBoxReducer来划分区域,分为威胁度较高、威胁度较低
    2:然后使用对不同区域使用分别使用不同的航迹起始算法
    3:再综合起始的所有航迹
"""


class CompositeInitiator(Initiator):
    """
        Composite multiple of the same type initiator
        A composition of sub-initiators
        Requires that all sub-initiators have a defined prior state in order to compose its own composite prior state
    """
    sub_initiators: Sequence[Initiator] = Property(
        doc="Sequence of sub-initiators. Must not be empty."
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if len(self.sub_initiators) == 0:
            raise ValueError("Cannot create an empty composite initiator")
        if any(not isinstance(sub_initiator, Initiator) for sub_initiator in self.sub_initiators):
            raise ValueError("All sub-initiators must be an initiator type")

    @property
    def prior_state(self):  # composite prior state: the prior of each sub-initiator
        # return CompositeState([sub_initiator.prior_state for sub_initiator in self.sub_initiators])
        # If a filter wraps each track initiator, the line below must be used instead
        return CompositeState([sub_initiator.initiator.prior_state for sub_initiator in self.sub_initiators])

    def initiate(self, detections, timestamp, **kwargs):
        """
            It is required that the sub-initiators initiate on a single measurement, in order for
            the individual sub-states of a track to be linked to one another.
        """
        tracks = set()

        # Store sub-tracks for each composite detection
        detection_hyps_states = dict()
        for detection in detections:
            detection_hyps_states[detection] = {'sub-hypotheses': list(), 'sub-states': list()}

        for sub_state_index, sub_initiator in enumerate(self.sub_initiators):
            # print(sub_state_index)  # 0, 1
            # print(sub_initiator)  # print the track-initiation algorithm
            # Store all sub-detections produced from this sub-state index
            sub_state_detections = set()
            # Get all composite detections that have a sub-detection for this index
            relevant_detections = dict()
            # Get the sub-prior state for this index
            # print(self.prior_state[0])
            sub_prior = self.prior_state[sub_state_index]

            mapping = [0, 1]
            for detection in detections:
                if sub_state_index in mapping:  # [0, 1]
                    sub_state_detections.add(detection)  # the same detection is used whether sub_state_index is 0 or 1
                    # print(sub_detection)
                    # print(sub_state_detections)
                    # link sub-detection back to the composite detection
                    # print(detection.state_vector)
                    relevant_detections[detection] = detection  # dict key-value pair
                else:
                    # Consider it a missed detection, and add a null hypothesis to its sub-hypotheses list
                    detection_hyps_states[detection]['sub-hypotheses'].append(
                        SingleHypothesis(None, MissedDetection(timestamp=detection.timestamp))
                    )
                    # Add sub-prior to its sub-states list
                    detection_hyps_states[detection]['sub-states'].append(sub_prior)

            # print(relevant_detections)
            if relevant_detections:
                # print(len(relevant_detections))  # length 6
                # print(len(sub_state_detections))  # length 6
                # print(type(sub_state_detections))
                # temp = [sub_state_detection.state_vector for sub_state_detection in sub_state_detections]
                # print(temp)
                # Probably initiate was not taking effect; it needs rewriting to match the supplied track-initiation algorithm
                # sub_tracks = sub_initiator.initiate(sub_state_detections, timestamp=timestamp)
                # print(sub_state_detections)
                sub_tracks = sub_initiator.initiator.initiate(
                    sub_state_detections,
                    timestamp=timestamp)

                # print(sub_tracks)  # why is this empty? the while loop never runs
                while sub_tracks:
                    print("Is there any run up to this point ?")
                    sub_track = sub_tracks.pop()

                    # Get detection that initiated this sub_track, expecting single-measurement initiation
                    sub_track_detections = {state.hypothesis.measurement
                                            for state in sub_track
                                            if isinstance(state, Update)}
                    print(len(sub_track_detections))  # equals 3
                    if len(sub_track_detections) != 1:
                        # Ambiguity in which detection caused this track
                        # Should not have case where == 0 as this would imply track initiated on no measurement
                        warnings.warn(
                            "Attempted to initiate sub-track with more than one detection"
                        )
                    for _ in range(len(sub_track_detections)):
                        sub_track_detection = sub_track_detections.pop()

                        # Retrieve the composite detection that contains the sub-track's detection
                        full_detection = relevant_detections[sub_track_detection]  # KeyError: key not found

                        update = sub_track[-1]

                        # Add to sub-hypotheses list for this composite detection
                        detection_hyps_states[full_detection]['sub-hypotheses'].append(
                            SingleHypothesis(None, sub_track_detection)
                        )

                        # Add update to the sub-states list for this composite detection
                        detection_hyps_states[full_detection]['sub-states'].append(update)

        # For each composite detection, create a track from its corresponding sub-hypotheses and sub-states
        for detection, hyps_states in detection_hyps_states.items():
            # print(hyps_states)  # why is it empty? {'sub-hypotheses': [], 'sub-states': []}
            # print(hyps_states['sub-hypotheses'])  # []
            # print(hyps_states['sub-states'])  # []
            # If ValueError: Cannot create an empty composite hypothesis is raised, add the following check
            if len(hyps_states['sub-hypotheses']) < len(self.sub_initiators):
                # No tracks initiated for this detection.
                continue
            # Create composite hypothesis from the list of sub-hypotheses
            hypothesis = CompositeHypothesis(prediction=None,
                                             sub_hypotheses=hyps_states['sub-hypotheses'],
                                             measurement=detection)
            # Create composite update from list of sub-states
            composite_update = CompositeUpdate(sub_states=hyps_states['sub-states'],
                                               hypothesis=hypothesis)

            tracks.add(Track([composite_update]))
        return tracks

    def __contains__(self, item):
        return self.sub_initiators.__contains__(item)

    def __getitem__(self, index):
        """Can be indexed as a list, or sliced, in which case a new composite initiator will be
        created from the sub-list of sub-initiators."""
        if isinstance(index, slice):
            return self.__class__(self.sub_initiators.__getitem__(index))
        return self.sub_initiators.__getitem__(index)

    def __iter__(self):
        return self.sub_initiators.__iter__()

    def __len__(self):
        return self.sub_initiators.__len__()


"""
    航迹起始滤波器
"""
from typing import Sequence, Tuple
from stonesoup.base import Property
from stonesoup.initiator import Initiator


class BoundingBoxFilter(Initiator):
    initiator: Initiator = Property()
    limits: Sequence[Tuple[float, float]] = Property()
    mapping: Sequence[int] = Property(default=None)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.mapping is None:
            self.mapping = tuple(range(len(self.limits)))

    def initiate(self, detections, *args, **kwargs):
        outlier_data = set()
        for state in detections:
            state_vector = state.state_vector
            for i in range(len(self.limits)):
                min_val = self.limits[i][0]  # avoid shadowing the built-in min/max
                max_val = self.limits[i][1]
                value = state_vector[self.mapping[i]]
                if value < min_val or value > max_val:
                    outlier_data.add(state)
                    break
        return self.initiator.initiate(detections - outlier_data, *args, **kwargs)
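
# Minimal usage sketch (the limits and the wrapped initiator are illustrative
# assumptions): keep only detections whose first component (e.g. bearing) lies in
# (-pi/2, pi/2) and whose second (e.g. range) is below 150 km, then delegate the
# survivors to the wrapped initiator.
# gated_initiator = BoundingBoxFilter(
#     initiator=base_initiator,  # hypothetical, e.g. a PassiveSimpleMeasurementInitiator
#     limits=((-np.pi / 2, np.pi / 2), (0., 150_000.)),
#     mapping=(0, 1))
# tracks = gated_initiator.initiate(detections, timestamp)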


"""
    用于和基于特征相似度的航迹起始算法作比较,此马氏距离计算不考虑特征
"""


class MahalanobisNoFeatures(Measure):

    def __call__(self, state1, state2):

        if self.mapping is not None:
            u = state1.state_vector[self.mapping[0:3], 0]
            v = state2.state_vector[self.mapping2[0:3], 0]
            # extract the mapped covariance data
            rows = np.array(self.mapping[0:3], dtype=np.intp)
            columns = np.array(self.mapping[0:3], dtype=np.intp)
            cov = state1.covar[rows[:, np.newaxis], columns]
        else:
            u = state1.state_vector[:, 0]
            v = state2.state_vector[:, 0]
            # print(u)
            cov = state1.covar

        vi = np.linalg.inv(cov)

        return distance.mahalanobis(u, v, vi)


"""
    改写PDA假设器:实现对关联门交叉区域小概率有效量测的衰减
    主要是针对单一预测和多检测
"""
from functools import lru_cache

from scipy.stats import multivariate_normal, chi2
from scipy.linalg import det
from scipy.special import gamma

"""
    典型的的PDA假设器
"""


class PDAHypothesiser(Hypothesiser):
    """
        Hypothesiser based on Probabilistic Data Association (PDA).
        Generate track predictions at detection times and calculate probabilities
        for all prediction-detection pairs for single prediction and multiple detections.
    """
    predictor: Predictor = Property(doc="Predict tracks to detection times")
    updater: Updater = Property(doc="Updater used to get measurement prediction")
    clutter_spatial_density: float = Property(
        default=None,
        doc="Spatial density of clutter - tied to probability of false detection. Default is None "
            "where the clutter spatial density is calculated based on assumption that "
            "all but one measurement within the validation region of the track are clutter.")
    # Probability that the target is detected
    prob_detect: Probability = Property(
        default=Probability(0.85),
        doc="Target Detection Probability")
    # Probability that the measurement falls within the association gate
    prob_gate: Probability = Property(
        default=Probability(0.95),
        doc="Gate Probability contains true measurement if detected")
    include_all: bool = Property(
        default=False,
        doc="If `True`, hypotheses outside probability gates will be returned. This requires that the clutter spatial "
            "density is also provided, as it may not be possible to estimate this. Default `False`")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.include_all and self.clutter_spatial_density is None:
            raise ValueError("Must provide clutter spatial density if including all hypotheses")

    def hypothesise(self, track, detections, timestamp, **kwargs):
        """
            Evaluate and return all track association hypotheses.

            For a given track and a set of N detections, return a MultipleHypothesis with N+1
            hypotheses (the first is a 'MissedDetection'), each with an associated probability.
            Probabilities are assumed to be exhaustive (sum to 1) and mutually exclusive
            (two detections cannot both be the correct association at the same time).

            Returns
            class: MultipleHypothesis, a container of SingleProbabilityHypothesis objects
        """
        hypotheses = list()
        validated_measurements = 0

        """
            Missed detection: the gate volume is computed from the detection probability
            and the probability that the measurement falls within the gate.
        """
        # Track prediction
        prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs)
        # Missed detection hypothesis: probability of a missed detection
        probability = Probability(1 - self.prob_detect * self.prob_gate)
        hypotheses.append(
            SingleProbabilityHypothesis(
                prediction,
                MissedDetection(timestamp=timestamp),
                probability
            ))
        # Print the missed-detection probability
        # print(probability)
        """
            Valid measurements inside the association gate
        """
        # True detection hypotheses
        for detection in detections:
            # print(detection.state_vector)
            valid_measurement = False
            # Re-evaluate prediction
            prediction = self.predictor.predict(track, timestamp=detection.timestamp, **kwargs)
            # Compute measurement prediction and probability measure
            measurement_prediction = self.updater.predict_measurement(
                prediction, detection.measurement_model, **kwargs)
            # Calculate difference before to handle custom types (mean defaults to zero)
            # This is required as log pdf converts arrays to floats
            log_pdf = multivariate_normal.logpdf(
                (detection.state_vector[0:2] - measurement_prediction.state_vector[0:2]).ravel(),
                cov=measurement_prediction.covar[0:2, 0:2])
            pdf = Probability(log_pdf, log_value=True)

            # If inside the association gate, it is a valid measurement
            if self._is_valid_measurement(measurement_prediction, detection):
                validated_measurements += 1
                valid_measurement = True

            if self.include_all or valid_measurement:
                probability = pdf * self.prob_detect
                if self.clutter_spatial_density is None:
                    # Note: will divide by validated measurements count later...
                    probability *= self._validation_region_volume(
                        self.prob_gate, measurement_prediction)
                else:
                    probability /= self.clutter_spatial_density

                # True detection hypothesis
                hypotheses.append(
                    SingleProbabilityHypothesis(
                        prediction,
                        detection,
                        probability,
                        measurement_prediction))

        if self.clutter_spatial_density is None:
            for hypothesis in hypotheses[1:]:  # Skip missed detection (the first hypothesis)
                hypothesis.probability /= validated_measurements
                # print(validated_measurements)  # not reached in our runs, because a clutter density was provided
        # # Print the number of valid measurements inside the gate
        # print(validated_measurements)
        # print("--------------------")
        # MultipleHypothesis comes from Modifiedmultihypothesis or multihypothesis
        return MultipleHypothesis(hypotheses, normalise=True, total_weight=1)

    # Measurement association gate: gating uses kinematic information only
    def _is_valid_measurement(self, meas_pred, det):
        z = meas_pred.state_vector[0:2] - det.state_vector[0:2]
        # z = meas_pred.state_vector - det.state_vector
        return \
            z.T @ self._inv_cov(meas_pred) @ z <= self._gate_threshold(self.prob_gate, meas_pred.ndim)

    @classmethod
    @lru_cache()
    # Volume of the validation gate
    def _validation_region_volume(cls, prob_gate, meas_pred):
        n = meas_pred.ndim
        gate_threshold = cls._gate_threshold(prob_gate, n)
        c_z = np.pi ** (n / 2) / gamma(n / 2 + 1)
        return c_z * gate_threshold ** (n / 2) * np.sqrt(det(meas_pred.covar[0:2, 0:2]))

    @staticmethod
    @lru_cache()
    # Inverse of the covariance
    def _inv_cov(meas_pred):
        return np.linalg.inv(meas_pred.covar[0:2, 0:2])

    @staticmethod
    @lru_cache()
    # Gate threshold from the chi-squared distribution
    def _gate_threshold(prob_gate, n):
        return chi2.ppf(float(prob_gate), n)
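
# Minimal wiring sketch (the EKF pair, the models and the density value are
# illustrative assumptions): feed the hypothesiser into the PDA associator defined below.
# from stonesoup.predictor.kalman import ExtendedKalmanPredictor
# from stonesoup.updater.kalman import ExtendedKalmanUpdater
# hypothesiser = PDAHypothesiser(
#     predictor=ExtendedKalmanPredictor(transition_model),   # transition_model assumed defined
#     updater=ExtendedKalmanUpdater(measurement_model),      # measurement_model assumed defined
#     clutter_spatial_density=1.25e-7,
#     prob_detect=Probability(0.9),
#     prob_gate=Probability(0.95))
# data_associator = PDA(hypothesiser=hypothesiser)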


"""
    改进的PDA假设器
"""
# class PDAHypothesiser(Hypothesiser):
#     """
#         Hypothesiser based on Probabilistic Data Association (PDA).
#         Generate track predictions at detection times and calculate probabilities
#         for all prediction-detection pairs for single prediction and multiple detections.
#     """
#     predictor: Predictor = Property(doc="Predict tracks to detection times")
#     updater: Updater = Property(doc="Updater used to get measurement prediction")
#     clutter_spatial_density: float = Property(
#         default=None,
#         doc="Spatial density of clutter - tied to probability of false detection. Default is None "
#             "where the clutter spatial density is calculated based on assumption that "
#             "all but one measurement within the validation region of the track are clutter.")
#     # Probability that the target is detected
#     prob_detect: Probability = Property(
#         default=Probability(0.85),
#         doc="Target Detection Probability")
#     # Probability that the measurement falls within the association gate
#     prob_gate: Probability = Property(
#         default=Probability(0.95),
#         doc="Gate Probability contains true measurement if detected")
#     include_all: bool = Property(
#         default=False,
#         doc="If `True`, hypotheses outside probability gates will be returned. This requires that the clutter spatial "
#             "density is also provided, as it may not be possible to estimate this. Default `False`")
#
#     def __init__(self, *args, **kwargs):
#         super().__init__(*args, **kwargs)
#         if self.include_all and self.clutter_spatial_density is None:
#             raise ValueError("Must provide clutter spatial density if including all hypotheses")
#
#     def hypothesise(self, track, detections, timestamp, **kwargs):
#         """
#             Evaluate and return all track association hypotheses.
#
#             For a given track and a set of N detections, return a MultipleHypothesis with N+1
#             hypotheses (the first is a 'MissedDetection'), each with an associated probability.
#             Probabilities are assumed to be exhaustive (sum to 1) and mutually exclusive
#             (two detections cannot both be the correct association at the same time).
#
#             Returns
#             class: MultipleHypothesis, A container of SingleProbabilityHypothesis objects
#         """
#         hypotheses = list()
#         validated_measurements = 0
#
#         """
#             Missed detection: the gate volume is computed from the detection probability
#             and the probability that the measurement falls within the gate.
#         """
#         # Track prediction
#         prediction = self.predictor.predict(track, timestamp=timestamp, **kwargs)
#         # Missed detection hypothesis: probability of a missed detection
#         probability = Probability(1 - self.prob_detect * self.prob_gate)
#         hypotheses.append(
#             SingleProbabilityHypothesis(
#                 prediction,
#                 MissedDetection(timestamp=timestamp),
#                 probability
#             ))
#         # Print the missed-detection probability
#         # print(probability)
#         """
#             Valid measurements inside the association gate
#         """
#         # True detection hypotheses
#         for detection in detections:
#             valid_measurement = False
#             # Re-evaluate prediction
#             prediction = self.predictor.predict(track, timestamp=detection.timestamp, **kwargs)
#             # Compute measurement prediction and probability measure
#             measurement_prediction = self.updater.predict_measurement(
#                 prediction, detection.measurement_model, **kwargs)
#             # Calculate difference before to handle custom types (mean defaults to zero)
#             # This is required as log pdf converts arrays to floats
#             log_pdf = multivariate_normal.logpdf(
#                 (detection.state_vector - measurement_prediction.state_vector).ravel(),
#                 cov=measurement_prediction.covar)
#             pdf = Probability(log_pdf, log_value=True)
#             # print(pdf)
#
#             # If inside the association gate, it is a valid measurement
#             if self._is_valid_measurement(measurement_prediction, detection):
#                 validated_measurements += 1
#                 valid_measurement = True
#
#             if self.include_all or valid_measurement:
#                 probability = pdf * self.prob_detect
#                 if self.clutter_spatial_density is None:
#                     # Note: will divide by validated measurements count later...
#                     probability *= self._validation_region_volume(
#                         self.prob_gate, measurement_prediction)
#                 else:
#                     probability /= self.clutter_spatial_density
#                 """
#                     1: Set a probability threshold and screen out low-probability measurements
#                     2: Compute the similarity and filter out low-similarity points
#                     3: Weight by the similarity
#                 """
#                 # # # Compute the similarity
#                 # print(prediction.state_vector)
#                 # # [[1.71872951e+03]
#                 # #  [-1.08923192e+02]
#                 # #  [4.30504800e+03]
#                 # #  [-1.12073342e+00]
#                 # #  [3.26546851e+02]
#                 # #  [2.54690540e+00]
#                 # #  [1.98853556e+02]
#                 # #  [2.30658984e+01]]
#                 # print(detection.state_vector)
#                 # # [[Elevation(0.09159826235348753)]
#                 # #  [Bearing(1.9075684138691358)]
#                 # #  [206.59063215366072]
#                 # #  [27.1390883851286]]
#
#                 score = similarityCalculation(prediction.state_vector[6:8],
#                                               detection.state_vector[2:4],
#                                               [0.6, 0.4])
#                 # print(len(probability))
#                 # print(probability)
#                 if probability < 10 or score < 0.5:  # tune against the spatial clutter density; filters out low-probability measurements and missed detections
#                     # print(probability)
#                     continue
#                 else:
#                     # print(probability)
#                     # print(validated_measurements)  # print the number of valid measurements inside the gate
#                     # True detection hypothesis
#                     hypotheses.append(
#                         SingleProbabilityHypothesis(
#                             prediction,
#                             detection,
#                             probability * score,
#                             measurement_prediction))
#
#         if self.clutter_spatial_density is None:
#             for hypothesis in hypotheses[1:]:  # Skip missed detection (the first hypothesis)
#                 hypothesis.probability /= validated_measurements
#                 # print(validated_measurements)  # not reached in our runs, because a clutter density was provided
#         # # Print the number of valid measurements inside the gate
#         # print(validated_measurements)
#         # print("--------------------")
#         # MultipleHypothesis comes from Modifiedmultihypothesis or multihypothesis
#         return MultipleHypothesis(hypotheses, normalise=True, total_weight=1)
#
#     # Measurement association gate
#     def _is_valid_measurement(self, meas_pred, det):
#         # print(meas_pred.state_vector)  # the predicted measurement
#         # print(meas_pred.ndim)  # 4-dimensional, since luminance and pixel values were added
#         # print(det.state_vector)  # the detected measurement
#         # Both have the following format
#         # [[Elevation(0.06175865073617464)]
#         #  [Bearing(0.9075225639643723)]
#         #  [189.34633140734056]
#         #  [35.60601283903278]]
#         z = meas_pred.state_vector - det.state_vector
#         return \
#             z.T @ self._inv_cov(meas_pred) @ z <= self._gate_threshold(self.prob_gate, 2)
#         # meas_pred.ndim printed as 4, so meas_pred.ndim is replaced directly with 2
#         # z.T@self._inv_cov(meas_pred)@z <= self._gate_threshold(self.prob_gate, meas_pred.ndim)
#
#     @classmethod
#     @lru_cache()
#     # Volume of the validation gate
#     def _validation_region_volume(cls, prob_gate, meas_pred):
#         n = meas_pred.ndim
#         gate_threshold = cls._gate_threshold(prob_gate, n)
#         c_z = np.pi ** (n / 2) / gamma(n / 2 + 1)
#         return c_z * gate_threshold ** (n / 2) * np.sqrt(det(meas_pred.covar))
#
#     @staticmethod
#     @lru_cache()
#     # Inverse of the covariance
#     def _inv_cov(meas_pred):
#         return np.linalg.inv(meas_pred.covar)
#
#     @staticmethod
#     @lru_cache()
#     # Gate threshold from the chi-squared distribution
#     def _gate_threshold(prob_gate, n):
#         return chi2.ppf(float(prob_gate), n)

"""
    典型的PDA数据关联
"""


class PDA(DataAssociator):
    """
        Probabilistic Data Association (PDA)
        Given a set of detections and a set of tracks, each track has a
        probability that it is associated with each specific detection.
    """
    hypothesiser: Hypothesiser = Property(
        doc="Generate a set of hypotheses for each prediction-detection pair")

    def associate(self, tracks, detections, timestamp, **kwargs):
        # Generate a set of hypotheses for each track on each detection
        hypotheses = self.generate_hypotheses(tracks, detections, timestamp, **kwargs)
        # Ensure association probabilities are normalised
        for track, hypothesis in hypotheses.items():
            hypothesis.normalise_probabilities(total_weight=1)

        return hypotheses


"""
    改写的PDA数据关联
"""


class ModifiedPDA(DataAssociator):
    """
        Probabilistic Data Association (PDA)
        Given a set of detections and a set of tracks, each track has a
        probability that it is associated with each specific detection.
    """
    hypothesiser: Hypothesiser = Property(
        doc="Generate a set of hypotheses for each prediction-detection pair")

    def associate(self, tracks, detections, timestamp, **kwargs):
        # Generate a set of hypotheses for each track on each detection
        hypotheses = self.generate_hypotheses(tracks, detections, timestamp, **kwargs)
        # Ensure association probabilities are normalised
        for track, hypothesis in hypotheses.items():
            # # Print the raw track-measurement association probabilities
            # for assumption in hypothesis:
            #     print(assumption.probability)
            # Normalise
            hypothesis.normalise_probabilities(total_weight=1)
            # Print the normalised probabilities ('assumption' and 'hypothesis' mean the same thing here; renamed for readability)
        return hypotheses
