4. HMM Models and the Forward, Backward, and Optimal-Path (Viterbi) Algorithms

Code description: link

HMM.py:

import numpy as np
 
def forward_algorithm(O, HMM_model):
    """HMM Forward Algorithm.
    Args:
        O: (o1, o2, ..., oT), observations
        HMM_model: (pi, A, B), (init state prob, transition prob, emitting prob)
    Return:
        prob: the probability of HMM_model generating O.
    """
    pi, A, B = HMM_model
    T = len(O)
    # total number of states
    N = len(pi)
    prob = 0.0
    # a[t][i]: probability of observing O[0..t] and being in state i at time t
    a = np.zeros((T, N))
    a[0, :] = [pi[i] * B[i][O[0]] for i in range(N)]
    for t in range(1, T):
        a[t, :] = [sum([a[t-1][j] * A[j][i] for j in range(N)]) * B[i][O[t]] for i in range(N)]
    # probability of the model generating the whole observation sequence
    prob = sum(a[T-1])
    return prob
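
For reference, the recursion the function above implements is the standard forward algorithm; in the usual notation (LaTeX):

\alpha_1(i) = \pi_i \, b_i(o_1), \qquad
\alpha_t(i) = \Big[ \sum_{j=1}^{N} \alpha_{t-1}(j) \, a_{ji} \Big] b_i(o_t), \qquad
P(O \mid \lambda) = \sum_{i=1}^{N} \alpha_T(i)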
 
def backward_algorithm(O, HMM_model):
    """HMM Backward Algorithm.
    Args:
        O: (o1, o2, ..., oT), observations
        HMM_model: (pi, A, B), (init state prob, transition prob, emitting prob)
    Return:
        prob: the probability of HMM_model generating O.
    """
    pi, A, B = HMM_model
    T = len(O)
    N = len(pi)
    prob = 0.0
    # beta[t][i]: probability of observing O[t+1..T-1] given state i at time t
    beta = np.ones((T, N))
    for t in range(T - 2, -1, -1):
        beta[t, :] = [sum([A[i][j] * B[j][O[t+1]] * beta[t+1][j] for j in range(N)]) for i in range(N)]
    # probability of the model generating the whole observation sequence
    prob = sum([pi[i] * B[i][O[0]] * beta[0][i] for i in range(N)])
    return prob
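
The backward recursion the function above follows is, in the same notation:

\beta_T(i) = 1, \qquad
\beta_t(i) = \sum_{j=1}^{N} a_{ij} \, b_j(o_{t+1}) \, \beta_{t+1}(j), \qquad
P(O \mid \lambda) = \sum_{i=1}^{N} \pi_i \, b_i(o_1) \, \beta_1(i)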
 
 
def Viterbi_algorithm(O, HMM_model):
    """Viterbi decoding.
    Args:
        O: (o1, o2, ..., oT), observations
        HMM_model: (pi, A, B), (init state prob, transition prob, emitting prob)
    Returns:
        best_prob: the probability of the best state sequence
        best_path: the best state sequence
    """
    pi, A, B = HMM_model
    T = len(O)
    N = len(pi)
    best_prob, best_path = 0.0, {}
    delta = np.zeros((T, N))
    psi = np.zeros((T, N))
    delta[0, :] = [pi[i] * B[i][O[0]] for i in range(N)]
 
    for t in range(1, T):
        # delta[t][i]: highest probability of any single path ending in state i at time t
        delta[t, :] = [max([delta[t - 1][j] * A[j][i] * B[i][O[t]] for j in range(N)]) for i in range(N)]
        # psi[t][i]: the previous state that achieves that maximum
        psi[t, :] = [np.argmax([delta[t - 1][j] * A[j][i] for j in range(N)]) for i in range(N)]

    # probability of the best state path
    best_prob = max(delta[T - 1])
    # state index of the last node on the best path
    best_path[T-1] = int(np.argmax(delta[T - 1]))
    # backtrack through psi to recover the full state sequence
    for t in range(T - 2, -1, -1):
        best_path[t] = int(psi[t + 1][best_path[t + 1]])
    return best_prob, best_path
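
For reference, the recursion and backtrace implemented above correspond to:

\delta_1(i) = \pi_i \, b_i(o_1), \qquad
\delta_t(i) = \max_{j} \big[ \delta_{t-1}(j) \, a_{ji} \big] \, b_i(o_t), \qquad
\psi_t(i) = \arg\max_{j} \big[ \delta_{t-1}(j) \, a_{ji} \big]

P^* = \max_{i} \delta_T(i), \qquad
i_T^* = \arg\max_{i} \delta_T(i), \qquad
i_t^* = \psi_{t+1}(i_{t+1}^*)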
 
 
if __name__ == "__main__":
    color2id = { "RED": 0, "WHITE": 1 }
    # initial state probabilities
    pi = [0.2, 0.4, 0.4]
    # state transition probabilities
    A = [[0.5, 0.2, 0.3],
         [0.3, 0.5, 0.2],
         [0.2, 0.3, 0.5]]
    # emission (observation) probabilities
    B = [[0.5, 0.5],
         [0.4, 0.6],
         [0.7, 0.3]]
    # observation sequence (0 = RED, 1 = WHITE)
    observations = (0, 1, 0)
    HMM_model = (pi, A, B)
    # process
    observ_prob_forward = forward_algorithm(observations, HMM_model)
    print(observ_prob_forward)
 
    observ_prob_backward = backward_algorithm(observations, HMM_model)
    print(observ_prob_backward)
 
    best_prob, best_path = Viterbi_algorithm(observations, HMM_model) 
    print(best_prob, best_path)
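
A small sanity check that could be appended inside the __main__ block above (a minimal sketch relying only on the results already computed): the forward and backward algorithms must return the same P(O | λ).

    # forward and backward should agree up to floating-point error
    assert np.isclose(observ_prob_forward, observ_prob_backward), \
        "forward and backward probabilities should match"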

Follow-up notes:

        Regarding the Viterbi algorithm: given an observation sequence O and the HMM parameters λ, it always outputs the best state path for O, which can then be refined by further iterative optimization. This is useful later when aligning all the frames of a speech signal to HMM states.
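
To make the alignment idea concrete, here is a minimal sketch reusing Viterbi_algorithm and HMM_model from the script above; the per-frame symbols are made up purely for illustration. Once each frame of a signal has been quantized to one of the model's observation symbols, the returned path assigns one state index to every frame, i.e. a frame-to-state alignment.

# hypothetical per-frame observation symbols (0 = RED, 1 = WHITE)
frames = (0, 1, 1, 0, 1)
_, path = Viterbi_algorithm(frames, HMM_model)
# the returned dict maps frame index -> state index: the alignment
alignment = [path[t] for t in range(len(frames))]
print(alignment)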
