hmmlearn之MultinomialHMM 离散隐马尔科夫模型
1. 安装:pip3 install hmmlearn
2. 模型参数:
hmmlearn.hmm.MultinomialHMM(
n_components=1,
n_trials=None,
startprob_prior=1.0,
transmat_prior=1.0,
algorithm='viterbi',
n_iter=10,
tol=0.01,
verbose=False,
params='ste',
init_params='ste',
implementation='log')
s: 初始概率分布
t: 状态转移概率
e: 发射概率
3. 建模
(1)初始化全部的模型参数
(2)初始化部分的模型参数
# Example 1: a fully hand-specified 3-box / 2-color HMM, decoded with Viterbi.
import numpy as np
from hmmlearn import hmm

states = ["box1", "box2", "box3"]
n_states = len(states)
observations = ["red", "white"]
n_observations = len(observations)

# pi, A and B are all given by hand, so no training step is needed.
start_probability = np.array([0.2, 0.4, 0.4])
transition_probability = np.array(
    [[0.5, 0.2, 0.3],
     [0.3, 0.5, 0.2],
     [0.2, 0.3, 0.5]]
)
emission_probability = np.array(
    [[0.5, 0.5],
     [0.4, 0.6],
     [0.7, 0.3]]
)

model_1 = hmm.MultinomialHMM(n_components=n_states)
model_1.startprob_ = start_probability
model_1.transmat_ = transition_probability
model_1.emissionprob_ = emission_probability
model_1.n_features = n_observations

# Observation sequence as a column vector of symbol indices.
se = np.array([[0, 1, 0, 0, 1]]).T
logprob, box_index = model_1.decode(se, algorithm='viterbi')

print("颜色:", end="")
print(" ".join(observations[t] for t in [0, 1, 0, 0, 1]))
print("盒子:", end="")
print(" ".join(states[t] for t in box_index))
print("概率值:", end="")
print(np.exp(logprob))
# Example 2: a 3-state HMM over 8 symbols (dice-like emissions); score and
# decode one short sequence, then draw training/test sets by sampling.
import numpy as np
from hmmlearn import hmm

n_components = 3
# Uniform initial distribution over the 3 hidden states.
pi = np.ones(n_components) / n_components
A = np.array([[0.4, 0.3, 0.3],
              [0.3, 0.4, 0.3],
              [0.3, 0.3, 0.4]])

# Emission matrix: state 0 behaves like a fair 6-sided die, state 1 a
# 4-sided, state 2 an 8-sided; each row sums to 1 over the 8 symbols.
M_O2S = np.zeros([3, 8])
M_O2S[0, :6] = 1 / 6.0
M_O2S[1, :4] = 1 / 4.0
M_O2S[2, :8] = 1 / 8.0
n_features = 8

model_2 = hmm.MultinomialHMM(n_components=n_components)
model_2.startprob_ = pi
model_2.transmat_ = A
model_2.emissionprob_ = M_O2S
model_2.n_features = n_features

# Score and decode a single 3-symbol sequence (column-vector input).
data = np.expand_dims(np.array([3, 4, 5]), axis=1)
prob = model_2.score(data)
print("log_prob:", prob)
decode = model_2.predict(data)
print("decode:", decode)

# Draw 200 training and 30 test sequences of length 100 each.
# sample() returns (observations, hidden_states); keep observations only.
# Comprehensions replace the original append loops; the length loop also
# reused the name `data`, clobbering the score input above — fixed here.
train_data = [model_2.sample(100)[0] for _ in range(200)]
test_data = [model_2.sample(100)[0] for _ in range(30)]
# Per-sequence lengths, needed later when fitting on the concatenation.
length_train_data = [np.shape(seq)[0] for seq in train_data]
# Example 3: learn pi / A / B from observation data with Baum-Welch (EM).
import numpy as np
import hmmlearn.hmm as hmm

states = ['盒子1', '盒子2', '盒子3']
obs = ['白球', '黑球']
n_states = len(states)
n_obs = len(obs)

model_3 = hmm.MultinomialHMM(n_components=n_states, n_iter=20, tol=0.001)

# Five observation sequences of length 5 each (0 = 白球, 1 = 黑球).
X3 = np.array([
    [0, 1, 0, 0, 1],
    [0, 0, 0, 1, 1],
    [1, 1, 0, 1, 0],
    [0, 1, 0, 1, 1],
    [0, 0, 0, 1, 0]
])
# BUG FIX: hmmlearn's fit() expects all sequences concatenated into one
# (n_samples, 1) column of symbols plus a `lengths` array marking the
# sequence boundaries; passing the raw (5, 5) matrix mis-declares the
# data as a single sequence of 5 five-dimensional samples.
model_3.fit(X3.reshape(-1, 1), lengths=[X3.shape[1]] * X3.shape[0])

print("输出根据数据训练出来的π")
print(model_3.startprob_)
print("输出根据数据训练出来的A")
print(model_3.transmat_)
print("输出根据数据训练出来的B")
print(model_3.emissionprob_)
# Example 4: warm-start EM from hand-picked parameters (init_params='')
# and re-estimate only the transition and emission matrices (params='te').
import numpy as np
from hmmlearn import hmm

n_components = 3

# Initial guesses for pi, A and B.
pi_4 = np.array([0.2, 0.7, 0.1])
A_4 = np.array([[0.8, 0.2, 0],
                [0, 0.8, 0.2],
                [0, 0, 1]])
M_O2S_4 = np.zeros([3, 8])
M_O2S_4[0, :6] = [0.8, 0.04, 0.04, 0.04, 0.04, 0.04]
M_O2S_4[1, :4] = 1 / 4.0
M_O2S_4[2, :8] = 1 / 8.0

model_4 = hmm.MultinomialHMM(
    n_components=n_components,
    n_iter=50,
    tol=0.001,
    verbose=True,
    params='te',      # update only transitions and emissions during EM
    init_params='',   # keep the manual initial values assigned below
)
model_4.startprob_ = pi_4
model_4.transmat_ = A_4
model_4.emissionprob_ = M_O2S_4

# Train on the sampled sequences from example 2, concatenated into one
# array with explicit per-sequence lengths.
model_4.fit(np.concatenate(train_data, axis=0), np.array(length_train_data))

print(model_4.startprob_)
print(model_4.transmat_)
print(model_4.emissionprob_)
# NOTE(review): dead example code kept as a bare triple-quoted string —
# it is never executed. It references names (test_datas_hmm1/2,
# trained_model_1/2) that are not defined anywhere in this file, so it
# cannot run as-is; it sketches classifying test sequences by comparing
# per-model log-likelihood scores.
'''
N_test1 = len(test_datas_hmm1)
N_test2 = len(test_datas_hmm2)
labs = [0 for i in range(N_test1)] + [1 for i in range(N_test2)]
test_datas = test_datas_hmm1 + test_datas_hmm2
for i,test_data in enumerate(test_datas):
score1 = trained_model_1.score(test_data)
score2 = trained_model_2.score(test_data)
det_lab = np.argmax([score1,score2])
print("%f %f det_lab=%d true_lab = %d"%(score1,score2,det_lab,labs[i]))
'''
参考链接:
B站bugyu_ld
https://www.jianshu.com/p/0f175b9781de