HMM MATLAB source code: how to read two examples of a hidden Markov model (HMM) implemented in MATLAB, with the HMM toolbox

% ① Define an HMM and train it.
% ② Test the HMM with an observation sequence and compute how well that sequence matches the HMM.
% Modified by: 旺齐齐
% Modification: a second model (HMM2) is added; the script tests which of the two HMM models an observation sequence fits better.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% O: number of distinct observation symbols
O = 7;
O2 = 7;

% Q: number of hidden states
Q = 5;
Q2 = 5;

% Training data set: each row is one observation sequence (a small validity check follows data2 below)

data = [1,2,3,1,2,2,4,2,3,1,2,7,2;
        1,2,3,6,2,2,1,4,3,1,5,3,1;
        1,2,3,1,2,5,1,2,4,1,2,3,2;
        1,2,7,1,2,2,1,2,5,1,2,4,1;
        5,2,3,3,5,2,1,2,3,1,2,3,6;
        1,2,3,1,2,2,1,6,5,1,2,6,4;
        5,2,3,4,4,2,1,2,3,1,2,5,6;
        1,2,6,1,2,2,1,2,3,1,4,3,2;
        1,2,3,4,2,7,1,4,3,1,7,3,3;
        5,2,3,5,2,2,1,2,3,1,2,3,4;
        5,2,4,1,2,2,5,2,3,7,1,6,2]

data2 = [1,2,3,1,2,2,4,2,3,1,2,7,2;
         1,2,3,6,2,2,1,4,3,1,5,3,1;
         1,2,3,1,2,5,1,2,4,1,2,3,2;
         1,2,7,1,2,2,1,2,5,1,2,4,1;
         5,2,3,3,5,2,1,2,3,1,2,3,6;
         1,2,3,1,2,2,1,6,5,1,2,6,4;
         5,2,3,4,4,2,1,2,3,1,2,5,6;
         1,2,6,1,2,2,1,2,3,1,4,3,2;
         1,2,3,4,2,7,1,4,3,1,7,3,3;
         5,2,3,5,2,2,1,2,3,1,2,3,4;
         4,2,5,1,2,2,6,2,3,7,1,6,4]
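% Validity check (a hedged sketch, not part of the original post): dhmm_em works with
% discrete observations coded as integers in the range 1..O, so it is worth verifying
% the data before training.
assert(all(data(:)  >= 1 & data(:)  <= O  & data(:)  == round(data(:))));
assert(all(data2(:) >= 1 & data2(:) <= O2 & data2(:) == round(data2(:))));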

% initial guess of parameters
% Randomly initialise the parameters of HMM1
prior1 = normalise(rand(Q,1));        % initial state distribution
transmat1 = mk_stochastic(rand(Q,Q)); % state transition matrix
obsmat1 = mk_stochastic(rand(Q,O));   % observation (emission) matrix

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% Added part: random initial parameters for HMM2
prior3 = normalise(rand(Q2,1));
transmat3 = mk_stochastic(rand(Q2,Q2));
obsmat3 = mk_stochastic(rand(Q2,O2));

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
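% Quick sanity check (optional sketch, not part of the original script): normalise and
% mk_stochastic from the HMM toolbox are assumed to return a column vector summing to 1
% and matrices whose rows each sum to 1, so the random guesses are already valid
% probability distributions before EM starts.
assert(abs(sum(prior1) - 1) < 1e-10);
assert(all(abs(sum(transmat1, 2) - 1) < 1e-10));
assert(all(abs(sum(obsmat1, 2) - 1) < 1e-10));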

% improve guess of parameters using EM
% Train the parameter matrices on the data set to obtain the new HMM1.
% 'max_iter' is set to the number of training sequences, size(data,1) = 11,
% which is why the log below shows 11 EM iterations.
[LL, prior2, transmat2, obsmat2] = dhmm_em(data, prior1, transmat1, obsmat1, 'max_iter', size(data,1));

% Log-likelihood of the training data after each EM iteration
LL

% Initial state distribution after training
prior2

% State transition probability matrix after training
transmat2

% Observation probability matrix after training
obsmat2

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% Added part: train HMM2 on data2 in the same way
[LL2, prior4, transmat4, obsmat4] = dhmm_em(data2, prior3, transmat3, obsmat3, 'max_iter', size(data2,1));
LL2
prior4
transmat4
obsmat4

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
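% Note (a hedged sketch, not part of the original script): here every training sequence
% has the same length, so a plain matrix with one sequence per row is enough. The
% toolbox's dhmm_em is documented to also accept a cell array with one sequence per
% cell, which is the form to use when the sequences have different lengths. The names
% dataCell, LLc, priorC, transmatC, obsmatC below are illustrative only.
dataCell = {[1 2 3 1 2], [5 2 3 4 4 2 1], [1 2 6 1 2 2 1 2 3]};
[LLc, priorC, transmatC, obsmatC] = dhmm_em(dataCell, prior1, transmat1, obsmat1, 'max_iter', 10);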

% use the trained models to compute the log likelihood of a test sequence
% data1 = [1,2,3,1,2,2,1,2,3,1,2,3,1]
data1 = [5,2,4,1,2,2,5,2,3,7,1,6,2]

loglik = dhmm_logprob(data1, prior2, transmat2, obsmat2)

% loglik is slightly different from LL(end), since it is computed after the final M step.
% loglik measures how well data1 matches this HMM (parameters prior2, transmat2, obsmat2):
% it is the log of a probability, so it is always <= 0, and values closer to 0 mean a better match.
% path, computed below, is the output of the Viterbi algorithm, i.e. the most probable hidden state sequence.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% Added part: log likelihood of the same test sequence under HMM2
loglik2 = dhmm_logprob(data1, prior4, transmat4, obsmat4)

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
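% Side note (not in the original script): since these are log probabilities, the
% difference of the two scores is a log likelihood ratio. With the values from the run
% shown below (loglik = -19.2351, loglik2 = -21.0715), data1 is roughly
% exp(loglik - loglik2) ≈ 6.3 times more likely under HMM1 than under HMM2.
% The name "ratio" is illustrative only.
ratio = exp(loglik - loglik2)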

B = multinomial_prob(data1, obsmat2);
path = viterbi_path(prior2, transmat2, B)
save('sa.mat');
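% What the two calls above compute (a hedged sketch based on the toolbox's conventions,
% not code from the original post): multinomial_prob builds the observation likelihood
% matrix B with B(i,t) = P(y_t | Q_t = i), which for a discrete HMM amounts to a lookup
% into the emission matrix; viterbi_path then returns the most probable hidden state
% sequence given those likelihoods. The lookup can be reproduced by hand
% (B_check is an illustrative name):
B_check = obsmat2(:, data1);    % B_check(i,t) = obsmat2(i, data1(t))
max(abs(B(:) - B_check(:)))     % expected to be 0 if the assumption above holds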

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% Added part: Viterbi decoding of data1 under HMM2, then pick the model with the
% larger log likelihood ("fuhe" records which model fits better: 1 for HMM1, 2 for HMM2)
B2 = multinomial_prob(data1, obsmat4);
path2 = viterbi_path(prior4, transmat4, B2)
save('sa2.mat');

if loglik2 > loglik
    fuhe = 2
else
    fuhe = 1
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
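% Generalisation (a hedged sketch, not part of the original script): with the trained
% models collected in cell arrays, the same "pick the model with the highest log
% likelihood" rule extends to any number of HMMs. The names priors, transmats, obsmats,
% scores and bestModel below are illustrative, not from the toolbox.
priors    = {prior2, prior4};
transmats = {transmat2, transmat4};
obsmats   = {obsmat2, obsmat4};
scores = zeros(1, numel(priors));
for k = 1:numel(priors)
    scores(k) = dhmm_logprob(data1, priors{k}, transmats{k}, obsmats{k});
end
[~, bestModel] = max(scores)   % 1 or 2; should agree with fuhe above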

------
Run results
------

data =

     1     2     3     1     2     2     4     2     3     1     2     7     2
     1     2     3     6     2     2     1     4     3     1     5     3     1
     1     2     3     1     2     5     1     2     4     1     2     3     2
     1     2     7     1     2     2     1     2     5     1     2     4     1
     5     2     3     3     5     2     1     2     3     1     2     3     6
     1     2     3     1     2     2     1     6     5     1     2     6     4
     5     2     3     4     4     2     1     2     3     1     2     5     6
     1     2     6     1     2     2     1     2     3     1     4     3     2
     1     2     3     4     2     7     1     4     3     1     7     3     3
     5     2     3     5     2     2     1     2     3     1     2     3     4
     5     2     4     1     2     2     5     2     3     7     1     6     2

data2 =

     1     2     3     1     2     2     4     2     3     1     2     7     2
     1     2     3     6     2     2     1     4     3     1     5     3     1
     1     2     3     1     2     5     1     2     4     1     2     3     2
     1     2     7     1     2     2     1     2     5     1     2     4     1
     5     2     3     3     5     2     1     2     3     1     2     3     6
     1     2     3     1     2     2     1     6     5     1     2     6     4
     5     2     3     4     4     2     1     2     3     1     2     5     6
     1     2     6     1     2     2     1     2     3     1     4     3     2
     1     2     3     4     2     7     1     4     3     1     7     3     3
     5     2     3     5     2     2     1     2     3     1     2     3     4
     4     2     5     1     2     2     6     2     3     7     1     6     4

iteration 1, loglik = -327.100465
iteration 2, loglik = -238.259812
iteration 3, loglik = -232.962948
iteration 4, loglik = -223.323891
iteration 5, loglik = -207.630875
iteration 6, loglik = -191.012697
iteration 7, loglik = -178.611546
iteration 8, loglik = -171.524132
iteration 9, loglik = -168.626526
iteration 10, loglik = -167.387057
iteration 11, loglik = -166.689175

LL =

  Columns 1 through 9

  -327.1005  -238.2598  -232.9629  -223.3239  -207.6309  -191.0127  -178.6115  -171.5241  -168.6265

  Columns 10 through 11

  -167.3871  -166.6892

prior2 =

    0.0000
    0.0000
    1.0000
    0.0000
    0.0000

transmat2 =

    0.0138    0.0089    0.7680    0.1060    0.1033
    0.7811    0.0000    0.0199    0.0067    0.1923
    0.0000    0.9936    0.0000    0.0064    0.0000
    0.1686    0.2604    0.2242    0.3398    0.0070
    0.0053    0.0406    0.8350    0.1184    0.0007

obsmat2 =

    0.0000    0.2351    0.5738    0.0256    0.1118    0.0186    0.0351
    0.0000    0.8270    0.0000    0.0790    0.0256    0.0456    0.0228
    0.7514    0.0021    0.0011    0.0550    0.1472    0.0432    0.0000
    0.0014    0.4208    0.0447    0.4366    0.0023    0.0887    0.0055
    0.0000    0.0784    0.3223    0.2014    0.0116    0.1525    0.2338

iteration 1, loglik = -277.738670
iteration 2, loglik = -242.163247
iteration 3, loglik = -238.321971
iteration 4, loglik = -233.166746
iteration 5, loglik = -225.682259
iteration 6, loglik = -214.560296
iteration 7, loglik = -201.182015
iteration 8, loglik = -189.427453
iteration 9, loglik = -179.156352
iteration 10, loglik = -171.744096
iteration 11, loglik = -168.409063

LL2 =

  Columns 1 through 9

  -277.7387  -242.1632  -238.3220  -233.1667  -225.6823  -214.5603  -201.1820  -189.4275  -179.1564

  Columns 10 through 11

  -171.7441  -168.4091

prior4 =

    0.0000
    0.9982
    0.0004
    0.0014
    0.0000

transmat4 =

    0.0873    0.5277    0.2799    0.1007    0.0045
    0.0002    0.0000    0.0005    0.0000    0.9994
    0.0180    0.0000    0.0118    0.0011    0.9692
    0.0436    0.0226    0.0810    0.0219    0.8310
    0.9746    0.0056    0.0003    0.0195    0.0000

obsmat4 =

    0.0000    0.2012    0.5080    0.0580    0.1093    0.0465    0.0770
    0.7939    0.0001    0.0000    0.0745    0.1277    0.0038    0.0000
    0.4120    0.1044    0.0049    0.1736    0.0032    0.3017    0.0001
    0.4527    0.0622    0.0637    0.2568    0.0549    0.0295    0.0802
    0.0000    0.8172    0.0000    0.0943    0.0270    0.0389    0.0225

data1 =

     5     2     4     1     2     2     5     2     3     7     1     6     2

loglik =

  -19.2351

loglik2 =

  -21.0715

path =

     3     2     5     3     2     1     3     2     1     5     3     2     1

path2 =

     2     5     1     2     5     1     2     5     1     1     2     5     1

fuhe =

     1
