Estimating a Gaussian Mixture Model (GMM) with the EM Algorithm: a MATLAB Implementation
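For readers who want to map the code to the math: the while-loop below iterates the standard EM updates for a Gaussian mixture, with pGamma, pMiu, pSigma, and pPi playing the roles of $\gamma_{nk}$, $\mu_k$, $\Sigma_k$, and $\pi_k$:

$$\gamma_{nk} = \frac{\pi_k\,\mathcal{N}(x_n \mid \mu_k, \Sigma_k)}{\sum_{j=1}^{K} \pi_j\,\mathcal{N}(x_n \mid \mu_j, \Sigma_j)}, \qquad N_k = \sum_{n=1}^{N} \gamma_{nk},$$

$$\mu_k = \frac{1}{N_k}\sum_{n=1}^{N} \gamma_{nk}\, x_n, \qquad \Sigma_k = \frac{1}{N_k}\sum_{n=1}^{N} \gamma_{nk}\,(x_n-\mu_k)(x_n-\mu_k)^{\top}, \qquad \pi_k = \frac{N_k}{N}.$$

The iteration stops when the increase in the log-likelihood $L = \sum_{n=1}^{N} \log \sum_{k=1}^{K} \pi_k\,\mathcal{N}(x_n \mid \mu_k, \Sigma_k)$ drops below the threshold of 1e-15.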

function varargout = gmm(X, K_or_centroids)
% ============================================================
% Expectation-Maximization iteration implementation of
% Gaussian Mixture Model.
%
% PX = GMM(X, K_OR_CENTROIDS)
% [PX MODEL] = GMM(X, K_OR_CENTROIDS)
%
%  - X: N-by-D data matrix.
%  - K_OR_CENTROIDS: either a scalar K indicating the number of
%       components, or a K-by-D matrix giving the initial K
%       centroids to use.
%
%  - PX: N-by-K matrix indicating the probability of each
%       component generating each point.
%  - MODEL: a structure containing the parameters of the GMM:
%       MODEL.Miu: a K-by-D matrix of component means.
%       MODEL.Sigma: a D-by-D-by-K array of covariance matrices.
%       MODEL.Pi: a 1-by-K vector of mixing weights.
% ============================================================

    threshold = 1e-15;
    [N, D] = size(X);

    if isscalar(K_or_centroids)
        K = K_or_centroids;
        % randomly pick K data points as the initial centroids
        rndp = randperm(N);
        centroids = X(rndp(1:K), :);
    else
        K = size(K_or_centroids, 1);
        centroids = K_or_centroids;
    end

    % initial values
    [pMiu, pPi, pSigma] = init_params();

    Lprev = -inf;
    while true
        Px = calc_prob();

        % E-step: new value for pGamma, the posterior responsibility
        % of each component for each point
        pGamma = Px .* repmat(pPi, N, 1);
        pGamma = pGamma ./ repmat(sum(pGamma, 2), 1, K);

        % M-step: new values for the parameters of each component
        Nk = sum(pGamma, 1);
        pMiu = diag(1./Nk) * pGamma' * X;
        pPi = Nk/N;
        for kk = 1:K
            Xshift = X - repmat(pMiu(kk, :), N, 1);
            pSigma(:, :, kk) = (Xshift' * (diag(pGamma(:, kk)) * Xshift)) / Nk(kk);
        end

        % check for convergence: stop when the log-likelihood gain
        % falls below the threshold
        L = sum(log(Px*pPi'));
        if L-Lprev < threshold
            break;
        end
        Lprev = L;
    end

    if nargout == 1
        varargout = {Px};
    else
        model = [];
        model.Miu = pMiu;
        model.Sigma = pSigma;
        model.Pi = pPi;
        varargout = {Px, model};
    end

    function [pMiu, pPi, pSigma] = init_params()
        pMiu = centroids;
        pPi = zeros(1, K);
        pSigma = zeros(D, D, K);

        % hard-assign each point to its nearest centroid
        distmat = repmat(sum(X.*X, 2), 1, K) + ...
            repmat(sum(pMiu.*pMiu, 2)', N, 1) - ...
            2*X*pMiu';
        [~, labels] = min(distmat, [], 2);

        for k = 1:K
            Xk = X(labels == k, :);
            pPi(k) = size(Xk, 1)/N;
            pSigma(:, :, k) = cov(Xk);
        end
    end

    function Px = calc_prob()
        Px = zeros(N, K);
        for k = 1:K
            Xshift = X - repmat(pMiu(k, :), N, 1);
            inv_pSigma = inv(pSigma(:, :, k));
            tmp = sum((Xshift*inv_pSigma) .* Xshift, 2);
            coef = (2*pi)^(-D/2) * sqrt(det(inv_pSigma));
            Px(:, k) = coef * exp(-0.5*tmp);
        end
    end
end
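A minimal usage sketch (not part of the original post; the synthetic data and variable names below are illustrative): save the function above as gmm.m, generate two well-separated 2-D Gaussian clusters, and fit a 2-component mixture.

% --- usage sketch (illustrative; assumes the function above is saved as gmm.m) ---
rng(1);                                        % fix the random seed for repeatability
X1 = randn(200, 2);                            % cluster centred at (0, 0)
X2 = randn(200, 2) + repmat([5 5], 200, 1);    % cluster centred at (5, 5)
X  = [X1; X2];                                 % 400-by-2 data matrix

[Px, model] = gmm(X, 2);                       % fit a 2-component GMM

disp(model.Pi);                                % mixing weights, 1-by-2
disp(model.Miu);                               % component means, 2-by-2

% Px(n, k) is the density of component k at point n; weighting by the
% mixing proportions and normalising each row gives the posterior
% responsibilities, exactly as in the E-step of the loop above:
post = Px .* repmat(model.Pi, size(X, 1), 1);
post = post ./ repmat(sum(post, 2), 1, 2);

With clusters this far apart, each row of post should come out close to 0/1, i.e. a nearly hard assignment of every point to one component.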
