下面是一段使用 Python 编写的高斯混合模型（GMM）代码，通过 EM 算法计算隐变量的后验分布（responsibilities）：
``` import numpy as np from scipy.stats import multivariate_normal
class GaussianMixtureModel: def init(self, n_components, max_iter=100, tol=1e-3): self.n_components = n_components self.max_iter = max_iter self.tol = tol
def fit(self, X):
    """Fit the mixture to data ``X`` with the EM algorithm.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Training data.

    Returns
    -------
    list of float
        Total log-likelihood after each EM iteration. Sets ``weights_``
        (n_components,), ``means_`` (n_components, n_features) and
        ``covariances_`` (n_components, n_features, n_features) on ``self``.
    """
    n_samples, n_features = X.shape
    # Initialisation: uniform weights, means drawn from the data itself
    # (correctly scaled, unlike standard-normal noise), identity covariances.
    self.weights_ = np.ones(self.n_components) / self.n_components
    idx = np.random.choice(n_samples, self.n_components, replace=False)
    self.means_ = X[idx].astype(float).copy()
    self.covariances_ = np.array([np.eye(n_features)] * self.n_components)

    log_likelihoods = []
    for iteration in range(self.max_iter):
        # --- E-step: responsibilities r[i, j] = P(z_i = j | x_i) ---
        weighted = np.zeros((n_samples, self.n_components))
        for j in range(self.n_components):
            weighted[:, j] = self.weights_[j] * multivariate_normal.pdf(
                X, mean=self.means_[j], cov=self.covariances_[j])
        # Guard against all-zero rows (pdf underflow for points far from
        # every component) which would otherwise produce NaNs on division.
        totals = weighted.sum(axis=1, keepdims=True)
        totals[totals == 0.0] = np.finfo(float).tiny
        responsibilities = weighted / totals

        # --- M-step: re-estimate weights, means and covariances ---
        nk = responsibilities.sum(axis=0)
        self.weights_ = nk / n_samples
        self.means_ = (responsibilities.T @ X) / nk[:, None]
        self.covariances_ = np.zeros((self.n_components, n_features, n_features))
        for j in range(self.n_components):
            delta = X - self.means_[j]
            # BUG FIX: the original used responsibilities[:, j, None, None],
            # which broadcasts (n,1,1) against (n,f) into a 3-D (n,n,f) array
            # and makes the matmul produce the wrong shape; (n,1) is correct.
            self.covariances_[j] = (
                (responsibilities[:, j, None] * delta).T @ delta / nk[j])

        # --- Log-likelihood under the freshly updated parameters ---
        density = np.zeros(n_samples)
        for j in range(self.n_components):
            density += self.weights_[j] * multivariate_normal.pdf(
                X, mean=self.means_[j], cov=self.covariances_[j])
        log_likelihoods.append(float(np.log(density).sum()))

        # Stop once the log-likelihood change falls below the tolerance.
        if iteration > 0 and abs(log_likelihoods[-1] - log_likelihoods[-2]) < self.tol:
            break
    return log_likelihoods
def predict(self, X):
n_samples, n_features = X.shape
responsibilities = np.zeros((n_samples, self.n_components))
for