After-Class Practice (Number Bases) 1009. QR Code naive: 《编程思维与实践》 Personal Study Notes

Problem

[The problem statement is given as an image in the original post and is not reproduced here.]

Solution Approach

It is not hard to see that the output consists of three parts:
① output the mode indicator 0001;
② take the length of the input string and convert it to 10-bit binary;
③ split the digit string into groups of three and convert each group to 10-bit binary. The last group needs a special check: it may contain only 1 or 2 digits, in which case it is encoded in 4 or 7 bits respectively (last * 3 + 1 bits for a group of last digits), as the worked example below shows.
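As a concrete check, here is a hand-worked example with the input 01234567 (my own sample; the judge's data is in the problem image):

    0001                                -> mode indicator
    0000001000                          -> length 8 as 10 bits
    0000001100  0101011001  1000011     -> groups 012, 345, 67 in 10, 10 and 7 bits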

A group such as 012 turns into 12 when converted to int: the leading zero disappears, so extracting bits with repeated % 2 and / 2 would produce fewer bits than required and force you to pad by hand. It is easier to test each of the 10 fixed bit positions with a bitwise AND:

cout << ((a & (1 << 9)) ? 1 : 0);
a <<= 1;

Repeating these two lines ten times prints all 10 bits, most significant bit first.
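As a side note, if the standard library is allowed, std::bitset prints a fixed-width binary value in one line. A minimal sketch (my own alternative, not part of the original solution):

    #include <iostream>
    #include <bitset>
    using namespace std;

    int main()
    {
        int len = 8;
        // bitset<10> always prints exactly 10 bits, MSB first, leading zeros kept
        cout << bitset<10>(len) << endl;   // prints 0000001000
    }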

Code

#include <iostream>
#include <cstring>
using namespace std;

// Print `len` as fixed-width binary, MSB first.
// `last` is the number of decimal digits in the group:
// 3 digits -> 10 bits, 2 digits -> 7 bits, 1 digit -> 4 bits (last * 3 + 1 bits).
void trans2(int len, int last)
{
    for (int i = 0; i < last * 3 + 1; i++)
    {
        cout << ((len & (1 << last * 3)) ? 1 : 0);
        len <<= 1;
    }
}

void func(char *a)
{
    cout << "0001";          // mode indicator
    int len = strlen(a);
    trans2(len, 3);          // character count, always 10 bits

    int times = len / 3;     // number of 3-digit groups
    int last;                // digit count of the final group

    if (times * 3 < len)
    {
        last = len - times * 3;   // trailing group of 1 or 2 digits
        times++;
    }
    else
        last = 3;                 // length is an exact multiple of 3

    for (int i = 0; i < times; i++)
    {
        int eachT = 0;            // decimal value of the current group
        for (int j = 0; j < 3; j++)
        {
            if (a[3 * i + j] == '\0') break;
            eachT *= 10;
            eachT += (a[3 * i + j] - '0');
        }
        // full groups take 10 bits; the final group may take 7 or 4
        (i != times - 1) ? trans2(eachT, 3) : trans2(eachT, last);
    }
}

int main()
{
    char a[2000] = {0};
    cin >> a;
    func(a);
    return 0;
}
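A quick run with the sample worked out above (my own test input, not the judge's data):

    input : 01234567
    output: 00010000001000000000110001010110011000011

which is exactly the concatenation 0001 + 0000001000 + 0000001100 + 0101011001 + 1000011.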