【数模/美赛板子】存一下C++代码实现神经网络+可视化处理过程的板子

26 篇文章 0 订阅
7 篇文章 0 订阅

常用激活函数:

  • Sigmoid 函数: σ(x) = 1 / (1 + e^{-x})
  • ReLU 函数: f(x) = max(0, x)
  • Tanh 函数: tanh(x) = (e^x - e^{-x}) / (e^x + e^{-x})
/*
这是一份用C++实现的人工神经网络的代码,使用前向传播、反向传播算法和均方误差(MSE)损失函数。该代码实现了一个单隐藏层的神经网络:隐藏层拥有两个节点,外加一个输出节点。具体地,首先通过成员变量确定网络的超参数(学习率和训练轮数),并对权重和偏置进行初始化。随后是前向传播过程:网络根据输入数据以及当前的权重和偏置,通过激活函数(sigmoid)计算出网络的预测输出。在训练过程中,网络先通过前向传播得到预测输出,再通过反向传播和梯度下降法更新权重和偏置,使网络的预测更加准确。最后,网络可用于预测新数据。
Sigmoid 函数:$\sigma(x) = \frac{1}{1+e^{-x}}$
ReLU 函数:$f(x) = \max(0,x)$
Tanh 函数:$\tanh(x) = \frac{e^x-e^{-x}}{e^x+e^{-x}}$
*/
#include<bits/stdc++.h>
using namespace std;

#include<windows.h>
#include<graphics.h>
#define SCR_WIDTH      800     // 窗口宽度
#define SCR_HEIGHT     600     // 窗口高度

inline void srandd(int seed=0) {srand(seed?seed:time(0));}
// Uniform random double in [0, 1].
inline double randd() {return (double)rand()/RAND_MAX;}
// Uniform random double in [l, r].
// BUG FIX: the previous version scaled by (r + 0.99999999999999 - l) — an
// integer-range fudge — so results could overshoot r by almost a full unit.
// Callers (make_data in main) treat the result as a continuous value in [l, r].
inline double randd(double l, double r) {return randd()*(r-l)+l;}

// Scale a raw weight/bias value into pixel units for the on-screen plot.
// FIX: the parameter was a non-const double& although it is never modified;
// take it by value so rvalues/const values are also accepted (all existing
// call sites pass lvalues and remain source-compatible).
double trans(double x) {
	return x*50;//log(fabs(x)+1)*100*fabs(x*50)/float(x*50);  (old log-compressed mapping, kept for reference)
}


// Squared error between a prediction and a target value.
double getMSEloss(double x1,double x2){
	const double diff = x1 - x2;
	return diff * diff;
}
// A minimal fully-connected 2-2-1 feed-forward network: 2 inputs, one hidden
// layer of 2 sigmoid units, 1 sigmoid output, trained by per-sample gradient
// descent on squared error.
class NNetwork
{
	public:
	int epoches;          // number of training epochs
	double learning_rate; // gradient-descent step size
	// w1..w4: input->hidden weights; w5,w6: hidden->output weights.
	double w1,w2,w3,w4,w5,w6;
	// b1,b2: hidden-unit biases; b3: output-unit bias.
	double b1,b2,b3;
	public:
	NNetwork(int es,double lr);                      // set hyper-parameters, initialize parameters
	double sigmoid(double x);                        // logistic activation
	double deriv_sigmoid(double x);                  // derivative of the logistic activation
	double forward(vector<double> data);             // forward pass -> output in (0,1)
	void train(vector<vector<double>> data,vector<double> label);             // SGD training loop (also plots parameters)
	void predict(vector<vector<double>> test_data,vector<double> test_label); // print classification accuracy
};
// Store the hyper-parameters and initialize all weights/biases to small
// random values in [-1, 1].
// FIX: the previous all-zero initialization made the two hidden units
// permanently symmetric — with identical weights they receive identical
// gradients forever, so the network could never use both units.
NNetwork::NNetwork(int es,double lr):epoches(es),learning_rate(lr){
	auto init = []{ return (double)rand()/RAND_MAX*2.0 - 1.0; };
	w1=init(); w2=init(); w3=init(); w4=init(); w5=init(); w6=init();
	b1=init(); b2=init(); b3=init();
}
// Logistic activation: maps any real input into the open interval (0, 1).
double NNetwork::sigmoid(double x){
	const double e = exp(-x);
	return 1.0 / (1.0 + e);
}
// Derivative of the logistic activation, expressed through its own value:
// sigma'(x) = sigma(x) * (1 - sigma(x)).
double NNetwork::deriv_sigmoid(double x){
	const double s = sigmoid(x);
	return s * (1.0 - s);
}
// Forward pass: 2 inputs -> 2 sigmoid hidden units -> 1 sigmoid output.
// Returns the network's prediction in (0, 1).
double NNetwork::forward(vector<double> data){
	const double h1 = sigmoid(w1 * data[0] + w2 * data[1] + b1);
	const double h2 = sigmoid(w3 * data[0] + w4 * data[1] + b2);
	return sigmoid(w5 * h1 + w6 * h2 + b3);
}
// Train with per-sample stochastic gradient descent on squared error, while
// plotting every parameter's trajectory to the EGE graphics window.
void NNetwork::train(vector<vector<double>> data,vector<double> label){
	for(int epoch=0;epoch<epoches;++epoch){
		int total_n = data.size();
		for(int i=0;i<total_n;++i){
			vector<double> x = data[i];
			// Forward pass, kept inline so the pre-activation sums
			// (sum_h1, sum_h2, sum_o1) are available for backprop below.
			double sum_h1 = w1 * x[0] + w2 * x[1] + b1;
			double h1 = sigmoid(sum_h1);
			double sum_h2 = w3 * x[0] + w4 * x[1] + b2;
			double h2 = sigmoid(sum_h2);
			double sum_o1 = w5 * h1 + w6 * h2 + b3;
			double o1 = sigmoid(sum_o1);
			double pred = o1;
 
			// dL/dpred for L = (label - pred)^2.
			double d_loss_pred = -2 * (label[i] - pred);
 
			// Output-layer partials: dpred/dw5, dpred/dw6, dpred/db3.
			double d_pred_w5 = h1 * deriv_sigmoid(sum_o1);
			double d_pred_w6 = h2 * deriv_sigmoid(sum_o1);
			double d_pred_b3 = deriv_sigmoid(sum_o1);
			
			// How the output reacts to each hidden activation (uses the
			// current, not-yet-updated w5/w6).
			double d_pred_h1 = w5 * deriv_sigmoid(sum_o1);
			double d_pred_h2 = w6 * deriv_sigmoid(sum_o1);
 
			// Hidden-unit 1 partials w.r.t. its weights and bias.
			double d_h1_w1 = x[0] * deriv_sigmoid(sum_h1);
			double d_h1_w2 = x[1] * deriv_sigmoid(sum_h1);
			double d_h1_b1 = deriv_sigmoid(sum_h1);
 
			// Hidden-unit 2 partials w.r.t. its weights and bias.
			double d_h2_w3 = x[0] * deriv_sigmoid(sum_h2);
			double d_h2_w4 = x[1] * deriv_sigmoid(sum_h2);
			double d_h2_b2 = deriv_sigmoid(sum_h2);
 
			// Gradient-descent updates (chain rule). d_pred_h1/h2 were
			// computed from the pre-update w5/w6 above, so the order is safe.
			w1 -= learning_rate * d_loss_pred * d_pred_h1 * d_h1_w1;
			w2 -= learning_rate * d_loss_pred * d_pred_h1 * d_h1_w2;
			b1 -= learning_rate * d_loss_pred * d_pred_h1 * d_h1_b1;
			w3 -= learning_rate * d_loss_pred * d_pred_h2 * d_h2_w3;
			w4 -= learning_rate * d_loss_pred * d_pred_h2 * d_h2_w4;
			b2 -= learning_rate * d_loss_pred * d_pred_h2 * d_h2_b2;
			w5 -= learning_rate * d_loss_pred * d_pred_w5;
			w6 -= learning_rate * d_loss_pred * d_pred_w6;
			b3 -= learning_rate * d_loss_pred * d_pred_b3; 
		}
		if(epoch%10==0){
			// Every 10 epochs, report the SUM of squared errors over the
			// whole data set (note: not averaged into a mean).
			double loss = 0;
			for(int i=0;i<total_n;++i){
				double pred = forward(data[i]);
				loss += getMSEloss(pred,label[i]);
			}
			cout<<"epoch "<<epoch<<" loss: "<<loss<<endl;
			delay_ms(1); // EGE: brief yield so the window stays responsive
		}
			// Plot each parameter (one color per parameter) against epoch.
			// NOTE(review): despite the indentation, this block is OUTSIDE
			// the if above and runs every epoch — confirm that is intended.
			int xx=SCR_WIDTH*epoch*1./epoches;
			putpixel_f(xx,SCR_HEIGHT/2-trans(w1),0xffff00);
			putpixel_f(xx,SCR_HEIGHT/2-trans(w2),0xff00ff);
			putpixel_f(xx,SCR_HEIGHT/2-trans(b1),0x00ffff);
			putpixel_f(xx,SCR_HEIGHT/2-trans(w3),0x0000ff);
			putpixel_f(xx,SCR_HEIGHT/2-trans(w4),0x00ff00);
			putpixel_f(xx,SCR_HEIGHT/2-trans(b2),0xff0000);
			putpixel_f(xx,SCR_HEIGHT/2-trans(w5),0x888888);
			putpixel_f(xx,SCR_HEIGHT/2-trans(w6),0xAAAAAA);
			putpixel_f(xx,SCR_HEIGHT/2-trans(b3),0xCCCCCC);
	}
}
// Evaluate classification accuracy on a labelled test set and print it.
// Each forward pass yields a probability in (0,1); threshold at 0.5 to get
// a 0/1 class prediction.
// FIX: guard against an empty test set, which previously divided by zero
// and printed NaN.
void NNetwork::predict(vector<vector<double>> test_data,vector<double> test_label){
	int n = test_data.size();
	if(n == 0){
		cout<<"correct rate: n/a (empty test set)"<<endl;
		return;
	}
	double cnt = 0;
	for(int i=0;i<n;++i){
		double pred = forward(test_data[i]);
		pred = pred>0.5?1:0;
		cnt += (test_label[i]==pred);
	}
	cout<<"correct rate:"<<cnt/n<<endl;
}
int main(){
	// Initialize the EGE visualization window (manual render, no forced exit).
	// setinitmode(INIT_NOBORDER, 100, 100);
	initgraph(SCR_WIDTH, SCR_HEIGHT, INIT_RENDERMANUAL | INIT_NOFORCEEXIT);
	setcaption("Title here ~");

	// Seed the RNG and define a helper that fabricates 10 labelled samples.
	srandd();
	auto make_data=[](vector<vector<double>>&data,vector<double>&label) {
		for(int i=10; i; i--) {
			double x=randd(-100,100);
			double y=randd(-100,100);
			data.push_back({x,y});
			// Label by which side of the curve x^2 + y - 8 = 0 the point is
			// on. NOTE(review): with x in [-100,100], x*x dominates, so almost
			// every sample is labelled 1 — confirm this skew is intended.
			label.push_back((x*x+y-8)>0?1:0);
		}
	};

	// Build the training set (a hand-written fixture is kept commented out).
	// vector<vector<double>> data = {{-2,-1},{25,6},{17,4},{-15,-6},{3,-6},{7,-6},{1,-1.1},{3.6,-3.4}};
	// vector<double> label = {1,0,0,1,1,0,1,0};
	vector<vector<double>> data;
	vector<double> label;
	make_data(data,label);

	// Train: 10000 epochs with learning rate 0.6.
	NNetwork network = NNetwork(10000,0.6);
	network.train(data,label);

	// Build the test set the same way.
	// vector<vector<double>> test_data  = {{-3,-4},{-5,-4},{12,3},{-13,-4},{9,12}};
	// vector<double> test_label = {1,1,0,1,0};
	vector<vector<double>> test_data;
	vector<double> test_label;
	make_data(test_data,test_label);

	// Evaluate, then dump elapsed time and the learned parameters to stderr.
	network.predict(test_data,test_label);
	cerr<<clock()<<" ms"<<endl;
	cerr<<"w1 = "<<network.w1<<endl;
	cerr<<"w2 = "<<network.w2<<endl;
	cerr<<"b1 = "<<network.b1<<endl;
	cerr<<"w3 = "<<network.w3<<endl;
	cerr<<"w4 = "<<network.w4<<endl;
	cerr<<"b2 = "<<network.b2<<endl;
	cerr<<"w5 = "<<network.w5<<endl;
	cerr<<"w6 = "<<network.w6<<endl;
	cerr<<"b3 = "<<network.b3<<endl;
	// Keep the window up briefly, then wait for a keypress before closing.
	this_thread::sleep_for(chrono::seconds(3));
	getch();
	closegraph();
	return 0;
}
  • 1
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 3
    评论
以下是RK3588板子使用MPP进行H.264编码的示例C++代码: ```c++ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <errno.h> #include "rk_mpi.h" #include "rk_venc.h" #define MAX_FILE_PATH_SIZE 256 #define MAX_FRAME_BUF_SIZE (1920*1080*3/2) int main(int argc, char** argv) { // 初始化MPP模块 RK_MPI_SYS_Init(); // 打开输入文件 char input_path[MAX_FILE_PATH_SIZE] = "/path/to/input.yuv"; int input_fd = open(input_path, O_RDONLY); if (input_fd < 0) { printf("Failed to open input file %s: %s\n", input_path, strerror(errno)); return -1; } // 打开输出文件 char output_path[MAX_FILE_PATH_SIZE] = "/path/to/output.h264"; int output_fd = open(output_path, O_WRONLY | O_CREAT | O_TRUNC, 0666); if (output_fd < 0) { printf("Failed to open output file %s: %s\n", output_path, strerror(errno)); close(input_fd); return -1; } // 获取输入文件大小 struct stat input_stat; fstat(input_fd, &input_stat); int input_size = input_stat.st_size; // 映射输入文件到内 unsigned char* input_buf = (unsigned char*) mmap(NULL, input_size, PROT_READ, MAP_SHARED, input_fd, 0); if (input_buf == MAP_FAILED) { printf("Failed to mmap input file: %s\n", strerror(errno)); close(input_fd); close(output_fd); return -1; } // 初始化编码参数 MPP_ENC_CFG cfg; memset(&cfg, 0, sizeof(cfg)); cfg.frm_cfg.width = 1920; cfg.frm_cfg.height = 1080; cfg.frm_cfg.fmt = MPP_FMT_YUV420SP; cfg.rc_mode = MPP_ENC_RC_MODE_CBR; cfg.bps_target = 2000000; cfg.fps_in = 25; cfg.fps_out = 25; cfg.gop = 50; cfg.profile = 100; // 创建编码器 MPP_ENC_CTX* enc_ctx = NULL; RK_MPI_VENC_CreateContext(&enc_ctx, RK_ID_VENC_H264); RK_MPI_VENC_RegisterCallback(enc_ctx, NULL, NULL); // 配置编码器参数 RK_MPI_VENC_SetConfig(enc_ctx, RK_MPI_VENC_CFG_BASE, &cfg, sizeof(cfg)); RK_MPI_VENC_StartRecvPic(enc_ctx); // 编码每一帧 unsigned char* frame_buf = (unsigned char*) malloc(MAX_FRAME_BUF_SIZE); int frame_size = 1920*1080*3/2; int frame_cnt = input_size / frame_size; for (int i = 0; i < frame_cnt; i++) { // 
从输入文件读取一帧数据 unsigned char* input_frame = input_buf + i * frame_size; // 填充MPP编码器的输入数据 MPP_ENC_DATA enc_data; memset(&enc_data, 0, sizeof(enc_data)); enc_data.pbuf = input_frame; enc_data.buf_size = frame_size; enc_data.time_stamp = i * 40; enc_data.eos = (i == frame_cnt - 1); // 编码一帧数据 RK_MPI_VENC_SendFrame(enc_ctx, &enc_data); // 获取编码后的数据 MPP_ENC_DATA pkt; memset(&pkt, 0, sizeof(pkt)); int ret = RK_MPI_VENC_GetStream(enc_ctx, &pkt, RK_TRUE); if (ret == RK_SUCCESS) { // 将编码后的数据写入输出文件 write(output_fd, pkt.pbuf, pkt.buf_size); RK_MPI_VENC_ReleaseStream(enc_ctx, &pkt); } } // 销毁编码器 RK_MPI_VENC_StopRecvPic(enc_ctx); RK_MPI_VENC_DestroyContext(enc_ctx); // 释放内映射和资源 munmap(input_buf, input_size); close(input_fd); close(output_fd); return 0; } ``` 需要注意的是,上述代码仅供参考,实际使用时可能需要根据具体的需求和参数进行适当的修改。同时,还需要确保MPP已经正确安装并且编码标准和参数设置正确。
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值