按照MATLAB中的LMS算法原理，编写C语言版的LMS算法模型，并移植至DSP中运行。部分代码如下：
/*
 * main.c
 *
 * LMS adaptive-filter demo: C port of a MATLAB LMS model, targeting a
 * TI DSP (built under Code Composer Studio). The large data buffers are
 * placed in a dedicated linker section via TI's DATA_SECTION pragma.
 */
#include <string.h>
#include <stdio.h>
#include "LMS.h"
//#include <mathlib.h>
//#include <dsplib.h>
#include "data.h"
#include "data2.h"
#define PI (3.141592654)
#define Fs (10000)        /* sample rate, Hz */
/* BUG FIX: was `1/Fs`, which is integer division and evaluates to 0,
 * making every `i*Ts` phase term in the signal generator zero.
 * Use float division so Ts is the true sample period (100 us). */
#define Ts (1.0f / Fs)
#define F1 (2)            /* signal component frequencies, Hz */
#define F2 (10)
#define F3 (20)
#define F4 (500)          /* interference (noise) frequency, Hz */
#define NUM_OF_POINT (12900)
/* NOTE(review): #pragma DATA_SECTION is TI-compiler specific; the
 * trailing ';' is consumed as part of the directive line. */
#pragma DATA_SECTION(signal, ".mydata");
float signal[NUM_OF_POINT];        /* clean reference signal */
#pragma DATA_SECTION(noise, ".mydata");
float noise[NUM_OF_POINT];         /* interference source */
#pragma DATA_SECTION(signal_noise, ".mydata");
float signal_noise[NUM_OF_POINT];  /* signal + filtered noise (desired d) */
#pragma DATA_SECTION(out_error, ".mydata");
float out_error[NUM_OF_POINT];     /* LMS error output e[n] */
#pragma DATA_SECTION(out_y, ".mydata");
float out_y[NUM_OF_POINT];         /* LMS filter output y[n] */
/*
 * Empty DSP/BIOS task entry point (placeholder).
 *
 * BUG FIX: the original relied on an implicit `int` return type with no
 * return statement — implicit int was removed in C99 and reading the
 * missing return value would be undefined behavior. Declare it as an
 * explicit void function with a proper (void) parameter list.
 */
void tsk0_func(void)
{
	/* intentionally empty */
}
//void make_data(void)
//{
// int i;
// for(i=0;i<NUM_OF_POINT;i++)
// {
// signal[i] = sinsp(2*PI*F1*i*Ts) + 0.5*sinsp(2*PI*F2*i*Ts) + 0.25*sinsp(2*PI*F3*i*Ts);
// noise[i] = 5*sinsp(2*PI*F4*i*Ts+PI/2);
// signal_noise[i] = signal[i] + 0.2*noise[i];
// if(i>0)
// signal_noise[i] += 0.15*noise[i-1];
// if(i>1)
// signal_noise[i] += 0.1*noise[i-2];
// }
//}
/*
 * Run the instantaneous-gradient-estimate LMS filter over the whole
 * input record. For each sample i the LMS_M-tap reference vector is
 * lms_x[j] = g_Xn[i-j] (newest sample first); the desired sample is
 * g_Dn[i]. Per-sample error and filter output are stored in
 * out_error[] / out_y[].
 *
 * Returns 0 on completion.
 */
int main(void)
{
	int i, j, taps;
	//make_data();
	/* BUG FIX: the original cleared sizeof(lms_x)*sizeof(float) bytes —
	 * a byte count multiplied by sizeof(float) — which overruns the
	 * delay line. Clear exactly the LMS_M taps that the loops below use. */
	memset(lms_x, 0, LMS_M * sizeof(lms_x[0]));
	for(i=0;i<NUM_OF_POINT;i++)
	{
		/* Until the delay line is full (i < LMS_M) only the first i+1
		 * taps are valid; the remainder stay zero from the memset. */
		taps = (i < LMS_M) ? (i + 1) : LMS_M;
		for(j=0;j<taps;j++)
		{
			//lms_x[j] = noise[i-j];
			lms_x[j] = g_Xn[i-j];
		}
		//lms_param_in.d = signal_noise[i];
		lms_param_in.d = g_Dn[i];           /* desired-signal sample d[n] */
		lms_param_in.x_ptr = &lms_x[0];
		lms_param_in.length_x = LMS_M;
		/* Run the instantaneous-gradient-estimate LMS update
		 * (~514 CPU cycles per call on the target DSP). */
		LMS_Gradient_Instantaneous_Estimates(&lms_param_in, &lms_param_out);
		out_error[i] = lms_param_out.error;
		out_y[i] = lms_param_out.y;
	}
	return 0;
}
CCS运行结果