注解:原理和代码均非本人所创,代码转自:Ziv Yaniv 所写的ransac算法中的最小二乘法实现。
原理实现如下:
转载自:http://www.cnblogs.com/gnuhpc/archive/2012/12/09/2810000.html
原理:
如果经验方程是线性的,形如y=ax+b,就是线性回归。按上面的分析,误差函数为:
e=∑(yi-axi-b)^2
各偏导为:
de/da=-2∑(yi-axi-b)xi=0
de/db=-2∑(yi-axi-b)=0
于是得到关于a,b的线性方程组:
(∑xi^2)a+(∑xi)b=∑yixi
(∑xi)a+nb=∑yi
设A=∑xi^2,B=∑xi,C=∑yixi,D=∑yi,则方程化为:
Aa+Bb=C
Ba+nb=D
解出a,b得:
a=(Cn-BD)/(An-BB)
b=(AD-CB)/(An-BB)
void LineParamEstimator::leastSquaresEstimate(std::vector<Point2D *> &data, std::vector<double> ¶meters)
{
double meanX, meanY, nx, ny, norm;
double covMat11, covMat12, covMat21, covMat22; // The entries of the symmetric covarinace matrix
int i, dataSize = data.size();
parameters.clear();
if(data.size()<2)
return;
meanX = meanY = 0.0;
covMat11 = covMat12 = covMat21 = covMat22 = 0;
for(i=0; i<dataSize; i++) {
meanX +=data[i]->x;
meanY +=data[i]->y;
covMat11 +=data[i]->x * data[i]->x;
covMat12 +=data[i]->x * data[i]->y;
covMat22 +=data[i]->y * data[i]->y;
}
meanX/=dataSize;//所有x的平均值。
meanY/=dataSize;//所有y的平均值。
covMat11 -= dataSize*meanX*meanX;
covMat12 -= dataSize*meanX*meanY;
covMat22 -= dataSize*meanY*meanY;
covMat21 = covMat12;
if(covMat11<1e-12) {
nx = 1.0;
ny = 0.0;
}
else { //lamda1 is the largest eigen-value of the covariance matrix
//and is used to compute the eigne-vector corresponding to the smallest
//eigenvalue, which isn't computed explicitly.
double lamda1 = (covMat11 + covMat22 + sqrt((covMat11-covMat22)*(covMat11-covMat22) + 4*covMat12*covMat12)) / 2.0;
nx = -covMat12;
ny = lamda1 - covMat22;
norm = sqrt(nx*nx + ny*ny);
nx/=norm;
ny/=norm;
}
parameters.push_back(nx);
parameters.push_back(ny);
parameters.push_back(meanX);
parameters.push_back(meanY);
}
注:(nx,ny) 是拟合直线的单位法向量,即协方差矩阵最小特征值 λ2 所对应的特征向量。
推导:对 2x2 对称矩阵,特征向量满足 (covMat11-λ2)·v1 + covMat12·v2 = 0,
可取 v = (-covMat12, covMat11-λ2);又由迹 λ1+λ2 = covMat11+covMat22 得
covMat11-λ2 = λ1-covMat22,故 v = (-covMat12, λ1-covMat22),
因此代码只需用求根公式算出最大特征值 λ1,无需显式求出 λ2。