-
【原文:http://www.blogbus.com/shijuanfeng-logs/220816641.html】
终于是彻彻底底地把LDA弄懂了,说来惭愧,好歹专业也叫图像处理与模式识别,就个简单的LDA都没有彻底理解,这次是NO problem了。
1. LDA简介
线性鉴别分析(Linear Discriminant Analysis)
2. LDA的主要思想
求解最佳投影方向,使得样本在该方向上投影后,达到最大的类间离散度和最小的类内离散度
3. 具体步骤:
随后补个图~~
得到的是这样的形式:Sb ω* = λ Sw ω*
其中,Sb表示类间距离,Sw表示类内距离,ω*是所求。
4. 求解
一般地,我们的求解方法是:求 Sw⁻¹·Sb 的特征值和特征向量
Opencv2.4.0版本也是这么做的,可是这样做有一个问题:Sw^-1是否可逆!答案是不一定的,如果在Sw奇异的情况下,只能求伪逆,但求解伪逆将引入较大误差。
因此需要将其正则化:
先将Sw进行SVD分解:
Sw = P Λ Pᵀ（P 为正交矩阵,Λ 为 Sw 的特征值对角阵）
由于 P 正交,即 Pᵀ = P⁻¹,故 Pᵀ Sw P = Λ
记 Q = P,可以写成 Qᵀ Sw Q = Λ
两边再配上 Λ^(-1/2):(Q Λ^(-1/2))ᵀ Sw (Q Λ^(-1/2)) = I
令 U = Q Λ^(-1/2),换言之,Uᵀ Sw U = I
因此原式 Sb ω* = λ Sw ω* 可以变为:(Uᵀ Sb U) ω′ = λ ω′,其中 ω* = U ω′
再对等式左边求特征值,特征向量即可
5. opencv实现
不能用WLW的代码插件发布,所以只能不好看地贴代码了~~
LDA主函数
void MyLDA( const Mat &_data, const vector<int> &labelVec, int &classNum, int dim, Mat &eigenvalues, Mat &eigenvectors )
{
Mat data = cv::Mat_<double>(_data);
cout << "/********************判定LDA是否正确******************/" << endl;
cout << "data.rows:" << data.rows << endl;
cout << "data.cols:" << data.cols << endl;
cout << "labelVec.size:" << labelVec.size()<< endl;
cout << "classNum:" << classNum << endl;
cout << "dim:" << dim << endl;
cout << "判定完毕" << endl;
assert( dim>0 );
int D = data.cols;
int N = data.rows;
if ( dim>data.cols )
{
dim = data.cols;
}
/***************************计算类内类间距离*************************/
cout << "计算类内类间距离..." << endl;
/// 类内距离
Mat Sw;
/// 类间距离
Mat Sb;
/// 每类的均值
vector meanClass(classNum);
/// 总均值
Mat meanTotal;
/// 每类的样本数
vector<int> numClass(classNum);
// initialize
for (int i = 0; i < classNum; i++)
{
numClass[i] = 0;
meanClass[i] = Mat::zeros(1, D, data.type()); //! Dx1 image vector
}
meanTotal = Mat::zeros(1, D, data.type());
// calculate sums
for ( int i=0; i
{
Mat instance = data.row(i);
int classIdx = labelVec[i];
add( meanTotal, instance, meanTotal );
add( meanClass[classIdx], instance, meanClass[classIdx] );
numClass[classIdx]++;
}
// calculate means
meanTotal.convertTo( meanTotal, meanTotal.type(),1.0 / static_cast<double> (data.rows) );
for ( int i = 0; i < classNum; i++ )
{
assert( numClass[i] != 0 );
meanClass[i].convertTo( meanClass[i], meanClass[i].type(),
1.0 / static_cast<double> (numClass[i]) );
}
// subtract class means
for ( int i = 0; i < N; i++ )
{
int classIdx = labelVec[i];
Mat instance = data.row(i);
subtract( instance, meanClass[classIdx], instance );
}
// calculate within-classes scatter
Sw = Mat::zeros(D, D, data.type());
// 记录每类的类内距离
int rowBegin = 0;
for ( int i=0; i
{
if ( i!=0 )
{
rowBegin += numClass[i-1];
}
Rect r(0, rowBegin, D, numClass[i] );
Mat tmp( data, r );
Mat Si;
mulTransposed( tmp, Si, TRUE, noArray(), (double)numClass[i]/N );
add( Sw, Si, Sw );
}
// calculate between-classes scatter
Sb = Mat::zeros(D, D, data.type());
for (int i = 0; i < classNum; i++)
{
Mat tmp;
subtract(meanClass[i], meanTotal, tmp);
mulTransposed( tmp, tmp, TRUE, noArray(), (double)numClass[i]/N );
add( Sb, tmp, Sb );
}
/***************************原始LDA*************************/
#if 0
/// 计算Sw^-1*Sb
Mat M = Sw.inv()*Sb ;
/// 求Sw^-1*Sb的特征值和特征向量
EigenvalueDecomposition es(M);
eigenvalues = es.eigenvalues();
eigenvectors = es.eigenvectors();
#endif
/***************************正则化LDA*************************/
cout << "计算LDA:" << endl;
// eigen decompose of Sw
cout << "Sw 分解..." << endl;
Mat eigValSw;
Mat eigVectSw;
eigen( Sw, eigValSw, eigVectSw );
eigValSw = eigValSw.reshape(1, 1);
eigVectSw = eigVectSw.t(); ///< 特征向量按列排
/// 特征值的求解过程中有一定的误差,特别是较小的值,因此将这些值置为一个固定值
// regularization, change eigen values that are less than given threshold to constant value
cout << endl <<"将小的特征值置为定值..." << endl;
const float threshold = 1.0E-6f * eigValSw.at<double>(0,0);
int index = dim;
for (int i = 0; i < dim; ++i )
{
if (eigValSw.at<double>(0,i) <= threshold)
{
index = i;
break;
}
}
// eigen values that are less then threshold is set to trimValue
float trimValue = threshold;
if (index > 0)
{
trimValue = eigValSw.at<double>(0,index-1) * 0.8f * index / dim;
}
for (int i = index; i < dim; ++i)
{
eigValSw.at<double>(0,i) = trimValue;
}
cout << endl << "计算Sw^(-1/2)=R*I^-1/2" << endl;
// calculate inverse square root of eigen values
for (int i = 0; i < dim; ++i)
{
eigValSw.at<double>(0,i) = 1.0f / sqrt( eigValSw.at<double>(0,i) );
}
for (int i = 0; i < dim; ++i)
{
for (int j = 0; j < dim; ++j)
{
eigVectSw.at<double>(i, j) *= eigValSw.at<double>(0,j);
}
}
cout << endl << "计算Sw^(-1/2))^T* Sb * (Sw^(-1/2)" << endl;
// calculate (Sw^(-1/2))^T * Sb * (Sw^(-1/2))
Mat S = eigVectSw.t() * Sb * eigVectSw;
cout << endl << " eigen decompose of (Sw^(-1/2))' * Sb * (Sw^(-1/2)) " << endl;
// eigen decompose of (Sw^(-1/2))' * Sb * (Sw^(-1/2))
EigenvalueDecomposition es(S);
eigenvalues = es.eigenvalues();
eigenvectors = es.eigenvectors();
cout << endl << " eigVectSw * eigenvectors " << endl;
eigenvectors = eigVectSw * eigenvectors.t();
/***************对特征值特征向量进行排序等处理********************/
cout << "reshape" << endl;
eigenvalues = eigenvalues.reshape(1, 1);
/ 排序:根据特征值的大小将特征值和特征向量排序
cout << "sorting..." << endl;
vector<int> sorted_indices;
for ( int v=0; v
{
sorted_indices.push_back( v );
}
sortIdxEigenVal( eigenvalues,sorted_indices, CV_SORT_EVERY_ROW+CV_SORT_DESCENDING );
eigenvectors = sortMatrixColumnsByIndices( eigenvectors, sorted_indices);
/ 取出需要的维度作为结果
cout << "取出需要的维度作为结果" << endl;
eigenvalues = Mat( eigenvalues, Range::all(), Range(0, dim) );
eigenvectors = Mat( eigenvectors, Range::all(), Range(0, dim) );
cout << "clear" << endl;
numClass.clear();
meanClass.clear();
cout << "eigenvectors.rows: " << eigenvectors.rows << endl;
cout << "eigenvectors.cols: " << eigenvectors.cols << endl;
cout << "/********************My LDA end******************/" << endl;
}
调用函数:
排序——
///
/// Swaps the eigenvalues stored at columns x and y of the 1 x M row `data`,
/// and swaps the matching entries of `sorted_indices` so the index list keeps
/// tracking each value as it moves.
void exchange( Mat &data, vector<int> &sorted_indices,int x, int y )
{
    double valueAtX = data.at<double>(0,x);
    double valueAtY = data.at<double>(0,y);
    data.at<double>(0,x) = valueAtY;
    data.at<double>(0,y) = valueAtX;

    int indexAtX = sorted_indices[x];
    sorted_indices[x] = sorted_indices[y];
    sorted_indices[y] = indexAtX;
}
/// Quicksort partition step over the 1 x M row `data`, ordering DESCENDING
/// (elements >= the pivot move to the front). `sorted_indices` is permuted in
/// lock-step so it keeps tracking each value's original column.
/// @return final position of the pivot (originally at column r).
int Partition( Mat &data, vector<int> &sorted_indices, int p, int r)
{
    double key = data.at<double>(0,r);  // pivot: last element of the range
    int i = p-1;  // end of the ">= key" prefix
    for (int j=p; j<r; j++)  // loop bound restored: j < r (stripped by the blog engine)
    {
        if (data.at<double>(0,j)>=key)
        {
            i++;
            exchange( data, sorted_indices, i, j );
        }
    }
    exchange(data, sorted_indices, i+1, r );
    return i+1;
}
/// Recursive quicksort (descending) of the 1 x M row `data` over columns
/// [p, r], permuting `sorted_indices` along with the values.
/// @param p first column of the range
/// @param r last column of the range (inclusive)
void QuickSort( Mat &data, vector<int> &sorted_indices, int p, int r)
{
    if (p < r)  // condition restored: recurse only on ranges of two or more elements
    {
        int q = Partition(data, sorted_indices, p, r);  // pivot placement
        QuickSort(data, sorted_indices, p, q-1);
        QuickSort(data, sorted_indices, q+1, r);
    }
}
/// Sorts each row of `inputData` descending in place — only the
/// CV_SORT_EVERY_ROW + CV_SORT_DESCENDING combination is supported; any other
/// flag is a no-op. `sorted_indices` is permuted in lock-step. In this file
/// inputData is the 1 x M eigenvalue row.
void sortIdxEigenVal( Mat &inputData, vector<int> &sorted_indices, int flag )
{
    if ( flag!=CV_SORT_EVERY_ROW+CV_SORT_DESCENDING )
    {
        return;  // unsupported sort mode: leave everything untouched
    }
    for ( int i=0; i<inputData.rows; i++ )  // loop bound restored
    {
        // row() returns a header sharing storage with inputData, so sorting
        // `m` sorts the row in place. Passing the named lvalue also fixes the
        // original, which handed the temporary inputData.row(i) to a Mat&
        // parameter (ill-formed C++).
        Mat m = inputData.row(i);
        QuickSort( m, sorted_indices, 0, m.cols-1);
    }
}
/// Returns a copy of `src` whose idx-th column is src.col(indices[idx]),
/// i.e. the columns of `src` permuted by `indices`.
/// Parameters taken by const reference (backward-compatible) to avoid copying
/// the index vector on every call; also dropped the stray ';' that followed
/// the function body.
Mat sortMatrixColumnsByIndices(const Mat &src, const vector<int> &indices)
{
    Mat dst(src.rows, src.cols, src.type());
    for (size_t idx = 0; idx < indices.size(); idx++)
    {
        Mat originalCol = src.col(indices[idx]);   // header into src
        Mat sortedCol = dst.col((int)idx);         // header into dst
        originalCol.copyTo(sortedCol);             // writes through into dst
    }
    return dst;
}
下面这个类是从opencv2.4.0的lda.cpp文件中copy的,用于任何矩阵求解特征值特征向量,主要解决奇异矩阵求特征值特征向量的问题,我还没看懂,只是拿来使用了
template static bool
isSymmetric_(InputArray src) {
Mat _src = src.getMat();
if(_src.cols != _src.rows)
return false;
for (int i = 0; i < _src.rows; i++) {
for (int j = 0; j < _src.cols; j++) {
_Tp a = _src.at<_Tp> (i, j);
_Tp b = _src.at<_Tp> (j, i);
if (a != b) {
return false;
}
}
}
return true;
}
template static bool
isSymmetric_(InputArray src, double eps) {
Mat _src = src.getMat();
if(_src.cols != _src.rows)
return false;
for (int i = 0; i < _src.rows; i++) {
for (int j = 0; j < _src.cols; j++) {
_Tp a = _src.at<_Tp> (i, j);
_Tp b = _src.at<_Tp> (j, i);
if (std::abs(a - b) > eps) {
return false;
}
}
}
return true;
}
// Symmetry test dispatching on the matrix element type. Floating-point types
// are compared with tolerance eps; integer types exactly (eps unused there).
// Unsupported/multi-channel types return false.
static bool isSymmetric(InputArray src, double eps=1e-16)
{
    Mat m = src.getMat();
    switch (m.type()) {
    case CV_8SC1: return isSymmetric_<char>(m); break;
    case CV_8UC1:
        return isSymmetric_<uchar>(m); break;   // restored "<uchar", stripped by the blog engine
    case CV_16SC1:
        return isSymmetric_<short>(m); break;
    case CV_16UC1:
        return isSymmetric_<ushort>(m); break;  // restored "<ushort", stripped by the blog engine
    case CV_32SC1:
        return isSymmetric_<int>(m); break;
    case CV_32FC1:
        return isSymmetric_<float>(m, eps); break;
    case CV_64FC1:
        return isSymmetric_<double>(m, eps); break;
    default:
        break;
    }
    return false;
}
// ------------------- EigenvalueDecomposition (copied from OpenCV 2.4.0 lda.cpp) -------------------
class EigenvalueDecomposition {
private:
// Holds the data dimension.
int n;
// Stores real/imag part of a complex division.
double cdivr, cdivi;
// Pointer to internal memory.
double *d, *e, *ort;
double **V, **H;
// Holds the computed eigenvalues.
Mat _eigenvalues;
// Holds the computed eigenvectors.
Mat _eigenvectors;
// Allocates an uninitialized array of m elements; caller frees with delete[].
// Restored "<typename _Tp>", stripped by the blog engine.
template<typename _Tp>
_Tp *alloc_1d(int m) {
return new _Tp[m];
}
// Allocates an array of m elements, each initialized to val.
// Restored "<typename _Tp>", stripped by the blog engine.
template<typename _Tp>
_Tp *alloc_1d(int m, _Tp val) {
_Tp *arr = alloc_1d<_Tp> (m);
for (int i = 0; i < m; i++)
arr[i] = val;
return arr;
}
// Allocates an uninitialized m x n jagged 2-D array (array of row pointers).
// Restored "<typename _Tp>", stripped by the blog engine.
template<typename _Tp>
_Tp **alloc_2d(int m, int n) {
_Tp **arr = new _Tp*[m];
for (int i = 0; i < m; i++)
arr[i] = new _Tp[n];
return arr;
}
// Allocates an m x n jagged 2-D array with every element initialized to val.
// Restored "<typename _Tp>", stripped by the blog engine.
template<typename _Tp>
_Tp **alloc_2d(int m, int n, _Tp val) {
_Tp **arr = alloc_2d<_Tp> (m, n);
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
arr[i][j] = val;
}
}
return arr;
}
// Complex scalar division (xr + i*xi) / (yr + i*yi); the real and imaginary
// parts of the quotient are stored in the members cdivr and cdivi.
// Branches on which of |yr|, |yi| is larger so the intermediate products
// cannot overflow/underflow (Smith's algorithm).
void cdiv(double xr, double xi, double yr, double yi) {
double r, d;
if (std::abs(yr) > std::abs(yi)) {
r = yi / yr;
d = yr + r * yi;
cdivr = (xr + r * xi) / d;
cdivi = (xi - r * xr) / d;
} else {
r = yr / yi;
d = yi + r * yr;
cdivr = (r * xr + xi) / d;
cdivi = (r * xi - xr) / d;
}
}
// Nonsymmetric reduction from Hessenberg to real Schur form.
// On entry, H holds an upper Hessenberg matrix and V the orthogonal transform
// accumulated by orthes(); on exit, d[] and e[] hold the real and imaginary
// parts of the eigenvalues and V holds the eigenvectors of the original
// matrix. NOTE(review): copied verbatim from OpenCV 2.4.0 lda.cpp (itself
// derived from the JAMA/EISPACK hqr2 routine); only comments added here —
// the exact statement order is load-bearing, so the code is untouched.
void hqr2() {
// This is derived from the Algol procedure hqr2,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
// Initialize
int nn = this->n;
int n = nn - 1;
int low = 0;
int high = nn - 1;
double eps = pow(2.0, -52.0);
double exshift = 0.0;
double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;
// Store roots isolated by balanc and compute matrix norm
double norm = 0.0;
for (int i = 0; i < nn; i++) {
if (i < low || i > high) {
d[i] = H[i][i];
e[i] = 0.0;
}
for (int j = max(i - 1, 0); j < nn; j++) {
norm = norm + std::abs(H[i][j]);
}
}
// Outer loop over eigenvalue index
int iter = 0;
while (n >= low) {
// Look for single small sub-diagonal element
int l = n;
while (l > low) {
s = std::abs(H[l - 1][l - 1]) + std::abs(H[l][l]);
if (s == 0.0) {
s = norm;
}
if (std::abs(H[l][l - 1]) < eps * s) {
break;
}
l--;
}
// Check for convergence
// One root found
if (l == n) {
H[n][n] = H[n][n] + exshift;
d[n] = H[n][n];
e[n] = 0.0;
n--;
iter = 0;
// Two roots found
} else if (l == n - 1) {
w = H[n][n - 1] * H[n - 1][n];
p = (H[n - 1][n - 1] - H[n][n]) / 2.0;
q = p * p + w;
z = sqrt(std::abs(q));
H[n][n] = H[n][n] + exshift;
H[n - 1][n - 1] = H[n - 1][n - 1] + exshift;
x = H[n][n];
// Real pair
if (q >= 0) {
if (p >= 0) {
z = p + z;
} else {
z = p - z;
}
d[n - 1] = x + z;
d[n] = d[n - 1];
if (z != 0.0) {
d[n] = x - w / z;
}
e[n - 1] = 0.0;
e[n] = 0.0;
x = H[n][n - 1];
s = std::abs(x) + std::abs(z);
p = x / s;
q = z / s;
r = sqrt(p * p + q * q);
p = p / r;
q = q / r;
// Row modification
for (int j = n - 1; j < nn; j++) {
z = H[n - 1][j];
H[n - 1][j] = q * z + p * H[n][j];
H[n][j] = q * H[n][j] - p * z;
}
// Column modification
for (int i = 0; i <= n; i++) {
z = H[i][n - 1];
H[i][n - 1] = q * z + p * H[i][n];
H[i][n] = q * H[i][n] - p * z;
}
// Accumulate transformations
for (int i = low; i <= high; i++) {
z = V[i][n - 1];
V[i][n - 1] = q * z + p * V[i][n];
V[i][n] = q * V[i][n] - p * z;
}
// Complex pair
} else {
d[n - 1] = x + p;
d[n] = x + p;
e[n - 1] = z;
e[n] = -z;
}
n = n - 2;
iter = 0;
// No convergence yet
} else {
// Form shift
x = H[n][n];
y = 0.0;
w = 0.0;
if (l < n) {
y = H[n - 1][n - 1];
w = H[n][n - 1] * H[n - 1][n];
}
// Wilkinson's original ad hoc shift
if (iter == 10) {
exshift += x;
for (int i = low; i <= n; i++) {
H[i][i] -= x;
}
s = std::abs(H[n][n - 1]) + std::abs(H[n - 1][n - 2]);
x = y = 0.75 * s;
w = -0.4375 * s * s;
}
// MATLAB's new ad hoc shift
if (iter == 30) {
s = (y - x) / 2.0;
s = s * s + w;
if (s > 0) {
s = sqrt(s);
if (y < x) {
s = -s;
}
s = x - w / ((y - x) / 2.0 + s);
for (int i = low; i <= n; i++) {
H[i][i] -= s;
}
exshift += s;
x = y = w = 0.964;
}
}
iter = iter + 1; // (Could check iteration count here.)
// Look for two consecutive small sub-diagonal elements
int m = n - 2;
while (m >= l) {
z = H[m][m];
r = x - z;
s = y - z;
p = (r * s - w) / H[m + 1][m] + H[m][m + 1];
q = H[m + 1][m + 1] - z - r - s;
r = H[m + 2][m + 1];
s = std::abs(p) + std::abs(q) + std::abs(r);
p = p / s;
q = q / s;
r = r / s;
if (m == l) {
break;
}
if (std::abs(H[m][m - 1]) * (std::abs(q) + std::abs(r)) < eps * (std::abs(p)
* (std::abs(H[m - 1][m - 1]) + std::abs(z) + std::abs(
H[m + 1][m + 1])))) {
break;
}
m--;
}
for (int i = m + 2; i <= n; i++) {
H[i][i - 2] = 0.0;
if (i > m + 2) {
H[i][i - 3] = 0.0;
}
}
// Double QR step involving rows l:n and columns m:n
for (int k = m; k <= n - 1; k++) {
bool notlast = (k != n - 1);
if (k != m) {
p = H[k][k - 1];
q = H[k + 1][k - 1];
r = (notlast ? H[k + 2][k - 1] : 0.0);
x = std::abs(p) + std::abs(q) + std::abs(r);
if (x != 0.0) {
p = p / x;
q = q / x;
r = r / x;
}
}
if (x == 0.0) {
break;
}
s = sqrt(p * p + q * q + r * r);
if (p < 0) {
s = -s;
}
if (s != 0) {
if (k != m) {
H[k][k - 1] = -s * x;
} else if (l != m) {
H[k][k - 1] = -H[k][k - 1];
}
p = p + s;
x = p / s;
y = q / s;
z = r / s;
q = q / p;
r = r / p;
// Row modification
for (int j = k; j < nn; j++) {
p = H[k][j] + q * H[k + 1][j];
if (notlast) {
p = p + r * H[k + 2][j];
H[k + 2][j] = H[k + 2][j] - p * z;
}
H[k][j] = H[k][j] - p * x;
H[k + 1][j] = H[k + 1][j] - p * y;
}
// Column modification
for (int i = 0; i <= min(n, k + 3); i++) {
p = x * H[i][k] + y * H[i][k + 1];
if (notlast) {
p = p + z * H[i][k + 2];
H[i][k + 2] = H[i][k + 2] - p * r;
}
H[i][k] = H[i][k] - p;
H[i][k + 1] = H[i][k + 1] - p * q;
}
// Accumulate transformations
for (int i = low; i <= high; i++) {
p = x * V[i][k] + y * V[i][k + 1];
if (notlast) {
p = p + z * V[i][k + 2];
V[i][k + 2] = V[i][k + 2] - p * r;
}
V[i][k] = V[i][k] - p;
V[i][k + 1] = V[i][k + 1] - p * q;
}
} // (s != 0)
} // k loop
} // check convergence
} // while (n >= low)
// Backsubstitute to find vectors of upper triangular form
if (norm == 0.0) {
return;
}
for (n = nn - 1; n >= 0; n--) {
p = d[n];
q = e[n];
// Real vector
if (q == 0) {
int l = n;
H[n][n] = 1.0;
for (int i = n - 1; i >= 0; i--) {
w = H[i][i] - p;
r = 0.0;
for (int j = l; j <= n; j++) {
r = r + H[i][j] * H[j][n];
}
if (e[i] < 0.0) {
z = w;
s = r;
} else {
l = i;
if (e[i] == 0.0) {
if (w != 0.0) {
H[i][n] = -r / w;
} else {
H[i][n] = -r / (eps * norm);
}
// Solve real equations
} else {
x = H[i][i + 1];
y = H[i + 1][i];
q = (d[i] - p) * (d[i] - p) + e[i] * e[i];
t = (x * s - z * r) / q;
H[i][n] = t;
if (std::abs(x) > std::abs(z)) {
H[i + 1][n] = (-r - w * t) / x;
} else {
H[i + 1][n] = (-s - y * t) / z;
}
}
// Overflow control
t = std::abs(H[i][n]);
if ((eps * t) * t > 1) {
for (int j = i; j <= n; j++) {
H[j][n] = H[j][n] / t;
}
}
}
}
// Complex vector
} else if (q < 0) {
int l = n - 1;
// Last vector component imaginary so matrix is triangular
if (std::abs(H[n][n - 1]) > std::abs(H[n - 1][n])) {
H[n - 1][n - 1] = q / H[n][n - 1];
H[n - 1][n] = -(H[n][n] - p) / H[n][n - 1];
} else {
cdiv(0.0, -H[n - 1][n], H[n - 1][n - 1] - p, q);
H[n - 1][n - 1] = cdivr;
H[n - 1][n] = cdivi;
}
H[n][n - 1] = 0.0;
H[n][n] = 1.0;
for (int i = n - 2; i >= 0; i--) {
double ra, sa, vr, vi;
ra = 0.0;
sa = 0.0;
for (int j = l; j <= n; j++) {
ra = ra + H[i][j] * H[j][n - 1];
sa = sa + H[i][j] * H[j][n];
}
w = H[i][i] - p;
if (e[i] < 0.0) {
z = w;
r = ra;
s = sa;
} else {
l = i;
if (e[i] == 0) {
cdiv(-ra, -sa, w, q);
H[i][n - 1] = cdivr;
H[i][n] = cdivi;
} else {
// Solve complex equations
x = H[i][i + 1];
y = H[i + 1][i];
vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
vi = (d[i] - p) * 2.0 * q;
if (vr == 0.0 && vi == 0.0) {
vr = eps * norm * (std::abs(w) + std::abs(q) + std::abs(x)
+ std::abs(y) + std::abs(z));
}
cdiv(x * r - z * ra + q * sa,
x * s - z * sa - q * ra, vr, vi);
H[i][n - 1] = cdivr;
H[i][n] = cdivi;
if (std::abs(x) > (std::abs(z) + std::abs(q))) {
H[i + 1][n - 1] = (-ra - w * H[i][n - 1] + q
* H[i][n]) / x;
H[i + 1][n] = (-sa - w * H[i][n] - q * H[i][n
- 1]) / x;
} else {
cdiv(-r - y * H[i][n - 1], -s - y * H[i][n], z,
q);
H[i + 1][n - 1] = cdivr;
H[i + 1][n] = cdivi;
}
}
// Overflow control
t = max(std::abs(H[i][n - 1]), std::abs(H[i][n]));
if ((eps * t) * t > 1) {
for (int j = i; j <= n; j++) {
H[j][n - 1] = H[j][n - 1] / t;
H[j][n] = H[j][n] / t;
}
}
}
}
}
}
// Vectors of isolated roots
for (int i = 0; i < nn; i++) {
if (i < low || i > high) {
for (int j = i; j < nn; j++) {
V[i][j] = H[i][j];
}
}
}
// Back transformation to get eigenvectors of original matrix
for (int j = nn - 1; j >= low; j--) {
for (int i = low; i <= high; i++) {
z = 0.0;
for (int k = low; k <= min(j, high); k++) {
z = z + V[i][k] * H[k][j];
}
V[i][j] = z;
}
}
}
// Nonsymmetric reduction to Hessenberg form.
// Applies a sequence of Householder similarity transformations to reduce H
// (the working copy of the input matrix) to upper Hessenberg form, and
// accumulates the product of the transformations in V for later use by hqr2().
// NOTE(review): copied verbatim from OpenCV 2.4.0 lda.cpp (JAMA/EISPACK
// "orthes"/"ortran"); only comments added — the code itself is untouched.
void orthes() {
// This is derived from the Algol procedures orthes and ortran,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutines in EISPACK.
int low = 0;
int high = n - 1;
for (int m = low + 1; m <= high - 1; m++) {
// Scale column.
double scale = 0.0;
for (int i = m; i <= high; i++) {
scale = scale + std::abs(H[i][m - 1]);
}
if (scale != 0.0) {
// Compute Householder transformation.
double h = 0.0;
for (int i = high; i >= m; i--) {
ort[i] = H[i][m - 1] / scale;
h += ort[i] * ort[i];
}
double g = sqrt(h);
if (ort[m] > 0) {
g = -g;
}
h = h - ort[m] * g;
ort[m] = ort[m] - g;
// Apply Householder similarity transformation
// H = (I-u*u'/h)*H*(I-u*u')/h)
for (int j = m; j < n; j++) {
double f = 0.0;
for (int i = high; i >= m; i--) {
f += ort[i] * H[i][j];
}
f = f / h;
for (int i = m; i <= high; i++) {
H[i][j] -= f * ort[i];
}
}
for (int i = 0; i <= high; i++) {
double f = 0.0;
for (int j = high; j >= m; j--) {
f += ort[j] * H[i][j];
}
f = f / h;
for (int j = m; j <= high; j++) {
H[i][j] -= f * ort[j];
}
}
ort[m] = scale * ort[m];
H[m][m - 1] = scale * g;
}
}
// Accumulate transformations (Algol's ortran).
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
V[i][j] = (i == j ? 1.0 : 0.0);
}
}
for (int m = high - 1; m >= low + 1; m--) {
if (H[m][m - 1] != 0.0) {
for (int i = m + 1; i <= high; i++) {
ort[i] = H[i][m - 1];
}
for (int j = m; j <= high; j++) {
double g = 0.0;
for (int i = m; i <= high; i++) {
g += ort[i] * V[i][j];
}
// Double division avoids possible underflow
g = (g / ort[m]) / H[m][m - 1];
for (int i = m; i <= high; i++) {
V[i][j] += g * ort[i];
}
}
}
}
}
// Releases all internal working memory.
// Frees the 1-D work arrays (d, e, ort) and the n x n jagged matrices H and V
// allocated during compute(). NOTE(review): H itself is not allocated in
// compute() — presumably the constructor (outside this listing) sets it up;
// confirm before reusing this class standalone.
void release() {
// releases the working data
delete[] d;
delete[] e;
delete[] ort;
for (int i = 0; i < n; i++) {
delete[] H[i];
delete[] V[i];
}
delete[] H;
delete[] V;
}
// Computes the Eigenvalue Decomposition for a matrix given in H.
// Pipeline: Hessenberg reduction (orthes) -> real Schur form (hqr2); the real
// parts of the eigenvalues (d) and the eigenvectors (V) are then copied into
// the cv::Mat members. Note the imaginary parts in e[] are discarded here.
// NOTE(review): assumes n and H were initialized before this call —
// presumably by a constructor outside this listing; confirm.
void compute() {
// Allocate memory for the working data.
V = alloc_2d<double> (n, n, 0.0);
d = alloc_1d<double> (n);
e = alloc_1d<double> (n);
ort = alloc_1d<double> (n);
// Reduce to Hessenberg form.
orthes();
// Reduce Hessenberg to real Schur form.
hqr2();
// Copy eigenvalues to OpenCV Matrix.
_eigenvalues.create(1, n, CV_64FC1);
for (int i = 0; i < n; i++) {
_eigenvalues.at<double> (0, i) = d[i];
}
// Copy eigenvectors to OpenCV Matrix.
_eigenvectors.create(n, n, CV_64FC1);
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
_eigenvectors.at<double> (i, j) = V[i][j];
// Deallocate the memory by releasing all internal working data.
release();
}
public:
LDA详解 包括opencv实现代码
最新推荐文章于 2024-09-06 23:59:49 发布