提起人工智能优化算法,你可能首先想到的是遗传算法、PSO算法、模拟退火算法。的确,微分进化算法长久以来一直不被重视。但是,不可否认的是,近年来,微分进化应用的领域不断地扩大,也有越来越多的人去研究。那么,什么是微分进化算法呢?
微分进化算法和遗传算法一样,同属于进化算法,但是微分进化(DE)是比较新的基于群体的随机优化方法。它具有简单、快速、鲁棒性好等特点。不同于其它进化算法,它的变异算子是由种群中任意选取的多对向量的差值得到的。微分进化主要用于实参数优化问题,在非线性和不可微的连续空间问题上优于其它进化方法。
微分进化算法秉承着进化算法的特性,模拟生物的进化,根据适者生存的法则,通过多次迭代,找到搜索空间内的最优解。与其他进化算法相比,包含着相似机理,如初始化种群,进行变异,交叉和选择操作,不断进化更新,判断停止条件,等等。下面是实现的代码:
// Generate a uniformly distributed random float in (0, RNMX].
// NOTE(review): the original comment claimed a normal (正态) distribution,
// but this routine is a long-period UNIFORM generator — it appears to be
// the Numerical Recipes "ran2" (two combined LCGs with a Bays-Durham
// shuffle); confirm against the IM1/IA1/... macro values defined elsewhere.
// *idum is the generator state; pass a negative value to (re)initialize.
float rnd_uni(long *idum) {
long j, k;
static long idum2 = 123456789;  // state of the second LCG
static long iy = 0;             // most recent shuffled output
static long iv[NTAB];           // Bays-Durham shuffle table
float temp;
// A non-positive seed requests (re)initialization of the shuffle table.
if (*idum <= 0) {
// Force the seed positive (seed of 0 would lock the LCG at 0).
if (-(*idum) < 1) {
*idum = 1;
} else {
*idum = -(*idum);
}
idum2 = (*idum);
// Warm up the first LCG for 8 extra rounds, then fill iv[] with the
// NTAB subsequent values (only iterations with j < NTAB are stored).
for (j=NTAB+7;j >= 0;j--) {
k = (*idum)/IQ1;
// Schrage's method: compute (IA1 * *idum) % IM1 without overflow.
*idum = IA1*(*idum - k*IQ1) - k*IR1;
if (*idum < 0) {
*idum += IM1;
}
if (j < NTAB) {
iv[j] = *idum;
}
}
iy = iv[0];
}
// Advance the first LCG (Schrage's method again).
k = (*idum)/IQ1;
*idum = IA1*(*idum - k*IQ1) - k*IR1;
if (*idum < 0) {
*idum += IM1;
}
// Advance the second, independent LCG.
k = idum2/IQ2;
idum2 = IA2*(idum2 - k*IQ2) - k*IR2;
if (idum2 < 0) {
idum2 += IM2;
}
// Combine the two streams through the shuffle table.
j = iy/NDIV;
iy = iv[j] - idum2;
iv[j] = *idum;
if (iy < 1) {
iy += IMM1;
}
// Scale to (0,1); AM is the reciprocal of the first modulus.
double AM = (1.0/IM1);
temp = (float) AM*iy;
// Clamp so the caller never receives exactly 1.0 (endpoint excluded).
if (temp > RNMX) {
return (float) RNMX;
} else {
return temp;
}
}
// Objective-function evaluation for one individual.
// Computes sum_{i=0..D-1} (1 - tmp[i])^2 — the shifted sphere function,
// whose global minimum is 0 at tmp[i] == 1 for all i.
//   D:   number of dimensions (length of tmp)
//   tmp: candidate parameter vector
// Returns the objective value (lower is better).
// NOTE(review): main() invokes evaluate() with four arguments
// (D, vector, &nfeval, index), which does not match this two-parameter
// definition; the call sites and this prototype should be reconciled.
// Fix: removed unused locals j, x, dx and the unused constant M = 60.
float evaluate(int D, float tmp[]) {
    float result = 0.0f;
    for (int i = 0; i < D; i++) {
        float px = tmp[i];
        result += (1.0f - px) * (1.0f - px); // objective function term
    }
    return result;
}
//主函数
int main ()
{
int i, j, L, n;
int r1, r2, r3, r4,r5; // 用来产生随机数
int imin; // index to member with lowest energy
int gen; //迭代的次数
int seed;
int D,NP,iterations;
long nfeval; // number of function evaluations
float trial_energy; // 缓冲变量
float inibound_h; // 参数的上限
float inibound_l; // 参数的下限
float tmp[MAXDIM], best[MAXDIM], bestit[MAXDIM]; // members
float energy[MAXPOP]; // obj. funct. values
float F , CR; // control variables of DE
float emin; // help variables
rnd_uni_init = -seed; // initialization of rnd_uni()
nfeval = 0; // reset number of function evaluations
float r;
// spread initial population members
for (i=0; i<NP; i++) {
for (j=0; j<D; j++) {
r = rnd_uni(&rnd_uni_init);
c[i][j] = inibound_l + r*(inibound_h - inibound_l);
}
energy[i] = evaluate(D, c[i], &nfeval,index);
// printf("%2d %20.8f %3d\n", i, energy[i], nfeval);
// cin.get(ch);
}
//for(i=0;i<NP;i++)
// {
// pause[i]=energy[i];
// }
account=NP;
emin = energy[0];
imin = 0;
for (i=1; i<NP; i++) {
if (energy[i] < emin) {
emin = energy[i];
imin = i;
}
}
CopyVector(best, c[imin]);
CopyVector(bestit, c[imin]);
// old population (generation G)
// new population (generation G+1)
CopyArray(oldarray, c);
// new population (generation G+1)
CopyArray(newarray, d);
// Iteration loop
gen = 0; // generation counter reset
while ((gen <iterations)) { //迭代
gen++;
imin = 0;
for (i=0; i<NP; i++) {
// Pick a random population member
do {
// Endless loop for NP < 2 !!!
r = rnd_uni(&rnd_uni_init);
r1 = (int)(r*NP);
} while(r1 == i);
do {
// Endless loop for NP < 3 !!!
r = rnd_uni(&rnd_uni_init);
r2 = (int)(r*NP);
} while((r2 == i) || (r2 == r1));
do {
// Endless loop for NP < 4 !!!
r3 = (int)(rnd_uni(&rnd_uni_init)*NP);
} while((r3 == i) || (r3 == r1) || (r3 == r2));
do {
// Endless loop for NP < 5 !!!
r4 = (int)(rnd_uni(&rnd_uni_init)*NP);
} while((r4 == i) || (r4 == r1) || (r4 == r2) || (r4 == r3));
do {
// Endless loop for NP < 6 !!!
r5 = (int)(rnd_uni(&rnd_uni_init)*NP);
} while((r5==i) || (r5==r1) || (r5==r2) || (r5==r3) || (r5==r4));
// strategy DE0 (not in our paper)
if (strategy == 1) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
L = 0;
do {
tmp[n] = bestit[n] + F*(oldarray[r2][n] - oldarray[r3][n]);
n = (n+1)%D;
L++;
} while((rnd_uni(&rnd_uni_init) < CR) && (L < D));
}
// DE/rand/1/exp
// This is one of my favourite strategies. It works especially well when the
// "bestit[]"-schemes experience misconvergence. Try e.g. F=0.7 and CR = 0.5
// as a first guess.
// strategy DE1 in the techreport
else if (strategy == 2) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
L = 0;
do {
tmp[n] = oldarray[r1][n] + F*(oldarray[r2][n] - oldarray[r3][n]);
n = (n+1)%D;
L++;
} while((rnd_uni(&rnd_uni_init) < CR) && (L < D));
}
// DE/rand-to-best/1/exp
// This strategy seems to be one of the best strategies. Try F=0.85 and CR = 1.0
// If you get misconvergence try to increase NP. If this doesn't help you
// should play around with all three control variables.
// similiar to DE2 but generally better
else if (strategy == 3) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
L = 0;
do {
tmp[n] = tmp[n] + F*(bestit[n] - tmp[n]) + F*(oldarray[r1][n] - oldarray[r2][n]);
n = (n+1)%D;
L++;
} while((rnd_uni(&rnd_uni_init) < CR) && (L < D));
}
// DE/best/2/exp is another powerful strategy worth trying
else if (strategy == 4) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
L = 0;
do {
tmp[n] = bestit[n] + (oldarray[r1][n] + oldarray[r2][n] - oldarray[r3][n] - oldarray[r4][n])*F;
n = (n+1)%D;
L++;
} while((rnd_uni(&rnd_uni_init) < CR) && (L < D));
}
// DE/rand/2/exp seems to be a robust optimizer for many functions
else if (strategy == 5) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
L = 0;
do {
tmp[n] = oldarray[r5][n] + (oldarray[r1][n] + oldarray[r2][n] - oldarray[r3][n] - oldarray[r4][n])*F;
n = (n+1)%D;
L++;
} while((rnd_uni(&rnd_uni_init) < CR) && (L < D));
}
// Essentially same strategies but BINOMIAL CROSSOVER
// DE/best/1/bin
else if (strategy == 6) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
// perform D binomial trials
for (L=0; L<D; L++) {
// change at least one parameter
if ((rnd_uni(&rnd_uni_init) < CR) || L == (D-1)) {
tmp[n] = bestit[n] + F*(oldarray[r2][n] - oldarray[r3][n]);
}
n = (n+1)%D;
}
}
// DE/rand/1/bin
else if (strategy == 7) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
for (L=0; L<D; L++) {
// change at least one parameter
if ((rnd_uni(&rnd_uni_init) < CR) || L == (D-1)) {
tmp[n] = oldarray[r1][n] + F*(oldarray[r2][n] - oldarray[r3][n]);
}
n = (n+1)%D;
}
}
// DE/rand-to-best/1/bin
else if (strategy == 8) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
for (L=0; L<D; L++) {
if ((rnd_uni(&rnd_uni_init) < CR) || L == (D-1)) {
tmp[n] = tmp[n] + F*(bestit[n] - tmp[n]) + F*(oldarray[r1][n] - oldarray[r2][n]);
}
n = (n+1)%D;
}
}
// DE/best/2/bin
else if (strategy == 9) {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
for (L=0; L<D; L++) {
if ((rnd_uni(&rnd_uni_init) < CR) || L == (D-1)) {
tmp[n] = bestit[n] + (oldarray[r1][n] + oldarray[r2][n] - oldarray[r3][n] - oldarray[r4][n])*F;
}
n = (n+1)%D;
}
}
// DE/rand/2/bin
else {
for (int k=0; k<MAXDIM; k++) {
tmp[k] = oldarray[i][k];
}
n = (int)(rnd_uni(&rnd_uni_init)*D);
for (L=0; L<D; L++) {
if ((rnd_uni(&rnd_uni_init) < CR) || L == (D-1)) {
tmp[n] = oldarray[r5][n] + (oldarray[r1][n] + oldarray[r2][n] - oldarray[r3][n] - oldarray[r4][n])*F;
}
n = (n+1)%D;
}
}
// Trial mutation now in tmp[]. Test how good this choice really was.
trial_energy = evaluate(D, tmp, &nfeval,index); // Evaluate new vector in tmp[]
// improved objective function value?
if (trial_energy <= energy[i]) {
energy[i] = trial_energy;
for (int k=0; k<MAXDIM; k++) {
newarray[i][k] = tmp[k];
}
// Was this a new minimum?
if (trial_energy<emin) {
// reset emin to new low...
emin = trial_energy;
imin = i;
for (int k=0; k<MAXDIM; k++) {
best[k] = tmp[k];
}
}
} else {
for (int k=0; k<MAXDIM; k++) {
newarray[i][k] = oldarray[i][k];
}
}
}
CopyVector(bestit, best); // Save best population member of current iteration
CopyArray(swaparray, oldarray);
CopyArray(oldarray, newarray);
CopyArray(newarray, swaparray);
}