MLSDPL-PSO算法的实现

基本说明

本文是基于发表于Applied Soft Computing上的Pyramid particle swarm optimization with novel strategies of competition and cooperation文章,在详细阅读文章的前提下,通过C++实现了对算法的复现,且效果基本达到文中所述,现将算法的主体分享在本篇文章,代码均为本人所写,如有错误,请指出。

效果基本达到文中所述是指本人所写代码在cec2010lsgo测试集上 D=1000环境下,进行了实验,与文中效果接近,其他测试集与相关维度由于时间关系,并未进行测试,如读者有时间,可自行测试,欢迎将测试结果分享给我。

论文截图,有兴趣的读者可自行前往下载

算法的主体部分实现

1.MLSDPL-PSO.cpp
//written by Zhiping Jin
#include "Self_Define_Functions.h"
#include<algorithm>
#include<iostream>
#include"Header.h"
#include"Benchmarks.h"

using namespace std;
// Pairs a particle's fitness with its index in the population arrays, so the
// swarm can be sorted by fitness while remembering each particle's identity.
typedef struct newtype
{
	double fitness;  // objective value of the particle
	int index;       // particle's position in the population arrays
}Newtype;

// Strict weak ordering for std::sort: ascending fitness (minimization, best first).
// Fix: arguments are taken by const reference instead of by value, avoiding a
// 16-byte copy of each operand on every comparison during the sort.
bool cmp(const Newtype& a, const Newtype& b)
{
	return a.fitness < b.fitness;
}

//the settings of parameters in MLSDPL-PSO
const int dimension = 1000;                             // problem dimensionality D
const int Population_Size = 2 * (100 + dimension / 10); // swarm size
const int L = 20;                                       // number of levels in the multi-level pyramid
const double pmin = 0.05;                               // lower bound of each particle's learning probability p
const double fai = (double)dimension / 100 * 0.01;      // weight of the swarm-mean attraction term in the velocity update
int subswarm_population_size;        // number of particles sampled into the current sub-swarm
double initial_sampling_probability; // base sampling probability of a level before the FES-dependent shift
double r1, r2, r3;                   // per-dimension random coefficients of the velocity update
double fitness_max, fitness_min;     // fitness range of the current population (for normalizing p)
int sampling_particle;               // index (within sub_swarm) of the chosen exemplar particle
double current_fitness;              // fitness of the particle just moved
// fix: the per-level particle count is integral (Population_Size / L is integer
// division), so store it as a const int instead of a mutable double
const int num_of_particles = Population_Size / L;

//the settings of the program parameters
const int MAX_FES = 3000 * dimension;  // fitness-evaluation budget per run
const int MAX_RUN = 10;                // independent runs per function
const int MAX_FUNC = 20;               // the CEC2010 LSGO suite has 20 functions
int FES;                               // fitness evaluations consumed so far
int past_FES;
int func_index;
int run_index;
double final_val;                      // best fitness found in the current run
char file_name[256];                   // scratch buffer for output file paths
int i, j, temp_index, k;               // shared loop counters (file scope)
double lowerbound, upperbound;         // search-space bounds of the current benchmark function
// tiny epsilon keeping the p-normalization denominator non-zero;
// NOTE(review): 10e-200 equals 1e-199 - if 1e-200 was intended, adjust the literal
double lambda = 10e-200;



// Mersenne-twister engine shared by all distributions in this file.
// NOTE(review): rand() is used unseeded here, so it contributes a fixed
// implementation-defined factor to the seed; time(0) supplies the variation.
boost::mt19937 generator(time(0)* rand());
boost::uniform_real<> uniform_real_generate_r(0, 1);
boost::variate_generator< boost::mt19937&, boost::uniform_real<> > random_real_num_r(generator, uniform_real_generate_r);//to generate a random number within [0,1]



int main()
{
	// Workspace for the swarm:
	//   population[i]  - position of particle i (dimension doubles)
	//   speed[i]       - velocity of particle i
	//   result[i]      - current fitness of particle i
	//   p[i]           - dynamic learning probability of particle i
	//   multi_level    - L levels of Population_Size/L particle indices (the pyramid)
	//   sub_swarm      - indices sampled into the active sub-swarm each iteration
	//   arr            - (fitness, index) pairs used for sorting the swarm
	double** population = new double* [Population_Size];
	double* result = new double[Population_Size];
	double* p = new double[Population_Size];
	double* sampling_probability = new double[L];
	double** speed = new double* [Population_Size];
	double* temp_result = new double[Population_Size];
	int* sub_swarm = new int[Population_Size];
	int** multi_level = new int* [L];
	Newtype* arr = new Newtype[Population_Size];
	// allocate the per-particle position/velocity rows and per-level index rows
	for (i = 0; i < Population_Size; i++)
	{
		population[i] = new double[dimension];
		speed[i] = new double[dimension];
	}
	for (i = 0; i < L; i++)
	{
		multi_level[i] = new int[Population_Size / L];
	}

	for (func_index = 0; func_index < MAX_FUNC; func_index++)
	{
		cout << "start to optimize function " << func_index + 1 << endl;
		//sprintf_s(file_name, "convergency/F%d.txt", func_index+1);
		//string FileName = string(file_name);
		//ofstream  out_fitness(FileName);
		sprintf_s(file_name, "fitness/F%d.txt", func_index + 1);
		string FileName = string(file_name);
		ofstream out_final_val(FileName);
		Benchmarks* fp = NULL;
		fp = generateFuncObj(func_index + 1);
		lowerbound = (double)fp->getMinX();
		upperbound = (double)fp->getMaxX();
		// uniform sampler over the current function's search box
		boost::uniform_real<> uniform_real_generate_x(lowerbound, upperbound);
		boost::variate_generator< boost::mt19937&, boost::uniform_real<> > random_real_num_x(generator, uniform_real_generate_x);
		for (run_index = 0; run_index < MAX_RUN; run_index++)
		{
			cout << "run_index:" << run_index + 1 << endl;
			FES = 0;
			// initialize positions uniformly in [lowerbound, upperbound]
			// and all velocities to zero
			for (i = 0; i < dimension; i++)
			{
				for (j = 0; j < Population_Size; j++)
				{
					population[j][i] = random_real_num_x();
					speed[j][i] = 0;
				}
			}

			// evaluate the initial population and record the best value found
			for (i = 0; i < Population_Size; i++)
			{
				result[i] = fp->compute(population[i]);
			}

			FES += Population_Size;
			final_val = result[0];
			for (i = 0; i < Population_Size; i++)
			{
				if (result[i] < final_val)
				{
					final_val = result[i];
				}
			}

			// initialize each particle's learning probability p from its
			// normalized fitness: worse particles start with larger p
			fitness_min = result[0];
			fitness_max = result[0];
			for (i = 0; i < Population_Size; i++)
			{
				if (result[i] > fitness_max)
					fitness_max = result[i];
				if (result[i] < fitness_min)
					fitness_min = result[i];
			}

			for (i = 0; i < Population_Size; i++)
			{
				// lambda keeps the denominator non-zero when all fitnesses are equal
				p[i] = (1 - pmin) * (result[i] - fitness_min) / (fitness_max - fitness_min+lambda) + pmin;
			}


			while (FES <= MAX_FES )
			{
				// sort the swarm by ascending fitness (best particles first)
				for (i = 0; i < Population_Size; i++)
				{
					arr[i].fitness = result[i];
					arr[i].index = i;
				}
				sort(arr, arr + Population_Size,cmp);

				// multi-level stratification: partition the sorted swarm into L
				// equal levels (level 0 holds the best particles)
				k = 0;
				for (i = 0; i < L; i++)
				{
					for (j = 0; j < num_of_particles; j++)
					{
						multi_level[i][j] = arr[k].index;
						k++;
					}
				}
				// per-level sampling probability: initially biased toward the
				// lower (worse) levels and linearly reversing as FES -> MAX_FES
				for (i = 0; i < L; i++)
				{
					initial_sampling_probability = (double)i / (double)(L - 1);
					sampling_probability[i] = initial_sampling_probability + (1 - 2 * initial_sampling_probability) * (double)FES / (double)MAX_FES;
				}
				// draw the active sub-swarm by Bernoulli sampling each particle
				subswarm_population_size = 0;
				for (i = 0; i < L; i++)
				{
					for (j = 0; j < num_of_particles; j++)
					{
						if (random_real_num_r() < sampling_probability[i])
						{
							sub_swarm[subswarm_population_size++] = multi_level[i][j];
						}
					}
				}

				// dynamic-p learning: each sampled particle picks an exemplar
				// uniformly from the best ceil(size * p) slots of the sub-swarm
				for (i = 0; i < subswarm_population_size; i++)
				{

					boost::uniform_int<> uniform_rand_particle(0, ceill(subswarm_population_size* p[sub_swarm[i]])-1);
					boost::variate_generator< boost::mt19937&, boost::uniform_int<> > random_particle(generator, uniform_rand_particle);
					sampling_particle = random_particle();
					// only learn from an exemplar that is at least as good
					if (result[sub_swarm[sampling_particle]] <= result[sub_swarm[i]])
					{
						for (j = 0; j < dimension; j++)
						{
							r1 = random_real_num_r();
							r2 = random_real_num_r();
							r3 = random_real_num_r();
							// velocity = inertia + attraction toward the exemplar
							// + attraction toward the fitness-weighted swarm mean
							speed[sub_swarm[i]][j] = r1 * speed[sub_swarm[i]][j] + r2 * (population[sub_swarm[sampling_particle]][j] - population[sub_swarm[i]][j]) + fai * r3 * (cal_weight(population, result, j, subswarm_population_size, sub_swarm) - population[sub_swarm[i]][j]);
							population[sub_swarm[i]][j] += speed[sub_swarm[i]][j];
							// clamp the new position to the search bounds
							if (population[sub_swarm[i]][j] > upperbound)
								population[sub_swarm[i]][j] = upperbound;
							if (population[sub_swarm[i]][j] < lowerbound)
								population[sub_swarm[i]][j] = lowerbound;

						}
						// re-evaluate the moved particle
						current_fitness = fp->compute(population[sub_swarm[i]]);
						FES += 1;
						if (current_fitness < final_val)
						{
							final_val = current_fitness;
						}

						// adapt p: on improvement average it with the exemplar's p,
						// otherwise push it away; always keep it inside [pmin, 1]
						if (current_fitness < result[sub_swarm[i]])
						{
							p[sub_swarm[i]] = (p[sub_swarm[i]] + p[sub_swarm[sampling_particle]]) / 2;
							if (p[sub_swarm[i]] < pmin)
								p[sub_swarm[i]] = pmin;
							if (p[sub_swarm[i]] > 1)
								p[sub_swarm[i]] = 1;
						}
						else
						{
							p[sub_swarm[i]] = 2 * p[sub_swarm[i]] - p[sub_swarm[sampling_particle]];
							if (p[sub_swarm[i]] < pmin)
								p[sub_swarm[i]] = pmin;
							if (p[sub_swarm[i]] > 1)
								p[sub_swarm[i]] = 1;
						}
						result[sub_swarm[i]] = current_fitness;
						// periodic progress report
						if (FES % 50000 == 0)
						{
							//out_fitness << final_val << " ";
							cout <<  FES << ":" << final_val << endl;
						}
					}
				}

			}
			//out_fitness << endl;
			out_final_val << final_val << endl;
			cout << final_val << endl;

		}
		// fix: the benchmark object was leaked once per function; release it here
		// (assumes Benchmarks declares a virtual destructor - TODO confirm in Benchmarks.h)
		delete fp;
		cout << "Function " << func_index + 1 << "'s optimization is finished!" << endl;
	}

	for (i = 0; i < Population_Size; i++)
	{
		delete[]population[i];
		delete[]speed[i];
	}

	for (i = 0; i < L; i++)
	{
		delete[]multi_level[i];
	}

	delete[]population;
	delete[]speed;
	delete[]multi_level;
	delete[]result;
	delete[]p;
	delete[]sampling_probability;
	delete[]temp_result;
	delete[]sub_swarm;
	delete[]arr;  // fix: this array was leaked in the original
	return 0;
}

2.Self_Define_Functions.cpp

//written by Zhiping Jin
#include "Self_Define_Functions.h"
#include <math.h>
#include <iostream>
#include <fstream>
#include<algorithm>
#include"Header.h"
#include"Benchmarks.h"

int search_index(double* arr, int length, double result)
{
	int i;
	for (i = 0; i < length; i++)
	{
		if (result == arr[i])
		{
			return i;
		}
	}
	return -1;
}

// Fitness-weighted mean of the sub-swarm's positions along one coordinate:
// sum(result[s] * population[s][dimension]) / sum(result[s]) over the
// particles listed in subswarm. Both sums are accumulated in a single pass,
// in the same index order as before, so the result is unchanged.
double cal_weight(double** population, double* result, int dimension, int subswarm_populationsize, int* subswarm)
{
	double weighted_sum = 0.0;
	double fitness_sum = 0.0;
	for (int m = 0; m < subswarm_populationsize; m++)
	{
		const int idx = subswarm[m];
		weighted_sum += result[idx] * population[idx][dimension];
		fitness_sum += result[idx];
	}
	return weighted_sum / fitness_sum;
}



// Factory for the CEC2010 LSGO benchmark functions: maps funcID in [1, 20]
// to a freshly allocated F1..F20 object (caller owns the pointer). Any other
// funcID prints an error and terminates, exactly as before.
Benchmarks* generateFuncObj(int funcID) {
	switch (funcID) {
	case 1:  return new F1();
	case 2:  return new F2();
	case 3:  return new F3();
	case 4:  return new F4();
	case 5:  return new F5();
	case 6:  return new F6();
	case 7:  return new F7();
	case 8:  return new F8();
	case 9:  return new F9();
	case 10: return new F10();
	case 11: return new F11();
	case 12: return new F12();
	case 13: return new F13();
	case 14: return new F14();
	case 15: return new F15();
	case 16: return new F16();
	case 17: return new F17();
	case 18: return new F18();
	case 19: return new F19();
	case 20: return new F20();
	default:
		cerr << "Fail to locate Specified Function Index" << endl;
		exit(-1);
	}
}

3.Self_Define_Functions.h

#pragma once
//written by Zhiping Jin
// fix: removed two redundant duplicate `#pragma once` directives; one is
// sufficient, and the classic include guard below already protects against
// double inclusion on compilers without #pragma once support.
#ifndef SELF_DEFINE_FUNCTIONS_H_INCLUDED
#define SELF_DEFINE_FUNCTIONS_H_INCLUDED


#include <time.h>
#include <cstdio>
#include "unistd.h" // NOTE(review): POSIX-only header, yet the .cpp uses MSVC's sprintf_s - confirm the target platform
#include <algorithm>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <iomanip>
#include <math.h>
#include <string>
#include <string.h>

// The following is the library of random number generators in boost
#include <boost/random.hpp>
#include <boost/random/uniform_int.hpp>
#include <boost/random/cauchy_distribution.hpp>
#include <boost/random/normal_distribution.hpp>
#include <boost/random/uniform_real.hpp>

// Returns the index of the first element of arr equal to result, or -1.
int search_index(double* arr, int length, double result);
// Fitness-weighted mean of the sub-swarm's positions along coordinate `dimension`.
double cal_weight(double** population, double* result, int dimension, int subswarm_populationsize, int* subswarm);
#endif // SELF_DEFINE_FUNCTIONS_H_INCLUDED

以上为算法的主体部分,剩余cec2010测试集相关部分可自行下载cec2010测试集查看

由于本人水平有限,程序难免存在差错,如发现问题,可私信我进行修改。

elm-pso算法是结合了极限学习机(ELM)和粒子群优化(PSO)的一种优化算法。ELM是一种机器学习算法,通过随机初始化输入层到隐藏层的权重和偏置,然后通过最小化误差来优化这些参数。而PSO是一种启发式优化算法,通过模拟鸟群觅食的方式来寻找最优解。 elm-pso算法能够用来解决一些实际问题,其中包括: 1. 模式识别:elm-pso算法可以用来识别复杂的模式,比如人脸识别、手写体识别等。通过对隐藏层的权重和偏置进行优化,elm-pso算法能够提高模型的准确率和泛化能力。 2. 回归分析:elm-pso算法可以用来进行回归分析,通过拟合非线性的函数关系,来预测未知的数据点。这在金融、经济预测等领域具有应用前景。 3. 特征选择:elm-pso算法可以用来选择最优的特征子集,从而提高模型的精确度和解释能力。通过优化隐藏层的权重和偏置,elm-pso算法能够选择最相关的输入特征,剔除无关的噪声特征。 4. 参数优化:elm-pso算法还可以用来优化其他机器学习算法中的参数。通过对待优化的参数进行编码,并结合粒子群的搜索机制,elm-pso算法能够找到最优的参数组合,提高模型的表现力和泛化能力。 总之,elm-pso算法结合了ELM和PSO的优势,能够在模式识别、回归分析、特征选择和参数优化等问题中发挥重要作用,提高模型的性能和效果。
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

ttzif

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值