对于种群优化算法来说,最著名的为粒子群算法、遗传算法及差分进化算法,在前面的博客中已实现了其中的基本型。虽然用C语言编写的版本运行速度快,但是Python近年来越来越流行,所以这里用Python实现差分进化算法,经函数测试运行速度也很快。主要用到了numpy库来实现,绘图显示使用的是matplotlib库。
差分进化算法总共有四个步骤:
(1)种群初始化
(2)变异操作
(3)交叉操作
(4)贪婪选择操作
# -*- coding:utf-8 -*-
# copyright(c) Greg Gong reserved
#Copyright Clarify
#Copyright ownership belongs to Greg Gong; this code shall not be reproduced, copied, or used in other ways without permission. Otherwise Greg Gong will have the right to pursue legal responsibilities.
import numpy as np
import matplotlib.pyplot as plt
class de:
    """Differential Evolution minimizer (DE/rand/1/bin) for box-constrained problems.

    Attributes:
        pop_pos: (n_pop, n_var) array, current population positions.
        pop_fit: (n_pop,) array, fitness of the current population.
        pop_best_pos: best position found so far.
        pop_best_fit: best (minimum) fitness found so far.
        CR: crossover probability in [0, 1].
        F: differential weight (mutation scale factor).
        max_iter: number of generations to run.
        n_var: number of decision variables.
        n_pop: population size.
        bound: (2, n_var) array; row 0 = upper bounds, row 1 = lower bounds.
        fit_func: vectorized fitness function mapping (n, n_var) -> (n,).
        iter_fit: (max_iter,) float array, best fitness recorded per iteration.
    """
    def __init__(self, max_iter, n_var, CR, F, n_pop, bound, fit_func):
        self.max_iter = max_iter
        self.n_var = n_var
        self.CR = CR
        self.F = F
        self.fit_func = fit_func
        self.n_pop = n_pop
        # BUG FIX: was np.arange(max_iter), an *integer* array, so the best
        # fitness stored each generation in run() was silently truncated to
        # int. A float zero array records the true values.
        self.iter_fit = np.zeros(max_iter)
        self.bound = bound
        # Initialize the population uniformly inside [lower, upper].
        self.pop_pos = bound[1, :] + np.random.rand(n_pop, n_var) * (bound[0, :] - bound[1, :])
        self.pop_fit = self.fit_func(self.pop_pos)
        self.pop_best_fit = np.min(self.pop_fit)
        self.pop_best_pos = self.pop_pos[np.argmin(self.pop_fit), :]
        self.pop_cross = self.pop_pos.copy()
        self.pop_cross_fit = self.pop_fit.copy()
        self.pop_mutute = self.pop_pos.copy()
        self.pop_mutute_fit = self.pop_fit.copy()

    def mutute_pop(self):
        """Build the mutant population: v_i = x_r1 + F * (x_r2 - x_r3).

        BUG FIX: the original drew r1/r2/r3 once *outside* the loop, so every
        individual shared the same donor vectors, and its distinctness check
        only re-drew a single time (the three indices could still collide).
        Each individual now gets its own three pairwise-distinct donors.
        """
        for i in range(self.n_pop):
            # replace=False guarantees r1, r2, r3 are pairwise distinct.
            r1, r2, r3 = np.random.choice(self.n_pop, 3, replace=False)
            self.pop_mutute[i, :] = (self.pop_pos[r1, :]
                                     + self.F * (self.pop_pos[r2, :] - self.pop_pos[r3, :]))
            # Clamp the mutant back into the box constraints.
            index_up = self.pop_mutute[i, :] > self.bound[0, :]
            self.pop_mutute[i, index_up] = self.bound[0, index_up].copy()
            index_down = self.pop_mutute[i, :] < self.bound[1, :]
            self.pop_mutute[i, index_down] = self.bound[1, index_down].copy()

    def cross_pop(self):
        """Binomial crossover of current and mutant populations into trial vectors.

        BUG FIX: the original condition `randc==j|(randF<self.CR)` parsed as
        `randc == (j | bool)` because `|` binds tighter than `==`, breaking the
        intended logic `(j == randc) or (randF < CR)`.
        """
        for i in range(self.n_pop):
            # One dimension is always inherited from the mutant so the trial
            # vector is guaranteed to differ from the parent.
            randc = np.random.choice(self.n_var)
            for j in range(self.n_var):
                if j == randc or np.random.rand() < self.CR:
                    self.pop_cross[i, j] = self.pop_mutute[i, j].copy()
                else:
                    self.pop_cross[i, j] = self.pop_pos[i, j].copy()
            # Clamp for safety (mutants are already clamped, parents in-bounds).
            index_up = self.pop_cross[i, :] > self.bound[0, :]
            self.pop_cross[i, index_up] = self.bound[0, index_up].copy()
            index_down = self.pop_cross[i, :] < self.bound[1, :]
            self.pop_cross[i, index_down] = self.bound[1, index_down].copy()
        self.pop_cross_fit = self.fit_func(self.pop_cross)

    def greedy_select(self):
        """Keep each trial vector only where it improves on its parent."""
        better = self.pop_cross_fit < self.pop_fit
        self.pop_pos[better, :] = self.pop_cross[better, :].copy()
        self.pop_fit[better] = self.pop_cross_fit[better]
        # Hoisted: argmin computed once instead of three times.
        best = np.argmin(self.pop_fit)
        if self.pop_fit[best] < self.pop_best_fit:
            self.pop_best_pos = self.pop_pos[best, :].copy()
            self.pop_best_fit = self.pop_fit[best].copy()

    def show_result(self):
        """Plot the best fitness per iteration (convergence curve)."""
        plt.plot(range(self.max_iter), self.iter_fit, '-b')
        plt.title('convergence of process in de')  # fixed typo: was 'converage'
        plt.xlabel('iter')
        plt.ylabel('fitness')
        plt.show()

    def print_result(self):
        """Print the best fitness and the corresponding solution vector."""
        print('the result is showing...')
        print('best_fitness for min problem:', self.pop_best_fit)
        print('best solution for result:', self.pop_best_pos)

    def run(self):
        """Execute max_iter generations of mutate -> crossover -> select."""
        for it in range(self.max_iter):
            self.mutute_pop()
            self.cross_pop()
            self.greedy_select()
            self.iter_fit[it] = self.pop_best_fit
            print('iter,iter_fit', (it, self.iter_fit[it]))
def fitness_fun(x):
    """Sphere benchmark: sum of squared components of each row of x."""
    return np.square(x).sum(axis=1)
def main():
    """Demo: minimize the 100-D sphere function with DE and show the results."""
    iterations = 1000
    dim = 100
    pop_size = 50
    # Row 0 = upper bounds, row 1 = lower bounds; each variable may differ.
    limits = np.zeros((2, dim))
    limits[0, :] = np.ones(dim) * 10
    limits[1, :] = -np.ones(dim) * 10
    cross_rate = 0.2
    scale_factor = 0.2
    solver = de(iterations, dim, cross_rate, scale_factor, pop_size, limits, fitness_fun)
    solver.run()
    solver.show_result()
    solver.print_result()
# Script entry point: run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()