Particle Swarm Optimization and Python Code: A Python Implementation of PSO

import numpy as np
from PSOIndividual import PSOIndividual
import random
import copy
import matplotlib.pyplot as plt


class ParticleSwarmOptimization:

    '''
    The class for Particle Swarm Optimization.
    '''

    def __init__(self, sizepop, vardim, bound, MAXGEN, params):
        '''
        sizepop: population size
        vardim: dimension of variables
        bound: boundaries of variables
        MAXGEN: termination condition (maximum number of generations)
        params: algorithm parameters, a list of [w, c1, c2]
        '''
        self.sizepop = sizepop
        self.vardim = vardim
        self.bound = bound
        self.MAXGEN = MAXGEN
        self.params = params
        self.population = []
        self.fitness = np.zeros((self.sizepop, 1))
        self.trace = np.zeros((self.MAXGEN, 2))

    def initialize(self):
        '''
        Initialize the population of the PSO.
        '''
        for i in range(0, self.sizepop):
            ind = PSOIndividual(self.vardim, self.bound)
            ind.generate()
            self.population.append(ind)

    def evaluation(self):
        '''
        Evaluate the fitness of the population.
        '''
        for i in range(0, self.sizepop):
            self.population[i].calculateFitness()
            self.fitness[i] = self.population[i].fitness
            # Update the particle's personal best (pbest); update() below reads
            # bestPosition, so that is where the best position must be stored.
            if self.population[i].fitness > self.population[i].bestFitness:
                self.population[i].bestFitness = self.population[i].fitness
                self.population[i].bestPosition = copy.deepcopy(
                    self.population[i].chrom)

    def update(self):
        '''
        Update the velocities and positions of the population.
        '''
        w, c1, c2 = self.params
        for i in range(0, self.sizepop):
            # Inertia term + cognitive (pbest) term + social (gbest) term.
            self.population[i].velocity = w * self.population[i].velocity \
                + c1 * np.random.random(self.vardim) * (self.population[i].bestPosition - self.population[i].chrom) \
                + c2 * np.random.random(self.vardim) * (self.best.chrom - self.population[i].chrom)
            self.population[i].chrom = self.population[i].chrom + self.population[i].velocity
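The update() method above implements the standard PSO update rule: for each particle i,

    v_i = w * v_i + c1 * r1 * (pbest_i - x_i) + c2 * r2 * (gbest - x_i)
    x_i = x_i + v_i

where w, c1 and c2 are the inertia weight and the cognitive and social acceleration coefficients passed in params, r1 and r2 are fresh uniform random vectors in [0, 1) of length vardim, pbest_i is the particle's personal best position (bestPosition), and gbest is the best particle found so far (self.best.chrom). Note that no velocity or position clamping to bound is applied in this step.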

    def solve(self):
        '''
        The evolution process of the PSO algorithm.
        '''
        self.t = 0
        self.initialize()
        self.evaluation()
        best = np.max(self.fitness)
        bestIndex = np.argmax(self.fitness)
        self.best = copy.deepcopy(self.population[bestIndex])
        self.avefitness = np.mean(self.fitness)
        # (1 - fitness) / fitness recovers the raw objective value, presumably
        # because the individual's fitness is defined as 1 / (1 + f).
        self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
        self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
        print("Generation %d: optimal function value is: %f; average function value is %f" % (
            self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
        while self.t < self.MAXGEN - 1:
            self.t += 1
            self.update()
            self.evaluation()
            best = np.max(self.fitness)
            bestIndex = np.argmax(self.fitness)
            # Keep the best particle found so far as the global best (gbest).
            if best > self.best.fitness:
                self.best = copy.deepcopy(self.population[bestIndex])
            self.avefitness = np.mean(self.fitness)
            self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
            self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
            print("Generation %d: optimal function value is: %f; average function value is %f" % (
                self.t, self.trace[self.t, 0], self.trace[self.t, 1]))

        print("Optimal function value is: %f" % self.trace[self.t, 0])
        print("Optimal solution is:")
        print(self.best.chrom)
        self.printResult()

    def printResult(self):
        '''
        Plot the result of the PSO algorithm.
        '''
        x = np.arange(0, self.MAXGEN)
        y1 = self.trace[:, 0]
        y2 = self.trace[:, 1]
        plt.plot(x, y1, 'r', label='optimal value')
        plt.plot(x, y2, 'g', label='average value')
        plt.xlabel("Iteration")
        plt.ylabel("function value")
        plt.title("Particle Swarm Optimization algorithm for function optimization")
        plt.legend()
        plt.show()
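The PSOIndividual class is imported from a companion module that is not included in this post. Based only on the attributes and methods used above (chrom, velocity, fitness, bestFitness, bestPosition, generate(), calculateFitness()), a minimal sketch of such a class might look as follows; the Griewank objective and the 1 / (1 + f) fitness mapping are assumptions chosen to match the (1 - fitness) / fitness inversion in solve(), not taken from the original module.

import numpy as np


class PSOIndividual:
    '''
    Minimal particle sketch for the optimizer above (assumed interface;
    the original PSOIndividual module is not shown in the post).
    '''

    def __init__(self, vardim, bound):
        self.vardim = vardim    # number of decision variables
        self.bound = bound      # assumed 2 x vardim array: row 0 lower, row 1 upper bounds
        self.fitness = 0.0
        self.bestFitness = 0.0
        self.chrom = np.zeros(vardim)
        self.velocity = np.zeros(vardim)
        self.bestPosition = np.zeros(vardim)

    def generate(self):
        # Random position inside the bounds and a random initial velocity.
        rnd = np.random.random(self.vardim)
        self.chrom = self.bound[0] + (self.bound[1] - self.bound[0]) * rnd
        self.velocity = np.random.random(self.vardim)
        self.bestPosition = np.copy(self.chrom)

    def calculateFitness(self):
        # Assumed objective: the Griewank function, mapped to a fitness to be
        # maximised via 1 / (1 + f), which matches the (1 - fitness) / fitness
        # inversion used in ParticleSwarmOptimization.solve().
        x = self.chrom
        f = np.sum(x ** 2) / 4000.0 \
            - np.prod(np.cos(x / np.sqrt(np.arange(1, self.vardim + 1)))) + 1.0
        self.fitness = 1.0 / (1.0 + f)

With such an individual in place, the optimizer could be driven along these lines (the population size, dimension, bounds, generation count and [w, c1, c2] values below are illustrative, not taken from the original post):

if __name__ == '__main__':
    bound = np.tile([[-600.0], [600.0]], 25)    # 2 x 25 bounds for a 25-dimensional problem
    pso = ParticleSwarmOptimization(60, 25, bound, 100, [0.7298, 1.4962, 1.4962])
    pso.solve()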
