差分进化算法（Differential Evolution）— Python 实现

import copy
import random

import matplotlib.pyplot as plt
import numpy as np

from DEIndividual import DEIndividual

7

8 classDifferentialEvolutionAlgorithm:9

10 '''

11 The class for differential evolution algorithm12 '''

13

14 def __init__(self, sizepop, vardim, bound, MAXGEN, params):15 '''

16 sizepop: population sizepop17 vardim: dimension of variables18 bound: boundaries of variables19 MAXGEN: termination condition20 param: algorithm required parameters, it is a list which is consisting of [crossover rate CR, scaling factor F]21 '''

22 self.sizepop =sizepop23 self.MAXGEN =MAXGEN24 self.vardim =vardim25 self.bound =bound26 self.population =[]27 self.fitness = np.zeros((self.sizepop, 1))28 self.trace = np.zeros((self.MAXGEN, 2))29 self.params =params30

31 definitialize(self):32 '''

33 initialize the population34 '''

35 for i inxrange(0, self.sizepop):36 ind =DEIndividual(self.vardim, self.bound)37 ind.generate()38 self.population.append(ind)39

40 defevaluate(self, x):41 '''

42 evaluation of the population fitnesses43 '''

44 x.calculateFitness()45

46 defsolve(self):47 '''

48 evolution process of differential evolution algorithm49 '''

50 self.t =051 self.initialize()52 for i inxrange(0, self.sizepop):53 self.evaluate(self.population[i])54 self.fitness[i] =self.population[i].fitness55 best =np.max(self.fitness)56 bestIndex =np.argmax(self.fitness)57 self.best =copy.deepcopy(self.population[bestIndex])58 self.avefitness =np.mean(self.fitness)59 self.trace[self.t, 0] = (1 - self.best.fitness) /self.best.fitness60 self.trace[self.t, 1] = (1 - self.avefitness) /self.avefitness61 print("Generation %d: optimal function value is: %f; average function value is %f" %(62 self.t, self.trace[self.t, 0], self.trace[self.t, 1]))63 while (self.t < self.MAXGEN - 1):64 self.t += 1

65 for i inxrange(0, self.sizepop):66 vi =self.mutationOperation(i)67 ui =self.crossoverOperation(i, vi)68 xi_next =self.selectionOperation(i, ui)69 self.population[i] =xi_next70 for i inxrange(0, self.sizepop):71 self.evaluate(self.population[i])72 self.fitness[i] =self.population[i].fitness73 best =np.max(self.fitness)74 bestIndex =np.argmax(self.fitness)75 if best >self.best.fitness:76 self.best =copy.deepcopy(self.population[bestIndex])77 self.avefitness =np.mean(self.fitness)78 self.trace[self.t, 0] = (1 - self.best.fitness) /self.best.fitness79 self.trace[self.t, 1] = (1 - self.avefitness) /self.avefitness80 print("Generation %d: optimal function value is: %f; average function value is %f" %(81 self.t, self.trace[self.t, 0], self.trace[self.t, 1]))82

83 print("Optimal function value is: %f;" %

84 self.trace[self.t, 0])85 print "Optimal solution is:"

86 printself.best.chrom87 self.printResult()88

89 defselectionOperation(self, i, ui):90 '''

91 selection operation for differential evolution algorithm92 '''

93 xi_next =copy.deepcopy(self.population[i])94 xi_next.chrom =ui95 self.evaluate(xi_next)96 if xi_next.fitness >self.population[i].fitness:97 returnxi_next98 else:99 returnself.population[i]100

101 defcrossoverOperation(self, i, vi):102 '''

103 crossover operation for differential evolution algorithm104 '''

105 k = np.random.random_integers(0, self.vardim - 1)106 ui =np.zeros(self.vardim)107 for j inxrange(0, self.vardim):108 pick =random.random()109 if pick < self.params[0] or j ==k:110 ui[j] =vi[j]111 else:112 ui[j] =self.population[i].chrom[j]113 returnui114

115 defmutationOperation(self, i):116 '''

117 mutation operation for differential evolution algorithm118 '''

119 a = np.random.random_integers(0, self.sizepop - 1)120 while a ==i:121 a = np.random.random_integers(0, self.sizepop - 1)122 b = np.random.random_integers(0, self.sizepop - 1)123 while b == i or b ==a:124 b = np.random.random_integers(0, self.sizepop - 1)125 c = np.random.random_integers(0, self.sizepop - 1)126 while c == i or c == b or c ==a:127 c = np.random.random_integers(0, self.sizepop - 1)128 vi = self.population[c].chrom + self.params[1] *\129 (self.population[a].chrom -self.population[b].chrom)130 for j inxrange(0, self.vardim):131 if vi[j] self.bound[1, j]:134 vi[j] = self.bound[1, j]135 returnvi136

137 defprintResult(self):138 '''

139 plot the result of the differential evolution algorithm140 '''

141 x =np.arange(0, self.MAXGEN)142 y1 =self.trace[:, 0]143 y2 = self.trace[:, 1]144 plt.plot(x, y1, 'r', label='optimal value')145 plt.plot(x, y2, 'g', label='average value')146 plt.xlabel("Iteration")147 plt.ylabel("function value")148 plt.title("Differential Evolution Algorithm for function optimization")149 plt.legend()150 plt.show()

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值