Clone Selection Algorithm: Python Implementation

import numpy as np
from CSAIndividual import CSAIndividual
import random
import copy
import matplotlib.pyplot as plt


class CloneSelectionAlgorithm:

    '''
    The class for the clone selection algorithm.
    '''

    def __init__(self, sizepop, vardim, bound, MAXGEN, params):
        '''
        sizepop: population size
        vardim: dimension of variables
        bound: boundaries of variables (row 0: lower bounds, row 1: upper bounds)
        MAXGEN: termination condition (maximum number of generations)
        params: algorithm parameters, a list [beta, pm, alpha_max, alpha_min]
        '''
        self.sizepop = sizepop
        self.vardim = vardim
        self.bound = bound
        self.MAXGEN = MAXGEN
        self.params = params
        self.population = []
        self.fitness = np.zeros(self.sizepop)
        self.trace = np.zeros((self.MAXGEN, 2))

    def initialize(self):
        '''
        Initialize the population.
        '''
        for i in range(0, self.sizepop):
            ind = CSAIndividual(self.vardim, self.bound)
            ind.generate()
            self.population.append(ind)

    def evaluation(self):
        '''
        Evaluate the fitness of the population.
        '''
        for i in range(0, self.sizepop):
            self.population[i].calculateFitness()
            self.fitness[i] = self.population[i].fitness

    def solve(self):
        '''
        The evolution process of the clone selection algorithm.
        '''
        self.t = 0
        self.initialize()
        self.evaluation()
        bestIndex = np.argmax(self.fitness)
        self.best = copy.deepcopy(self.population[bestIndex])
        while self.t < self.MAXGEN:
            self.t += 1
            tmpPop = self.reproduction()
            tmpPop = self.mutation(tmpPop)
            self.selection(tmpPop)
            best = np.max(self.fitness)
            bestIndex = np.argmax(self.fitness)
            if best > self.best.fitness:
                self.best = copy.deepcopy(self.population[bestIndex])
            self.avefitness = np.mean(self.fitness)
            # record the best and average values of this generation for plotting
            self.trace[self.t - 1, 0] = (1 - self.best.fitness) / self.best.fitness
            self.trace[self.t - 1, 1] = (1 - self.avefitness) / self.avefitness
            print("Generation %d: optimal function value is: %f; average function value is %f" % (
                self.t, self.trace[self.t - 1, 0], self.trace[self.t - 1, 1]))
        print("Optimal function value is: %f" % self.trace[self.t - 1, 0])
        print("Optimal solution is:")
        print(self.best.chrom)
        self.printResult()

    def reproduction(self):
        '''
        Reproduction: clone each individual nc times.
        '''
        tmpPop = []
        for i in range(0, self.sizepop):
            nc = int(self.params[1] * self.sizepop)
            for j in range(0, nc):
                ind = copy.deepcopy(self.population[i])
                tmpPop.append(ind)
        return tmpPop

    def mutation(self, tmpPop):
        '''
        Hypermutation of the cloned population.
        '''
        for i in range(0, self.sizepop):
            nc = int(self.params[1] * self.sizepop)
            # j starts at 1, so the first clone of each individual stays unmutated
            for j in range(1, nc):
                rnd = np.random.random()
                if rnd < self.params[0]:
                    # mutation step size shrinks linearly from alpha_max towards alpha_min
                    delta = self.params[2] + self.t * \
                        (self.params[3] - self.params[2]) / self.MAXGEN
                    tmpPop[i * nc + j].chrom += np.random.normal(0.0, delta, self.vardim)
                    # alternative mutation: move towards the best individual
                    # alpha = self.params[2] + self.t * (self.params[3] - self.params[2]) / self.MAXGEN
                    # tmpPop[i * nc + j].chrom += alpha * np.random.random(
                    #     self.vardim) * (self.best.chrom - tmpPop[i * nc + j].chrom)
                    # clip the mutated chromosome back into the variable bounds
                    for k in range(0, self.vardim):
                        if tmpPop[i * nc + j].chrom[k] < self.bound[0, k]:
                            tmpPop[i * nc + j].chrom[k] = self.bound[0, k]
                        if tmpPop[i * nc + j].chrom[k] > self.bound[1, k]:
                            tmpPop[i * nc + j].chrom[k] = self.bound[1, k]
                    tmpPop[i * nc + j].calculateFitness()
        return tmpPop

    def selection(self, tmpPop):
        '''
        Re-selection: keep the best clone of each individual if it improves on it.
        '''
        for i in range(0, self.sizepop):
            nc = int(self.params[1] * self.sizepop)
            best = 0.0
            bestIndex = -1
            for j in range(0, nc):
                if tmpPop[i * nc + j].fitness > best:
                    best = tmpPop[i * nc + j].fitness
                    bestIndex = i * nc + j
            if self.fitness[i] < best:
                self.population[i] = copy.deepcopy(tmpPop[bestIndex])
                self.fitness[i] = best

    def printResult(self):
        '''
        Plot the result of the clone selection algorithm.
        '''
        x = np.arange(0, self.MAXGEN)
        y1 = self.trace[:, 0]
        y2 = self.trace[:, 1]
        plt.plot(x, y1, 'r', label='optimal value')
        plt.plot(x, y2, 'g', label='average value')
        plt.xlabel("Iteration")
        plt.ylabel("function value")
        plt.title("Clone selection algorithm for function optimization")
        plt.legend()
        plt.show()
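
The CSAIndividual class imported at the top is not listed in this post. A minimal sketch that matches how it is used above (a CSAIndividual(vardim, bound) constructor, generate(), calculateFitness(), and the chrom and fitness attributes) could look like the following; the Griewank objective and the 1 / (1 + f(x)) fitness mapping are assumptions, chosen so that the (1 - fitness) / fitness transform in solve() recovers the raw function value.

import numpy as np


class CSAIndividual:

    '''
    Individual of the clone selection algorithm (illustrative sketch).
    '''

    def __init__(self, vardim, bound):
        self.vardim = vardim    # number of decision variables
        self.bound = bound      # 2 x vardim array: row 0 lower, row 1 upper bounds
        self.chrom = np.zeros(self.vardim)
        self.fitness = 0.0

    def generate(self):
        # sample a random point uniformly within the bounds
        rnd = np.random.random(self.vardim)
        self.chrom = self.bound[0] + (self.bound[1] - self.bound[0]) * rnd

    def calculateFitness(self):
        # assumed objective: Griewank function (smaller is better),
        # mapped to a fitness in (0, 1] where larger is better
        x = self.chrom
        idx = np.arange(1, self.vardim + 1)
        f = np.sum(x ** 2) / 4000.0 - np.prod(np.cos(x / np.sqrt(idx))) + 1.0
        self.fitness = 1.0 / (1.0 + f)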

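The post also omits a driver script. A usage sketch of the class above, with the assumed module name, the 25-dimensional search space, and the params list [beta, pm, alpha_max, alpha_min] values all chosen arbitrarily for illustration:

import numpy as np
from CloneSelectionAlgorithm import CloneSelectionAlgorithm  # assumed file name

if __name__ == "__main__":
    vardim = 25
    # one column per variable: row 0 holds lower bounds, row 1 upper bounds
    bound = np.tile([[-600.0], [600.0]], vardim)
    # params = [beta, pm, alpha_max, alpha_min]; illustrative values only
    csa = CloneSelectionAlgorithm(50, vardim, bound, 500, [0.3, 0.4, 5.0, 0.1])
    csa.solve()

With pm = 0.4 and sizepop = 50, each individual is cloned nc = int(0.4 * 50) = 20 times per generation, so tmpPop holds 1000 candidate solutions before re-selection.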