Differential Evolution Algorithm: Python Code

DEIndividual.py

import numpy as np
import ObjFunction


class DEIndividual:

    '''
    individual of differential evolution algorithm
    '''

    def __init__(self,  vardim, bound):
        '''
        vardim: dimension of variables
        bound: boundaries of variables
        '''
        self.vardim = vardim
        self.bound = bound
        self.fitness = 0.

    def generate(self):
        '''
        generate a random chromosome for the differential evolution algorithm
        '''
        dim = self.vardim
        rnd = np.random.random(size=dim)
        self.chrom = np.zeros(dim)
        for i in range(0, dim):
            self.chrom[i] = self.bound[0, i] + \
                (self.bound[1, i] - self.bound[0, i]) * rnd[i]

    def calculateFitness(self):
        '''
        calculate the fitness of the chromosome
        '''
        self.fitness = ObjFunction.GrieFunc(
            self.vardim, self.chrom, self.bound)
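
The `ObjFunction` module imported above is not listed in the post. A minimal sketch of what `GrieFunc` could look like is given below, assuming it evaluates the Griewank function and returns the transformed fitness 1 / (1 + f(x)), which lies in (0, 1] and makes larger values better; this matches the `(1 - fitness) / fitness` conversion that DE.py uses to recover the raw function value.

ObjFunction.py (a sketch under the assumption above)

import math


def GrieFunc(vardim, x, bound):
    '''
    Griewank function mapped to a fitness in (0, 1]: fitness = 1 / (1 + f(x))
    vardim: dimension of variables
    x: chromosome (numpy array)
    bound: boundaries of variables (unused here, kept for the call signature)
    '''
    s1 = 0.
    s2 = 1.
    for i in range(1, vardim + 1):
        s1 += x[i - 1] ** 2 / 4000.
        s2 *= math.cos(x[i - 1] / math.sqrt(i))
    f = s1 - s2 + 1
    return 1. / (1. + f)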

DE.py

import numpy as np
from DEIndividual import DEIndividual
import random
import copy
import matplotlib.pyplot as plt


class DifferentialEvolutionAlgorithm:

    '''
    The class for differential evolution algorithm
    '''

    def __init__(self, sizepop, vardim, bound, MAXGEN, params):
        '''
        sizepop: population size
        vardim: dimension of variables
        bound: boundaries of variables
        MAXGEN: termination condition
        params: algorithm parameters, a list of [crossover rate CR, scaling factor F]
        '''
        self.sizepop = sizepop
        self.MAXGEN = MAXGEN
        self.vardim = vardim
        self.bound = bound
        self.population = []
        self.fitness = np.zeros((self.sizepop, 1))
        self.trace = np.zeros((self.MAXGEN, 2))
        self.params = params

    def initialize(self):
        '''
        initialize the population
        '''
        for i in range(0, self.sizepop):
            ind = DEIndividual(self.vardim, self.bound)
            ind.generate()
            self.population.append(ind)

    def evaluate(self, x):
        '''
        evaluation of the population fitnesses
        '''
        x.calculateFitness()

    def solve(self):
        '''
        evolution process of differential evolution algorithm
        '''
        self.t = 0
        self.initialize()
        for i in range(0, self.sizepop):
            self.evaluate(self.population[i])
            self.fitness[i] = self.population[i].fitness
        best = np.max(self.fitness)
        bestIndex = np.argmax(self.fitness)
        self.best = copy.deepcopy(self.population[bestIndex])
        self.avefitness = np.mean(self.fitness)
        # convert the fitness back to the objective function value for reporting
        self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
        self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
        print("Generation %d: optimal function value is: %f; average function value is %f" % (
            self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
        while (self.t < self.MAXGEN - 1):
            self.t += 1
            for i in range(0, self.sizepop):
                vi = self.mutationOperation(i)
                ui = self.crossoverOperation(i, vi)
                xi_next = self.selectionOperation(i, ui)
                self.population[i] = xi_next
            for i in range(0, self.sizepop):
                self.evaluate(self.population[i])
                self.fitness[i] = self.population[i].fitness
            best = np.max(self.fitness)
            bestIndex = np.argmax(self.fitness)
            if best > self.best.fitness:
                self.best = copy.deepcopy(self.population[bestIndex])
            self.avefitness = np.mean(self.fitness)
            self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
            self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
            print("Generation %d: optimal function value is: %f; average function value is %f" % (
                self.t, self.trace[self.t, 0], self.trace[self.t, 1]))

        print("Optimal function value is: %f; " %
              self.trace[self.t, 0])
        print "Optimal solution is:"
        print self.best.chrom
        self.printResult()

    def selectionOperation(self, i, ui):
        '''
        selection operation for differential evolution algorithm
        '''
        xi_next = copy.deepcopy(self.population[i])
        xi_next.chrom = ui
        self.evaluate(xi_next)
        if xi_next.fitness > self.population[i].fitness:
            return xi_next
        else:
            return self.population[i]

    def crossoverOperation(self, i, vi):
        '''
        crossover operation for differential evolution algorithm
        '''
        k = np.random.randint(0, self.vardim)
        ui = np.zeros(self.vardim)
        for j in range(0, self.vardim):
            pick = random.random()
            if pick < self.params[0] or j == k:
                ui[j] = vi[j]
            else:
                ui[j] = self.population[i].chrom[j]
        return ui

    def mutationOperation(self, i):
        '''
        mutation operation for differential evolution algorithm
        '''
        a = np.random.randint(0, self.sizepop)
        while a == i:
            a = np.random.randint(0, self.sizepop)
        b = np.random.randint(0, self.sizepop)
        while b == i or b == a:
            b = np.random.randint(0, self.sizepop)
        c = np.random.randint(0, self.sizepop)
        while c == i or c == b or c == a:
            c = np.random.randint(0, self.sizepop)
        vi = self.population[c].chrom + self.params[1] * \
            (self.population[a].chrom - self.population[b].chrom)
        for j in range(0, self.vardim):
            if vi[j] < self.bound[0, j]:
                vi[j] = self.bound[0, j]
            if vi[j] > self.bound[1, j]:
                vi[j] = self.bound[1, j]
        return vi

    def printResult(self):
        '''
        plot the result of the differential evolution algorithm
        '''
        x = np.arange(0, self.MAXGEN)
        y1 = self.trace[:, 0]
        y2 = self.trace[:, 1]
        plt.plot(x, y1, 'r', label='optimal value')
        plt.plot(x, y2, 'g', label='average value')
        plt.xlabel("Iteration")
        plt.ylabel("function value")
        plt.title("Differential Evolution Algorithm for function optimization")
        plt.legend()
        plt.show()

Running the code:

import numpy as np
from DE import DifferentialEvolutionAlgorithm

if __name__ == "__main__":

    bound = np.tile([[-600], [600]], 25)
    dea = DifferentialEvolutionAlgorithm(60, 25, bound, 1000, [0.8, 0.6])
    dea.solve()
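
Note that `np.tile([[-600], [600]], 25)` builds a 2 x 25 array whose first row holds the lower bounds and whose second row holds the upper bounds, matching the `bound[0, i]` / `bound[1, i]` indexing in `DEIndividual.generate`. A smaller instance of the same call:

>>> np.tile([[-600], [600]], 3)
array([[-600, -600, -600],
       [ 600,  600,  600]])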

Differential evolution (DE) is an optimization algorithm for function optimization problems. It mimics the process of biological evolution, searching for the optimum through mutation and crossover operations. In Python it can be implemented with the numpy library. Below is a simple Python implementation of differential evolution:

```python
import numpy as np

def differential_evolution(fitness_func, bounds, pop_size=50, F=0.8, CR=0.9, max_iter=100):
    # initialize the population uniformly within the bounds
    pop = np.random.uniform(bounds[:, 0], bounds[:, 1], (pop_size, len(bounds)))
    best_solution = None
    best_fitness = float('inf')

    for i in range(max_iter):
        for j in range(pop_size):
            # pick three distinct individuals (all different from j) for mutation
            candidates = [idx for idx in range(pop_size) if idx != j]
            a, b, c = np.random.choice(candidates, 3, replace=False)

            # mutation
            mutant = pop[a] + F * (pop[b] - pop[c])

            # crossover
            trial = np.copy(pop[j])
            for k in range(len(bounds)):
                if np.random.rand() < CR:
                    trial[k] = mutant[k]

            # selection
            trial_fitness = fitness_func(trial)
            if trial_fitness < best_fitness:
                best_solution = trial
                best_fitness = trial_fitness
            if trial_fitness <= fitness_func(pop[j]):
                pop[j] = trial

    return best_solution, best_fitness

# example fitness function
def fitness_func(x):
    return np.sum(x**2)

# example bounds
bounds = np.array([[-5, 5], [-5, 5]])

# run differential evolution
best_solution, best_fitness = differential_evolution(fitness_func, bounds)
print("Best solution:", best_solution)
print("Best fitness:", best_fitness)
```

In this example we define a fitness function `fitness_func` that computes the fitness value of a solution, and the variable bounds `bounds` that restrict the range of the solution. Finally, we call `differential_evolution` to run the algorithm and print the best solution and its fitness value.
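
One detail worth noting: in the class-based `crossoverOperation` above, a random index `k` is drawn and the trial vector always takes `vi[k]` from the mutant, so the trial is guaranteed to differ from its parent in at least one component. The short example omits that guarantee; a sketch of its crossover loop with the same guarantee added (reusing the example's `bounds`, `CR`, `trial`, and `mutant` names) could look like:

```python
j_rand = np.random.randint(len(bounds))  # component always taken from the mutant
for k in range(len(bounds)):
    if np.random.rand() < CR or k == j_rand:
        trial[k] = mutant[k]
```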