import optimizers from evolopy

This commit is contained in:
parent 05343dad24
commit 4daac79f23

14 changed files with 2243 additions and 0 deletions
evolopy/BAT.py (new file, 110 lines)

# -*- coding: utf-8 -*-
"""
Created on Thu May 26 02:00:55 2016

@author: hossam
"""
import math
import numpy
import random
import time
from solution import solution


def BAT(objf, lb, ub, dim, N, Max_iteration):

    n = N  # Population size

    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim
    N_gen = Max_iteration  # Number of generations

    A = 0.5  # Loudness (constant or decreasing)
    r = 0.5  # Pulse rate (constant or decreasing)

    Qmin = 0  # Frequency minimum
    Qmax = 2  # Frequency maximum

    d = dim  # Number of dimensions

    # Initializing arrays
    Q = numpy.zeros(n)  # Frequency
    v = numpy.zeros((n, d))  # Velocities
    Convergence_curve = []

    # Initialize the population/solutions
    Sol = numpy.zeros((n, d))
    for i in range(dim):
        Sol[:, i] = numpy.random.rand(n) * (ub[i] - lb[i]) + lb[i]

    S = numpy.copy(Sol)
    Fitness = numpy.zeros(n)

    # Initialize the solution object for the final results
    s = solution()
    print('BAT is optimizing "' + objf.__name__ + '"')

    # Initialize timer for the experiment
    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    # Evaluate initial random solutions
    for i in range(0, n):
        Fitness[i] = objf(Sol[i, :])

    # Find the initial best solution and minimum fitness
    I = numpy.argmin(Fitness)
    best = Sol[I, :]
    fmin = min(Fitness)

    # Main loop
    for t in range(0, N_gen):

        # Loop over all bats (solutions)
        for i in range(0, n):
            Q[i] = Qmin + (Qmin - Qmax) * random.random()
            v[i, :] = v[i, :] + (Sol[i, :] - best) * Q[i]
            S[i, :] = Sol[i, :] + v[i, :]

            # Check boundaries of the candidate solution
            for j in range(d):
                S[i, j] = numpy.clip(S[i, j], lb[j], ub[j])

            # Pulse rate
            if random.random() > r:
                S[i, :] = best + 0.001 * numpy.random.randn(d)

            # Evaluate new solutions
            Fnew = objf(S[i, :])

            # Update if the solution improves
            if (Fnew <= Fitness[i]) and (random.random() < A):
                Sol[i, :] = numpy.copy(S[i, :])
                Fitness[i] = Fnew

            # Update the current best solution
            if Fnew <= fmin:
                best = numpy.copy(S[i, :])
                fmin = Fnew

        # Update the convergence curve
        Convergence_curve.append(fmin)

        if t % 1 == 0:
            print("At iteration " + str(t) + " the best fitness is " + str(fmin))

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = Convergence_curve
    s.optimizer = "BAT"
    s.bestIndividual = best
    s.objfname = objf.__name__

    return s

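All optimizers in this commit share the same call signature (objective function, lb, ub, dim, population size, iterations) and return a solution object. A minimal usage sketch, assuming evolopy/ and solution.py are on the Python path; the sphere benchmark below is illustrative and not part of this commit:

import numpy
from BAT import BAT

def sphere(x):
    # illustrative objective, not part of the repository
    return numpy.sum(numpy.square(x))

res = BAT(sphere, lb=-10, ub=10, dim=5, N=30, Max_iteration=200)
print(res.optimizer, res.bestIndividual, res.convergence[-1])
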
evolopy/CS.py (new file, 157 lines)

# -*- coding: utf-8 -*-
"""
Created on Tue May 24 13:13:28 2016

@author: Hossam Faris
"""
import math
import numpy
import random
import time
from solution import solution


def get_cuckoos(nest, best, lb, ub, n, dim):

    # Perform Levy flights
    tempnest = numpy.array(nest)
    beta = 3 / 2
    sigma = (
        math.gamma(1 + beta)
        * math.sin(math.pi * beta / 2)
        / (math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))
    ) ** (1 / beta)

    for j in range(0, n):
        s = nest[j, :]
        u = numpy.random.randn(len(s)) * sigma
        v = numpy.random.randn(len(s))
        step = u / abs(v) ** (1 / beta)

        stepsize = 0.01 * (step * (s - best))

        s = s + stepsize * numpy.random.randn(len(s))

        for k in range(dim):
            tempnest[j, k] = numpy.clip(s[k], lb[k], ub[k])

    return tempnest


def get_best_nest(nest, newnest, fitness, n, dim, objf):
    # Evaluate all new solutions
    tempnest = numpy.copy(nest)

    for j in range(0, n):
        fnew = objf(newnest[j, :])
        if fnew <= fitness[j]:
            fitness[j] = fnew
            tempnest[j, :] = newnest[j, :]

    # Find the current best
    fmin = min(fitness)
    K = numpy.argmin(fitness)
    bestlocal = tempnest[K, :]

    return fmin, bestlocal, tempnest, fitness


# Replace some nests by constructing new solutions/nests
def empty_nests(nest, pa, n, dim):

    # Discovered or not
    K = numpy.random.uniform(0, 1, (n, dim)) > pa

    stepsize = random.random() * (
        nest[numpy.random.permutation(n), :] - nest[numpy.random.permutation(n), :]
    )

    tempnest = nest + stepsize * K

    return tempnest


##########################################################################


def CS(objf, lb, ub, dim, n, N_IterTotal):

    # Discovery rate of alien eggs/solutions
    pa = 0.25

    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    # Initialize nests randomly
    nest = numpy.zeros((n, dim))
    for i in range(dim):
        nest[:, i] = numpy.random.uniform(0, 1, n) * (ub[i] - lb[i]) + lb[i]

    new_nest = numpy.copy(nest)

    bestnest = [0] * dim

    fitness = numpy.zeros(n)
    fitness.fill(float("inf"))

    s = solution()

    print('CS is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    fmin, bestnest, nest, fitness = get_best_nest(nest, new_nest, fitness, n, dim, objf)
    convergence = []
    # Main loop
    for iter in range(0, N_IterTotal):
        # Generate new solutions (but keep the current best)
        new_nest = get_cuckoos(nest, bestnest, lb, ub, n, dim)

        # Evaluate new solutions and find the best
        fnew, best, nest, fitness = get_best_nest(nest, new_nest, fitness, n, dim, objf)

        # Abandon a fraction pa of the nests and build new ones
        new_nest = empty_nests(nest, pa, n, dim)

        # Evaluate new solutions and find the best
        fnew, best, nest, fitness = get_best_nest(nest, new_nest, fitness, n, dim, objf)

        if fnew < fmin:
            fmin = fnew
            bestnest = best

        if iter % 10 == 0:
            print("At iteration " + str(iter) + " the best fitness is " + str(fmin))
        convergence.append(fmin)

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence
    s.optimizer = "CS"
    s.bestIndividual = bestnest
    s.objfname = objf.__name__

    return s

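A small standalone sketch (not part of the commit) of the Mantegna-style Levy step used in get_cuckoos, showing how the sigma term produces a heavy-tailed step distribution; the sample size is illustrative:

import math
import numpy

beta = 1.5
sigma = (
    math.gamma(1 + beta) * math.sin(math.pi * beta / 2)
    / (math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))
) ** (1 / beta)
u = numpy.random.randn(10000) * sigma
v = numpy.random.randn(10000)
steps = u / numpy.abs(v) ** (1 / beta)
# heavy tails: a few steps are far larger than the typical magnitude
print(numpy.median(numpy.abs(steps)), numpy.max(numpy.abs(steps)))
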
evolopy/DE.py (new file, 124 lines)

import random
import numpy
import time
from solution import solution


# Differential Evolution (DE)
# mutation_factor in [0.5, 2]
# crossover_ratio in [0, 1]
def DE(objf, lb, ub, dim, PopSize, iters):

    mutation_factor = 0.5
    crossover_ratio = 0.7
    stopping_func = None

    # convert lb, ub to lists
    if not isinstance(lb, list):
        lb = [lb for _ in range(dim)]
    if not isinstance(ub, list):
        ub = [ub for _ in range(dim)]

    # solution
    s = solution()

    s.best = float("inf")

    # initialize population
    population = []

    population_fitness = numpy.array([float("inf") for _ in range(PopSize)])

    for p in range(PopSize):
        sol = []
        for d in range(dim):
            d_val = random.uniform(lb[d], ub[d])
            sol.append(d_val)

        population.append(sol)

    population = numpy.array(population)

    # calculate fitness for the whole population
    for i in range(PopSize):
        fitness = objf(population[i, :])
        population_fitness[i] = fitness

        # is this the leader?
        if fitness < s.best:
            s.best = fitness
            s.leader_solution = population[i, :]

    convergence_curve = numpy.zeros(iters)
    # start work
    print('DE is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    t = 0
    while t < iters:
        # should we stop?
        if stopping_func is not None and stopping_func(s.best, s.leader_solution, t):
            break

        # loop through the population
        for i in range(PopSize):
            # 1. Mutation

            # select 3 random solutions other than the current solution
            ids_except_current = [_ for _ in range(PopSize) if _ != i]
            id_1, id_2, id_3 = random.sample(ids_except_current, 3)

            mutant_sol = []
            for d in range(dim):
                d_val = population[id_1, d] + mutation_factor * (
                    population[id_2, d] - population[id_3, d]
                )

                # 2. Recombination
                rn = random.uniform(0, 1)
                if rn > crossover_ratio:
                    d_val = population[i, d]

                # add the dimension value to the mutant solution
                mutant_sol.append(d_val)

            # 3. Replacement / Evaluation

            # clip the new solution (mutant)
            mutant_sol = numpy.clip(mutant_sol, lb, ub)

            # calculate fitness
            mutant_fitness = objf(mutant_sol)

            # replace if mutant_fitness is better
            if mutant_fitness < population_fitness[i]:
                population[i, :] = mutant_sol
                population_fitness[i] = mutant_fitness

                # update the leader
                if mutant_fitness < s.best:
                    s.best = mutant_fitness
                    s.leader_solution = mutant_sol

        convergence_curve[t] = s.best
        if t % 1 == 0:
            print("At iteration " + str(t + 1) + " the best fitness is " + str(s.best))

        # increase iterations
        t = t + 1

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence_curve
    s.optimizer = "DE"
    s.bestIndividual = s.leader_solution
    s.objfname = objf.__name__

    # return solution
    return s

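A compact, illustrative sketch (not part of the commit) of the mutation and binomial crossover rule applied per dimension in the loop above, written for one target vector; the vectors and constants are made up:

import numpy

F, CR = 0.5, 0.7
x_target = numpy.array([1.0, 2.0, 3.0])
x_r1 = numpy.array([0.5, 1.5, 2.5])
x_r2 = numpy.array([2.0, 1.0, 0.0])
x_r3 = numpy.array([1.0, 0.5, 2.0])

mutant = x_r1 + F * (x_r2 - x_r3)            # mutation
mask = numpy.random.rand(3) <= CR            # keep mutant gene with probability CR
trial = numpy.where(mask, mutant, x_target)  # otherwise keep the target gene
print(trial)
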
evolopy/FFA.py (new file, 139 lines)

# -*- coding: utf-8 -*-
"""
Created on Sun May 29 00:49:35 2016

@author: hossam
"""

# % ======================================================== %
# % Files of the Matlab programs included in the book:       %
# % Xin-She Yang, Nature-Inspired Metaheuristic Algorithms,  %
# % Second Edition, Luniver Press, (2010). www.luniver.com   %
# % ======================================================== %
# % -------------------------------------------------------- %
# % Firefly Algorithm for constrained optimization,          %
# % used for the design of a spring (benchmark)              %
# % by Xin-She Yang (Cambridge University) Copyright @2009   %
# % -------------------------------------------------------- %

import numpy
import math
import time
from solution import solution


def alpha_new(alpha, NGen):
    # alpha_n = alpha_0 * (1 - delta)^NGen = 10^(-4)
    # alpha_0 = 0.9
    delta = 1 - (10 ** (-4) / 0.9) ** (1 / NGen)
    alpha = (1 - delta) * alpha
    return alpha


def FFA(objf, lb, ub, dim, n, MaxGeneration):

    # FFA parameters
    alpha = 0.5  # Randomness 0--1 (highly random)
    betamin = 0.20  # minimum value of beta
    gamma = 1  # Absorption coefficient
    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    zn = numpy.ones(n)
    zn.fill(float("inf"))

    ns = numpy.zeros((n, dim))
    for i in range(dim):
        ns[:, i] = numpy.random.uniform(0, 1, n) * (ub[i] - lb[i]) + lb[i]
    Lightn = numpy.ones(n)
    Lightn.fill(float("inf"))

    convergence = []
    s = solution()

    print('FFA is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    # Main loop
    for k in range(0, MaxGeneration):  # start iterations

        # This line of reducing alpha is optional
        alpha = alpha_new(alpha, MaxGeneration)

        # Evaluate new solutions (for all n fireflies)
        for i in range(0, n):
            zn[i] = objf(ns[i, :])
            Lightn[i] = zn[i]

        # Rank fireflies by their light intensity/objectives
        Lightn = numpy.sort(zn)
        Index = numpy.argsort(zn)
        ns = ns[Index, :]

        # Find the current best
        nso = ns
        Lighto = Lightn
        nbest = ns[0, :]
        Lightbest = Lightn[0]

        # For output only
        fbest = Lightbest

        # Move all fireflies to the better locations
        scale = []
        for b in range(dim):
            scale.append(abs(ub[b] - lb[b]))
        scale = numpy.array(scale)
        for i in range(0, n):
            # The attractiveness parameter beta = exp(-gamma*r)
            for j in range(0, n):
                r = numpy.sqrt(numpy.sum((ns[i, :] - ns[j, :]) ** 2))
                # Update moves
                if Lightn[i] > Lighto[j]:  # Brighter and more attractive
                    beta0 = 1
                    beta = (beta0 - betamin) * math.exp(-gamma * r ** 2) + betamin
                    tmpf = alpha * (numpy.random.rand(dim) - 0.5) * scale
                    ns[i, :] = ns[i, :] * (1 - beta) + nso[j, :] * beta + tmpf

        convergence.append(fbest)

        BestQuality = fbest

        if k % 1 == 0:
            print("At iteration " + str(k) + " the best fitness is " + str(BestQuality))

    ####################### End main loop
    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence
    s.optimizer = "FFA"
    s.bestIndividual = nbest
    s.objfname = objf.__name__

    return s

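A one-dimension sketch (illustrative values, not from the repository) of the firefly move used above: attractiveness decays with squared distance, and the move blends the two positions plus a scaled random term:

import math
import random

beta0, betamin, gamma, alpha = 1.0, 0.2, 1.0, 0.5
xi, xj, scale = 2.0, 0.0, 10.0            # firefly i moves toward brighter firefly j
r = abs(xi - xj)
beta = (beta0 - betamin) * math.exp(-gamma * r ** 2) + betamin
xi_new = xi * (1 - beta) + xj * beta + alpha * (random.random() - 0.5) * scale
print(round(beta, 4), round(xi_new, 4))
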
evolopy/GA.py (new file, 439 lines)

"""
Created on Sat Feb 24 20:18:05 2019

@author: Raneem
"""
import numpy
import random
import time

from solution import solution


def crossoverPopulaton(population, scores, popSize, crossoverProbability, keep):
    """
    The crossover of all individuals

    Parameters
    ----------
    population : list
        The list of individuals
    scores : list
        The list of fitness values for each individual
    popSize: int
        Number of chromosomes in a population
    crossoverProbability: float
        The probability of crossing a pair of individuals
    keep: int
        Number of best individuals to keep without mutating for the next generation

    Returns
    -------
    list
        newPopulation: the new population after applying the crossover
    """
    # initialize a new population
    newPopulation = numpy.empty_like(population)
    newPopulation[0:keep] = population[0:keep]
    # Create pairs of parents. The number of pairs equals the number of individuals divided by 2
    for i in range(keep, popSize, 2):
        # pair of parents selection
        parent1, parent2 = pairSelection(population, scores, popSize)
        crossoverLength = min(len(parent1), len(parent2))
        parentsCrossoverProbability = random.uniform(0.0, 1.0)
        if parentsCrossoverProbability < crossoverProbability:
            offspring1, offspring2 = crossover(crossoverLength, parent1, parent2)
        else:
            offspring1 = parent1.copy()
            offspring2 = parent2.copy()

        # Add the offspring to the new population
        newPopulation[i] = numpy.copy(offspring1)
        newPopulation[i + 1] = numpy.copy(offspring2)

    return newPopulation


def mutatePopulaton(population, popSize, mutationProbability, keep, lb, ub):
    """
    The mutation of all individuals

    Parameters
    ----------
    population : list
        The list of individuals
    popSize: int
        Number of chromosomes in a population
    mutationProbability: float
        The probability of mutating an individual
    keep: int
        Number of best individuals to keep without mutating for the next generation
    lb: list
        lower bound limit list
    ub: list
        upper bound limit list

    Returns
    -------
    N/A
    """
    for i in range(keep, popSize):
        # Mutation
        offspringMutationProbability = random.uniform(0.0, 1.0)
        if offspringMutationProbability < mutationProbability:
            mutation(population[i], len(population[i]), lb, ub)


def elitism(population, scores, bestIndividual, bestScore):
    """
    The elitism operator of the population

    Parameters
    ----------
    population : list
        The list of individuals
    scores : list
        The list of fitness values for each individual
    bestIndividual : list
        An individual of the previous generation having the best fitness value
    bestScore : float
        The best fitness value of the previous generation

    Returns
    -------
    N/A
    """

    # get the worst individual
    worstFitnessId = selectWorstIndividual(scores)

    # replace the worst chromosome with the best one from the previous generation if its fitness is lower
    if scores[worstFitnessId] > bestScore:
        population[worstFitnessId] = numpy.copy(bestIndividual)
        scores[worstFitnessId] = numpy.copy(bestScore)


def selectWorstIndividual(scores):
    """
    It is used to get the worst individual in a population based on the fitness value

    Parameters
    ----------
    scores : list
        The list of fitness values for each individual

    Returns
    -------
    int
        maxFitnessId: The individual id of the worst fitness value
    """

    maxFitnessId = numpy.where(scores == numpy.max(scores))
    maxFitnessId = maxFitnessId[0][0]
    return maxFitnessId


def pairSelection(population, scores, popSize):
    """
    This is used to select one pair of parents using a roulette wheel selection mechanism

    Parameters
    ----------
    population : list
        The list of individuals
    scores : list
        The list of fitness values for each individual
    popSize: int
        Number of chromosomes in a population

    Returns
    -------
    list
        parent1: The first parent individual of the pair
    list
        parent2: The second parent individual of the pair
    """
    parent1Id = rouletteWheelSelectionId(scores, popSize)
    parent1 = population[parent1Id].copy()

    parent2Id = rouletteWheelSelectionId(scores, popSize)
    parent2 = population[parent2Id].copy()

    return parent1, parent2


def rouletteWheelSelectionId(scores, popSize):
    """
    A roulette wheel selection mechanism for selecting an individual

    Parameters
    ----------
    scores : list
        The list of fitness values for each individual
    popSize: int
        Number of chromosomes in a population

    Returns
    -------
    id
        individualId: The id of the individual selected
    """

    # reverse the scores because the minimum value should have a higher chance of selection
    reverse = max(scores) + min(scores)
    reverseScores = reverse - scores.copy()
    sumScores = sum(reverseScores)
    pick = random.uniform(0, sumScores)
    current = 0
    for individualId in range(popSize):
        current += reverseScores[individualId]
        if current > pick:
            return individualId
    # fallback for floating-point edge cases
    return popSize - 1


def crossover(individualLength, parent1, parent2):
    """
    The crossover operator of two individuals

    Parameters
    ----------
    individualLength: int
        The maximum index of the crossover
    parent1 : list
        The first parent individual of the pair
    parent2 : list
        The second parent individual of the pair

    Returns
    -------
    list
        offspring1: The first updated parent individual of the pair
    list
        offspring2: The second updated parent individual of the pair
    """

    # The point at which crossover takes place between the two parents.
    crossover_point = random.randint(0, individualLength - 1)
    # The first offspring takes its first genes from the first parent and the rest from the second parent.
    offspring1 = numpy.concatenate(
        [parent1[0:crossover_point], parent2[crossover_point:]]
    )
    # The second offspring takes its first genes from the second parent and the rest from the first parent.
    offspring2 = numpy.concatenate(
        [parent2[0:crossover_point], parent1[crossover_point:]]
    )

    return offspring1, offspring2


def mutation(offspring, individualLength, lb, ub):
    """
    The mutation operator of a single individual

    Parameters
    ----------
    offspring : list
        A generated individual after the crossover
    individualLength: int
        The maximum index of the crossover
    lb: list
        lower bound limit list
    ub: list
        upper bound limit list

    Returns
    -------
    N/A
    """
    mutationIndex = random.randint(0, individualLength - 1)
    mutationValue = random.uniform(lb[mutationIndex], ub[mutationIndex])
    offspring[mutationIndex] = mutationValue


def clearDups(Population, lb, ub):
    """
    It removes duplicate individuals and replaces them with random ones

    Parameters
    ----------
    Population : list
        The list of individuals
    lb: list
        lower bound limit list
    ub: list
        upper bound limit list

    Returns
    -------
    list
        newPopulation: the updated list of individuals
    """
    newPopulation = numpy.unique(Population, axis=0)
    oldLen = len(Population)
    newLen = len(newPopulation)
    if newLen < oldLen:
        nDuplicates = oldLen - newLen
        newPopulation = numpy.append(
            newPopulation,
            numpy.random.uniform(0, 1, (nDuplicates, len(Population[0])))
            * (numpy.array(ub) - numpy.array(lb))
            + numpy.array(lb),
            axis=0,
        )

    return newPopulation


def calculateCost(objf, population, popSize, lb, ub):
    """
    It calculates the fitness value of each individual in the population

    Parameters
    ----------
    objf : function
        The objective function selected
    population : list
        The list of individuals
    popSize: int
        Number of chromosomes in a population
    lb: list
        lower bound limit list
    ub: list
        upper bound limit list

    Returns
    -------
    list
        scores: fitness values of all individuals in the population
    """
    scores = numpy.full(popSize, numpy.inf)

    # Loop through individuals in the population
    for i in range(0, popSize):
        # Return back the search agents that go beyond the boundaries of the search space
        population[i] = numpy.clip(population[i], lb, ub)

        # Calculate objective function for each search agent
        scores[i] = objf(population[i, :])

    return scores


def sortPopulation(population, scores):
    """
    This is used to sort the population according to the fitness values of the individuals

    Parameters
    ----------
    population : list
        The list of individuals
    scores : list
        The list of fitness values for each individual

    Returns
    -------
    list
        population: The new sorted list of individuals
    list
        scores: The new sorted list of fitness values of the individuals
    """
    sortedIndices = scores.argsort()
    population = population[sortedIndices]
    scores = scores[sortedIndices]

    return population, scores


def GA(objf, lb, ub, dim, popSize, iters):
    """
    This is the main method which implements GA

    Parameters
    ----------
    objf : function
        The objective function selected
    lb: list
        lower bound limit list
    ub: list
        upper bound limit list
    dim: int
        The dimension of the individual
    popSize: int
        Number of chromosomes in a population
    iters: int
        Number of iterations / generations of GA

    Returns
    -------
    obj
        s: The solution obtained from running the algorithm
    """

    cp = 1  # crossover probability
    mp = 0.01  # mutation probability
    keep = 2
    # elitism parameter: how many of the best individuals to keep from one generation to the next

    s = solution()

    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    bestIndividual = numpy.zeros(dim)
    scores = numpy.random.uniform(0.0, 1.0, popSize)
    bestScore = float("inf")

    ga = numpy.zeros((popSize, dim))
    for i in range(dim):
        ga[:, i] = numpy.random.uniform(0, 1, popSize) * (ub[i] - lb[i]) + lb[i]
    convergence_curve = numpy.zeros(iters)

    print('GA is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    for l in range(iters):

        # crossover
        ga = crossoverPopulaton(ga, scores, popSize, cp, keep)

        # mutation
        mutatePopulaton(ga, popSize, mp, keep, lb, ub)

        ga = clearDups(ga, lb, ub)

        scores = calculateCost(objf, ga, popSize, lb, ub)

        bestScore = min(scores)

        # Sort from best to worst
        ga, scores = sortPopulation(ga, scores)

        # Track the best individual of this generation
        bestIndividual = numpy.copy(ga[0])

        convergence_curve[l] = bestScore

        if l % 1 == 0:
            print("At iteration " + str(l + 1) + " the best fitness is " + str(bestScore))

    timerEnd = time.time()
    s.bestIndividual = bestIndividual
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence_curve
    s.optimizer = "GA"
    s.objfname = objf.__name__

    return s

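A short illustrative check (not part of the repository) of the inverted-score weighting that rouletteWheelSelectionId implements for a minimization problem, here approximated with random.choices so the selection counts are easy to inspect; all numbers are made up:

import random
import numpy

scores = numpy.array([1.0, 5.0, 10.0])      # lower is better
reverse = max(scores) + min(scores)
weights = list(reverse - scores)            # [10.0, 6.0, 1.0]
picks = random.choices(range(len(scores)), weights=weights, k=10000)
print([picks.count(i) for i in range(3)])   # index 0 is selected most often
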
evolopy/GWO.py (new file, 145 lines)

# -*- coding: utf-8 -*-
"""
Created on Mon May 16 00:27:50 2016

@author: Hossam Faris
"""

import random
import numpy
import math
from solution import solution
import time


def GWO(objf, lb, ub, dim, SearchAgents_no, Max_iter):

    # initialize alpha, beta, and delta positions
    Alpha_pos = numpy.zeros(dim)
    Alpha_score = float("inf")

    Beta_pos = numpy.zeros(dim)
    Beta_score = float("inf")

    Delta_pos = numpy.zeros(dim)
    Delta_score = float("inf")

    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    # Initialize the positions of search agents
    Positions = numpy.zeros((SearchAgents_no, dim))
    for i in range(dim):
        Positions[:, i] = (
            numpy.random.uniform(0, 1, SearchAgents_no) * (ub[i] - lb[i]) + lb[i]
        )

    Convergence_curve = numpy.zeros(Max_iter)
    s = solution()

    # Loop counter
    print('GWO is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    # Main loop
    for l in range(0, Max_iter):
        for i in range(0, SearchAgents_no):

            # Return back the search agents that go beyond the boundaries of the search space
            for j in range(dim):
                Positions[i, j] = numpy.clip(Positions[i, j], lb[j], ub[j])

            # Calculate objective function for each search agent
            fitness = objf(Positions[i, :])

            # Update Alpha, Beta, and Delta
            if fitness < Alpha_score:
                Delta_score = Beta_score  # Update delta
                Delta_pos = Beta_pos.copy()
                Beta_score = Alpha_score  # Update beta
                Beta_pos = Alpha_pos.copy()
                Alpha_score = fitness  # Update alpha
                Alpha_pos = Positions[i, :].copy()

            if fitness > Alpha_score and fitness < Beta_score:
                Delta_score = Beta_score  # Update delta
                Delta_pos = Beta_pos.copy()
                Beta_score = fitness  # Update beta
                Beta_pos = Positions[i, :].copy()

            if fitness > Alpha_score and fitness > Beta_score and fitness < Delta_score:
                Delta_score = fitness  # Update delta
                Delta_pos = Positions[i, :].copy()

        a = 2 - l * ((2) / Max_iter)
        # a decreases linearly from 2 to 0

        # Update the position of the search agents, including the omegas
        for i in range(0, SearchAgents_no):
            for j in range(0, dim):

                r1 = random.random()  # r1 is a random number in [0,1]
                r2 = random.random()  # r2 is a random number in [0,1]

                A1 = 2 * a * r1 - a  # Equation (3.3)
                C1 = 2 * r2  # Equation (3.4)

                D_alpha = abs(C1 * Alpha_pos[j] - Positions[i, j])  # Equation (3.5)-part 1
                X1 = Alpha_pos[j] - A1 * D_alpha  # Equation (3.6)-part 1

                r1 = random.random()
                r2 = random.random()

                A2 = 2 * a * r1 - a  # Equation (3.3)
                C2 = 2 * r2  # Equation (3.4)

                D_beta = abs(C2 * Beta_pos[j] - Positions[i, j])  # Equation (3.5)-part 2
                X2 = Beta_pos[j] - A2 * D_beta  # Equation (3.6)-part 2

                r1 = random.random()
                r2 = random.random()

                A3 = 2 * a * r1 - a  # Equation (3.3)
                C3 = 2 * r2  # Equation (3.4)

                D_delta = abs(C3 * Delta_pos[j] - Positions[i, j])  # Equation (3.5)-part 3
                X3 = Delta_pos[j] - A3 * D_delta  # Equation (3.6)-part 3

                Positions[i, j] = (X1 + X2 + X3) / 3  # Equation (3.7)

        Convergence_curve[l] = Alpha_score

        if l % 1 == 0:
            print("At iteration " + str(l) + " the best fitness is " + str(Alpha_score))

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = Convergence_curve
    s.optimizer = "GWO"
    s.bestIndividual = Alpha_pos
    s.objfname = objf.__name__

    return s

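A one-dimension sketch (illustrative values only) of Equations (3.3)-(3.7) used above: each wolf moves toward the average of three positions estimated from the alpha, beta, and delta leaders:

import random

def gwo_step(x, alpha, beta, delta, a):
    X = []
    for leader in (alpha, beta, delta):
        r1, r2 = random.random(), random.random()
        A, C = 2 * a * r1 - a, 2 * r2       # Eq. (3.3), (3.4)
        D = abs(C * leader - x)             # Eq. (3.5)
        X.append(leader - A * D)            # Eq. (3.6)
    return sum(X) / 3                       # Eq. (3.7)

print(gwo_step(x=4.0, alpha=0.0, beta=0.5, delta=-0.5, a=1.0))
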
evolopy/HHO.py (new file, 211 lines)

# -*- coding: utf-8 -*-
"""
Created on Thursday March 21 2019

@author:
% _____________________________________________________
% Main paper:
% Harris hawks optimization: Algorithm and applications
% Ali Asghar Heidari, Seyedali Mirjalili, Hossam Faris, Ibrahim Aljarah, Majdi Mafarja, Huiling Chen
% Future Generation Computer Systems,
% DOI: https://doi.org/10.1016/j.future.2019.02.028
% _____________________________________________________

"""
import random
import numpy
import math
from solution import solution
import time


def HHO(objf, lb, ub, dim, SearchAgents_no, Max_iter):

    # initialize the location and energy of the rabbit
    Rabbit_Location = numpy.zeros(dim)
    Rabbit_Energy = float("inf")  # change this to -inf for maximization problems

    if not isinstance(lb, list):
        lb = [lb for _ in range(dim)]
    if not isinstance(ub, list):
        ub = [ub for _ in range(dim)]
    lb = numpy.asarray(lb)
    ub = numpy.asarray(ub)

    # Initialize the locations of Harris' hawks
    X = numpy.asarray(
        [x * (ub - lb) + lb for x in numpy.random.uniform(0, 1, (SearchAgents_no, dim))]
    )

    # Initialize convergence
    convergence_curve = numpy.zeros(Max_iter)

    ############################
    s = solution()

    print('HHO is now tackling "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    ############################

    t = 0  # Loop counter

    # Main loop
    while t < Max_iter:
        for i in range(0, SearchAgents_no):

            # Check boundaries
            X[i, :] = numpy.clip(X[i, :], lb, ub)

            # fitness of locations
            fitness = objf(X[i, :])

            # Update the location of the rabbit
            if fitness < Rabbit_Energy:  # Change this to > for maximization problems
                Rabbit_Energy = fitness
                Rabbit_Location = X[i, :].copy()

        E1 = 2 * (1 - (t / Max_iter))  # factor reflecting the decreasing energy of the rabbit

        # Update the location of Harris' hawks
        for i in range(0, SearchAgents_no):

            E0 = 2 * random.random() - 1  # -1 < E0 < 1
            Escaping_Energy = E1 * E0  # escaping energy of the rabbit, Eq. (3) in the paper

            # -------- Exploration phase, Eq. (1) in the paper -------------------

            if abs(Escaping_Energy) >= 1:
                # Harris' hawks perch randomly based on two strategies:
                q = random.random()
                rand_Hawk_index = math.floor(SearchAgents_no * random.random())
                X_rand = X[rand_Hawk_index, :]
                if q < 0.5:
                    # perch based on other family members
                    X[i, :] = X_rand - random.random() * abs(
                        X_rand - 2 * random.random() * X[i, :]
                    )

                elif q >= 0.5:
                    # perch on a random tall tree (random site inside the group's home range)
                    X[i, :] = (Rabbit_Location - X.mean(0)) - random.random() * (
                        (ub - lb) * random.random() + lb
                    )

            # -------- Exploitation phase -------------------
            elif abs(Escaping_Energy) < 1:
                # Attacking the rabbit using 4 strategies regarding the behavior of the rabbit

                # phase 1: ----- surprise pounce (seven kills) ----------
                # surprise pounce (seven kills): multiple, short rapid dives by different hawks

                r = random.random()  # probability of each event

                if r >= 0.5 and abs(Escaping_Energy) < 0.5:  # Hard besiege, Eq. (6) in the paper
                    X[i, :] = (Rabbit_Location) - Escaping_Energy * abs(
                        Rabbit_Location - X[i, :]
                    )

                if r >= 0.5 and abs(Escaping_Energy) >= 0.5:  # Soft besiege, Eq. (4) in the paper
                    Jump_strength = 2 * (1 - random.random())  # random jump strength of the rabbit
                    X[i, :] = (Rabbit_Location - X[i, :]) - Escaping_Energy * abs(
                        Jump_strength * Rabbit_Location - X[i, :]
                    )

                # phase 2: -------- performing team rapid dives (leapfrog movements) ----------

                if r < 0.5 and abs(Escaping_Energy) >= 0.5:  # Soft besiege, Eq. (10) in the paper
                    # the rabbit tries to escape with many zigzag deceptive motions
                    Jump_strength = 2 * (1 - random.random())
                    X1 = Rabbit_Location - Escaping_Energy * abs(
                        Jump_strength * Rabbit_Location - X[i, :]
                    )
                    X1 = numpy.clip(X1, lb, ub)

                    if objf(X1) < fitness:  # improved move?
                        X[i, :] = X1.copy()
                    else:  # hawks perform Levy-based short rapid dives around the rabbit
                        X2 = (
                            Rabbit_Location
                            - Escaping_Energy
                            * abs(Jump_strength * Rabbit_Location - X[i, :])
                            + numpy.multiply(numpy.random.randn(dim), Levy(dim))
                        )
                        X2 = numpy.clip(X2, lb, ub)
                        if objf(X2) < fitness:
                            X[i, :] = X2.copy()
                if r < 0.5 and abs(Escaping_Energy) < 0.5:  # Hard besiege, Eq. (11) in the paper
                    Jump_strength = 2 * (1 - random.random())
                    X1 = Rabbit_Location - Escaping_Energy * abs(
                        Jump_strength * Rabbit_Location - X.mean(0)
                    )
                    X1 = numpy.clip(X1, lb, ub)

                    if objf(X1) < fitness:  # improved move?
                        X[i, :] = X1.copy()
                    else:  # perform Levy-based short rapid dives around the rabbit
                        X2 = (
                            Rabbit_Location
                            - Escaping_Energy
                            * abs(Jump_strength * Rabbit_Location - X.mean(0))
                            + numpy.multiply(numpy.random.randn(dim), Levy(dim))
                        )
                        X2 = numpy.clip(X2, lb, ub)
                        if objf(X2) < fitness:
                            X[i, :] = X2.copy()

        convergence_curve[t] = Rabbit_Energy
        if t % 1 == 0:
            print("At iteration " + str(t) + " the best fitness is " + str(Rabbit_Energy))
        t = t + 1

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence_curve
    s.optimizer = "HHO"
    s.objfname = objf.__name__
    s.best = Rabbit_Energy
    s.bestIndividual = Rabbit_Location

    return s


def Levy(dim):
    beta = 1.5
    sigma = (
        math.gamma(1 + beta)
        * math.sin(math.pi * beta / 2)
        / (math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))
    ) ** (1 / beta)
    u = 0.01 * numpy.random.randn(dim) * sigma
    v = numpy.random.randn(dim)
    zz = numpy.power(numpy.absolute(v), (1 / beta))
    step = numpy.divide(u, zz)
    return step

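A small sketch (illustrative only) of the escaping-energy schedule that drives the exploration/exploitation switch in HHO: E = E1 * E0 with E1 decaying from 2 to 0; |E| >= 1 triggers exploration and |E| < 1 exploitation:

import random

Max_iter = 5
for t in range(Max_iter):
    E1 = 2 * (1 - t / Max_iter)
    E0 = 2 * random.random() - 1
    E = E1 * E0
    phase = "exploration" if abs(E) >= 1 else "exploitation"
    print(t, round(E, 3), phase)
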
evolopy/JAYA.py (new file, 118 lines)

""" JAYA Algorithm """

import random
import numpy
import math
from solution import solution
import time


def JAYA(objf, lb, ub, dim, SearchAgents_no, Max_iter):

    # Best and worst position initialization
    Best_pos = numpy.zeros(dim)
    Best_score = float("inf")

    Worst_pos = numpy.zeros(dim)
    Worst_score = float(0)

    fitness_matrix = numpy.zeros((SearchAgents_no))

    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    # Initialize the positions of search agents
    Positions = numpy.zeros((SearchAgents_no, dim))
    for i in range(dim):
        Positions[:, i] = (
            numpy.random.uniform(0, 1, SearchAgents_no) * (ub[i] - lb[i]) + lb[i]
        )

    for i in range(0, SearchAgents_no):

        # Return back the search agents that go beyond the boundaries of the search space
        for j in range(dim):
            Positions[i, j] = numpy.clip(Positions[i, j], lb[j], ub[j])

        # Calculate objective function for each search agent
        fitness = objf(Positions[i])
        fitness_matrix[i] = fitness

        if fitness < Best_score:
            Best_score = fitness  # Update Best_score
            Best_pos = Positions[i]

        if fitness > Worst_score:
            Worst_score = fitness  # Update Worst_score
            Worst_pos = Positions[i]

    Convergence_curve = numpy.zeros(Max_iter)
    s = solution()

    # Loop counter
    print('JAYA is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    # Main loop
    for l in range(0, Max_iter):

        # Update the position of search agents
        for i in range(0, SearchAgents_no):
            New_Position = numpy.zeros(dim)
            for j in range(0, dim):

                # Update r1, r2
                r1 = random.random()
                r2 = random.random()

                # JAYA equation
                New_Position[j] = (
                    Positions[i][j]
                    + r1 * (Best_pos[j] - abs(Positions[i, j]))
                    - r2 * (Worst_pos[j] - abs(Positions[i, j]))
                )

                # check that New_Position[j] lies in the search space
                if New_Position[j] > ub[j]:
                    New_Position[j] = ub[j]
                if New_Position[j] < lb[j]:
                    New_Position[j] = lb[j]

            new_fitness = objf(New_Position)
            current_fit = fitness_matrix[i]

            # replace the current element with the new one if it has better fitness
            if new_fitness < current_fit:
                Positions[i] = New_Position
                fitness_matrix[i] = new_fitness

        # find the best and worst elements
        for i in range(SearchAgents_no):
            if fitness_matrix[i] < Best_score:
                Best_score = fitness_matrix[i]
                Best_pos = Positions[i, :].copy()

            if fitness_matrix[i] > Worst_score:
                Worst_score = fitness_matrix[i]
                Worst_pos = Positions[i, :].copy()

        Convergence_curve[l] = Best_score

        if l % 1 == 0:
            print("At iteration " + str(l) + " the best fitness is " + str(Best_score))

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = Convergence_curve
    s.optimizer = "JAYA"
    s.bestIndividual = Best_pos
    s.objfname = objf.__name__

    return s

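The per-dimension JAYA update on its own (illustrative numbers, not from the repository): move toward the best solution and away from the worst, using the absolute value of the current coordinate exactly as in the code above:

import random

x, best, worst = 2.0, 0.5, 9.0
r1, r2 = random.random(), random.random()
x_new = x + r1 * (best - abs(x)) - r2 * (worst - abs(x))   # JAYA equation
print(round(x_new, 3))
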
evolopy/MFO.py (new file, 179 lines)

# -*- coding: utf-8 -*-
"""
Created on Mon May 16 10:42:18 2016

@author: hossam
"""

import random
import numpy
import math
from solution import solution
import time


def MFO(objf, lb, ub, dim, N, Max_iteration):

    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    # Initialize the positions of moths
    Moth_pos = numpy.zeros((N, dim))
    for i in range(dim):
        Moth_pos[:, i] = numpy.random.uniform(0, 1, N) * (ub[i] - lb[i]) + lb[i]
    Moth_fitness = numpy.full(N, float("inf"))

    Convergence_curve = numpy.zeros(Max_iteration)

    sorted_population = numpy.copy(Moth_pos)
    fitness_sorted = numpy.zeros(N)

    best_flames = numpy.copy(Moth_pos)
    best_flame_fitness = numpy.zeros(N)

    double_population = numpy.zeros((2 * N, dim))
    double_fitness = numpy.zeros(2 * N)

    double_sorted_population = numpy.zeros((2 * N, dim))
    double_fitness_sorted = numpy.zeros(2 * N)

    previous_population = numpy.zeros((N, dim))
    previous_fitness = numpy.zeros(N)

    s = solution()

    print('MFO is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    Iteration = 1

    # Main loop
    while Iteration < Max_iteration:

        # Number of flames, Eq. (3.14) in the paper
        Flame_no = round(N - Iteration * ((N - 1) / Max_iteration))

        for i in range(0, N):

            # Check if moths go out of the search space and bring them back
            for j in range(dim):
                Moth_pos[i, j] = numpy.clip(Moth_pos[i, j], lb[j], ub[j])

            # evaluate the moths
            Moth_fitness[i] = objf(Moth_pos[i, :])

        if Iteration == 1:
            # Sort the first population of moths
            fitness_sorted = numpy.sort(Moth_fitness)
            I = numpy.argsort(Moth_fitness)

            sorted_population = Moth_pos[I, :]

            # Update the flames
            best_flames = sorted_population
            best_flame_fitness = fitness_sorted
        else:
            # Sort the moths together with the previous best flames
            double_population = numpy.concatenate(
                (previous_population, best_flames), axis=0
            )
            double_fitness = numpy.concatenate(
                (previous_fitness, best_flame_fitness), axis=0
            )

            double_fitness_sorted = numpy.sort(double_fitness)
            I2 = numpy.argsort(double_fitness)

            for newindex in range(0, 2 * N):
                double_sorted_population[newindex, :] = numpy.array(
                    double_population[I2[newindex], :]
                )

            fitness_sorted = double_fitness_sorted[0:N]
            sorted_population = double_sorted_population[0:N, :]

            # Update the flames
            best_flames = sorted_population
            best_flame_fitness = fitness_sorted

        # Update the position of the best flame obtained so far
        Best_flame_score = fitness_sorted[0]
        Best_flame_pos = sorted_population[0, :]

        previous_population = Moth_pos
        previous_fitness = Moth_fitness

        # a decreases linearly from -1 to -2 to calculate t in Eq. (3.12)
        a = -1 + Iteration * ((-1) / Max_iteration)

        for i in range(0, N):
            for j in range(0, dim):
                if i <= Flame_no:
                    # Update the position of the moth with respect to its corresponding flame

                    # D in Eq. (3.13)
                    distance_to_flame = abs(sorted_population[i, j] - Moth_pos[i, j])
                    b = 1
                    t = (a - 1) * random.random() + 1

                    # Eq. (3.12)
                    Moth_pos[i, j] = (
                        distance_to_flame * math.exp(b * t) * math.cos(t * 2 * math.pi)
                        + sorted_population[i, j]
                    )

                if i > Flame_no:
                    # Update the position of the moth with respect to one flame

                    # Eq. (3.13)
                    distance_to_flame = abs(sorted_population[i, j] - Moth_pos[i, j])
                    b = 1
                    t = (a - 1) * random.random() + 1

                    # Eq. (3.12)
                    Moth_pos[i, j] = (
                        distance_to_flame * math.exp(b * t) * math.cos(t * 2 * math.pi)
                        + sorted_population[Flame_no, j]
                    )

        Convergence_curve[Iteration] = Best_flame_score
        # Display best fitness along the iterations
        if Iteration % 1 == 0:
            print("At iteration " + str(Iteration) + " the best fitness is " + str(Best_flame_score))

        Iteration = Iteration + 1

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = Convergence_curve
    s.optimizer = "MFO"
    s.bestIndividual = Best_flame_pos
    s.objfname = objf.__name__

    return s

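The logarithmic-spiral update of Eq. (3.12)/(3.13) in isolation, for one moth/flame pair in one dimension; the values are illustrative and not taken from the repository:

import math
import random

moth, flame, b = 4.0, 1.0, 1
a = -1.5                                    # 'a' decreases from -1 toward -2
t = (a - 1) * random.random() + 1           # t drawn from (a, 1]
D = abs(flame - moth)                       # Eq. (3.13)
new_pos = D * math.exp(b * t) * math.cos(2 * math.pi * t) + flame   # Eq. (3.12)
print(round(t, 3), round(new_pos, 3))
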
evolopy/MVO.py (new file, 168 lines)

# -*- coding: utf-8 -*-
"""
Created on Wed May 11 17:06:34 2016

@author: hossam
"""
import random
import numpy
import time
import math
from numpy import asarray
from sklearn.preprocessing import normalize
from solution import solution


def normr(Mat):
    """normalize the rows of the matrix:
    B = normr(A) scales each row of A to unit L2 norm;
    A is cast to float if needed"""
    Mat = Mat.reshape(1, -1)
    # Enforce dtype float
    if Mat.dtype != "float":
        Mat = asarray(Mat, dtype=float)

    B = normalize(Mat, norm="l2", axis=1)
    B = numpy.reshape(B, -1)
    return B


def randk(t):
    if (t % 2) == 0:
        s = 0.25
    else:
        s = 0.75
    return s


def RouletteWheelSelection(weights):
    accumulation = numpy.cumsum(weights)
    p = random.random() * accumulation[-1]
    chosen_index = -1
    for index in range(0, len(accumulation)):
        if accumulation[index] > p:
            chosen_index = index
            break

    choice = chosen_index

    return choice


def MVO(objf, lb, ub, dim, N, Max_time):

    "parameters"
    WEP_Max = 1
    WEP_Min = 0.2
    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    Universes = numpy.zeros((N, dim))
    for i in range(dim):
        Universes[:, i] = numpy.random.uniform(0, 1, N) * (ub[i] - lb[i]) + lb[i]

    Sorted_universes = numpy.copy(Universes)

    convergence = numpy.zeros(Max_time)

    Best_universe = [0] * dim
    Best_universe_Inflation_rate = float("inf")

    s = solution()

    Time = 1
    ############################################
    print('MVO is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    while Time < Max_time + 1:

        # Eq. (3.3) in the paper
        WEP = WEP_Min + Time * ((WEP_Max - WEP_Min) / Max_time)

        TDR = 1 - (math.pow(Time, 1 / 6) / math.pow(Max_time, 1 / 6))

        Inflation_rates = [0] * len(Universes)

        for i in range(0, N):
            for j in range(dim):
                Universes[i, j] = numpy.clip(Universes[i, j], lb[j], ub[j])

            Inflation_rates[i] = objf(Universes[i, :])

            if Inflation_rates[i] < Best_universe_Inflation_rate:

                Best_universe_Inflation_rate = Inflation_rates[i]
                Best_universe = numpy.array(Universes[i, :])

        sorted_Inflation_rates = numpy.sort(Inflation_rates)
        sorted_indexes = numpy.argsort(Inflation_rates)

        for newindex in range(0, N):
            Sorted_universes[newindex, :] = numpy.array(
                Universes[sorted_indexes[newindex], :]
            )

        normalized_sorted_Inflation_rates = numpy.copy(normr(sorted_Inflation_rates))

        Universes[0, :] = numpy.array(Sorted_universes[0, :])

        for i in range(1, N):
            Black_hole_index = i
            for j in range(0, dim):
                r1 = random.random()

                if r1 < normalized_sorted_Inflation_rates[i]:
                    White_hole_index = RouletteWheelSelection(-sorted_Inflation_rates)

                    if White_hole_index == -1:
                        White_hole_index = 0
                    Universes[Black_hole_index, j] = Sorted_universes[
                        White_hole_index, j
                    ]

                r2 = random.random()

                if r2 < WEP:
                    r3 = random.random()
                    if r3 < 0.5:
                        Universes[i, j] = Best_universe[j] + TDR * (
                            (ub[j] - lb[j]) * random.random() + lb[j]
                        )
                    if r3 > 0.5:
                        Universes[i, j] = Best_universe[j] - TDR * (
                            (ub[j] - lb[j]) * random.random() + lb[j]
                        )

        convergence[Time - 1] = Best_universe_Inflation_rate
        if Time % 1 == 0:
            print("At iteration " + str(Time) + " the best fitness is " + str(Best_universe_Inflation_rate))

        Time = Time + 1
    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence
    s.optimizer = "MVO"
    s.bestIndividual = Best_universe
    s.objfname = objf.__name__

    return s

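The two MVO control schedules used above shown on their own: WEP grows linearly from WEP_Min to WEP_Max while TDR shrinks with the 1/6 power law. The values below are illustrative:

import math

WEP_Min, WEP_Max, Max_time = 0.2, 1.0, 5
for Time in range(1, Max_time + 1):
    WEP = WEP_Min + Time * ((WEP_Max - WEP_Min) / Max_time)
    TDR = 1 - (math.pow(Time, 1 / 6) / math.pow(Max_time, 1 / 6))
    print(Time, round(WEP, 3), round(TDR, 3))
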
evolopy/PSO.py (new file, 103 lines)

# -*- coding: utf-8 -*-
"""
Created on Sun May 15 22:37:00 2016

@author: Hossam Faris
"""

import random
import numpy
from solution import solution
import time


def PSO(objf, lb, ub, dim, PopSize, iters):

    # PSO parameters
    Vmax = 6
    wMax = 0.9
    wMin = 0.2
    c1 = 2
    c2 = 2

    s = solution()
    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    ######################## Initializations

    vel = numpy.zeros((PopSize, dim))

    pBestScore = numpy.zeros(PopSize)
    pBestScore.fill(float("inf"))

    pBest = numpy.zeros((PopSize, dim))
    gBest = numpy.zeros(dim)

    gBestScore = float("inf")

    pos = numpy.zeros((PopSize, dim))
    for i in range(dim):
        pos[:, i] = numpy.random.uniform(0, 1, PopSize) * (ub[i] - lb[i]) + lb[i]

    convergence_curve = numpy.zeros(iters)

    ############################################
    print('PSO is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    for l in range(0, iters):
        for i in range(0, PopSize):
            # Return particles that go beyond the boundaries of the search space
            for j in range(dim):
                pos[i, j] = numpy.clip(pos[i, j], lb[j], ub[j])
            # Calculate objective function for each particle
            fitness = objf(pos[i, :])

            if pBestScore[i] > fitness:
                pBestScore[i] = fitness
                pBest[i, :] = pos[i, :].copy()

            if gBestScore > fitness:
                gBestScore = fitness
                gBest = pos[i, :].copy()

        # Update the inertia weight w of PSO
        w = wMax - l * ((wMax - wMin) / iters)

        for i in range(0, PopSize):
            for j in range(0, dim):
                r1 = random.random()
                r2 = random.random()
                vel[i, j] = (
                    w * vel[i, j]
                    + c1 * r1 * (pBest[i, j] - pos[i, j])
                    + c2 * r2 * (gBest[j] - pos[i, j])
                )

                if vel[i, j] > Vmax:
                    vel[i, j] = Vmax

                if vel[i, j] < -Vmax:
                    vel[i, j] = -Vmax

                pos[i, j] = pos[i, j] + vel[i, j]

        convergence_curve[l] = gBestScore

        if l % 1 == 0:
            print("At iteration " + str(l + 1) + " the best fitness is " + str(gBestScore))
    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence_curve
    s.optimizer = "PSO"
    s.bestIndividual = gBest
    s.objfname = objf.__name__

    return s

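A single-particle, single-dimension sketch of the PSO velocity and position update used above, with the linearly decaying inertia weight; the constants mirror those in the code and the sample positions are illustrative:

import random

Vmax, wMax, wMin, c1, c2 = 6, 0.9, 0.2, 2, 2
iters = 5
x, v, pbest, gbest = 3.0, 0.0, 1.0, -2.0
for l in range(iters):
    w = wMax - l * ((wMax - wMin) / iters)
    v = w * v + c1 * random.random() * (pbest - x) + c2 * random.random() * (gbest - x)
    v = max(-Vmax, min(Vmax, v))   # velocity clamp, as in the code above
    x = x + v
    print(l, round(x, 3), round(v, 3))
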
93
evolopy/SCA.py
Normal file
93
evolopy/SCA.py
Normal file
|
@ -0,0 +1,93 @@
|
|||
""" Sine Cosine OPtimization Algorithm """
|
||||
|
||||
import random
|
||||
import numpy
|
||||
import math
|
||||
from solution import solution
|
||||
import time
|
||||
|
||||
|
||||
def SCA(objf, lb, ub, dim, SearchAgents_no, Max_iter):
|
||||
|
||||
# destination_pos
|
||||
Dest_pos = numpy.zeros(dim)
|
||||
Dest_score = float("inf")
|
||||
|
||||
if not isinstance(lb, list):
|
||||
lb = [lb] * dim
|
||||
if not isinstance(ub, list):
|
||||
ub = [ub] * dim
|
||||
|
||||
# Initialize the positions of search agents
|
||||
Positions = numpy.zeros((SearchAgents_no, dim))
|
||||
for i in range(dim):
|
||||
Positions[:, i] = (
|
||||
numpy.random.uniform(0, 1, SearchAgents_no) * (ub[i] - lb[i]) + lb[i]
|
||||
)
|
||||
|
||||
Convergence_curve = numpy.zeros(Max_iter)
|
||||
s = solution()
|
||||
|
||||
# Loop counter
|
||||
print('SCA is optimizing "' + objf.__name__ + '"')
|
||||
|
||||
timerStart = time.time()
|
||||
s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
|
||||
|
||||
# Main loop
|
||||
for l in range(0, Max_iter):
|
||||
for i in range(0, SearchAgents_no):
|
||||
|
||||
# Return back the search agents that go beyond the boundaries of the search space
|
||||
for j in range(dim):
|
||||
Positions[i, j] = numpy.clip(Positions[i, j], lb[j], ub[j])
|
||||
|
||||
# Calculate objective function for each search agent
|
||||
fitness = objf(Positions[i, :])
|
||||
|
||||
if fitness < Dest_score:
|
||||
Dest_score = fitness # Update Dest_Score
|
||||
Dest_pos = Positions[i, :].copy()
|
||||
|
||||
# Eq. (3.4)
|
||||
a = 2
|
||||
Max_iteration = Max_iter
|
||||
r1 = a - l * ((a) / Max_iteration) # r1 decreases linearly from a to 0
|
||||
|
||||
# Update the Position of search agents
|
||||
for i in range(0, SearchAgents_no):
|
||||
for j in range(0, dim):
|
||||
|
||||
# Update r2, r3, and r4 for Eq. (3.3)
|
||||
r2 = (2 * numpy.pi) * random.random()
|
||||
r3 = 2 * random.random()
|
||||
r4 = random.random()
|
||||
|
||||
# Eq. (3.3)
|
||||
if r4 < (0.5):
|
||||
# Eq. (3.1)
|
||||
Positions[i, j] = Positions[i, j] + (
|
||||
r1 * numpy.sin(r2) * abs(r3 * Dest_pos[j] - Positions[i, j])
|
||||
)
|
||||
else:
|
||||
# Eq. (3.2)
|
||||
Positions[i, j] = Positions[i, j] + (
|
||||
r1 * numpy.cos(r2) * abs(r3 * Dest_pos[j] - Positions[i, j])
|
||||
)
|
||||
|
||||
Convergence_curve[l] = Dest_score
|
||||
|
||||
if l % 1 == 0:
|
||||
print(
|
||||
["At iteration " + str(l) + " the best fitness is " + str(Dest_score)]
|
||||
)
|
||||
|
||||
timerEnd = time.time()
|
||||
s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
|
||||
s.executionTime = timerEnd - timerStart
|
||||
s.convergence = Convergence_curve
|
||||
s.optimizer = "SCA"
|
||||
s.bestIndividual = Dest_pos
|
||||
s.objfname = objf.__name__
|
||||
|
||||
return s
|
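A hedged usage sketch for the SCA function above. Because Dest_score is only ever replaced by a strictly better fitness, the recorded curve is best-so-far and should never increase; the sphere objective and all parameter values are illustrative assumptions, not part of the commit.

# Hypothetical usage sketch (not part of the commit): run SCA and check that the
# recorded best-so-far curve never increases.
import numpy
from SCA import SCA  # assumes the evolopy directory is on sys.path

def sphere(x):
    return numpy.sum(x ** 2)

s = SCA(sphere, lb=-100, ub=100, dim=10, SearchAgents_no=20, Max_iter=100)
assert numpy.all(numpy.diff(s.convergence) <= 0)
print("final best fitness:", s.convergence[-1])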
125
evolopy/SSA.py
Normal file
@@ -0,0 +1,125 @@
import random
import numpy
import math
from solution import solution
import time


def SSA(objf, lb, ub, dim, N, Max_iteration):

    # Max_iteration=1000
    # lb=-100
    # ub=100
    # dim=30
    # N=50  # Number of search agents
    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim
    Convergence_curve = numpy.zeros(Max_iteration)

    # Initialize the positions of salps
    SalpPositions = numpy.zeros((N, dim))
    for i in range(dim):
        SalpPositions[:, i] = numpy.random.uniform(0, 1, N) * (ub[i] - lb[i]) + lb[i]
    SalpFitness = numpy.full(N, float("inf"))

    FoodPosition = numpy.zeros(dim)
    FoodFitness = float("inf")
    # Moth_fitness=numpy.fell(float("inf"))

    s = solution()

    print('SSA is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    for i in range(0, N):
        # evaluate salps
        SalpFitness[i] = objf(SalpPositions[i, :])

    sorted_salps_fitness = numpy.sort(SalpFitness)
    I = numpy.argsort(SalpFitness)

    Sorted_salps = numpy.copy(SalpPositions[I, :])

    FoodPosition = numpy.copy(Sorted_salps[0, :])
    FoodFitness = sorted_salps_fitness[0]

    Iteration = 1

    # Main loop
    while Iteration < Max_iteration:

        # Number of flames Eq. (3.14) in the paper
        # Flame_no=round(N-Iteration*((N-1)/Max_iteration));

        c1 = 2 * math.exp(-((4 * Iteration / Max_iteration) ** 2))
        # Eq. (3.2) in the paper

        for i in range(0, N):

            SalpPositions = numpy.transpose(SalpPositions)

            if i < N / 2:
                for j in range(0, dim):
                    c2 = random.random()
                    c3 = random.random()
                    # Eq. (3.1) in the paper
                    if c3 < 0.5:
                        SalpPositions[j, i] = FoodPosition[j] + c1 * (
                            (ub[j] - lb[j]) * c2 + lb[j]
                        )
                    else:
                        SalpPositions[j, i] = FoodPosition[j] - c1 * (
                            (ub[j] - lb[j]) * c2 + lb[j]
                        )

                    ####################

            elif i >= N / 2 and i < N + 1:
                point1 = SalpPositions[:, i - 1]
                point2 = SalpPositions[:, i]

                SalpPositions[:, i] = (point2 + point1) / 2
                # Eq. (3.4) in the paper

            SalpPositions = numpy.transpose(SalpPositions)

        for i in range(0, N):

            # Check if salps go out of the search space and bring them back
            for j in range(dim):
                SalpPositions[i, j] = numpy.clip(SalpPositions[i, j], lb[j], ub[j])

            SalpFitness[i] = objf(SalpPositions[i, :])

            if SalpFitness[i] < FoodFitness:
                FoodPosition = numpy.copy(SalpPositions[i, :])
                FoodFitness = SalpFitness[i]

        # Display best fitness along the iteration
        if Iteration % 1 == 0:
            print(
                [
                    "At iteration "
                    + str(Iteration)
                    + " the best fitness is "
                    + str(FoodFitness)
                ]
            )

        Convergence_curve[Iteration] = FoodFitness

        Iteration = Iteration + 1

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = Convergence_curve
    s.optimizer = "SSA"
    s.bestIndividual = FoodPosition
    s.objfname = objf.__name__

    return s
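A hedged usage sketch for the SSA function above; the Rastrigin objective, bounds, and parameter values are illustrative assumptions, not part of the commit, and the flat import assumes the evolopy directory is on the import path.

# Hypothetical usage sketch (not part of the commit): run SSA on a Rastrigin objective.
import numpy
from SSA import SSA  # assumes the evolopy directory is on sys.path

def rastrigin(x):
    # Multimodal test objective; global minimum 0 at the origin.
    return 10 * len(x) + numpy.sum(x ** 2 - 10 * numpy.cos(2 * numpy.pi * x))

# Signature from the file above: SSA(objf, lb, ub, dim, N, Max_iteration)
s = SSA(rastrigin, lb=-5.12, ub=5.12, dim=10, N=30, Max_iteration=200)
print("best fitness:", s.convergence[-1])
print("best salp position:", s.bestIndividual)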
132
evolopy/WOA.py
Normal file
@@ -0,0 +1,132 @@
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 14:19:49 2016

@author: hossam
"""
import random
import numpy
import math
from solution import solution
import time


def WOA(objf, lb, ub, dim, SearchAgents_no, Max_iter):

    # dim=30
    # SearchAgents_no=50
    # lb=-100
    # ub=100
    # Max_iter=500
    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim

    # initialize position vector and score for the leader
    Leader_pos = numpy.zeros(dim)
    Leader_score = float("inf")  # change this to -inf for maximization problems

    # Initialize the positions of search agents
    Positions = numpy.zeros((SearchAgents_no, dim))
    for i in range(dim):
        Positions[:, i] = (
            numpy.random.uniform(0, 1, SearchAgents_no) * (ub[i] - lb[i]) + lb[i]
        )

    # Initialize convergence
    convergence_curve = numpy.zeros(Max_iter)

    ############################
    s = solution()

    print('WOA is optimizing "' + objf.__name__ + '"')

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    ############################

    t = 0  # Loop counter

    # Main loop
    while t < Max_iter:
        for i in range(0, SearchAgents_no):

            # Return back the search agents that go beyond the boundaries of the search space

            # Positions[i,:]=checkBounds(Positions[i,:],lb,ub)
            for j in range(dim):
                Positions[i, j] = numpy.clip(Positions[i, j], lb[j], ub[j])

            # Calculate objective function for each search agent
            fitness = objf(Positions[i, :])

            # Update the leader
            if fitness < Leader_score:  # Change this to > for maximization problems
                Leader_score = fitness
                # Update alpha
                Leader_pos = Positions[
                    i, :
                ].copy()  # copy current whale position into the leader position

        a = 2 - t * ((2) / Max_iter)
        # a decreases linearly from 2 to 0 in Eq. (2.3)

        # a2 linearly decreases from -1 to -2 to calculate t in Eq. (3.12)
        a2 = -1 + t * ((-1) / Max_iter)

        # Update the position of search agents
        for i in range(0, SearchAgents_no):
            r1 = random.random()  # r1 is a random number in [0,1]
            r2 = random.random()  # r2 is a random number in [0,1]

            A = 2 * a * r1 - a  # Eq. (2.3) in the paper
            C = 2 * r2  # Eq. (2.4) in the paper

            b = 1
            # parameters in Eq. (2.5)
            l = (a2 - 1) * random.random() + 1  # parameters in Eq. (2.5)

            p = random.random()  # p in Eq. (2.6)

            for j in range(0, dim):

                if p < 0.5:
                    if abs(A) >= 1:
                        rand_leader_index = math.floor(
                            SearchAgents_no * random.random()
                        )
                        X_rand = Positions[rand_leader_index, :]
                        D_X_rand = abs(C * X_rand[j] - Positions[i, j])
                        Positions[i, j] = X_rand[j] - A * D_X_rand

                    elif abs(A) < 1:
                        D_Leader = abs(C * Leader_pos[j] - Positions[i, j])
                        Positions[i, j] = Leader_pos[j] - A * D_Leader

                elif p >= 0.5:

                    distance2Leader = abs(Leader_pos[j] - Positions[i, j])
                    # Eq. (2.5)
                    Positions[i, j] = (
                        distance2Leader * math.exp(b * l) * math.cos(l * 2 * math.pi)
                        + Leader_pos[j]
                    )

        convergence_curve[t] = Leader_score
        if t % 1 == 0:
            print(
                ["At iteration " + str(t) + " the best fitness is " + str(Leader_score)]
            )
        t = t + 1

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence_curve
    s.optimizer = "WOA"
    s.objfname = objf.__name__
    s.best = Leader_score
    s.bestIndividual = Leader_pos

    return s
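A hedged usage sketch for the WOA function above, showing the fields it fills in on the returned solution object (including s.best, which this file sets in addition to s.bestIndividual); the sphere objective and parameter values are illustrative assumptions, not part of the commit.

# Hypothetical usage sketch (not part of the commit): run WOA and inspect the result.
import numpy
from WOA import WOA  # assumes the evolopy directory is on sys.path

def sphere(x):
    return numpy.sum(x ** 2)

s = WOA(sphere, lb=-100, ub=100, dim=30, SearchAgents_no=30, Max_iter=300)
print(s.optimizer, "on", s.objfname)
print("best score:", s.best)            # leader score stored by WOA
print("best position:", s.bestIndividual)
print("took", s.executionTime, "seconds")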