Commit f0aab83

VERSION 0.3.2

Julian Blank committed Oct 21, 2019
1 parent 6d8cb13 commit f0aab83
Showing 52 changed files with 650 additions and 187 deletions.
2 changes: 1 addition & 1 deletion README.rst
@@ -78,7 +78,7 @@ However, for instance executing NSGA2:
 problem = get_problem("zdt2")

-algorithm = NSGA2(pop_size=100, elimate_duplicates=True)
+algorithm = NSGA2(pop_size=100, eliminate_duplicates=True)

 res = minimize(problem,
                algorithm,
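For context, a minimal sketch of the corrected README snippet as a runnable script; the termination tuple, seed, and surrounding imports are assumptions based on pymoo's usual minimize interface, not part of this diff:

    from pymoo.algorithms.nsga2 import NSGA2
    from pymoo.factory import get_problem
    from pymoo.optimize import minimize

    problem = get_problem("zdt2")

    # the corrected keyword: eliminate_duplicates (previously misspelled)
    algorithm = NSGA2(pop_size=100, eliminate_duplicates=True)

    res = minimize(problem,
                   algorithm,
                   ('n_gen', 200),
                   seed=1)

    print(res.F)  # objective values of the non-dominated set found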
11 changes: 5 additions & 6 deletions benchmark/benchmark_nelder_mead.py
@@ -4,10 +4,9 @@
 import os
 import pickle

-from pymoo.algorithms.so_genetic_algorithm import ga
-from pymoo.algorithms.so_nelder_mead import nelder_mead
-from pymoo.factory import get_problem, get_termination
-from pymoo.operators.crossover.nelder_mead_crossover import NelderMeadCrossover
+from pymoo.algorithms.so_nelder_mead import NelderMead
+
+from pymoo.factory import get_problem

 setup = [
     "go-amgm",
@@ -222,7 +221,7 @@ def add_with_variables(D, problem, n_vars):
 prefix = "runs"

 # name of the experiment
-name = "nelder-mead-0.3.1"
+name = "nelder-mead-0.3.2"

 # number of runs to execute
 n_runs = 10
@@ -234,7 +233,7 @@ def add_with_variables(D, problem, n_vars):
 problem = get_problem(_problem)

-method = nelder_mead(n_max_restarts=100)
+method = NelderMead(n_max_local_restarts=2)

 for run in range(1, n_runs + 1):
     fname = "%s_%s.run" % (_problem, run)
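The benchmark now constructs the NelderMead class directly instead of calling the removed nelder_mead() factory. A hedged usage sketch outside the benchmark harness; the problem name and minimize call are illustrative assumptions:

    from pymoo.algorithms.so_nelder_mead import NelderMead
    from pymoo.factory import get_problem
    from pymoo.optimize import minimize

    problem = get_problem("rastrigin")  # illustrative problem choice
    algorithm = NelderMead(n_max_local_restarts=2)

    res = minimize(problem, algorithm, seed=1)
    print(res.X, res.F)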
7 changes: 4 additions & 3 deletions benchmark/benchmark_nsga2.py
@@ -4,7 +4,8 @@
 import os
 import pickle

-from pymoo.algorithms.nsga2 import nsga2
+from pymoo.algorithms.nsga2 import NSGA2
+
 from pymoo.factory import get_sampling, get_crossover, get_mutation, get_problem
 from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
 from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
@@ -90,7 +91,7 @@
 prefix = "runs"

 # name of the experiment
-name = "pynsga2-0.3.1"
+name = "pynsga2-0.3.2"

 # number of runs to execute
 n_runs = 100
@@ -107,7 +108,7 @@
 s = setup[_problem]
 problem = s['problem']

-method = nsga2(
+method = NSGA2(
     pop_size=s['pop_size'],
     crossover=s['crossover'],
     mutation=s['mutation'],
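The same pattern applies here: NSGA2 is instantiated directly with the operators the benchmark configures per problem. A hedged sketch using the factory helpers this file already imports; the operator keys and parameter values are illustrative assumptions:

    from pymoo.algorithms.nsga2 import NSGA2
    from pymoo.factory import get_crossover, get_mutation, get_sampling

    # illustrative operator configuration (keys and values are assumptions)
    method = NSGA2(
        pop_size=100,
        sampling=get_sampling("real_random"),
        crossover=get_crossover("real_sbx", prob=0.9, eta=15),
        mutation=get_mutation("real_pm", eta=20),
        eliminate_duplicates=True)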
6 changes: 3 additions & 3 deletions benchmark/benchmark_nsga3.py
@@ -1,7 +1,7 @@
 import os
 import pickle

-from pymoo.algorithms.nsga3 import nsga3
+from pymoo.algorithms.nsga3 import NSGA3
 from pymoo.factory import get_problem, get_reference_directions
 from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
 from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
@@ -206,7 +206,7 @@ def get_setup(n_obj):
 prefix = "runs"

 # name of the experiment
-name = "pynsga3-0.3.1"
+name = "pynsga3-0.3.2"

 # number of runs to execute
 n_runs = 50
@@ -222,7 +222,7 @@ def get_setup(n_obj):
 s = setup[_problem]
 problem = s['problem']

-method = nsga3(
+method = NSGA3(
     s['ref_dirs'],
     pop_size=s['pop_size'],
     crossover=s['crossover'],
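NSGA3 keeps its positional reference-direction argument under the new class interface. A hedged sketch, with the das-dennis partitioning and the DTLZ2 problem as illustrative assumptions:

    from pymoo.algorithms.nsga3 import NSGA3
    from pymoo.factory import get_problem, get_reference_directions

    # reference directions come first, as in the benchmark's NSGA3(s['ref_dirs'], ...)
    ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=12)
    problem = get_problem("dtlz2")

    method = NSGA3(ref_dirs, pop_size=92)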
8 changes: 0 additions & 8 deletions pymoo/algorithms/moead.py
@@ -130,12 +130,4 @@ def _next(self):
         pop[N[I]] = off


-# =========================================================================================================
-# Interface
-# =========================================================================================================
-
-def moead(*args, **kwargs):
-    return MOEAD(*args, **kwargs)
-
-
 parse_doc_string(MOEAD.__init__)
8 changes: 0 additions & 8 deletions pymoo/algorithms/nsga2.py
@@ -202,12 +202,4 @@ def calc_crowding_distance(F, filter_out_duplicates=True):
     return crowding


-# =========================================================================================================
-# Interface
-# =========================================================================================================
-
-def nsga2(*args, **kwargs):
-    return NSGA2(*args, **kwargs)
-
-
 parse_doc_string(NSGA2.__init__)
8 changes: 0 additions & 8 deletions pymoo/algorithms/nsga3.py
@@ -335,12 +335,4 @@ def calc_niche_count(n_niches, niche_of_individuals):
     return niche_count


-# =========================================================================================================
-# Interface
-# =========================================================================================================
-
-def nsga3(*args, **kwargs):
-    return NSGA3(*args, **kwargs)
-
-
 parse_doc_string(NSGA3.__init__)
9 changes: 0 additions & 9 deletions pymoo/algorithms/rnsga2.py
@@ -185,13 +185,4 @@ def calc_norm_pref_distance(A, B, weights, ideal, nadir):
     return np.reshape(N, (A.shape[0], B.shape[0]))


-# =========================================================================================================
-# Interface
-# =========================================================================================================
-
-
-def rnsga2(*args, **kwargs):
-    return RNSGA2(*args, **kwargs)
-
-
 parse_doc_string(RNSGA2.__init__)
9 changes: 0 additions & 9 deletions pymoo/algorithms/rnsga3.py
@@ -244,13 +244,4 @@ def line_plane_intersection(l0, l1, p0, p_no, epsilon=1e-6):
     return ref_proj


-# =========================================================================================================
-# Interface
-# =========================================================================================================
-
-
-def rnsga3(*args, **kwargs):
-    return RNSGA3(*args, **kwargs)
-
-
 parse_doc_string(RNSGA3.__init__)
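The five deletions above all remove the same boilerplate: thin lowercase factories (moead, nsga2, nsga3, rnsga2, rnsga3) that only forwarded *args/**kwargs to the class constructors. Migration is therefore mechanical, sketched here for NSGA2:

    # before this commit (factory wrapper, now removed):
    #   from pymoo.algorithms.nsga2 import nsga2
    #   method = nsga2(pop_size=100)

    # from 0.3.2 on, construct the class directly; arguments are unchanged
    # because the wrapper only forwarded them
    from pymoo.algorithms.nsga2 import NSGA2

    method = NSGA2(pop_size=100)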
48 changes: 48 additions & 0 deletions pymoo/algorithms/so_adam.py
@@ -0,0 +1,48 @@
import numpy as np

from pymoo.algorithms.so_gradient_descent import GradientBasedAlgorithm


class Adam(GradientBasedAlgorithm):

    def __init__(self, X,
                 alpha=0.005,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-8,
                 **kwargs) -> None:
        super().__init__(X, **kwargs)

        self.alpha = alpha
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon

        self.t = 0
        self.m_t = 0
        self.v_t = 0

    def apply(self):
        X, dX = self.X, self.dX

        self.t += 1
        beta_1, beta_2 = self.beta_1, self.beta_2

        # update moving average of gradient and squared gradient
        self.m_t = beta_1 * self.m_t + (1 - beta_1) * dX
        self.v_t = beta_2 * self.v_t + (1 - beta_2) * (dX * dX)

        # calculates the bias-corrected estimates
        m_cap = self.m_t / (1 - (beta_1 ** self.t))
        v_cap = self.v_t / (1 - (beta_2 ** self.t))

        # do the gradient update
        self.X = X - (self.alpha * m_cap) / (np.sqrt(v_cap) + self.epsilon)

    def restart(self):
        self.t = 0
        self.m_t = 0
        self.v_t = 0
        self.alpha /= 2
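As a sanity check, the update rule in apply() can be exercised in isolation; this self-contained sketch applies the same Adam step to f(x) = x**2 and is not part of the commit:

    import numpy as np

    alpha, beta_1, beta_2, epsilon = 0.1, 0.9, 0.999, 1e-8
    x, m_t, v_t = 5.0, 0.0, 0.0

    for t in range(1, 501):
        dx = 2 * x                                   # gradient of f(x) = x**2
        m_t = beta_1 * m_t + (1 - beta_1) * dx       # first moment (moving average)
        v_t = beta_2 * v_t + (1 - beta_2) * dx * dx  # second moment
        m_cap = m_t / (1 - beta_1 ** t)              # bias-corrected estimates
        v_cap = v_t / (1 - beta_2 ** t)
        x = x - alpha * m_cap / (np.sqrt(v_cap) + epsilon)

    print(x)  # converges toward the minimizer at 0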
10 changes: 2 additions & 8 deletions pymoo/algorithms/so_de.py
@@ -2,6 +2,7 @@

 from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm
 from pymoo.docs import parse_doc_string
+from pymoo.model.termination import SingleObjectiveToleranceBasedTermination
 from pymoo.operators.crossover.differental_evolution_crossover import DifferentialEvolutionCrossover
 from pymoo.operators.crossover.exponential_crossover import ExponentialCrossover
 from pymoo.operators.crossover.uniform_crossover import UniformCrossover
@@ -77,6 +78,7 @@ def __init__(self,
                          survival=None,
                          **kwargs)

+        self.default_termination = SingleObjectiveToleranceBasedTermination()
         self.func_display_attrs = disp_single_objective

     def _next(self):
@@ -132,12 +134,4 @@ def _next(self):
         return pop


-# =========================================================================================================
-# Interface
-# =========================================================================================================
-
-def de(*args, **kwargs):
-    return DE(*args, **kwargs)
-
-
 parse_doc_string(DE.__init__)
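With SingleObjectiveToleranceBasedTermination as the new default, DE no longer requires an explicit termination criterion, though one can still be supplied. A hedged sketch; the problem choice and pop_size are illustrative assumptions, and get_termination is the factory already used by the benchmark scripts:

    from pymoo.algorithms.so_de import DE
    from pymoo.factory import get_problem, get_termination
    from pymoo.optimize import minimize

    problem = get_problem("rastrigin")  # illustrative problem choice

    # no termination given: the new tolerance-based default applies
    res = minimize(problem, DE(pop_size=100), seed=1)

    # an explicit budget still overrides the default
    res = minimize(problem, DE(pop_size=100), get_termination("n_gen", 200), seed=1)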
12 changes: 2 additions & 10 deletions pymoo/algorithms/so_genetic_algorithm.py
@@ -3,6 +3,7 @@

 from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm
 from pymoo.docs import parse_doc_string
 from pymoo.model.survival import Survival
+from pymoo.model.termination import SingleObjectiveToleranceBasedTermination
 from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
 from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
 from pymoo.operators.sampling.random_sampling import FloatRandomSampling
@@ -67,6 +68,7 @@ def __init__(self,
                          **kwargs)

         self.func_display_attrs = disp_single_objective
+        self.default_termination = SingleObjectiveToleranceBasedTermination()


 class FitnessSurvival(Survival):
@@ -83,14 +85,4 @@ def _do(self, problem, pop, n_survive, out=None, **kwargs):
         return pop[np.argsort(F[:, 0])[:n_survive]]


-# =========================================================================================================
-# Interface
-# =========================================================================================================
-
-
-def ga(*args, **kwargs):
-    return GA(*args, **kwargs)
-
-
 parse_doc_string(GA.__init__)
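FitnessSurvival truncates the population to the n_survive individuals with the lowest first-objective value; the core line is plain numpy and can be sketched in isolation (values are illustrative):

    import numpy as np

    F = np.array([[3.2], [0.7], [1.9], [4.1], [0.9]])  # column of fitness values
    n_survive = 3

    # sort ascending by the first objective and keep the best n_survive
    survivors = np.argsort(F[:, 0])[:n_survive]
    print(survivors)        # [1 4 2]
    print(F[survivors, 0])  # [0.7 0.9 1.9]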
89 changes: 89 additions & 0 deletions pymoo/algorithms/so_gradient_descent.py
@@ -0,0 +1,89 @@
import numpy as np

from pymoo.model.algorithm import Algorithm
from pymoo.model.population import Population
from pymoo.model.termination import SingleObjectiveToleranceBasedTermination
from pymoo.operators.repair.out_of_bounds_repair import repair_out_of_bounds
from pymoo.util.display import disp_single_objective


class GradientBasedAlgorithm(Algorithm):

    def __init__(self, X, dX=None, objective=0, **kwargs) -> None:
        super().__init__(**kwargs)
        self.func_display_attrs = disp_single_objective
        self.objective = objective
        self.n_restarts = 0
        self.default_termination = SingleObjectiveToleranceBasedTermination()

        self.X, self.dX = X, dX
        self.F, self.CV = None, None

        if self.X.ndim == 1:
            self.X = np.atleast_2d(X)

    def _initialize(self):
        self._next()

    def _next(self):

        # create a copy of the current values - in case a restart is necessary
        _X = np.copy(self.X)

        # if the gradient was not provided yet, evaluate it
        if self.F is None or self.dX is None:
            # evaluate the problem and get the gradient information
            F, dX, CV = self.problem.evaluate(self.X, return_values_of=["F", "dF", "CV"])

            # because we only consider one objective here
            F = F[:, [self.objective]]
            dX = dX[:, self.objective]

            # increase the evaluation count
            self.evaluator.n_eval += len(self.X)

        has_improved = self.F is None or np.any(F < self.F)
        is_gradient_valid = np.all(~np.isnan(dX))

        # if the gradient did lead to an improvement
        if has_improved:

            self.F, self.dX, self.CV = F, dX, CV

            # if the gradient is valid and has no nan values
            if is_gradient_valid:

                # take the step and repair X if it is out of bounds
                self.apply()
                self.X = repair_out_of_bounds(self.problem, self.X)

                # set the population object for automatic printing
                self.pop = Population(len(self.X)).set("X", self.X, "F", self.F,
                                                       "CV", self.CV, "feasible", self.CV <= 0)

            # otherwise force the termination from now on
            else:
                print("WARNING: GRADIENT ERROR", self.dX)
                self.termination.force_termination = True

        # otherwise do a restart of the algorithm
        else:
            self.X = _X
            self.restart()
            self.n_restarts += 1

        # set the gradient to None to be ready for the next iteration
        self.dX = None


class GradientDescent(GradientBasedAlgorithm):

    def __init__(self, X, learning_rate=0.005, **kwargs) -> None:
        super().__init__(X, **kwargs)
        self.learning_rate = learning_rate

    def restart(self):
        self.learning_rate /= 2

    def apply(self):
        self.X = self.X - self.learning_rate * self.dX
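GradientBasedAlgorithm requests "dF" from problem.evaluate, so these algorithms only work on problems that can return gradients. A hedged usage sketch; the problem choice, starting point, and gradient availability are assumptions, not confirmed by the commit:

    import numpy as np

    from pymoo.algorithms.so_gradient_descent import GradientDescent
    from pymoo.factory import get_problem
    from pymoo.optimize import minimize

    problem = get_problem("sphere")       # assumed to provide "dF" on evaluate
    x0 = np.random.random(problem.n_var)  # a single starting point

    algorithm = GradientDescent(x0, learning_rate=0.01)
    res = minimize(problem, algorithm)
    print(res.X, res.F)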