rename n_dim to size, rename n_trials to budget, swap their order

Connor Olding 2023-05-07 07:30:55 -07:00
parent 6778965d70
commit b0810cee8c
12 changed files with 193 additions and 193 deletions

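For call sites, the change is mechanical but order-sensitive: anything that previously called f(objective, n_trials, n_dim) now calls f(objective, size, budget), with the dimensionality passed before the evaluation budget. A minimal sketch of the new convention follows; random_cube and sphere are illustrative stand-ins (random_cube mirrors the repo's another_random_cube) and are not part of this commit.

import numpy as np

def random_cube(objective, size, budget, seed=None):
    # stand-in solver using the new (objective, size, budget) argument order
    prng = np.random.default_rng(seed)
    fopt, xopt = None, None
    for _ in range(budget):            # budget: number of objective evaluations
        x = prng.uniform(size=size)    # size: dimensionality of the unit cube
        fx = objective(x)
        if fopt is None or fx < fopt:
            fopt, xopt = fx, x
    return fopt, xopt, budget

def sphere(x):
    return float(np.sum((x - 0.5) ** 2))

# before this commit, the same call would have been random_cube(sphere, 100, 4)
fopt, xopt, fevals = random_cube(sphere, size=4, budget=100)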

@@ -3,14 +3,14 @@ from .random import another_random_cube
from ..utilities import wrap_untrustworthy, final
def dlib_cube(objective, n_trials, n_dim):
if n_dim > 35:
return another_random_cube(objective, n_trials, n_dim)
def dlib_cube(objective, size, budget):
if size > 35:
return another_random_cube(objective, size, budget)
_objective = wrap_untrustworthy(objective, n_trials)
_objective = wrap_untrustworthy(objective, budget)
def __objective(*args):
return _objective(list(args))
find_min_global(__objective, [0.0] * n_dim, [1.0] * n_dim, n_trials)
find_min_global(__objective, [0.0] * size, [1.0] * size, budget)
return _objective(final)


@@ -43,18 +43,18 @@ def make_evolopy(optimizer_name, popsize=None):
}
optimizer = optimizers[optimizer_name.upper()]
def f(objective, n_trials, n_dim):
# ps = n_dim if popsize is None else popsize
def f(objective, size, budget):
# ps = size if popsize is None else popsize
# ps = max(4, ps) if optimizer_name.upper() == "DE" else ps
ps = int(4 + 3 * np.log(n_dim)) if popsize is None else popsize
ps = int(4 + 3 * np.log(size)) if popsize is None else popsize
if optimizer_name.upper() == "GA" and ps & 1:
ps += 1 # force popsize to be even
_objective = wrap_untrustworthy(
objective, n_trials, bounding="sine", raising=True
objective, budget, bounding="sine", raising=True
)
try:
optimizer(_objective, 0.0, 1.0, n_dim, ps, n_trials)
optimizer(_objective, 0.0, 1.0, size, ps, budget)
except ExhaustedTrialsError:
pass


@@ -21,10 +21,10 @@ def _fix_logging(original_function):
def make_biteopt(depth=1):
from fcmaes.optimizer import Bite_cpp
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
optim = Bite_cpp(max_evaluations=n_trials, M=depth)
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget)
bounds = Bounds([0.0] * size, [1.0] * size)
optim = Bite_cpp(max_evaluations=budget, M=depth)
_xopt, _fopt, _feval_count = optim.minimize(_objective, bounds)
return _objective(final)
@@ -37,10 +37,10 @@ def make_biteopt(depth=1):
def make_csma(isigma=3):
from fcmaes.optimizer import Csma_cpp
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
optim = Csma_cpp(max_evaluations=n_trials)
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget)
bounds = Bounds([0.0] * size, [1.0] * size)
optim = Csma_cpp(max_evaluations=budget)
_xopt, _fopt, _feval_count = optim.minimize(
_objective, bounds, sdevs=1 / isigma
)
@@ -55,12 +55,12 @@ def make_csma(isigma=3):
def make_fcmaes(popsize=None):
from fcmaes.optimizer import Cma_cpp
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
ps = int(4 + 3 * np.log(n_dim)) if popsize is None else popsize
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget)
bounds = Bounds([0.0] * size, [1.0] * size)
ps = int(4 + 3 * np.log(size)) if popsize is None else popsize
optim = Cma_cpp(
max_evaluations=n_trials,
max_evaluations=budget,
popsize=ps,
stop_hist=0.0,
workers=1,
@@ -79,11 +79,11 @@ def make_fcmaes(popsize=None):
def make_crfmnes(popsize=None):
from fcmaes.optimizer import Crfmnes_cpp
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
ps = int(4 + 3 * np.log(n_dim)) if popsize is None else popsize
optim = Crfmnes_cpp(max_evaluations=n_trials, popsize=ps)
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget)
bounds = Bounds([0.0] * size, [1.0] * size)
ps = int(4 + 3 * np.log(size)) if popsize is None else popsize
optim = Crfmnes_cpp(max_evaluations=budget, popsize=ps)
_xopt, _fopt, _feval_count = optim.minimize(_objective, bounds, sdevs=1 / 3)
return _objective(final)
@@ -98,10 +98,10 @@ _broken = """
def make_lde(popsize=None):
from fcmaes.optimizer import LDe_cpp
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
optim = LDe_cpp(max_evaluations=n_trials, popsize=popsize)
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget)
bounds = Bounds([0.0] * size, [1.0] * size)
optim = LDe_cpp(max_evaluations=budget, popsize=popsize)
_xopt, _fopt, _feval_count = optim.minimize(_objective, bounds)
return _objective(final)
@@ -116,10 +116,10 @@ def make_lde(popsize=None):
def make_da(local=True):
from fcmaes.optimizer import Da_cpp
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
optim = Da_cpp(max_evaluations=n_trials, use_local_search=local)
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget)
bounds = Bounds([0.0] * size, [1.0] * size)
optim = Da_cpp(max_evaluations=budget, use_local_search=local)
_xopt, _fopt, _feval_count = optim.minimize(_objective, bounds)
return _objective(final)
@@ -134,10 +134,10 @@ def make_da(local=True):
def make_gclde(popsize=None):
from fcmaes.optimizer import GCLDE_cpp
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
optim = GCLDE_cpp(max_evaluations=n_trials, popsize=popsize)
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget)
bounds = Bounds([0.0] * size, [1.0] * size)
optim = GCLDE_cpp(max_evaluations=budget, popsize=popsize)
_xopt, _fopt, _feval_count = optim.minimize(_objective, bounds)
return _objective(final)
@@ -151,10 +151,10 @@ def make_gclde(popsize=None):
def make_lclde(popsize=None):
from fcmaes.optimizer import LCLDE_cpp
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
optim = LCLDE_cpp(max_evaluations=n_trials, popsize=popsize)
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget)
bounds = Bounds([0.0] * size, [1.0] * size)
optim = LCLDE_cpp(max_evaluations=budget, popsize=popsize)
_xopt, _fopt, _feval_count = optim.minimize(_objective, bounds)
return _objective(final)


@@ -303,8 +303,8 @@ assert ol.RSLSQP is ol.RSQP, "weirdness is gone, please adjust accordingly"
assert ol.SLSQP is ol.SQP, "weirdness is gone, please adjust accordingly"
def nevergrad_cube_factory(optimizer, objective, n_trials, n_dim):
instrument = ng.p.Array(lower=0, upper=1, shape=(n_dim,)) # better sigma
def nevergrad_cube_factory(optimizer, objective, size, budget):
instrument = ng.p.Array(lower=0, upper=1, shape=(size,)) # better sigma
# ev.RBO, ev.QRBO, ev.MidQRBO, and ev.LBO still complain, though:
# /home/py/.local/lib/python3.10/site-packages/nevergrad/parametrization/_datalayers.py:107: NevergradRuntimeWarning: Bounds are 1.0 sigma away from each other at the closest, you should aim for at least 3 for better quality.
if optimizer in (ev.RBO, ev.QRBO, ev.MidQRBO, ev.LBO):
@@ -313,7 +313,7 @@ def nevergrad_cube_factory(optimizer, objective, n_trials, n_dim):
opt = optimizer
assert opt is not None, optimizer
optimizer = opt(parametrization=instrument, budget=n_trials, num_workers=1)
optimizer = opt(parametrization=instrument, budget=budget, num_workers=1)
feval_count = 0


@@ -47,7 +47,7 @@ NLOPTIMIZERS = {
}
def nlopt_cube_factory(objective, n_trials, n_dim, method):
def nlopt_cube_factory(objective, size, budget, method):
optim = NLOPTIMIZERS[method]
feval_count = 0
@@ -57,19 +57,19 @@ def nlopt_cube_factory(objective, n_trials, n_dim, method):
nonlocal feval_count, best_so_far
fx = objective(x)
feval_count += 1
if feval_count <= n_trials:
if feval_count <= budget:
if best_so_far is None or fx < best_so_far[0]:
best_so_far = (fx, x)
return fx
opt = nlopt.opt(optim, n_dim)
opt.set_lower_bounds([0.0] * n_dim)
opt.set_upper_bounds([1.0] * n_dim)
opt = nlopt.opt(optim, size)
opt.set_lower_bounds([0.0] * size)
opt.set_upper_bounds([1.0] * size)
opt.set_min_objective(_objective)
opt.set_maxeval(n_trials)
opt.set_maxeval(budget)
try:
opt.optimize([0.5] * n_dim)
opt.optimize([0.5] * size)
except nlopt.RoundoffLimited as e:
print( # FIXME: de-uglify this!
"\033[33m",
@@ -86,106 +86,106 @@ def nlopt_cube_factory(objective, n_trials, n_dim, method):
return fopt, xopt, feval_count
def nlopt_ags_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_ags")
def nlopt_ags_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_ags")
def nlopt_crs2_lm_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_crs2_lm")
def nlopt_crs2_lm_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_crs2_lm")
def nlopt_direct_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_direct")
def nlopt_direct_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_direct")
def nlopt_direct_l_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_direct_l")
def nlopt_direct_l_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_direct_l")
def nlopt_direct_l_noscal_cube(objective, n_trials, n_dim):
def nlopt_direct_l_noscal_cube(objective, size, budget):
return nlopt_cube_factory(
objective, n_trials, n_dim, "gn_direct_l_noscal"
objective, size, budget, "gn_direct_l_noscal"
)
def nlopt_direct_lr_cube(objective, n_trials, n_dim):
def nlopt_direct_lr_cube(objective, size, budget):
return nlopt_cube_factory(
objective, n_trials, n_dim, "gn_direct_l_rand"
objective, size, budget, "gn_direct_l_rand"
)
def nlopt_direct_lr_noscal_cube(objective, n_trials, n_dim):
def nlopt_direct_lr_noscal_cube(objective, size, budget):
return nlopt_cube_factory(
objective, n_trials, n_dim, "gn_direct_l_rand_noscal"
objective, size, budget, "gn_direct_l_rand_noscal"
)
def nlopt_direct_noscal_cube(objective, n_trials, n_dim):
def nlopt_direct_noscal_cube(objective, size, budget):
return nlopt_cube_factory(
objective, n_trials, n_dim, "gn_direct_noscal"
objective, size, budget, "gn_direct_noscal"
)
def nlopt_esch_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_esch")
def nlopt_esch_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_esch")
def nlopt_isres_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_isres")
def nlopt_isres_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_isres")
def nlopt_mlsl_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_mlsl")
def nlopt_mlsl_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_mlsl")
def nlopt_mlsl_lds_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_mlsl_lds")
def nlopt_mlsl_lds_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_mlsl_lds")
def nlopt_orig_direct_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "gn_orig_direct")
def nlopt_orig_direct_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "gn_orig_direct")
def nlopt_orig_direct_l_cube(objective, n_trials, n_dim):
def nlopt_orig_direct_l_cube(objective, size, budget):
return nlopt_cube_factory(
objective, n_trials, n_dim, "gn_orig_direct_l"
objective, size, budget, "gn_orig_direct_l"
)
def nlopt_auglag_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_auglag")
def nlopt_auglag_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_auglag")
def nlopt_auglag_eq_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_auglag_eq")
def nlopt_auglag_eq_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_auglag_eq")
def nlopt_bobyqa_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_bobyqa")
def nlopt_bobyqa_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_bobyqa")
def nlopt_cobyla_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_cobyla")
def nlopt_cobyla_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_cobyla")
def nlopt_neldermead_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_neldermead")
def nlopt_neldermead_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_neldermead")
def nlopt_newuoa_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_newuoa")
def nlopt_newuoa_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_newuoa")
def nlopt_newuoa_bound_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_newuoa_bound")
def nlopt_newuoa_bound_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_newuoa_bound")
def nlopt_praxis_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_praxis")
def nlopt_praxis_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_praxis")
def nlopt_sbplx_cube(objective, n_trials, n_dim):
return nlopt_cube_factory(objective, n_trials, n_dim, "ln_sbplx")
def nlopt_sbplx_cube(objective, size, budget):
return nlopt_cube_factory(objective, size, budget, "ln_sbplx")
NLOPT_OPTIMIZERS = [


@@ -6,7 +6,7 @@ import numpy as np
def make_birect(deepness=23, *, longest=False, pruning=False):
from ..internal.birect import birect
def f(objective, n_trials, n_dim):
def f(objective, size, budget):
feval_count = 0
def _objective(x):
@@ -16,10 +16,10 @@ def make_birect(deepness=23, *, longest=False, pruning=False):
xopt, fopt = birect(
_objective,
(0,) * n_dim,
(1,) * n_dim,
(0,) * size,
(1,) * size,
min_diag=2.0 ** -float(deepness),
max_evals=n_trials,
max_evals=budget,
by_longest=longest,
pruning=pruning,
F=np.float64,
@@ -39,7 +39,7 @@ def make_soo(deepness=None, *, K=3):
assert K >= 2
from ..internal.soo import soo
def f(objective, n_trials, n_dim):
def f(objective, size, budget):
feval_count = 0
def _objective(x):
@@ -48,7 +48,7 @@ def make_soo(deepness=None, *, K=3):
return objective(x)
xopt, history = soo(
_objective, np.full(n_dim, 0.5), 0.5, n_trials, K=K, h_max=deepness
_objective, np.full(size, 0.5), 0.5, budget, K=K, h_max=deepness
)
fopt = history[-1]
return fopt, xopt, feval_count
@@ -63,11 +63,11 @@ def make_mercury(
):
from ..internal.hg import minimize as hg
def f(objective, n_trials, n_dim):
_objective = wrap_untrustworthy(objective, n_trials, bounding=bounding)
def f(objective, size, budget):
_objective = wrap_untrustworthy(objective, budget, bounding=bounding)
init = (0.5,) * n_dim
iterations = (n_trials - 1 + popsize * 2) // (popsize * 2 + 1)
init = (0.5,) * size
iterations = (budget - 1 + popsize * 2) // (popsize * 2 + 1)
center, history = hg(
_objective,
init,
@@ -81,12 +81,12 @@ def make_mercury(
# use our remaining evals on something, anything.
feval_count = _objective(check)
wasted = max(0, -(feval_count - n_trials))
wasted = max(0, -(feval_count - budget))
if wasted:
print(f"wasting {wasted} of {n_trials} evaluations")
print(f"wasting {wasted} of {budget} evaluations")
for _ in range(wasted):
prng = np.random.default_rng()
x = prng.uniform(size=n_dim)
x = prng.uniform(size=size)
fx = _objective(x)
return _objective(final)


@@ -2,34 +2,34 @@ from ..utilities import phi
import numpy as np
def another_random_cube(objective, n_trials, n_dim, seed=None):
def another_random_cube(objective, size, budget, seed=None):
prng = np.random.default_rng(seed)
fopt = None
xopt = None
for i in range(n_trials):
x = prng.uniform(size=n_dim)
for i in range(budget):
x = prng.uniform(size=size)
fx = objective(x)
if fopt is None or xopt is None or fx < fopt:
fopt = fx
xopt = x
return fopt, xopt, n_trials
return fopt, xopt, budget
def quasirandom_cube(objective, n_trials, n_dim):
def quasirandom_cube(objective, size, budget):
# http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/
magic = phi(n_dim)
alpha = np.zeros(n_dim)
for i in range(n_dim):
magic = phi(size)
alpha = np.zeros(size)
for i in range(size):
alpha[i] = pow(1 / magic, i + 1) % 1
xs = np.zeros((n_trials, n_dim))
xs = np.zeros((budget, size))  # one row per evaluation, each row a size-dimensional point
xs[0, :] = 0.5 # first point is always dead center
for i in range(1, n_trials):
for i in range(1, budget):
xs[i] = (xs[i - 1] + alpha) % 1
best_so_far = None
for i, x in zip(range(n_trials), xs):
for i, x in zip(range(budget), xs):
fx = objective(x)
if best_so_far is None or fx < best_so_far[0]:
best_so_far = (fx, x)
fopt, xopt = best_so_far
return fopt, xopt, n_trials
return fopt, xopt, budget


@@ -23,13 +23,13 @@ def make_scipy(method, *, jacobian=None, hessian=None):
# "trust-krylov",
), method
def f(objective, n_trials, n_dim):
def f(objective, size, budget):
prng = np.random.default_rng()
_objective = wrap_untrustworthy(
objective, n_trials, raising=True, bounding="sine"
objective, budget, raising=True, bounding="sine"
)
x0 = np.full(n_dim, 0.5)
bounds = scopt.Bounds([0.0] * n_dim, [1.0] * n_dim)
x0 = np.full(size, 0.5)
bounds = scopt.Bounds([0.0] * size, [1.0] * size)
# jac = "cs" if method == "dogleg" else None # doesn't work
jac = "2-point" if jacobian is None else jacobian
hess = "2-point" if hessian is None else hessian
@@ -37,16 +37,16 @@ def make_scipy(method, *, jacobian=None, hessian=None):
# can be used to approximate the Hessian. Available quasi-Newton methods
# implementing this interface are: BFGS; SR1.
tol = None # 0.0
# options = dict(maxfun=n_trials) if method == "TNC" else dict(maxiter=n_trials)
# options = dict(maxfun=budget) if method == "TNC" else dict(maxiter=budget)
if method in ("BFGS", "CG", "COBYLA", "SLSQP", "trust-constr"):
options = dict(maxiter=n_trials)
options = dict(maxiter=budget)
elif method in ("Nelder-Mead", "Powell"):
options = dict(maxfev=n_trials, maxiter=n_trials)
options = dict(maxfev=budget, maxiter=budget)
elif method in ("L-BFGS-B", "TNC"):
options = dict(maxfun=n_trials, maxiter=n_trials)
options = dict(maxfun=budget, maxiter=budget)
else:
options = dict(maxfun=n_trials, maxfev=n_trials, maxiter=n_trials)
options = dict(maxfun=budget, maxfev=budget, maxiter=budget)
# silence some warnings:
if method in ("Nelder-Mead", "Powell"):
@@ -67,12 +67,12 @@ def make_scipy(method, *, jacobian=None, hessian=None):
def check_evals():
evals = _objective(check)
checks.append(evals)
return evals < n_trials
return evals < budget
first_try = True
while check_evals():
if not first_try:
x0 = prng.uniform(size=n_dim)
x0 = prng.uniform(size=size)
try:
res = scopt.minimize(
@@ -91,7 +91,7 @@ def make_scipy(method, *, jacobian=None, hessian=None):
# well, this is pointless.
fopt, xopt, feval_count = res.fun, res.x, res.nfev
# print("success:", res.success)
# if not res.success and res.nfev < n_trials // 2:
# if not res.success and res.nfev < budget // 2:
shut_up = (
method == "SLSQP"
and res.message == "Inequality constraints incompatible"
@@ -99,7 +99,7 @@ def make_scipy(method, *, jacobian=None, hessian=None):
res.message
== "The maximum number of function evaluations is exceeded."
)
if not shut_up and not res.success and res.nfev < n_trials // 3:
if not shut_up and not res.success and res.nfev < budget // 3:
print("", method, res, "", sep="\n")
first_try = False
@@ -126,7 +126,7 @@ def make_scipy(method, *, jacobian=None, hessian=None):
return f
def scipy_basinhopping_cube(objective, n_trials, n_dim):
def scipy_basinhopping_cube(objective, size, budget):
progress = 1e-2 # TODO: make configurable?
# NOTE: could also callbacks to extract solutions instead of wrapping objective functions?
@@ -136,14 +136,14 @@ def scipy_basinhopping_cube(objective, n_trials, n_dim):
def dummy_minimizer(fun, x0, args, **options):
return scopt.OptimizeResult(x=x0, fun=fun(x0), success=True, nfev=1)
x0 = np.full(n_dim, 0.5)
x0 = np.full(size, 0.5)
res = scopt.basinhopping(
objective,
x0,
minimizer_kwargs=dict(method=dummy_minimizer),
accept_test=accept_bounded,
disp=False,
niter=n_trials,
niter=budget,
# TODO: try without any progress vars at all.
T=progress,
stepsize=progress / 2,
@@ -153,13 +153,13 @@ def scipy_basinhopping_cube(objective, n_trials, n_dim):
return fopt, xopt, feval_count
def scipy_direct_cube(objective, n_trials, n_dim):
bounds = scopt.Bounds([0.0] * n_dim, [1.0] * n_dim)
def scipy_direct_cube(objective, size, budget):
bounds = scopt.Bounds([0.0] * size, [1.0] * size)
# TODO: try different values of eps. default 0.0001
res = scopt.direct(
objective,
bounds=bounds,
maxfun=n_trials,
maxfun=budget,
maxiter=1_000_000,
vol_tol=0,
)
@@ -168,13 +168,13 @@ def scipy_direct_cube(objective, n_trials, n_dim):
return fopt, xopt, feval_count
def scipy_direct_l_cube(objective, n_trials, n_dim):
bounds = scopt.Bounds([0.0] * n_dim, [1.0] * n_dim)
def scipy_direct_l_cube(objective, size, budget):
bounds = scopt.Bounds([0.0] * size, [1.0] * size)
# TODO: try different values of eps. default 0.0001
res = scopt.direct(
objective,
bounds=bounds,
maxfun=n_trials,
maxfun=budget,
maxiter=1_000_000,
vol_tol=0,
locally_biased=True,
@@ -198,13 +198,13 @@ def make_shgo(method="cobyla", init="large", it=1, mei=False, li=False, ftol=12)
# https://docs.scipy.org/doc/scipy/reference/optimize.minimize-cobyla.html
pass
def f(objective, n_trials, n_dim):
def f(objective, size, budget):
_objective = wrap_untrustworthy(
objective, n_trials, bounding="clip", raising=True
objective, budget, bounding="clip", raising=True
)
bounds = Bounds([0.0] * n_dim, [1.0] * n_dim)
bounds = Bounds([0.0] * size, [1.0] * size)
npt = dict(
large=(n_dim + 1) * (n_dim + 2) // 2, medium=2 * n_dim + 1, small=n_dim + 1
large=(size + 1) * (size + 2) // 2, medium=2 * size + 1, small=size + 1
)[init]
_method = method if method != "cobyqa" else method # TODO: handle cobyqa case.
try:
@@ -214,7 +214,7 @@ def make_shgo(method="cobyla", init="large", it=1, mei=False, li=False, ftol=12)
n=npt,
iters=it,
minimizer_kwargs=dict(method=_method, ftol=10**-ftol),
options=dict(maxfev=n_trials, minimize_every_iter=mei, local_iters=li),
options=dict(maxfev=budget, minimize_every_iter=mei, local_iters=li),
sampling_method="simplicial",
)
except ExhaustedTrialsError:


@@ -28,8 +28,8 @@ def flipit(transformations, flippy):
transformations[flip_dim] = (mul, add)
def make_objective(problem, n_dim, *, fix_stuff=0):
obj = problem(n_dim)
def make_objective(problem, size, *, fix_stuff=0):
obj = problem(size)
name = problem.__name__
flippy = 0 # when positive, helps removes positive correlations in solutions
trippy = None # when not None, moves solution away from center: (dim, dir)
@@ -41,7 +41,7 @@ def make_objective(problem, n_dim, *, fix_stuff=0):
elif name == "Csendes" or name == "Infinity":
# this problem is weird... let's avoid division by zero, okay?
# these problems are duplicates of each other. weird.
replacement = n_dim * (2 + np.sin(1))
replacement = size * (2 + np.sin(1))
_fun = obj.fun
obj.fun = lambda x: replacement if np.any(x == 0.0) else _fun(x)
elif name == "Keane":
@@ -73,18 +73,18 @@ def make_objective(problem, n_dim, *, fix_stuff=0):
obj.fun = lambda x: _fun(x + adjust)
if fix_stuff >= 1:
stuff = {2: too_positive_2, 3: too_positive_3, 4: too_positive_4}[n_dim]
stuff = {2: too_positive_2, 3: too_positive_3, 4: too_positive_4}[size]
if name.lower() in stuff:
# too positively correlated, do some evil.
ind = stuff.index(name.lower())
flippy = ind % n_dim + 1 # uniformly select a dimension to "flip"
flippy = ind % size + 1 # uniformly select a dimension to "flip"
if fix_stuff >= 2:
stuff = {2: too_centered_2, 3: too_centered_3, 4: too_centered_4}[n_dim]
stuff = {2: too_centered_2, 3: too_centered_3, 4: too_centered_4}[size]
if name.lower() in stuff:
# uniformly select offsets to "trip".
ind = stuff.index(name.lower())
trippy = (ind % n_dim, ind // n_dim % n_dim) # (dim, dir)
trippy = (ind % size, ind // size % size) # (dim, dir)
transformations = [make_transform(lo, hi) for lo, hi in obj.bounds]
@@ -108,15 +108,15 @@ def make_objective(problem, n_dim, *, fix_stuff=0):
return objective
def make_objectives(n_dim, n_trials=None, fix_stuff=0):
problems = all_problems[n_dim]
return [make_objective(problem, n_dim, fix_stuff=fix_stuff) for problem in problems]
def make_objectives(size, budget=None, fix_stuff=0):
problems = all_problems[size]
return [make_objective(problem, size, fix_stuff=fix_stuff) for problem in problems]
def find_objective(query, n_dim=None):
def find_objective(query, size=None):
results = []
for p_dim, problems in all_problems.items():
if n_dim is not None and p_dim != n_dim:
if size is not None and p_dim != size:
continue
for problem in problems:
if problem.__name__.lower() == query.lower():
@@ -311,14 +311,14 @@ def main(argv, display=True):
percents = dict(frugal_percent=1.0, greedy_percent=2.0)
which = parties[argv[1]] if len(argv) > 1 else parties["standard"]
n_dim = int(argv[2]) if len(argv) > 2 else -2
n_trials = int(argv[3]) if len(argv) > 3 else fib(abs(n_dim) + 4) * 10
size = int(argv[2]) if len(argv) > 2 else -2
budget = int(argv[3]) if len(argv) > 3 else fib(abs(size) + 4) * 10
place_names = ("1st", "2nd", "3rd", "4th")
assert n_dim < 0, "unsupported in this version"
n_dim = abs(n_dim)
assert size < 0, "unsupported in this version"
size = abs(size)
place_scores = (5, 3, 2, 1)
objectives = GO_BENCHMARKS[n_dim] # * multiple
objectives = GO_BENCHMARKS[size] # * multiple
optimizers = list(which) # copy
before = len(optimizers)
@@ -349,8 +349,8 @@ def main(argv, display=True):
wrapped = COWrap(
objective,
optimizer=optimizer,
n_trials=n_trials,
n_dim=n_dim,
budget=budget,
size=size,
**percents,
)
else:
@@ -380,7 +380,7 @@ def main(argv, display=True):
note(
f"Using {opt_name} to optimize {obj_realname} ({obj_name}) [{run}] ..."
)
_ = optimizer(wrapped, n_trials=n_trials, n_dim=n_dim)
_ = optimizer(wrapped, size=size, budget=budget)
fopt, xopt = wrapped.finish()
result = (fopt, opt_name, wrapped.history)
results.setdefault(obj_name, []).append(result)
@@ -436,7 +436,7 @@ def main(argv, display=True):
f"\n\033[1m{blah} scoring optimizers:\033[m"
f" (awards={place_scores})"
f" (obj={len(objectives)}, opt={len(optimizers)})"
f" (dims={n_dim}, evals={n_trials})"
f" (dims={size}, evals={budget})"
)
for opt_name, opt_point in sorted(points.items(), key=lambda t: -t[1]):
# place = place_names[i] if i < len(place_names) else " "
@@ -464,7 +464,7 @@ def main(argv, display=True):
f"\n\033[1malternatively scored optimizers:\033[m"
f" (awards={place_scores})"
f" (obj={len(objectives)}, opt={len(optimizers)})"
f" (dims={n_dim}, evals={n_trials})"
f" (dims={size}, evals={budget})"
)
for opt_name, opt_score in sorted(more_scores.items(), key=lambda t: -t[1]):
# if opt_score < 1: continue


@@ -10,7 +10,7 @@ class ExhaustedTrialsError(Exception):
def wrap_untrustworthy(
objective, n_trials, *, raising=False, bounding=None, softplus=False, eps=0.0
objective, budget, *, raising=False, bounding=None, softplus=False, eps=0.0
):
# also handles bounding now, so it may be used for other purposes as well. whoops.
feval_count = 0
@@ -24,13 +24,13 @@ def wrap_untrustworthy(
assert best_so_far is not None
fopt, xopt = best_so_far
return fopt, xopt, feval_count
if raising and feval_count >= n_trials:
if raising and feval_count >= budget:
raise ExhaustedTrialsError()
if bounding is not None:
x = do_bounding(x, bounding)
fx = objective(x)
feval_count += 1
if n_trials is None or feval_count <= n_trials:
if budget is None or feval_count <= budget:
if best_so_far is None or fx < best_so_far[0]:
best_so_far = (fx, x.copy())
return scalar_softplus(fx) + eps if softplus else fx


@@ -105,7 +105,7 @@ def needs_rerun(key, value):
if not value["history"]: # not sure what happened here
return True
n_dim = len(value["xopt"])
size = len(value["xopt"])
ng = []
kd = decode_key(key)


@@ -49,7 +49,7 @@ def do_bounding(x, method="clip"):
# FIXME: we need a way to determine the previous (or center) x somehow?
from bitten_snes import _project_with
x = _project_with(x, old, np.array([[0.0, 1.0] * n_dim]), clipping=0.5)
x = _project_with(x, old, np.array([[0.0, 1.0] * size]), clipping=0.5)
elif method == "tria":
hp = np.pi / 2
x = np.abs(np.arcsin(np.sin(x * hp)) / hp)
@@ -66,7 +66,7 @@ class OWrap:
def __init__(
self,
objective,
n_trials,
budget,
frugal_percent=1.0,
greedy_percent=2.0,
history_frequency=10,
@@ -75,7 +75,7 @@ class OWrap:
self.best_so_far = None
self.warning = None
self.objective = objective
self.n_trials = n_trials
self.budget = budget
self.__name__ = objective.__name__ # for evolopy
self.frugal_percent = float(frugal_percent)
self.greedy_percent = float(greedy_percent)
@@ -114,7 +114,7 @@ class OWrap:
assert np.isfinite(fx), "f(x) is not finite (NaN or Inf or -Inf)"
self.feval_count += 1
if self.feval_count <= self.n_trials:
if self.feval_count <= self.budget:
if self.best_so_far is None or fx < self.best_so_far[0]:
self.best_so_far = (fx, x)
@@ -129,11 +129,11 @@ class OWrap:
m33(f"{optimizer_name} did not abide to bounds")
if self.warning == "finite":
m33(f"{optimizer_name} passed a non-finite value")
if self.feval_count >= self.n_trials * self.greedy_percent:
m33(f"{optimizer_name} got greedy ({self.feval_count}>{self.n_trials})")
# if self.feval_count <= self.n_trials * 0.95:
if self.feval_count < self.n_trials * self.frugal_percent:
m34(f"{optimizer_name} was frugal ({self.feval_count}<{self.n_trials})")
if self.feval_count >= self.budget * self.greedy_percent:
m33(f"{optimizer_name} got greedy ({self.feval_count}>{self.budget})")
# if self.feval_count <= self.budget * 0.95:
if self.feval_count < self.budget * self.frugal_percent:
m34(f"{optimizer_name} was frugal ({self.feval_count}<{self.budget})")
return self.best_so_far
@property
@@ -146,11 +146,11 @@ class OWrap:
class COWrap:
def __init__(self, objective, *, optimizer, n_trials, n_dim, **kwargs):
def __init__(self, objective, *, optimizer, size, budget, **kwargs):
self._objective = objective
self.optimizer = optimizer
self.n_trials = n_trials
self.n_dim = n_dim
self.budget = budget
self.size = size
self.kwargs = kwargs
self._dirty = False
self._history = None
@@ -193,7 +193,7 @@ class COWrap:
@property
def cache_name(self):
opt_name = self.optimizer.__name__
return f"COWrap_d{self.n_dim:02}_n{self.n_trials:03}_{opt_name}"
return f"COWrap_d{self.size:02}_n{self.budget:03}_{opt_name}"
@property
def history(self):
@@ -242,7 +242,7 @@ class COWrap:
def reset_objective(self):
self._dirty = False
self.ow = OWrap(self._objective, self.n_trials, **self.kwargs)
self.ow = OWrap(self._objective, self.budget, **self.kwargs)
self._check_cache()
def _check_cache(self):
@@ -282,7 +282,7 @@ class COWrap:
# fopt, xopt = self.ow.best_so_far
fopt, xopt = self.ow.finish(self.optimizer.__name__)
expected_length = self.n_trials // self.ow.history_frequency
expected_length = self.budget // self.ow.history_frequency
history = [float(fval) for fval in self.ow.history]
history += [fopt] * (expected_length - len(history))