Release 0.2.2 (#19)
* fix empty subspace, fixes #10

* don't require dim for HessianApproximation init, fixes #5

* add more dimension checks

* fix remove slack in subproblem

* add full truncation strategy

* add multistart test

* add tests for nonfinite returns

* add tests for dimension checks

* fix indef testcase

* extend subproblem tests

* add tests for hessian approximation dimension checks

* version bump

* fix tests

* MOAR TESTS

* add test

* fix flake

* fix check

* add refinement strategy

* turn convergence info into warning, remove epsilon check

* disable refinement for multistart

* fix flake

* fix doc

* fix flake

* remove option to provide iminbr and rename

* remove unused import

* remove small delta exit flag

* add typehints, always add gradient if tr_step.alpha < 1, add single reflect

* fixup typehints

* fixup indexing error

* fix convergence message formatting

* change maxiter/maxtime to warnings

* update default options after benchmark

* bump version

* refactor trust region

* fix import

* fix flake

* fix nan fvaldiff (#17)

* create one logger instance per optimization to avoid multiprocessing … (#16)

* create one logger instance per optimization to avoid multiprocessing issues, fixes #13

* fixup

* fix flake

* fixup

* fix doc

* fixup logging

* fixup logging

* use lstsq when computing newton direction for singular matrices (#18)

* use lstsq when computing newton direction for singular matrices (see the sketch below)

* fix flake py version

* version bump
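The lstsq change (#18) above swaps a direct linear solve for a least-squares solve so that a Newton direction can still be computed when the (approximate) Hessian is singular. A minimal sketch of the idea in numpy; the helper name newton_direction is illustrative, not the fides-internal function:

import numpy as np

def newton_direction(grad: np.ndarray, hess: np.ndarray) -> np.ndarray:
    """Solve hess @ s = -grad for the Newton step s, falling back to a
    minimum-norm least-squares solution when hess is singular."""
    try:
        # fast path: Hessian is nonsingular
        return np.linalg.solve(hess, -grad)
    except np.linalg.LinAlgError:
        # lstsq also handles singular matrices
        s, *_ = np.linalg.lstsq(hess, -grad, rcond=None)
        return s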
FFroehlich authored Jan 15, 2021
1 parent c753ba8 commit 276ac96
Showing 10 changed files with 711 additions and 637 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/flake.yaml
@@ -9,7 +9,7 @@ jobs:
    - name: Setup Python
      uses: actions/setup-python@v1
      with:
-       python-version: 3.8.6
+       python-version: 3.9.1
        architecture: x64

    - uses: actions/checkout@v1
2 changes: 1 addition & 1 deletion fides/__init__.py
@@ -7,6 +7,6 @@
# flake8: noqa
from .minimize import Optimizer
from .hessian_approximation import SR1, BFGS, DFP, HessianApproximation
-from .logging import logger
+from .logging import create_logger
from .version import __version__
from .constants import Options, SubSpaceDim, StepBackStrategy, ExitFlag
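Since __init__.py defines the public API, a minimal end-to-end sketch of how this release is driven may help orient the diffs below. The Rosenbrock objective, the bounds, and the hessian_update keyword are illustrative assumptions, not part of this commit:

import logging
import numpy as np
from fides import Optimizer, BFGS

def rosenbrock(x: np.ndarray):
    # fides objectives return the function value and its gradient
    fval = 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
    grad = np.array([
        -400 * x[0] * (x[1] - x[0] ** 2) - 2 * (1 - x[0]),
        200 * (x[1] - x[0] ** 2),
    ])
    return fval, grad

opt = Optimizer(
    rosenbrock,
    ub=np.full(2, 2.0),
    lb=np.full(2, -2.0),
    verbose=logging.INFO,
    hessian_update=BFGS(),  # assumed keyword for the BFGS approximation
)
opt.minimize(np.zeros(2))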
34 changes: 27 additions & 7 deletions fides/logging.py
@@ -10,10 +10,30 @@

import logging

-logger = logging.getLogger('fides')
-ch = logging.StreamHandler()
-formatter = logging.Formatter(
-    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-)
-ch.setFormatter(formatter)
-logger.addHandler(ch)
+logger_count = 0
+
+
+def create_logger(level: int) -> logging.Logger:
+    """
+    Creates a logger instance. To avoid unnecessary locks during
+    multithreading, a different logger instance should be created for
+    every optimization run.
+
+    :param level:
+        logging level
+    :return:
+        logger instance
+    """
+    global logger_count
+    logger_count += 1
+    # add logger count to differentiate between different fides
+    # optimization instances and avoid deadlocks
+    logger = logging.getLogger(f'fides_{logger_count}')
+    ch = logging.StreamHandler()
+    formatter = logging.Formatter(
+        '%(asctime)s - fides - %(levelname)s - %(message)s'
+    )
+    ch.setFormatter(formatter)
+    logger.addHandler(ch)
+    logger.setLevel(level)
+    return logger
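Each call now returns a distinct, numbered logger with its own stream handler, so concurrent optimizations no longer contend for a shared 'fides' logger. A short sketch of the resulting behavior:

import logging
from fides.logging import create_logger

# two optimizer runs get two independent logger instances
log_a = create_logger(logging.INFO)   # e.g. named 'fides_1'
log_b = create_logger(logging.DEBUG)  # e.g. named 'fides_2'
assert log_a is not log_b

log_a.info('run A: starting optimization')
log_b.debug('run B: starting optimization')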
97 changes: 54 additions & 43 deletions fides/minimize.py
@@ -13,9 +13,9 @@
from .trust_region import trust_region, Step
from .hessian_approximation import HessianApproximation
from .constants import Options, ExitFlag, DEFAULT_OPTIONS
-from .logging import logger
+from .logging import create_logger

-from typing import Callable, Dict, Optional, Tuple
+from typing import Callable, Dict, Optional, Tuple, Union


class Optimizer:
@@ -40,7 +40,9 @@ class Optimizer:
    :ivar starttime: Time at which optimization was started
    :ivar iteration: Current iteration
    :ivar converged: Flag indicating whether optimization has converged
-    :ivar exitflag:
+    :ivar exitflag: ExitFlag to indicate reason for termination
+    :ivar verbose: Verbosity level for logging
+    :ivar logger: logger instance
    """
    def __init__(self, fun: Callable,
                 ub: np.ndarray,
@@ -112,7 +114,8 @@ def __init__(self, fun: Callable,
        self.iteration: int = 0
        self.converged: bool = False
        self.exitflag: ExitFlag = ExitFlag.DID_NOT_RUN
-        logger.setLevel(verbose)
+        self.verbose: int = verbose
+        self.logger: Union[logging.Logger, None] = None

    def _reset(self):
        self.starttime = time.time()
@@ -121,6 +124,7 @@ def _reset(self):
        self.delta: float = self.get_option(Options.DELTA_INIT)
        self.delta_iter: float = self.delta
        self.fval_min = np.inf
+        self.logger = create_logger(self.verbose)

    def minimize(self, x0: np.ndarray):
        """
@@ -232,6 +236,7 @@ def minimize(self, x0: np.ndarray):
            subspace_dim=self.get_option(Options.SUBSPACE_DIM),
            stepback_strategy=self.get_option(Options.STEPBACK_STRAT),
            refine_stepback=self.get_option(Options.REFINE_STEPBACK),
+            logger=self.logger
        )

        x_new = self.x + step.s + step.s0
@@ -385,7 +390,7 @@ def check_convergence(self, fval, x, grad) -> None:

        if np.isclose(fval, self.fval, atol=fatol, rtol=frtol):
            self.exitflag = ExitFlag.FTOL
-            logger.warning(
+            self.logger.warning(
                'Stopping as function difference '
                f'{np.abs(self.fval - fval):.2E} was smaller than specified '
                f'tolerances (atol={fatol:.2E}, rtol={frtol:.2E})'
@@ -394,23 +399,23 @@ def check_convergence(self, fval, x, grad) -> None:

        elif np.isclose(x, self.x, atol=xatol, rtol=xrtol).all():
            self.exitflag = ExitFlag.XTOL
-            logger.warning(
+            self.logger.warning(
                'Stopping as step was smaller than specified tolerances ('
                f'atol={xatol:.2E}, rtol={xrtol:.2E})'
            )
            converged = True

        elif gnorm <= gatol:
            self.exitflag = ExitFlag.GTOL
-            logger.warning(
+            self.logger.warning(
                'Stopping as gradient norm satisfies absolute convergence '
                f'criteria: {gnorm:.2E} < {gatol:.2E}'
            )
            converged = True

        elif gnorm <= grtol * self.fval:
            self.exitflag = ExitFlag.GTOL
-            logger.warning(
+            self.logger.warning(
                'Stopping as gradient norm satisfies relative convergence '
                f'criteria: {gnorm:.2E} < {grtol:.2E} * {self.fval:.2E}'
            )
@@ -433,7 +438,7 @@ def check_continue(self) -> bool:
        maxiter = self.get_option(Options.MAXITER)
        if self.iteration >= maxiter:
            self.exitflag = ExitFlag.MAXITER
-            logger.warning(
+            self.logger.warning(
                f'Stopping as maximum number of iterations {maxiter} was '
                f'exceeded.'
            )
@@ -445,7 +450,7 @@ def check_continue(self) -> bool:
        avg_iter_time = time_elapsed/(self.iteration + (self.iteration == 0))
        if time_remaining < avg_iter_time:
            self.exitflag = ExitFlag.MAXTIME
-            logger.warning(
+            self.logger.warning(
                f'Stopping as maximum runtime {maxtime} is expected to be '
                f'exceeded in the next iteration.'
            )
@@ -511,22 +516,24 @@ def log_step(self, accepted: bool, step: Step, fval: float):
            for count in [step.reflection_count, step.truncation_count]
        ]

-        if np.isnan(fval):
+        if not np.isfinite(fval):
            fval = self.fval
-        logger.info(f'{" " * iterspaces}{self.iteration}'
-                    f' | {fval if accepted else self.fval:.3E}'
-                    f' | {(fval - self.fval)*accepted:+.2E}'
-                    f' | {step.qpval:+.2E}'
-                    f' | {self.tr_ratio:+.2E}'
-                    f' | {self.delta_iter:.2E}'
-                    f' | {norm(self.grad):.2E}'
-                    f' | {normdx:.2E}'
-                    f' | {step.theta:.2E}'
-                    f' | {step.alpha:.2E}'
-                    f' | {step.type}{" " * steptypespaces}'
-                    f' | {" " * reflspaces}{step.reflection_count}'
-                    f' | {" " * trunspaces}{step.truncation_count}'
-                    f' | {int(accepted)}')
+        self.logger.info(
+            f'{" " * iterspaces}{self.iteration}'
+            f' | {fval if accepted else self.fval:+.3E}'
+            f' | {(fval - self.fval):+.2E}'
+            f' | {step.qpval:+.2E}'
+            f' | {self.tr_ratio:+.2E}'
+            f' | {self.delta_iter:.2E}'
+            f' | {norm(self.grad):.2E}'
+            f' | {normdx:.2E}'
+            f' | {step.theta:.2E}'
+            f' | {step.alpha:.2E}'
+            f' | {step.type}{" " * steptypespaces}'
+            f' | {" " * reflspaces}{step.reflection_count}'
+            f' | {" " * trunspaces}{step.truncation_count}'
+            f' | {int(accepted)}'
+        )

    def log_step_initial(self):
        """

        """
        iterspaces = max(len(str(self.get_option(Options.MAXITER))), 5) - \
            len(str(self.iteration))
-        logger.info(f'{" " * iterspaces}{self.iteration}'
-                    f' | {self.fval:.3E}'
-                    f' | NaN '
-                    f' | NaN '
-                    f' | NaN '
-                    f' | {self.delta:.2E}'
-                    f' | {norm(self.grad):.2E}'
-                    f' | NaN '
-                    f' | NaN '
-                    f' | NaN '
-                    f' | NaN '
-                    f' | NaN '
-                    f' | NaN '
-                    f' | {int(np.isfinite(self.fval))}')
+        self.logger.info(
+            f'{" " * iterspaces}{self.iteration}'
+            f' | {self.fval:+.3E}'
+            f' | NaN '
+            f' | NaN '
+            f' | NaN '
+            f' | {self.delta:.2E}'
+            f' | {norm(self.grad):.2E}'
+            f' | NaN '
+            f' | NaN '
+            f' | NaN '
+            f' | NaN '
+            f' | NaN '
+            f' | NaN '
+            f' | {int(np.isfinite(self.fval))}'
+        )

    def log_header(self):
        """

        """
        iterspaces = len(str(self.get_option(Options.MAXITER))) - 5

-        logger.info(f'{" " * iterspaces} iter '
-                    f'| fval | fval diff | pred diff | tr ratio '
-                    f'| delta | ||g|| | ||step|| | theta | alpha '
-                    f'| step | refl | trun | accept')
+        self.logger.info(
+            f'{" " * iterspaces} iter '
+            f'| fval | fval diff | pred diff | tr ratio '
+            f'| delta | ||g|| | ||step|| | theta | alpha '
+            f'| step | refl | trun | accept'
+        )

    def check_finite(self,
                     grad: Optional[np.ndarray] = None,
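For orientation, the convergence tests that switched to the per-instance logger above reduce to three comparisons. A condensed, standalone sketch of that logic; the function and argument names are illustrative, the real checks live in Optimizer.check_convergence:

import numpy as np
from numpy.linalg import norm

def has_converged(fval_new: float, fval: float,
                  x_new: np.ndarray, x: np.ndarray, grad: np.ndarray,
                  fatol: float, frtol: float, xatol: float, xrtol: float,
                  gatol: float, grtol: float) -> bool:
    """Condensed version of the checks in Optimizer.check_convergence."""
    gnorm = norm(grad)
    if np.isclose(fval_new, fval, atol=fatol, rtol=frtol):
        return True  # FTOL: change in function value below tolerance
    if np.isclose(x_new, x, atol=xatol, rtol=xrtol).all():
        return True  # XTOL: step smaller than tolerance
    if gnorm <= gatol or gnorm <= grtol * fval:
        return True  # GTOL: gradient norm below absolute/relative tolerance
    return False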