Skip to content

Commit

Permalink
Merge pull request #314 from novonordisk-research/develop
Browse files Browse the repository at this point in the history
Update D_optimal_branch
  • Loading branch information
RuneChristensen-NN authored Jan 16, 2025
2 parents bbb42c7 + ca6d80b commit 1ba800c
Show file tree
Hide file tree
Showing 4 changed files with 60 additions and 33 deletions.
20 changes: 19 additions & 1 deletion ProcessOptimizer/tests/test_utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from re import M
import pytest
import tempfile
import warnings

from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
Expand All @@ -10,6 +11,7 @@
from ProcessOptimizer import load
from ProcessOptimizer import dump
from ProcessOptimizer import expected_minimum
from ProcessOptimizer import Optimizer
from ProcessOptimizer.model_systems.benchmarks import bench1
from ProcessOptimizer.model_systems.benchmarks import bench3
from ProcessOptimizer.learning import (
Expand All @@ -26,7 +28,7 @@
dimensions_aslist,
)
from ProcessOptimizer.space import normalize_dimensions
from ProcessOptimizer.space.constraints import SumEquals
from ProcessOptimizer.space.constraints import Single, SumEquals


def check_optimization_results_equality(res_1, res_2):
Expand Down Expand Up @@ -132,6 +134,22 @@ def test_expected_minimum_min():
assert f_min == f_min2


@pytest.mark.fast_test
def test_expected_minimum_other_constraint():
    """Verify that expected_minimum() emits exactly one warning when the
    optimizer carries a constraint type (here, Single) that the function
    does not explicitly respect.
    """
    optimizer = Optimizer([[0, 1], [0, 1]], n_initial_points=2)
    for point, score in ([0, 0], 1), ([1, 1], 0):
        optimizer.tell(point, score)
    optimizer.set_constraints([Single(1, 1, "integer")])
    result = optimizer.get_result()
    # NOTE: this test is not thread-safe. If it starts failing intermittently,
    # it may be caused by another test running in parallel raising a warning.
    # That could be avoided by mocking the warnings module, but as we don't
    # have separate test requirements, we don't want to add a dependency on
    # the mock module.
    with warnings.catch_warnings(record=True) as caught:
        expected_minimum(result, random_state=1)
    assert len(caught) == 1


@pytest.mark.fast_test
def test_expected_minimum_max():
res = gp_minimize(
Expand Down
24 changes: 13 additions & 11 deletions ProcessOptimizer/utils/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,17 +273,19 @@ def func(x):

xs = [res.x]
if n_random_starts > 0:
if hasattr(res.constraints, "sum_equals"):
# If we have a SumEquals constraint, create samples that respect it
xs = []
xs.extend(
res.constraints.sumequal_sampling(
n_samples=n_random_starts, random_state=random_state
)
)
if res.constraints:
if len(res.constraints.sum_equals) > 0:
# If we have a SumEquals constraint, create samples that respect it
xs = []
xs.extend(
res.constraints.sumequal_sampling(
n_samples=n_random_starts, random_state=random_state
)
)
else:
warn("Optimizer has constraints which expected_minimum() does not necessarily respect.")

else:
if res.constraints:
warn.warning("Optimizer has constraints which expected_minimum() does not necessarily respect.")
# For all other cases (and constraints) we use random sampling
xs.extend(res.space.rvs(n_random_starts, random_state=random_state))
xs = res.space.transform(xs)
Expand All @@ -293,7 +295,7 @@ def func(x):

cons = None
# Prepare a linear constraint, if applicable
if hasattr(res.constraints, "sum_equals"):
if res.constraints and len(res.constraints.sum_equals) > 0:
A = np.zeros((1, res.space.transformed_n_dims))
value = res.constraints.sum_equals[0].value
for dim in res.constraints.sum_equals[0].dimensions:
Expand Down
14 changes: 9 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,14 +64,18 @@ Below is an image of the Booth function.

Suppose you are given the task of minimizing the function on the domain only using empirical observations and without any analytical function. <br/>
Working with the ProcessOptimizer package you simply define the `Space` and create an `Optimizer` object.<br/>
The `Space` object takes a list of dimensions which can either be `Real`, `Integer` or `Categorical`. `Real` dimensions are defined by the maximum and minimum values.<br/>
The `Optimizer` object initialized below uses GP (Gaussian Process). This means that after each step a Gaussian Process is fitted to the observations, which is used as a posterior distribution. Combined with an acquisition function, the next point that should be explored can be determined. Notice that this process only takes place once n_initial_points of initial data has been acquired. In this case `LHS = True` (Latin hypercube sampling) has been used as the initial sampling strategy for the first 6 points.
The `Space` object takes a list of dimensions which can either be `Real`, `Integer` or `Categorical`. `Real` dimensions are defined by the maximum and minimum values. Another way to initialize a `Real` dimension is to pass a list of two `float`s to the `Space` constructor.<br/>
```python
import ProcessOptimizer as po

SPACE = po.Space([Real(0,5), Real(0,5)])

opt = po.Optimizer(SPACE, base_estimator = "GP", n_initial_points = 6, lhs = True)
SPACE = po.Space([[0.0, 5.0], [0.0, 5.0]])
print(SPACE)
>>>Space([Real(low=0.0, high=5.0, prior='uniform', transform='identity'),
Real(low=0.0, high=5.0, prior='uniform', transform='identity')])
```
The `Optimizer` object initialized below uses `"GP"` (Gaussian Process). This means that after each step a Gaussian Process is fitted to the observations, which is used as a posterior distribution. Combined with an acquisition function, the next point that should be explored can be determined. Notice that this process only takes place once n_initial_points of initial data has been acquired.
```python
opt = po.Optimizer(SPACE, base_estimator = "GP", n_initial_points = 6)
```
The optimizer can now be used in steps by calling the `.ask()` function, evaluating the function at the given point, and using `.tell()` to report the result to the `Optimizer`. In practice it would work like this. First, ask the optimizer for the next point at which to perform an experiment:
```python
Expand Down
35 changes: 19 additions & 16 deletions examples/Length scale bounds.ipynb

Large diffs are not rendered by default.

0 comments on commit 1ba800c

Please sign in to comment.