diff --git a/docs/source/objectives.rst b/docs/source/objectives.rst
index da8e28a..58e89f9 100644
--- a/docs/source/objectives.rst
+++ b/docs/source/objectives.rst
@@ -21,7 +21,7 @@ We can construct an objective to maximize some output with

     objective = Objective(name="some_output", target="max")  # or "min"

-Given some data, the ``Objective`` object will try to model the quantity ``y1`` and find the corresponding inputs that maximize it.
+Given some data, the ``Objective`` object will try to model the quantity ``some_output`` and find the corresponding inputs that maximize it.
 We can also apply a transform to the value to make it more Gaussian when we fit to it.
 This is especially useful when the quantity tends to be non-Gaussian, like with a beam flux.

@@ -29,9 +29,9 @@ This is especially useful when the quantity tends to be non-Gaussian, like with

     from blop import Objective

-    objective = Objective(name="some_output", target="max", transform="log")
+    objective_with_log_transform = Objective(name="some_output", target="max", transform="log")

-    objective = Objective(name="some_output", target="max", transform="arctanh")
+    objective_with_arctanh_transform = Objective(name="some_output", target="max", transform="arctanh")

 .. code-block:: python

@@ -62,13 +62,13 @@ This is useful in optimization problems like
 .. code-block:: python

     # ensure the color is approximately green
-    objective = Objective(name="peak_wavelength", target=(520, 530), units="nm")
+    color_objective = Objective(name="peak_wavelength", target=(520, 530), units="nm")

     # ensure the beam is smaller than 10 microns
-    objective = Objective(name="beam_width", target=(-np.inf, 10), units="um", transform="log")
+    width_objective = Objective(name="beam_width", target=(-np.inf, 10), units="um", transform="log")

     # ensure our flux is at least some value
-    objective = Objective(name="beam_flux", target=(1.0, np.inf), transform="log")
+    flux_objective = Objective(name="beam_flux", target=(1.0, np.inf), transform="log")

diff --git a/src/blop/agent.py b/src/blop/agent.py
index 231ab23..0a170cd 100644
--- a/src/blop/agent.py
+++ b/src/blop/agent.py
@@ -198,7 +198,7 @@ def sample(self, n: int = DEFAULT_MAX_SAMPLES, method: str = "quasi-random") ->
         else:
             raise ValueError("'method' argument must be one of ['quasi-random', 'random', 'grid'].")

-        return self.dofs.untransform(X)
+        return self.dofs.subset(active=True).untransform(X)

     def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, upsample=1, **acq_func_kwargs):
         """Ask the agent for the best point to sample, given an acquisition function.
@@ -260,7 +260,7 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True, upsam
         # this includes both RO and non-RO DOFs.
         # and is in the transformed model space
-        candidates = self.dofs.untransform(candidates).numpy()
+        candidates = self.dofs.subset(active=True).untransform(candidates).numpy()

         p = self.posterior(candidates) if hasattr(self, "model") else None

@@ -586,10 +586,10 @@ def scalarized_fitnesses(self, weights="default", constrained=True):
         return torch.where(c, f, -np.inf)

     def argmax_best_f(self, weights="default"):
-        return self.scalarized_fitnesses(weights=weights, constrained=True).argmax()
+        return int(self.scalarized_fitnesses(weights=weights, constrained=True).argmax())

     def best_f(self, weights="default"):
-        return self.scalarized_fitnesses(weights=weights, constrained=True).max()
+        return float(self.scalarized_fitnesses(weights=weights, constrained=True).max())

     @property
     def pareto_front_mask(self):
@@ -741,7 +741,7 @@ def _sample_domain(self):
         Read-only DOFs are set to exactly their last known value.
         Discrete DOFs are relaxed to some continuous domain.
         """
-        return self.dofs.transform(self.dofs.subset(active=True).search_domain.T)
+        return self.dofs.subset(active=True).transform(self.dofs.subset(active=True).search_domain.T)

     @property
     def _model_input_transform(self):
@@ -802,7 +802,7 @@ def _set_hypers(self, hypers):
         self.validity_constraint.load_state_dict(hypers["validity_constraint"])

     def constraint(self, x):
-        x = self.dofs.transform(x)
+        x = self.dofs.subset(active=True).transform(x)
         p = torch.ones(x.shape[:-1])
         for obj in self.active_objs:
@@ -922,12 +922,12 @@ def train_targets(self, index=None, **subset_kwargs):
     @property
     def best(self):
         """Returns all data for the best point."""
-        return self.table.loc[int(self.argmax_best_f())]
+        return self.table.loc[self.argmax_best_f()]

     @property
     def best_inputs(self):
         """Returns the value of each DOF at the best point."""
-        return self.table.loc[int(self.argmax_best_f()), self.dofs.names].to_dict()
+        return self.table.loc[self.argmax_best_f(), self.dofs.names].to_dict()

     def go_to(self, **positions):
         """Set all settable DOFs to a given position. DOF/value pairs should be supplied as kwargs, e.g. as
diff --git a/src/blop/dofs.py b/src/blop/dofs.py
index 8ecce32..5d3b90c 100644
--- a/src/blop/dofs.py
+++ b/src/blop/dofs.py
@@ -53,10 +53,11 @@ def _validate_continuous_dof_domains(search_domain, trust_domain, domain, read_o
     search_domain \\subseteq trust_domain \\subseteq domain
     """
     if not read_only:
+        if len(search_domain) != 2:
+            raise ValueError(f"Bad search domain {search_domain}. The search domain must have length 2.")
         try:
             search_domain = tuple((float(search_domain[0]), float(search_domain[1])))
-            assert len(search_domain) == 2
-        except:  # noqa
+        except TypeError:
             raise ValueError("If type='continuous', then 'search_domain' must be a tuple of two numbers.")

         if search_domain[0] >= search_domain[1]:
@@ -72,7 +73,7 @@ def _validate_continuous_dof_domains(search_domain, trust_domain, domain, read_o

     if (trust_domain is not None) and (domain is not None):
         if (trust_domain[0] < domain[0]) or (trust_domain[1] > domain[1]):
-            raise ValueError(f"The trust domain {trust_domain} must be a subset of the trust domain {domain}.")
+            raise ValueError(f"The trust domain {trust_domain} must be a subset of the domain {domain}.")


 def _validate_discrete_dof_domains(search_domain, trust_domain):
@@ -374,21 +375,20 @@ def transform(self, X):
         """
         Transform X to the transformed unit hypercube.
""" - - if X.shape[-1] != len(self.subset(active=True)): - raise ValueError() + if X.shape[-1] != len(self): + raise ValueError(f"Cannot transform points with shape {X.shape} using DOFs with dimension {len(self)}.") if not isinstance(X, torch.Tensor): X = torch.tensor(X, dtype=torch.double) - return torch.cat([dof._transform(X[..., i]).unsqueeze(-1) for i, dof in enumerate(self.subset(active=True))], dim=-1) + return torch.cat([dof._transform(X[..., i]).unsqueeze(-1) for i, dof in enumerate(self.dofs)], dim=-1) def untransform(self, X): """ Transform the transformed unit hypercube to the search domain. """ - if X.shape[-1] != len(self.subset(active=True)): - raise ValueError() + if X.shape[-1] != len(self): + raise ValueError(f"Cannot untransform points with shape {X.shape} using DOFs with dimension {len(self)}.") if not isinstance(X, torch.Tensor): X = torch.tensor(X, dtype=torch.double) diff --git a/src/blop/objectives.py b/src/blop/objectives.py index 10e4784..f99e22b 100644 --- a/src/blop/objectives.py +++ b/src/blop/objectives.py @@ -389,7 +389,7 @@ def transform(self, Y): Transform the experiment space to the model space. """ if Y.shape[-1] != len(self): - raise ValueError() + raise ValueError(f"Cannot transform points with shape {Y.shape} using DOFs with dimension {len(self)}.") if not isinstance(Y, torch.Tensor): Y = torch.tensor(Y, dtype=torch.double) @@ -401,7 +401,7 @@ def untransform(self, Y): Transform the model space to the experiment space. """ if Y.shape[-1] != len(self): - raise ValueError() + raise ValueError(f"Cannot untransform points with shape {Y.shape} using DOFs with dimension {len(self)}.") if not isinstance(Y, torch.Tensor): Y = torch.tensor(Y, dtype=torch.double) diff --git a/src/blop/plotting.py b/src/blop/plotting.py index 551f80b..2f69d9d 100644 --- a/src/blop/plotting.py +++ b/src/blop/plotting.py @@ -155,7 +155,7 @@ def _plot_objs_many_dofs(agent, axes=(0, 1), shading="nearest", cmap=DEFAULT_COL test_x = test_inputs[..., 0, axes[0]].detach().squeeze().numpy() test_y = test_inputs[..., 0, axes[1]].detach().squeeze().numpy() - model_inputs = agent.dofs.transform(test_inputs) + model_inputs = agent.dofs.subset(active=True).transform(test_inputs) for obj_index, obj in enumerate(agent.objectives): targets = agent.train_targets(obj.name).squeeze(-1).numpy() diff --git a/src/blop/tests/conftest.py b/src/blop/tests/conftest.py index 6506f53..9c81da8 100644 --- a/src/blop/tests/conftest.py +++ b/src/blop/tests/conftest.py @@ -107,7 +107,7 @@ def digestion(db, uid): @pytest.fixture(scope="function") def constrained_agent(db): """ - https://en.wikipedia.org/wiki/Test_functions_for_optimization + Chankong and Haimes function from https://en.wikipedia.org/wiki/Test_functions_for_optimization """ def digestion(db, uid):