better targeting method
Thomas Morris committed Nov 10, 2023
1 parent 440bc16 commit 6576bc8
Showing 9 changed files with 57 additions and 58 deletions.
60 changes: 21 additions & 39 deletions bloptools/bayesian/agent.py
@@ -49,6 +49,7 @@ def __init__(
tolerate_acquisition_errors=False,
sample_center_on_init=False,
trigger_delay: float = 0,
train_every: int = 1,
):
"""
A Bayesian optimization agent.
@@ -107,6 +108,7 @@ def __init__(

self.tolerate_acquisition_errors = tolerate_acquisition_errors

self.train_every = train_every
self.trigger_delay = trigger_delay
self.sample_center_on_init = sample_center_on_init

@@ -135,12 +137,17 @@ def tell(self, x: Mapping, y: Mapping, metadata=None, append=True, train_models=
A dict of hyperparameters for the model to assume a priori.
"""

# n_before_tell = len(self.table)

new_table = pd.DataFrame({**x, **y, **metadata} if metadata is not None else {**x, **y})
self.table = pd.concat([self.table, new_table]) if append else new_table
self.table.index = np.arange(len(self.table))

# n_after_tell = len(self.table)

# TODO: should be a check per model
if len(self.table) > 2:
# if n_before_tell % self.train_every != n_after_tell % self.train_every:
self._update_models(train=train_models, a_priori_hypers=hypers)

def _update_models(self, train=True, skew_dims=None, a_priori_hypers=None):
@@ -257,13 +264,14 @@ def ask(self, acq_func_identifier="qei", n=1, route=True, sequential=True):
raw_samples=RAW_SAMPLES,  # used for initialization heuristic
)

x = candidates.numpy().astype(float)
# this includes both RO and non-RO DOFs
candidates = candidates.numpy()

active_dofs_are_read_only = np.array([dof.read_only for dof in self.dofs.subset(active=True)])

acq_points = x[..., ~active_dofs_are_read_only]
read_only_X = x[..., active_dofs_are_read_only]
acq_func_meta["read_only_values"] = read_only_X
acq_points = candidates[..., ~active_dofs_are_read_only]
read_only_values = candidates[..., active_dofs_are_read_only]
acq_func_meta["read_only_values"] = read_only_values

else:
acqf_obj = None
@@ -484,51 +492,25 @@ def get_objective_targets(self, i):

return targets

# def _get_objective_targets(self, i):
# """Returns the targets (what we fit to) for an objective, given the objective index."""
# obj = self.objectives[i]

# targets = self.table.loc[:, obj.name].values.copy()

# # check that targets values are inside acceptable values
# valid = (targets > obj.limits[0]) & (targets < obj.limits[1])
# targets = np.where(valid, targets, np.nan)

# # transform if needed
# if obj.log:
# targets = np.where(valid, np.log(targets), np.nan)
# if obj.target not in ["min", "max"]:
# targets = -np.square(np.log(targets) - np.log(obj.target))

# else:
# if obj.target not in ["min", "max"]:
# targets = -np.square(targets - obj.target)

# if obj.target == "min":
# targets *= -1

# return targets

@property
def scalarizing_transform(self):
return ScalarizedPosteriorTransform(weights=self.objective_weights_torch, offset=0)

@property
def targeting_transform(self):
return TargetingPosteriorTransform(weights=self.objective_weights_torch, targets=self.pseudo_targets)
return TargetingPosteriorTransform(weights=self.objective_weights_torch, targets=self.objectives.targets)

@property
def pseudo_targets(self):
"""Targets for the posterior transform"""
return torch.tensor(
[
1.e32
if obj.target == "max"
else -1.e32
if obj.target == "min"
else np.log(obj.target) if obj.log
else obj.target
for i, obj in enumerate(self.objectives)
self.objectives_targets[..., i].max()
if t == "max"
else self.objectives_targets[..., i].min()
if t == "min"
else t
for i, t in enumerate(self.objectives.targets)
]
)

@@ -677,7 +659,7 @@ def save_hypers(self, filepath):
"""Save the agent's fitted hyperparameters to a given filepath."""
hypers = self.hypers
with h5py.File(filepath, "w") as f:
for model_key in hypers.names():
for model_key in hypers.keys():
f.create_group(model_key)
for param_key, param_value in hypers[model_key].items():
f[model_key].create_dataset(param_key, data=param_value)
@@ -687,7 +669,7 @@ def load_hypers(filepath):
"""Load hyperparameters from a file."""
hypers = {}
with h5py.File(filepath, "r") as f:
for model_key in f.names():
for model_key in f.keys():
hypers[model_key] = OrderedDict()
for param_key, param_value in f[model_key].items():
hypers[model_key][param_key] = torch.tensor(np.atleast_1d(param_value[()]))
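For context on the new targeting scheme in agent.py, here is a minimal standalone sketch (not part of the diff; the helper name pseudo_targets_from_data and its arguments are illustrative, not the repository's API). String targets "max" and "min" resolve to the best value observed so far for that objective, while an explicit numeric target passes through unchanged:

    import torch

    def pseudo_targets_from_data(targets, observed):
        """targets:  per-objective spec, each "max", "min", or a number
        observed: (n_samples, n_objectives) tensor of fitted target values"""
        values = []
        for i, t in enumerate(targets):
            if t == "max":
                values.append(observed[..., i].max())  # largest value seen so far
            elif t == "min":
                values.append(observed[..., i].min())  # smallest value seen so far
            else:
                values.append(torch.as_tensor(float(t)))  # explicit numeric target
        return torch.stack(values)

    # toy data with two objectives: maximize the first, aim for 0.5 on the second
    observed = torch.tensor([[0.1, 0.9], [0.4, 0.2], [0.3, 0.6]])
    print(pseudo_targets_from_data(["max", 0.5], observed))  # tensor([0.4000, 0.5000])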
4 changes: 2 additions & 2 deletions bloptools/bayesian/objectives.py
@@ -88,9 +88,9 @@ def __len__(self):
@property
def summary(self):
summary = pd.DataFrame(columns=OBJ_FIELDS)
for i, obj in enumerate(self.objectives):
for obj in self.objectives:
for col in summary.columns:
summary.loc[i, col] = getattr(obj, col)
summary.loc[obj.name, col] = getattr(obj, col)

# convert dtypes
for attr in ["log"]:
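The effect of the summary change above, sketched with a hypothetical stand-in class (not the real Objective) just to show that rows are now indexed by objective name rather than by position:

    from dataclasses import dataclass

    import pandas as pd

    @dataclass
    class FakeObjective:  # stand-in exposing only the fields the summary reads
        name: str
        target: object
        log: bool = False

    objectives = [FakeObjective("himmelblau", "min"), FakeObjective("flux", "max", log=True)]
    summary = pd.DataFrame(columns=["name", "target", "log"])
    for obj in objectives:
        for col in summary.columns:
            summary.loc[obj.name, col] = getattr(obj, col)

    print(summary.index.tolist())  # ['himmelblau', 'flux']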
25 changes: 21 additions & 4 deletions bloptools/bayesian/transforms.py
@@ -20,12 +20,29 @@ def __init__(self, weights: Tensor, targets: Tensor) -> None:
offset: An offset to be added to posterior mean.
"""
super().__init__()
self.register_buffer("targets", targets)
self.targets = targets
self.register_buffer("weights", weights)

self.sampled_transform = lambda y: -(y - self.targets).abs() @ self.weights.unsqueeze(-1)
self.mean_transform = lambda mean, var: -(mean - self.targets).abs() @ self.weights.unsqueeze(-1)
self.variance_transform = lambda mean, var: -var @ self.weights.unsqueeze(-1)
def sampled_transform(self, y):
for i, target in enumerate(self.targets):
if target == "min":
y[..., i] = -y[..., i]
elif target != "max":
y[..., i] = -(y[..., i] - target).abs()

return y @ self.weights.unsqueeze(-1)

def mean_transform(self, mean, var):
for i, target in enumerate(self.targets):
if target == "min":
mean[..., i] = -mean[..., i]
elif target != "max":
mean[..., i] = -(mean[..., i] - target).abs()

return mean @ self.weights.unsqueeze(-1)

def variance_transform(self, mean, var):
return var @ self.weights.unsqueeze(-1)

def evaluate(self, Y: Tensor) -> Tensor:
r"""Evaluate the transform on a set of outcomes.
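To see what the rewritten transform does numerically, here is a self-contained sketch of the same per-objective logic (the function name and toy numbers are illustrative): "max" objectives pass through, "min" objectives are negated, and a numeric target contributes the negative absolute distance to that target, before the weighted sum:

    import torch

    def targeting_scalarize(y, targets, weights):
        y = y.clone()  # work on a copy so the caller's samples are not modified
        for i, target in enumerate(targets):
            if target == "min":
                y[..., i] = -y[..., i]                   # smaller raw values score higher
            elif target != "max":
                y[..., i] = -(y[..., i] - target).abs()  # closer to the target scores higher
        return y @ weights.unsqueeze(-1)

    y = torch.tensor([[1.0, 2.0, 3.0]])
    weights = torch.ones(3)
    # maximize objective 0, minimize objective 1, aim for 2.5 on objective 2
    print(targeting_scalarize(y, ["max", "min", 2.5], weights))  # tensor([[-1.5000]])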
8 changes: 4 additions & 4 deletions bloptools/tests/conftest.py
@@ -54,7 +54,7 @@ def agent(db):
DOF(name="x2", limits=(-8.0, 8.0)),
]

objectives = [Objective(key="himmelblau", target="min")]
objectives = [Objective(name="himmelblau", target="min")]

agent = Agent(
dofs=dofs,
@@ -78,8 +78,8 @@ def digestion(db, uid):
products = db[uid].table()

for index, entry in products.iterrows():
products.loc[index, "ST1"] = functions.styblinski_tang(entry.x1, entry.x2)
products.loc[index, "ST2"] = functions.styblinski_tang(entry.x1, -entry.x2)
products.loc[index, "obj1"] = functions.himmelblau(entry.x1, entry.x2)
products.loc[index, "obj2"] = functions.himmelblau(entry.x2, entry.x1)

return products

@@ -88,7 +88,7 @@ def digestion(db, uid):
DOF(name="x2", limits=(-5.0, 5.0)),
]

objectives = [Objective(key="ST1", target="min"), Objective(key="ST2", target="min")]
objectives = [Objective(name="obj1", target="min"), Objective(name="obj2", target="min")]

agent = Agent(
dofs=dofs,
2 changes: 1 addition & 1 deletion bloptools/tests/test_passive_dofs.py
@@ -15,7 +15,7 @@ def test_passive_dofs(RE, db):
]

objectives = [
Objective(key="himmelblau", target="min"),
Objective(name="himmelblau", target="min"),
]

agent = Agent(
6 changes: 3 additions & 3 deletions docs/source/tutorials/himmelblau.ipynb
@@ -94,7 +94,7 @@
"source": [
"from bloptools.bayesian import Objective\n",
"\n",
"objectives = [Objective(key=\"himmelblau\", target=\"min\")]"
"objectives = [Objective(name=\"himmelblau\", target=\"min\")]"
]
},
{
@@ -295,7 +295,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "Python 3.10.0 ('nsls2')",
"language": "python",
"name": "python3"
},
@@ -313,7 +313,7 @@
},
"vscode": {
"interpreter": {
"hash": "eee21ccc240bdddd7cf04478199e20f7257541e2f592ca1a4d34ebdc0225d742"
"hash": "857d19a2fd370900ed798add63a0e418d98c0c9c9169a1442a8e3b86b5805755"
}
}
},
4 changes: 2 additions & 2 deletions docs/source/tutorials/hyperparameters.ipynb
@@ -80,7 +80,7 @@
"]\n",
"\n",
"objectives = [\n",
" Objective(key=\"booth\", target=\"min\"),\n",
" Objective(name=\"booth\", target=\"min\"),\n",
"]\n",
"\n",
"\n",
@@ -145,7 +145,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.0"
"version": "3.11.5"
},
"vscode": {
"interpreter": {
4 changes: 2 additions & 2 deletions docs/source/tutorials/passive-dofs.ipynb
@@ -51,7 +51,7 @@
"]\n",
"\n",
"objectives = [\n",
" Objective(key=\"himmelblau\", target=\"min\"),\n",
" Objective(name=\"himmelblau\", target=\"min\"),\n",
"]\n",
"\n",
"agent = Agent(\n",
@@ -93,7 +93,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.0"
"version": "3.11.5"
},
"vscode": {
"interpreter": {
2 changes: 1 addition & 1 deletion docs/wip/constrained-himmelblau copy.ipynb
@@ -35,7 +35,7 @@
"X1, X2 = np.meshgrid(x1, x2)\n",
"from bloptools.tasks import Task\n",
"\n",
"task = Task(key=\"himmelblau\", kind=\"min\")\n",
"task = Task(name=\"himmelblau\", kind=\"min\")\n",
"F = functions.constrained_himmelblau(X1, X2)\n",
"\n",
"plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(), cmap=\"gnuplot\")\n",
