-
Notifications
You must be signed in to change notification settings - Fork 0
/
bo.py
110 lines (103 loc) · 4.39 KB
/
bo.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
"""Classical (Naive) Bayesian optimization"""
import botorch
import torch
import botorch.acquisition as acqf
def generate_batch(model,
                   X,
                   Y,
                   batch_size=1,
                   X_pending=None,
                   n_candidates=None,
                   bounds=None,
                   num_restarts=10,
                   raw_samples=512,
                   acqfn="ei",
                   **kwargs):
    """Generate a set of next best experiments by Bayesian optimization.

    Parameters
    ----------
    model : GPyTorch Gaussian process model
        Surrogate model generated by GPyTorch GP interfaces.
    X : pyTorch tensor with a shape of (n_training_samples, feature_size) of floats
        Current set of experimental design after featurization.
        Should be the same as the training X used in the GPyTorch GP model.
    Y : pyTorch tensor with a shape of (n_training_samples, 1) of floats
        Current measurements using X experimental design.
        Should be the same as the training Y used in the GPyTorch GP model.
    batch_size : int, default=1
        Number of next experiments to be added.
    X_pending : pyTorch tensor with a shape of (n_pending_samples, feature_size) of floats, optional
        Current search space of experimental design after featurization.
        If None, a continuous search space is assumed.
    n_candidates : int, default=None
        Size of the sampling within the search space (only used for "ts").
    bounds : pyTorch tensor with a shape of (2, feature_size) of floats, optional
        Lower/upper bounds for continuous acquisition optimization.
        Required when X_pending is None and acqfn is not "ts".
    num_restarts : int, default=10
        Number of starting points for multistart acquisition function optimization.
    raw_samples : int, default=512
        Number of samples for initialization.
    acqfn : str, default="ei"
        Acquisition function choices. Must be chosen from "ei", "pi", "ucb" and "ts".
    **kwargs : list of str
        Options passed into BoTorch optimization function.

    Returns
    -------
    X_next : pyTorch tensor with a shape of (batch_size, feature_size) of floats
        Selected experiments by BO.
    acq_value : pyTorch tensor with a shape of (batch_size,) of floats, or None
        Acquisition values for the selected experiments.
        None when acqfn="ts" (Thompson sampling has no acquisition value).

    Raises
    ------
    ValueError
        If acqfn is not one of "ei", "pi", "ucb" or "ts".

    Notes
    -----
    This implementation follows the tutorial from BoTorch
    (https://botorch.org/tutorials/closed_loop_botorch_only)

    References
    ----------
    M. Balandat, B. Karrer, D. R. Jiang, S. Daulton, B. Letham, A. G. Wilson,
    and E. Bakshy. BoTorch: A Framework for Efficient Monte-Carlo Bayesian
    Optimization. Advances in Neural Information Processing Systems 33, 2020.
    """
    dtype = X.dtype
    # X.get_device() returns -1 for CPU tensors, which is not a valid target
    # for Tensor.to(device=...); the device attribute works for CPU and CUDA.
    device = X.device
    if acqfn == "ts":
        # Thompson sampling: draw a candidate pool, then select the argmax
        # of a posterior sample evaluated at those candidates.
        if n_candidates is None:
            # Heuristic pool size scaling with input dimensionality,
            # clamped to [2000, 5000].
            n_candidates = min(5000, max(2000, 200 * X.shape[-1]))
        from contextlib import ExitStack
        import gpytorch
        from botorch.generation import MaxPosteriorSampling
        if X_pending is None:
            # Continuous search space: quasi-random Sobol candidates in the
            # unit cube, moved to the training data's dtype/device.
            from torch.quasirandom import SobolEngine
            sobol = SobolEngine(X.shape[-1], scramble=True)
            X_cand = sobol.draw(n_candidates).to(dtype=dtype, device=device)
        else:
            # Discrete search space: subsample the pending pool without
            # replacement; shrink the request if the pool is too small.
            if X_pending.shape[0] < n_candidates:
                n_candidates = int(0.8 * X_pending.shape[0])
            id_choice = torch.randperm(X_pending.shape[0])[:n_candidates]
            X_cand = X_pending[id_choice, :].to(dtype=dtype, device=device)
        with ExitStack() as es:
            # Force exact (Cholesky) inference regardless of problem size.
            es.enter_context(
                gpytorch.settings.max_cholesky_size(float("inf")))
            thompson_sampling = MaxPosteriorSampling(
                model=model, replacement=False)
            X_next = thompson_sampling(X_cand, num_samples=batch_size)
        acq_value = None  # Thompson sampling yields no acquisition value
    else:
        from botorch.optim import optimize_acqf, optimize_acqf_discrete
        # Build the Monte-Carlo acquisition function; Y.max() is the
        # current best observed objective value.
        if acqfn == "ei":
            acq = acqf.monte_carlo.qExpectedImprovement(model, Y.max())
        elif acqfn == "pi":
            acq = acqf.monte_carlo.qProbabilityOfImprovement(model, Y.max())
        elif acqfn == "ucb":
            acq = acqf.monte_carlo.qUpperConfidenceBound(model, 0.1)
        else:
            raise ValueError(
                'acqfn must be one of "ei", "pi", "ucb", "ts"; '
                'got {!r}'.format(acqfn))
        if X_pending is None:
            # Continuous optimization over the provided bounds.
            X_next, acq_value = optimize_acqf(
                acq,
                q=batch_size,
                bounds=bounds,
                num_restarts=num_restarts,
                raw_samples=raw_samples,
                **kwargs)
        else:
            # Discrete optimization over the pending candidate set.
            X_next, acq_value = optimize_acqf_discrete(
                acq,
                choices=X_pending,
                q=batch_size,
                max_batch_size=2048,
                **kwargs)
    return X_next, acq_value