rng = check_random_state(random_state)
# Bounds
num_params = len(bounds)
lower_bounds, upper_bounds = extract_bounds(bounds)
# Default estimator
if base_estimator is None:
    base_estimator = GradientBoostingQuantileRegressor(random_state=rng)
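# The quantile regressor provides an uncertainty estimate alongside its
# prediction, which the expected-improvement acquisition used below requires.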
# Record the points and function values evaluated as part of
# the minimization
Xi = np.zeros((maxiter, num_params))
yi = np.zeros(maxiter)
# Initialize with random points
if n_start == 0:
    raise ValueError("Need at least one starting point.")

if maxiter == 0:
    raise ValueError("Need to perform at least one iteration.")
n_start = min(n_start, maxiter)
Xi[:n_start] = _random_points(
    lower_bounds, upper_bounds, n_points=n_start, random_state=rng)
yi[:n_start] = [func(xi) for xi in Xi[:n_start]]
# Best point and value found during the random initialization.
best_y = np.min(yi[:n_start])
best_x = Xi[np.argmin(yi[:n_start])]
models = []
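# Sequential model-based optimization: refit the surrogate on all points
# evaluated so far, then pick the next point to evaluate by maximizing
# expected improvement over a set of random candidates.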
for i in range(n_start, maxiter):
    rgr = clone(base_estimator)
    # only the first i points are meaningful
    rgr.fit(Xi[:i, :], yi[:i])
    models.append(rgr)
    # `rgr` predicts constants for each leaf, which means that the EI
    # has zero gradient over large distances. As a result we cannot
    # use gradient-based optimisers like BFGS, so use random sampling
    # for the moment.
    x0 = _random_points(
        lower_bounds, upper_bounds, n_points=n_points, random_state=rng)
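    # Score every candidate with expected improvement and greedily pick
    # the most promising one as the next point to evaluate.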
    best = np.argmax(gaussian_ei(x0, rgr, best_y))
    Xi[i] = x0[best].ravel()
    yi[i] = func(x0[best])

    if yi[i] < best_y:
        best_y = yi[i]