diff --git a/src/orion/analysis/__init__.py b/src/orion/analysis/__init__.py
index 9df822b23..f99864856 100644
--- a/src/orion/analysis/__init__.py
+++ b/src/orion/analysis/__init__.py
@@ -7,5 +7,6 @@
 from orion.analysis.lpi_utils import lpi
 from orion.analysis.partial_dependency_utils import partial_dependency
 from orion.analysis.regret_utils import regret
+from orion.analysis.symbolic_explanation import symbolic_explanation
 
-__all__ = ["average", "ranking", "lpi", "partial_dependency", "regret"]
+__all__ = ["average", "ranking", "lpi", "partial_dependency", "regret", "symbolic_explanation"]
diff --git a/src/orion/analysis/partial_dependency_utils.py b/src/orion/analysis/partial_dependency_utils.py
index 9989b95db..8c7c0ea10 100644
--- a/src/orion/analysis/partial_dependency_utils.py
+++ b/src/orion/analysis/partial_dependency_utils.py
@@ -76,9 +76,12 @@ def partial_dependency(
     if trials.empty or trials.shape[0] == 0:
         return {}
 
-    data = to_numpy(trials, space)
-    data = flatten_numpy(data, flattened_space)
-    model = train_regressor(model, data, **kwargs)
+    # Only train a surrogate when a model name is given; a pre-fitted
+    # regressor passed as `model` is used as-is.
+    if isinstance(model, str):
+        data = to_numpy(trials, space)
+        data = flatten_numpy(data, flattened_space)
+        model = train_regressor(model, data, **kwargs)
 
     data = [
         format_trials.trial_to_tuple(trial, flattened_space)
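With the `isinstance(model, str)` guard above, `partial_dependency` now accepts either a model name to train or an already-fitted regressor. A minimal sketch of both call styles, assuming an existing, populated experiment client `experiment` and a fitted regressor `fitted_regressor` (both are assumptions, not part of this diff):

```python
import orion.analysis

df = experiment.to_pandas()

# Existing behaviour: pass a model name; Orion trains the surrogate itself.
data = orion.analysis.partial_dependency(
    df, experiment.space, model="RandomForestRegressor"
)

# New behaviour: pass a pre-fitted regressor (e.g. the SymbolicRegressor from
# the module below); the isinstance(model, str) guard skips train_regressor.
data = orion.analysis.partial_dependency(df, experiment.space, model=fitted_regressor)
```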
diff --git a/src/orion/analysis/symbolic_explanation.py b/src/orion/analysis/symbolic_explanation.py
new file mode 100644
index 000000000..b6054e51b
--- /dev/null
+++ b/src/orion/analysis/symbolic_explanation.py
@@ -0,0 +1,256 @@
+"""
+Module for symbolic explanation.
+
+Large chunks of code are copied from this repository:
+https://github.com/automl/symbolic-explanations
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass, field
+from typing import List
+
+import numpy as np
+import pandas as pd
+import sympy
+from gplearn import functions
+from gplearn.functions import make_function
+from gplearn.genetic import SymbolicRegressor
+
+from orion.algo.space import Categorical, Space
+from orion.analysis.base import flatten_numpy, flatten_params, to_numpy, train_regressor
+from orion.core.utils import format_trials
+from orion.core.worker.transformer import build_required_space
+
+# TODO: Add dependencies:
+#     gplearn
+#     sympy
+
+logger = logging.getLogger(__name__)
+
+
+# TODO: May need to expose a_max as a parameter so users can change it.
+def safe_exp(x):
+    """Exponential protected against overflow; outputs are clipped at 1e5."""
+    with np.errstate(over="ignore"):
+        return np.clip(np.exp(x), a_min=None, a_max=100000)
+
+
+@dataclass
+class SymbolicRegressorParams:
+    """Parameters passed to gplearn's ``SymbolicRegressor``."""
+
+    population_size: int = 5000
+    generations: int = 20
+    metric: str = "rmse"
+    parsimony_coefficient: float = 0.0001
+    random_state: int | None = None
+    verbose: int = 1
+    function_set: list = field(
+        default_factory=lambda: [
+            "add",
+            "sub",
+            "mul",
+            "div",
+            "sqrt",
+            "log",
+            "sin",
+            "cos",
+            "abs",
+            make_function(function=safe_exp, arity=1, name="exp"),
+        ]
+    )
+
+
+def convert_symb(
+    symb, space: Space, n_decimals: int | None = None
+) -> sympy.Expr | str:
+    """
+    Convert a fitted symbolic regression to a simplified and potentially rounded
+    mathematical expression.
+
+    Warning: eval is used through ``sympy.sympify``, thus this function should not be
+    called on unsanitized input (see
+    https://docs.sympy.org/latest/modules/core.html?highlight=eval#module-sympy.core.sympify).
+
+    Parameters
+    ----------
+    symb: Fitted symbolic regressor to find a simplified expression for.
+    space: Space of the experiment; its dimension names replace the ``X{i}``
+        placeholders in the expression.
+    n_decimals: If set, round floats in the expression to this number of decimals.
+
+    Returns
+    -------
+    symb_conv: Converted mathematical expression, or the raw program string if the
+        program is too long to convert.
+    """
+
+    # sqrt is a protected function in gplearn, always returning sqrt(abs(x))
+    sqrt_pos = []
+    prev_sqrt_inserts = 0
+    for i, f in enumerate(symb._program.program):
+        if isinstance(f, functions._Function) and f.name == "sqrt":
+            sqrt_pos.append(i)
+    for i in sqrt_pos:
+        symb._program.program.insert(i + prev_sqrt_inserts + 1, functions.abs1)
+        prev_sqrt_inserts += 1
+
+    # log is a protected function in gplearn, always returning log(abs(x))
+    log_pos = []
+    prev_log_inserts = 0
+    for i, f in enumerate(symb._program.program):
+        if isinstance(f, functions._Function) and f.name == "log":
+            log_pos.append(i)
+    for i in log_pos:
+        symb._program.program.insert(i + prev_log_inserts + 1, functions.abs1)
+        prev_log_inserts += 1
+
+    symb_str = str(symb._program)
+
+    converter = {
+        "sub": lambda x, y: x - y,
+        "div": lambda x, y: x / y,
+        "mul": lambda x, y: x * y,
+        "add": lambda x, y: x + y,
+        "neg": lambda x: -x,
+        "pow": lambda x, y: x**y,
+    }
+
+    converter.update(
+        {
+            f"X{i}": sympy.symbols(dim.name, real=True)
+            for i, dim in enumerate(space.values())
+        }
+    )
+
+    if symb._program.length_ > 300:
+        logger.warning(
+            "Expression of length %d too long to convert, returning raw string.",
+            symb._program.length_,
+        )
+        return symb_str
+
+    symb_conv = sympy.sympify(
+        symb_str.replace("[", "").replace("]", ""), locals=converter
+    )
+    if n_decimals:
+        # Make sure floats deeper in the expression tree are also rounded
+        for a in sympy.preorder_traversal(symb_conv):
+            if isinstance(a, sympy.core.numbers.Float):
+                symb_conv = symb_conv.subs(a, round(a, n_decimals))
+
+    return symb_conv
+
+
+def simplify_formula(
+    symb_model: SymbolicRegressor, space: Space, n_decimals: int = 3
+) -> sympy.Expr | str:
+    """Convert a fitted symbolic regressor to a simplified, rounded expression."""
+    return convert_symb(symb_model, space=space, n_decimals=n_decimals)
+
+
+def symbolic_explanation(
+    trials: pd.DataFrame,
+    space: Space,
+    params: List[str] | None = None,
+    model="RandomForestRegressor",
+    n_samples: int = 50,
+    sampling_seed: int | None = None,
+    timeout: float = 300,
+    symbolic_regressor_params: SymbolicRegressorParams = SymbolicRegressorParams(),
+    **kwargs,
+) -> SymbolicRegressor:
+    """
+    Calculates a symbolic explanation of the effect of parameters
+    on the objective based on a collection of
+    :class:`orion.core.worker.trial.Trial`.
+
+    For more information on the method,
+    see the original paper at https://openreview.net/forum?id=JQwAc91sg_x.
+
+    Segel, Sarah, et al. "Symbolic explanations for hyperparameter
+    optimization." AutoML Conference 2023.
+
+    Parameters
+    ----------
+    trials: DataFrame
+        A dataframe of trials containing, at least, the columns 'objective' and 'id'.
+
+    space: Space object
+        A space object from an experiment.
+
+    params: list of str, optional
+        The parameters to include in the computation. All parameters are included by
+        default.
+
+    model: str
+        Name of the regression model to use. Can be one of
+        - AdaBoostRegressor
+        - BaggingRegressor
+        - ExtraTreesRegressor
+        - GradientBoostingRegressor
+        - RandomForestRegressor (Default)
+
+    n_samples: int
+        Number of points sampled from the search space and labelled by the surrogate
+        model to fit the symbolic regressor. Default is 50.
+
+    sampling_seed: int
+        Seed used to sample the points for fitting the symbolic regressor.
+
+    timeout: float
+        Number of seconds before the evolutionary algorithm is stopped.
+        Note: not enforced yet, see the TODO below.
+
+    symbolic_regressor_params: SymbolicRegressorParams
+        Dataclass with the parameters for the ``SymbolicRegressor``.
+
+    **kwargs
+        Arguments for the regressor model.
+
+    Returns
+    -------
+    SymbolicRegressor
+        A SymbolicRegressor fitted on the trials.
+    """
+
+    # TODO: Need to handle multi-fidelity. Maybe only pick the highest fidelity...
+
+    # TODO: Validate that the history (`trials`) is large enough to make sense
+    # compared to the number of points sampled from the surrogate.
+
+    if any(isinstance(dim, Categorical) for dim in space):
+        raise ValueError(
+            "Symbolic explanation does not support categorical dimensions yet."
+        )
+
+    # Only validates the requested parameter names; sampling below uses the full space.
+    params = flatten_params(space, params)
+
+    flattened_space = build_required_space(
+        space,
+        dist_requirement="linear",
+        type_requirement="numerical",
+        shape_requirement="flattened",
+    )
+
+    if trials.empty or trials.shape[0] == 0:
+        raise ValueError(
+            "Cannot compute a symbolic explanation on an empty set of trials."
+        )
+
+    data = to_numpy(trials, space)
+    data = flatten_numpy(data, flattened_space)
+    model = train_regressor(model, data, **kwargs)
+
+    # Sample random points for fitting the symbolic regressor.
+    data = [
+        format_trials.trial_to_tuple(trial, flattened_space)
+        for trial in flattened_space.sample(n_samples, seed=sampling_seed)
+    ]
+    X_train = pd.DataFrame(data, columns=flattened_space.keys()).to_numpy()
+
+    Y_train = model.predict(X_train)
+
+    # TODO: Enforce `timeout`; gplearn does not expose a time budget, so the limit
+    # would have to be implemented around the fit call.
+    symbolic_regressor = SymbolicRegressor(
+        population_size=symbolic_regressor_params.population_size,
+        generations=symbolic_regressor_params.generations,
+        function_set=symbolic_regressor_params.function_set,
+        metric=symbolic_regressor_params.metric,
+        parsimony_coefficient=symbolic_regressor_params.parsimony_coefficient,
+        verbose=symbolic_regressor_params.verbose,
+        random_state=symbolic_regressor_params.random_state,
+    )
+
+    symbolic_regressor.fit(X_train, Y_train)
+
+    return symbolic_regressor
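A usage sketch for the new analysis module, again assuming an existing experiment client `experiment` with completed trials (not part of this diff):

```python
from orion.analysis.symbolic_explanation import (
    SymbolicRegressorParams,
    simplify_formula,
    symbolic_explanation,
)

df = experiment.to_pandas()
df = df.loc[df["status"] == "completed"]

# Train the surrogate on the trials, sample points from the search space,
# label them with the surrogate, then fit the symbolic regressor on them.
regressor = symbolic_explanation(
    trials=df,
    space=experiment.space,
    n_samples=100,
    sampling_seed=1,
    symbolic_regressor_params=SymbolicRegressorParams(generations=10, verbose=0),
)
print(simplify_formula(regressor, experiment.space, n_decimals=2))
```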
diff --git a/src/orion/plotting/backend_plotly.py b/src/orion/plotting/backend_plotly.py
index 5e1fa5079..7dd63344e 100644
--- a/src/orion/plotting/backend_plotly.py
+++ b/src/orion/plotting/backend_plotly.py
@@ -10,11 +10,16 @@
 import pandas as pd
 import plotly.express as px
 import plotly.graph_objects as go
+import sympy
 from plotly.subplots import make_subplots
 
 import orion.analysis
 import orion.analysis.base
 from orion.algo.space import Categorical, Fidelity
+from orion.analysis.symbolic_explanation import (
+    SymbolicRegressorParams,
+    simplify_formula,
+)
 from orion.core.worker.transformer import build_required_space
@@ -194,7 +199,6 @@ def reformat_competitions(experiments):
     return competitions
 
 
 def build_groups(competitions):
-
     if not isinstance(competitions, dict):
         rankings = []
         for competition in competitions:
@@ -488,6 +492,263 @@ def _plot_scatter(x, y, df):
     return fig
 
 
+def symbolic_explanation(
+    experiment,
+    with_evc_tree=True,
+    params=None,
+    smoothing=0.85,
+    n_grid_points=10,
+    n_samples=50,
+    colorscale="Blues",
+    model="RandomForestRegressor",
+    model_kwargs=None,
+    timeout=300,
+    n_decimals=3,
+    sampling_seed=None,
+    symbolic_regressor_params=SymbolicRegressorParams(),
+    verbose_hover=True,
+):
+    """Plotly implementation of `orion.plotting.symbolic_explanation`"""
+
+    def build_data():
+        """Builds the dataframe for the plot"""
+        df = experiment.to_pandas(with_evc_tree=with_evc_tree)
+
+        names = list(experiment.space.keys())
+        df["params"] = df[names].apply(_format_hyperparameters, args=(names,), axis=1)
+
+        df = df.loc[df["status"] == "completed"]
+        symbolic_regressor = orion.analysis.symbolic_explanation(
+            trials=df,
+            space=experiment.space,
+            params=params,
+            model=model,
+            n_samples=n_samples,
+            timeout=timeout,
+            sampling_seed=sampling_seed,
+            symbolic_regressor_params=symbolic_regressor_params,
+            **model_kwargs,
+        )
+
+        data = orion.analysis.partial_dependency(
+            experiment.to_pandas(),
+            experiment.space,
+            model=symbolic_regressor,
+            n_grid_points=n_grid_points,
+        )
+
+        latex_formula = sympy.latex(
+            simplify_formula(
+                symbolic_regressor, experiment.space, n_decimals=n_decimals
+            )
+        )
+
+        df = _flatten_dims(df, experiment.space)
+        return (df, data, latex_formula)
+
+    def _set_scale(figure, dims, x, y):
+        for axis, dim in zip("xy", dims):
+            if "reciprocal" in dim.prior_name or dim.type == "fidelity":
+                getattr(figure, f"update_{axis}axes")(type="log", row=y, col=x)
+
+    def _plot_marginalized_avg(data, x_name):
+        return go.Scatter(
+            x=data[0][x_name],
+            y=data[1],
+            mode="lines",
+            name=None,
+            showlegend=False,
+            line=dict(
+                color=px.colors.qualitative.D3[0],
+            ),
+        )
+
+    def _plot_marginalized_std(data, x_name):
+        return go.Scatter(
+            x=list(data[0][x_name]) + list(data[0][x_name])[::-1],
+            y=list(data[1] - data[2]) + list(data[1] + data[2])[::-1],
+            mode="lines",
+            name=None,
+            fill="toself",
+            showlegend=False,
+            line=dict(
+                color=px.colors.qualitative.D3[0],
+                width=0,
+            ),
+        )
+
+    def _plot_contour(data, x_name, y_name):
+        return go.Contour(
+            x=data[0][x_name],
+            y=data[0][y_name],
+            z=data[1],
+            connectgaps=True,
+            # Share the same color range across contour plots
+            coloraxis="coloraxis",
+            line_smoothing=smoothing,
+            # To show labels
+            contours=dict(
+                coloring="heatmap",
+                showlabels=True,  # show labels on contours
+                labelfont=dict(  # label font properties
+                    size=12,
+                    color="white",
+                ),
+            ),
+        )
+
+    def _plot_scatter(x, y, df):
+        return go.Scatter(
+            x=x,
+            y=y,
+            marker={
+                "line": {"width": 0.5, "color": "Grey"},
+                "color": "black",
+                "size": 5,
+            },
+            mode="markers",
+            opacity=0.5,
+            showlegend=False,
+            customdata=list(zip(df["id"], df["suggested"], df["params"])),
+            hovertemplate=_template_trials(verbose_hover),
+        )
+
+    if model_kwargs is None:
+        model_kwargs = {}
+
+    df, data, latex_formula = build_data()
+
+    if not data:
+        return go.Figure()
+
+    params = [
+        param_names for param_names in data.keys() if isinstance(param_names, str)
+    ]
+
+    flattened_space = build_required_space(
+        experiment.space,
+        shape_requirement="flattened",
+    )
+
+    fig = make_subplots(
+        rows=len(params),
+        cols=len(params),
+        shared_xaxes=True,
+        shared_yaxes=False,
+    )
+
+    fig.update_layout(paper_bgcolor="rgba(0,0,0,0)", plot_bgcolor="rgba(0,0,0,0)")
+    cmin = float("inf")
+    cmax = -float("inf")
+
+    for x_i in range(len(params)):
+        x_name = params[x_i]
+        fig.add_trace(
+            _plot_marginalized_avg(data[x_name], x_name),
+            row=x_i + 1,
+            col=x_i + 1,
+        )
+        fig.add_trace(
+            _plot_marginalized_std(data[x_name], x_name),
+            row=x_i + 1,
+            col=x_i + 1,
+        )
+        fig.add_trace(
+            _plot_scatter(df[x_name], df["objective"], df),
+            row=x_i + 1,
+            col=x_i + 1,
+        )
+
+        _set_scale(fig, [flattened_space[x_name]], x_i + 1, x_i + 1)
+
+        fig.update_xaxes(title_text=x_name, row=len(params), col=x_i + 1)
+        if x_i > 0:
+            fig.update_yaxes(title_text=x_name, row=x_i + 1, col=1)
+        else:
+            fig.update_yaxes(title_text="Objective", row=x_i + 1, col=x_i + 1)
+
+        for y_i in range(x_i + 1, len(params)):
+            y_name = params[y_i]
+            fig.add_trace(
+                _plot_contour(
+                    data[(x_name, y_name)],
+                    x_name,
+                    y_name,
+                ),
+                row=y_i + 1,
+                col=x_i + 1,
+            )
+            fig.add_trace(
+                _plot_scatter(df[x_name], df[y_name], df),
+                row=y_i + 1,
+                col=x_i + 1,
+            )
+
+            cmin = min(cmin, data[(x_name, y_name)][1].min())
+            cmax = max(cmax, data[(x_name, y_name)][1].max())
+
+            _set_scale(
+                fig,
+                [flattened_space[name] for name in [x_name, y_name]],
+                x_i + 1,
+                y_i + 1,
+            )
+
+    for x_i in range(len(params)):
+        plot_id = len(params) * x_i + x_i + 1
+        if plot_id > 1:
+            key = f"yaxis{plot_id}_range"
+        else:
+            key = "yaxis_range"
+        fig.update_layout(**{key: [cmin, cmax]})
+
+    fig.update_layout(
+        title=(
+            "Partial dependencies on symbolic explanation<br>"
+            f"for experiment '{experiment.name}'"
+        ),
+    )
+    fig.layout.coloraxis.colorbar.title = "Objective"
+
+    fig.update_layout(coloraxis=dict(colorscale=colorscale), showlegend=False)
+
+    fmin = min([data[params[0]][1].min(), df["objective"].min()])
+    fmax = max([data[params[0]][1].max(), df["objective"].max()])
+
+    fig.update_layout(
+        **{
+            f"yaxis{len(experiment.space)}_visible": False,
+            f"yaxis{len(experiment.space)}_range": [fmin, fmax],
+        }
+    )
+
+    last_param = list(experiment.space.values())[-1]
+    xlims = last_param.interval()
+    fig.add_annotation(
+        x=sum(xlims) / 2,
+        y=fmax,
+        font=dict(size=24),
+        height=30,
+        text="Best formula found",
+        showarrow=False,
+        align="right",
+        valign="bottom",
+        col=0,
+        row=1,
+    )
+    fig.add_annotation(
+        x=sum(xlims) / 2,
+        y=(fmax - fmin) / (len(params) * 0.75 + 0.5) + fmin,
+        font=dict(size=24),
+        text=r"${}$".format(latex_formula),
+        showarrow=False,
+        align="right",
+        valign="top",
+        col=0,
+        row=1,
+    )
+
+    return fig
+
+
 def regret(
     experiment, with_evc_tree=True, order_by="suggested", verbose_hover=True, **kwargs
 ):
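The `latex_formula` annotation above relies on plotly's MathJax support: annotation text wrapped in `$...$` is typeset as LaTeX when MathJax is available (for example in a notebook). A standalone sketch of that mechanism, independent of Orion:

```python
import plotly.graph_objects as go

fig = go.Figure()
fig.add_annotation(
    x=0.5,
    y=0.5,
    text=r"$x_{0}^{2} + 0.5 x_{0}$",  # dollar signs trigger MathJax typesetting
    showarrow=False,
    font=dict(size=24),
)
fig.show()  # the formula is typeset only when MathJax is available
```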
diff --git a/src/orion/plotting/base.py b/src/orion/plotting/base.py
index 8694be0c0..4bb4381a7 100644
--- a/src/orion/plotting/base.py
+++ b/src/orion/plotting/base.py
@@ -3,6 +3,7 @@
 =============================
 """
 import orion.plotting.backend_plotly as backend
+from orion.analysis.symbolic_explanation import SymbolicRegressorParams
 
 
 def lpi(
@@ -188,6 +189,94 @@ def partial_dependencies(
     )
 
 
+def symbolic_explanation(
+    experiment,
+    with_evc_tree=True,
+    params=None,
+    smoothing=0.85,
+    verbose_hover=True,
+    n_grid_points=10,
+    n_samples=50,
+    colorscale="Blues",
+    model="RandomForestRegressor",
+    model_kwargs=None,
+    timeout=300,
+    n_decimals=3,
+    sampling_seed=None,
+    symbolic_regressor_params=SymbolicRegressorParams(),
+):
+    """
+    Make partial dependency contour plots based on a symbolic explanation of the
+    objective, annotated with the best formula found.
+
+    Parameters
+    ----------
+    experiment: ExperimentClient or Experiment
+        The orion object containing the experiment data
+
+    with_evc_tree: bool, optional
+        Fetch all trials from the EVC tree.
+        Default: True
+
+    params: list of str, optional
+        Indicates the parameters to include in the plots. All parameters are included
+        by default.
+
+    smoothing: float, optional
+        Smoothing applied to the contour plot. 0 corresponds to no smoothing.
+        Default is 0.85.
+
+    verbose_hover: bool
+        Indicates whether to display the hyperparameter in hover tooltips.
+        True by default.
+
+    colorscale: str, optional
+        The colorscale used for the contour plots. Supported values depend on the
+        backend. Default is 'Blues'.
+
+    n_grid_points: int, optional
+        Number of points in the grid to compute partial dependency. Default is 10.
+
+    n_samples: int, optional
+        Number of points sampled from the search space and labelled by the surrogate
+        model to fit the symbolic regressor. Default is 50.
+
+    model: str
+        Name of the regression model to use. Can be one of
+        - AdaBoostRegressor
+        - BaggingRegressor
+        - ExtraTreesRegressor
+        - GradientBoostingRegressor
+        - RandomForestRegressor (Default)
+
+    model_kwargs: dict, optional
+        Arguments for the regressor model.
+
+    timeout: float, optional
+        Number of seconds before the evolutionary algorithm is stopped.
+        Default is 300.
+
+    n_decimals: int, optional
+        Number of decimals the floats in the displayed formula are rounded to.
+        Default is 3.
+
+    sampling_seed: int, optional
+        Seed used to sample the points for fitting the symbolic regressor.
+
+    symbolic_regressor_params: SymbolicRegressorParams, optional
+        Dataclass with the parameters for the ``SymbolicRegressor``.
+
+    Returns
+    -------
+    plotly.graph_objects.Figure
+
+    Raises
+    ------
+    ValueError
+        If no experiment is provided.
+
+    """
+
+    return backend.symbolic_explanation(
+        experiment,
+        with_evc_tree=with_evc_tree,
+        params=params,
+        smoothing=smoothing,
+        verbose_hover=verbose_hover,
+        n_grid_points=n_grid_points,
+        n_samples=n_samples,
+        colorscale=colorscale,
+        model=model,
+        model_kwargs=model_kwargs,
+        timeout=timeout,
+        n_decimals=n_decimals,
+        sampling_seed=sampling_seed,
+        symbolic_regressor_params=symbolic_regressor_params,
+    )
+
+
 def rankings(experiments, with_evc_tree=True, order_by="suggested", **kwargs):
     """
     Make a plot to visually compare the ranking of different hyper-optimization processes.
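A hypothetical call through this new plotting entry point, with the same assumed `experiment` client as above:

```python
from orion.analysis.symbolic_explanation import SymbolicRegressorParams
from orion.plotting.base import symbolic_explanation

fig = symbolic_explanation(
    experiment,
    n_grid_points=20,
    n_samples=100,
    n_decimals=2,
    symbolic_regressor_params=SymbolicRegressorParams(population_size=1000),
)
fig.show()
```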
@@ -434,6 +523,7 @@ def regrets(experiments, with_evc_tree=True, order_by="suggested", **kwargs):
     "lpi": lpi,
     "parallel_coordinates": parallel_coordinates,
     "partial_dependencies": partial_dependencies,
+    "symbolic_explanation": symbolic_explanation,
     "regret": regret,
     "regrets": regrets,
     "rankings": rankings,
@@ -446,6 +536,7 @@
     "lpi": lpi,
     "parallel_coordinates": parallel_coordinates,
     "partial_dependencies": partial_dependencies,
+    "symbolic_explanation": symbolic_explanation,
     "regret": regret,
 }
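These registry entries also make the new plot reachable through the generic dispatch used elsewhere in Orion, e.g. (with the same assumed `experiment` client):

```python
fig = experiment.plot(kind="symbolic_explanation", n_grid_points=20)
```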
@@ -507,6 +598,11 @@ def partial_dependencies(self, **kwargs):
         __doc__ = partial_dependencies.__doc__
         return self(kind="partial_dependencies", **kwargs)
 
+    def symbolic_explanation(self, **kwargs):
+        """Compute a symbolic explanation and make partial dependency plots based on the formula found."""
+        __doc__ = symbolic_explanation.__doc__
+        return self(kind="symbolic_explanation", **kwargs)
+
     def regret(self, **kwargs):
         """Make a plot to visualize the performance of the hyper-optimization process."""
         __doc__ = regret.__doc__
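Equivalently, through the accessor method added above:

```python
fig = experiment.plot.symbolic_explanation(n_samples=100, sampling_seed=1)
fig.show()
```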