diff --git a/src/optimagic/optimization/algorithm.py b/src/optimagic/optimization/algorithm.py
index 7f776cf90..c52245c4f 100644
--- a/src/optimagic/optimization/algorithm.py
+++ b/src/optimagic/optimization/algorithm.py
@@ -10,12 +10,16 @@
 from optimagic.exceptions import InvalidAlgoInfoError, InvalidAlgoOptionError
 from optimagic.logging.types import StepStatus
+from optimagic.optimization.convergence_report import get_convergence_report
 from optimagic.optimization.history import History
 from optimagic.optimization.internal_optimization_problem import (
     InternalOptimizationProblem,
 )
+from optimagic.optimization.optimize_result import OptimizeResult
+from optimagic.parameters.conversion import Converter
 from optimagic.type_conversion import TYPE_CONVERTERS
-from optimagic.typing import AggregationLevel
+from optimagic.typing import AggregationLevel, Direction, ExtraResultFields
+from optimagic.utilities import isscalar


 @dataclass(frozen=True)
@@ -142,6 +146,56 @@ def __post_init__(self) -> None:
             )
             raise TypeError(msg)

+    def create_optimize_result(
+        self,
+        converter: Converter,
+        solver_type: AggregationLevel,
+        extra_fields: ExtraResultFields,
+    ) -> OptimizeResult:
+        """Process an internal optimizer result."""
+        params = converter.params_from_internal(self.x)
+        if isscalar(self.fun):
+            fun = float(self.fun)
+        elif solver_type == AggregationLevel.LIKELIHOOD:
+            fun = float(np.sum(self.fun))
+        elif solver_type == AggregationLevel.LEAST_SQUARES:
+            fun = np.dot(self.fun, self.fun)
+
+        if extra_fields.direction == Direction.MAXIMIZE:
+            fun = -fun
+
+        if self.history is not None:
+            conv_report = get_convergence_report(
+                history=self.history, direction=extra_fields.direction
+            )
+        else:
+            conv_report = None
+
+        out = OptimizeResult(
+            params=params,
+            fun=fun,
+            start_fun=extra_fields.start_fun,
+            start_params=extra_fields.start_params,
+            algorithm=extra_fields.algorithm,
+            direction=extra_fields.direction.value,
+            n_free=extra_fields.n_free,
+            message=self.message,
+            success=self.success,
+            n_fun_evals=self.n_fun_evals,
+            n_jac_evals=self.n_jac_evals,
+            n_hess_evals=self.n_hess_evals,
+            n_iterations=self.n_iterations,
+            status=self.status,
+            jac=self.jac,
+            hess=self.hess,
+            hess_inv=self.hess_inv,
+            max_constraint_violation=self.max_constraint_violation,
+            history=self.history,
+            algorithm_output=self.info,
+            convergence_report=conv_report,
+        )
+        return out
+

 class AlgorithmMeta(ABCMeta):
     """Metaclass to get repr, algo_info and name for classes, not just instances."""
@@ -234,7 +288,7 @@ def solve_internal_problem(
         self,
         problem: InternalOptimizationProblem,
         x0: NDArray[np.float64],
         step_id: int,
-    ) -> InternalOptimizeResult:
+    ) -> OptimizeResult:
         problem = problem.with_new_history().with_step_id(step_id)

         if problem.logger:
@@ -242,17 +296,23 @@ def solve_internal_problem(
                 step_id, {"status": str(StepStatus.RUNNING.value)}
             )

-        result = self._solve_internal_problem(problem, x0)
+        raw_res = self._solve_internal_problem(problem, x0)

-        if (not self.algo_info.disable_history) and (result.history is None):
-            result = replace(result, history=problem.history)
+        if (not self.algo_info.disable_history) and (raw_res.history is None):
+            raw_res = replace(raw_res, history=problem.history)

         if problem.logger:
             problem.logger.step_store.update(
                 step_id, {"status": str(StepStatus.COMPLETE.value)}
             )

-        return result
+        res = raw_res.create_optimize_result(
+            converter=problem.converter,
+            solver_type=self.algo_info.solver_type,
+            extra_fields=problem.static_result_fields,
+        )
+
+        return res

     def with_option_if_applicable(self, **kwargs: Any) -> Self:
         """Call with_option only with applicable keyword arguments."""
diff --git a/src/optimagic/optimization/multistart.py b/src/optimagic/optimization/multistart.py
index c3d4cf3e1..c6de25bff 100644
--- a/src/optimagic/optimization/multistart.py
+++ b/src/optimagic/optimization/multistart.py
@@ -12,7 +12,6 @@
 """

 import warnings
-from dataclasses import replace
 from typing import Literal

 import numpy as np
@@ -21,7 +20,7 @@
 from optimagic.logging.logger import LogStore
 from optimagic.logging.types import StepStatus
-from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
+from optimagic.optimization.algorithm import Algorithm
 from optimagic.optimization.internal_optimization_problem import (
     InternalBounds,
     InternalOptimizationProblem,
@@ -30,6 +29,8 @@
 from optimagic.optimization.optimization_logging import (
     log_scheduled_steps_and_get_ids,
 )
+from optimagic.optimization.optimize_result import OptimizeResult
+from optimagic.optimization.process_results import process_multistart_result
 from optimagic.typing import AggregationLevel, ErrorHandling
 from optimagic.utilities import get_rng
@@ -42,7 +43,7 @@ def run_multistart_optimization(
     options: InternalMultistartOptions,
     logger: LogStore | None,
     error_handling: ErrorHandling,
-) -> InternalOptimizeResult:
+) -> OptimizeResult:
     steps = determine_steps(options.n_samples, stopping_maxopt=options.stopping_maxopt)

     scheduled_steps = log_scheduled_steps_and_get_ids(
@@ -159,6 +160,7 @@ def single_optimization(x0, step_id):
                 results=batch_results,
                 convergence_criteria=convergence_criteria,
                 solver_type=local_algorithm.algo_info.solver_type,
+                converter=internal_problem.converter,
             )
             opt_counter += len(batch)
             if is_converged:
@@ -176,7 +178,12 @@
     }

     raw_res = state["best_res"]
-    res = replace(raw_res, multistart_info=multistart_info)
+    res = process_multistart_result(
+        raw_res=raw_res,
+        converter=internal_problem.converter,
+        extra_fields=internal_problem.static_result_fields,
+        multistart_info=multistart_info,
+    )

     return res
@@ -371,7 +378,12 @@ def get_batched_optimization_sample(sorted_sample, stopping_maxopt, batch_size):


 def update_convergence_state(
-    current_state, starts, results, convergence_criteria, solver_type
+    current_state,
+    starts,
+    results,
+    convergence_criteria,
+    solver_type,
+    converter,
 ):
     """Update the state of all quantities related to convergence.
@@ -389,6 +401,7 @@ def update_convergence_state(
         convergence_criteria (dict): Dict with the entries "xtol" and
             "max_discoveries"
         solver_type: The aggregation level of the local optimizer. Needed to
             interpret the output of the internal criterion function.
+        converter: The converter to map between internal and external parameter spaces.

     Returns:
@@ -422,7 +435,7 @@ def update_convergence_state(
     # ==================================================================================
     valid_results = [results[i] for i in valid_indices]
     valid_starts = [starts[i] for i in valid_indices]
-    valid_new_x = [res.x for res in valid_results]
+    valid_new_x = [converter.params_to_internal(res.params) for res in valid_results]
     valid_new_y = []

     # make the criterion output scalar if a least squares optimizer returns an
diff --git a/src/optimagic/optimization/optimize.py b/src/optimagic/optimization/optimize.py
index fc1cbdeef..5a383ff86 100644
--- a/src/optimagic/optimization/optimize.py
+++ b/src/optimagic/optimization/optimize.py
@@ -48,10 +48,6 @@
 )
 from optimagic.optimization.optimization_logging import log_scheduled_steps_and_get_ids
 from optimagic.optimization.optimize_result import OptimizeResult
-from optimagic.optimization.process_results import (
-    process_multistart_result,
-    process_single_result,
-)
 from optimagic.parameters.bounds import Bounds
 from optimagic.parameters.conversion import (
     get_converter,
@@ -644,7 +640,7 @@
             logger=logger,
         )[0]

-        raw_res = problem.algorithm.solve_internal_problem(internal_problem, x, step_id)
+        res = problem.algorithm.solve_internal_problem(internal_problem, x, step_id)

     else:
         multistart_options = get_internal_multistart_options_from_public(
@@ -658,7 +654,7 @@
             upper=internal_params.soft_upper_bounds,
         )

-        raw_res = run_multistart_optimization(
+        res = run_multistart_optimization(
             local_algorithm=problem.algorithm,
             internal_problem=internal_problem,
             x=x,
@@ -672,21 +668,6 @@
     # Process the result
     # ==================================================================================
-    if problem.multistart is None:
-        res = process_single_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=problem.algorithm.algo_info.solver_type,
-            extra_fields=extra_fields,
-        )
-    else:
-        res = process_multistart_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=problem.algorithm.algo_info.solver_type,
-            extra_fields=extra_fields,
-        )
-
     log_reader: LogReader[Any] | None
     if logger is not None:
         assert problem.logging is not None
diff --git a/src/optimagic/optimization/process_results.py b/src/optimagic/optimization/process_results.py
index 2843c7325..890437668 100644
--- a/src/optimagic/optimization/process_results.py
+++ b/src/optimagic/optimization/process_results.py
@@ -1,91 +1,29 @@
-from dataclasses import replace
+import copy
 from typing import Any

 import numpy as np

-from optimagic.optimization.algorithm import InternalOptimizeResult
 from optimagic.optimization.convergence_report import get_convergence_report
 from optimagic.optimization.optimize_result import MultistartInfo, OptimizeResult
 from optimagic.parameters.conversion import Converter
-from optimagic.typing import AggregationLevel, Direction, ExtraResultFields
-from optimagic.utilities import isscalar
-
-
-def process_single_result(
-    raw_res: InternalOptimizeResult,
-    converter: Converter,
-    solver_type: AggregationLevel,
-    extra_fields: ExtraResultFields,
-) -> OptimizeResult:
-    """Process an internal optimizer result."""
-    params = converter.params_from_internal(raw_res.x)
-    if isscalar(raw_res.fun):
-        fun = float(raw_res.fun)
-    elif solver_type == AggregationLevel.LIKELIHOOD:
-        fun = float(np.sum(raw_res.fun))
-    elif solver_type == AggregationLevel.LEAST_SQUARES:
-        fun = np.dot(raw_res.fun, raw_res.fun)
-
-    if extra_fields.direction == Direction.MAXIMIZE:
-        fun = -fun
-
-    if raw_res.history is not None:
-        conv_report = get_convergence_report(
-            history=raw_res.history, direction=extra_fields.direction
-        )
-    else:
-        conv_report = None
-
-    out = OptimizeResult(
-        params=params,
-        fun=fun,
-        start_fun=extra_fields.start_fun,
-        start_params=extra_fields.start_params,
-        algorithm=extra_fields.algorithm,
-        direction=extra_fields.direction.value,
-        n_free=extra_fields.n_free,
-        message=raw_res.message,
-        success=raw_res.success,
-        n_fun_evals=raw_res.n_fun_evals,
-        n_jac_evals=raw_res.n_jac_evals,
-        n_hess_evals=raw_res.n_hess_evals,
-        n_iterations=raw_res.n_iterations,
-        status=raw_res.status,
-        jac=raw_res.jac,
-        hess=raw_res.hess,
-        hess_inv=raw_res.hess_inv,
-        max_constraint_violation=raw_res.max_constraint_violation,
-        history=raw_res.history,
-        algorithm_output=raw_res.info,
-        convergence_report=conv_report,
-    )
-    return out
+from optimagic.typing import Direction, ExtraResultFields


 def process_multistart_result(
-    raw_res: InternalOptimizeResult,
+    raw_res: OptimizeResult,
     converter: Converter,
-    solver_type: AggregationLevel,
     extra_fields: ExtraResultFields,
+    multistart_info: dict[str, Any],
 ) -> OptimizeResult:
     """Process results of internal optimizers."""
-    if raw_res.multistart_info is None:
-        raise ValueError("Multistart info is missing.")

     if isinstance(raw_res, str):
         res = _dummy_result_from_traceback(raw_res, extra_fields)
     else:
-        res = process_single_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=solver_type,
-            extra_fields=extra_fields,
-        )
-
+        res = raw_res

     info = _process_multistart_info(
-        raw_res.multistart_info,
+        multistart_info,
         converter=converter,
-        solver_type=solver_type,
         extra_fields=extra_fields,
     )
@@ -118,24 +56,15 @@
 def _process_multistart_info(
     info: dict[str, Any],
     converter: Converter,
-    solver_type: AggregationLevel,
     extra_fields: ExtraResultFields,
 ) -> MultistartInfo:
     starts = [converter.params_from_internal(x) for x in info["start_parameters"]]
     optima = []
     for res, start in zip(info["local_optima"], starts, strict=False):
-        replacements = {
-            "start_params": start,
-            "start_fun": None,
-        }
-
-        processed = process_single_result(
-            res,
-            converter=converter,
-            solver_type=solver_type,
-            extra_fields=replace(extra_fields, **replacements),
-        )
+        processed = copy.copy(res)
+        processed.start_params = start
+        processed.start_fun = None

         optima.append(processed)

     sample = [converter.params_from_internal(x) for x in info["exploration_sample"]]
diff --git a/tests/optimagic/optimization/test_multistart.py b/tests/optimagic/optimization/test_multistart.py
index 06ec00236..b774c1ef7 100644
--- a/tests/optimagic/optimization/test_multistart.py
+++ b/tests/optimagic/optimization/test_multistart.py
@@ -6,13 +6,13 @@
 import pytest
 from numpy.testing import assert_array_almost_equal as aaae

-from optimagic.optimization.algorithm import InternalOptimizeResult
 from optimagic.optimization.multistart import (
     _draw_exploration_sample,
     get_batched_optimization_sample,
     run_explorations,
     update_convergence_state,
 )
+from optimagic.optimization.optimize_result import OptimizeResult


 @pytest.fixture()
@@ -129,13 +129,23 @@ def starts():
 @pytest.fixture()
 def results():
-    res = InternalOptimizeResult(
-        x=np.arange(3) + 1e-10,
+    res = OptimizeResult(
+        params=np.arange(3) + 1e-10,
         fun=4,
+        start_fun=5,
+        start_params=np.arange(3),
+        algorithm="bla",
+        direction="minimize",
+        n_free=3,
     )
     return [res]


+class DummyConverter:
+    def params_to_internal(self, params):
+        return params
+
+
 def test_update_state_converged(current_state, starts, results):
     criteria = {
         "xtol": 1e-3,
         "max_discoveries": 2,
     }
@@ -148,6 +158,7 @@
         results=results,
         convergence_criteria=criteria,
         solver_type="value",
+        converter=DummyConverter(),
     )

     aaae(new_state["best_x"], np.arange(3))
@@ -171,6 +182,7 @@ def test_update_state_not_converged(current_state, starts, results):
         results=results,
         convergence_criteria=criteria,
         solver_type="value",
+        converter=DummyConverter(),
     )

     assert not is_converged
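The sketch below (not part of the patch) restates the scalar-aggregation rule that create_optimize_result, formerly process_single_result, applies to the internal criterion output before storing it on the OptimizeResult: a scalar is kept as-is, likelihood output is summed over observations, least-squares residuals are squared and summed, and the sign is flipped back for maximization problems. It is a minimal, self-contained approximation, not optimagic's API: the name aggregate_fun and the string solver-type labels are hypothetical stand-ins for the AggregationLevel enum, and np.isscalar stands in for optimagic.utilities.isscalar.

import numpy as np


def aggregate_fun(fun, solver_type, maximize=False):
    """Collapse an internal criterion output into the scalar reported to users."""
    if np.isscalar(fun):
        out = float(fun)  # scalar solvers already return a single value
    elif solver_type == "likelihood":
        out = float(np.sum(fun))  # likelihood solvers return per-observation terms
    elif solver_type == "least_squares":
        out = float(np.dot(fun, fun))  # least-squares solvers return residuals
    else:
        raise ValueError(f"unknown solver_type: {solver_type}")
    # Everything is minimized internally, so flip the sign back for maximization.
    return -out if maximize else out


assert aggregate_fun(4.0, "scalar") == 4.0
assert aggregate_fun(np.array([1.0, 2.0]), "likelihood") == 3.0
assert aggregate_fun(np.array([1.0, 2.0]), "least_squares") == 5.0
assert aggregate_fun(np.array([1.0, 2.0]), "likelihood", maximize=True) == -3.0

Because solve_internal_problem now hands back a fully processed OptimizeResult, multistart code sees external params rather than the internal x, which is why update_convergence_state gains a converter argument and the tests pass the identity-like DummyConverter.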