complete scipy.optimize._lsq stubs
jorenham committed Oct 2, 2024
1 parent f4964ae commit 9e97f56
Showing 8 changed files with 400 additions and 128 deletions.
2 changes: 2 additions & 0 deletions scipy-stubs/_typing.pyi
@@ -17,6 +17,7 @@ __all__ = [
"AnyScalar",
"AnyShape",
"Array0D",
"Casting",
"CorrelateMode",
"NanPolicy",
"Seed",
@@ -54,6 +55,7 @@ AnyShape: TypeAlias = op.CanIndex | Sequence[op.CanIndex]
# numpy literals
RNG: TypeAlias = np.random.Generator | np.random.RandomState
Seed: TypeAlias = int | RNG
Casting: TypeAlias = Literal["no", "equiv", "safe", "same_kind", "unsafe"]
CorrelateMode: TypeAlias = Literal["valid", "same", "full"]

# scipy literals
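
The new `Casting` alias covers the same casting-mode literals that numpy accepts (e.g. in `ndarray.astype`). A minimal sketch of how such an alias can be reused in an annotation; the `as_float64` helper below is hypothetical and not part of scipy-stubs, and the alias is reproduced locally so the snippet runs on its own:

import numpy as np
import numpy.typing as npt
from typing import Literal, TypeAlias

# Same literal set as the alias added above, reproduced locally for illustration.
Casting: TypeAlias = Literal["no", "equiv", "safe", "same_kind", "unsafe"]

# Hypothetical helper: forwards the casting mode to ndarray.astype.
def as_float64(x: npt.ArrayLike, casting: Casting = "same_kind") -> npt.NDArray[np.float64]:
    return np.asarray(x).astype(np.float64, casting=casting)
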
25 changes: 20 additions & 5 deletions scipy-stubs/optimize/_lsq/bvls.pyi
@@ -1,6 +1,21 @@
from scipy._typing import Untyped
from scipy.optimize import OptimizeResult as OptimizeResult
from .common import print_header_linear as print_header_linear, print_iteration_linear as print_iteration_linear
from typing import Any, Literal

def compute_kkt_optimality(g, on_bound) -> Untyped: ...
def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond: Untyped | None = None) -> Untyped: ...
import numpy as np
import numpy.typing as npt
import scipy._typing as spt
from scipy.optimize import OptimizeResult

def compute_kkt_optimality(g: npt.NDArray[np.floating[Any]] | spt.AnyReal, on_bound: spt.AnyReal) -> np.floating[Any] | float: ...

# TODO(jorenham): custom `OptimizeResult` return type
def bvls(
A: npt.NDArray[np.floating[Any]],
b: npt.NDArray[np.floating[Any]],
x_lsq: npt.NDArray[np.floating[Any]],
lb: npt.NDArray[np.floating[Any]],
ub: npt.NDArray[np.floating[Any]],
tol: spt.AnyReal,
max_iter: spt.AnyInt,
verbose: Literal[0, 1, 2],
rcond: spt.AnyReal | None = None,
) -> OptimizeResult: ...
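
With these annotations a type checker can validate calls into the private BVLS driver. A minimal sketch assuming dense float64 inputs; the data is illustrative only and only the standard `OptimizeResult.x` field is inspected:

import numpy as np
from scipy.optimize._lsq.bvls import bvls

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 5))
b = rng.standard_normal(20)
lb, ub = np.zeros(5), np.full(5, 2.0)

# Unconstrained least-squares solution, which BVLS takes as its starting point.
x_lsq = np.linalg.lstsq(A, b, rcond=None)[0]

res = bvls(A, b, x_lsq, lb, ub, tol=1e-10, max_iter=100, verbose=0)
print(res.x)
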
181 changes: 149 additions & 32 deletions scipy-stubs/optimize/_lsq/common.pyi
@@ -1,35 +1,152 @@
from scipy._typing import Untyped
from scipy.linalg import LinAlgError as LinAlgError, cho_factor as cho_factor, cho_solve as cho_solve
from scipy.sparse import issparse as issparse
from scipy.sparse.linalg import LinearOperator as LinearOperator, aslinearoperator as aslinearoperator
from typing import Any, Final, Literal, TypeAlias

EPS: Untyped
import numpy as np
import numpy.typing as npt
import optype.numpy as onpt
import scipy._typing as spt
from scipy.sparse import sparray, spmatrix
from scipy.sparse.linalg import LinearOperator

def intersect_trust_region(x, s, Delta) -> Untyped: ...
_SparseArray: TypeAlias = sparray | spmatrix

EPS: Final[float]

def intersect_trust_region(
x: npt.ArrayLike,
s: npt.ArrayLike,
Delta: spt.AnyReal,
) -> tuple[float | np.float64, float | np.float64]: ...
def solve_lsq_trust_region(
n, m, uf, s, V, Delta, initial_alpha: Untyped | None = None, rtol: float = 0.01, max_iter: int = 10
) -> Untyped: ...
def solve_trust_region_2d(B, g, Delta) -> Untyped: ...
def update_tr_radius(Delta, actual_reduction, predicted_reduction, step_norm, bound_hit) -> Untyped: ...
def build_quadratic_1d(J, g, s, diag: Untyped | None = None, s0: Untyped | None = None) -> Untyped: ...
def minimize_quadratic_1d(a, b, lb, ub, c: int = 0) -> Untyped: ...
def evaluate_quadratic(J, g, s, diag: Untyped | None = None) -> Untyped: ...
def in_bounds(x, lb, ub) -> Untyped: ...
def step_size_to_bound(x, s, lb, ub) -> Untyped: ...
def find_active_constraints(x, lb, ub, rtol: float = 1e-10) -> Untyped: ...
def make_strictly_feasible(x, lb, ub, rstep: float = 1e-10) -> Untyped: ...
def CL_scaling_vector(x, g, lb, ub) -> Untyped: ...
def reflective_transformation(y, lb, ub) -> Untyped: ...
def print_header_nonlinear(): ...
def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction, step_norm, optimality): ...
def print_header_linear(): ...
def print_iteration_linear(iteration, cost, cost_reduction, step_norm, optimality): ...
def compute_grad(J, f) -> Untyped: ...
def compute_jac_scale(J, scale_inv_old: Untyped | None = None) -> Untyped: ...
def left_multiplied_operator(J, d) -> Untyped: ...
def right_multiplied_operator(J, d) -> Untyped: ...
def regularized_lsq_operator(J, diag) -> Untyped: ...
def right_multiply(J, d, copy: bool = True) -> Untyped: ...
def left_multiply(J, d, copy: bool = True) -> Untyped: ...
def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol) -> Untyped: ...
def scale_for_robust_loss_function(J, f, rho) -> Untyped: ...
n: int,
m: int,
uf: npt.NDArray[np.floating[Any]],
s: npt.NDArray[np.floating[Any]],
V: npt.NDArray[np.floating[Any]],
Delta: spt.AnyReal,
initial_alpha: spt.AnyReal | None = None,
rtol: spt.AnyReal = 0.01,
max_iter: spt.AnyInt = 10,
) -> tuple[onpt.Array[tuple[int], np.float64], float, int]: ...
def solve_trust_region_2d(
B: npt.ArrayLike,
g: npt.ArrayLike,
Delta: spt.AnyReal,
) -> tuple[onpt.Array[tuple[Literal[2]], np.float64], bool]: ...
def update_tr_radius(
Delta: spt.AnyReal,
actual_reduction: spt.AnyReal,
predicted_reduction: spt.AnyReal,
step_norm: spt.AnyReal,
bound_hit: spt.AnyBool,
) -> tuple[float, float]: ...
def build_quadratic_1d(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
g: npt.NDArray[np.floating[Any]],
s: npt.NDArray[np.floating[Any]],
diag: npt.NDArray[np.floating[Any]] | None = None,
s0: npt.NDArray[np.floating[Any]] | None = None,
) -> tuple[float, float, float]: ...
def minimize_quadratic_1d(
a: spt.AnyReal,
b: spt.AnyReal,
lb: npt.ArrayLike,
ub: npt.ArrayLike,
c: spt.AnyReal = 0,
) -> tuple[float, float]: ...
def evaluate_quadratic(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
g: npt.NDArray[np.floating[Any]],
s: npt.NDArray[np.floating[Any]],
diag: npt.NDArray[np.floating[Any]] | None = None,
) -> np.float64 | onpt.Array[tuple[int], np.float64]: ...
def in_bounds(x: npt.NDArray[np.floating[Any]], lb: npt.ArrayLike, ub: npt.ArrayLike) -> np.bool_: ...
def step_size_to_bound(
x: npt.ArrayLike,
s: npt.ArrayLike,
lb: npt.ArrayLike,
ub: npt.ArrayLike,
) -> tuple[float, npt.NDArray[np.int_]]: ...
def find_active_constraints(
x: npt.ArrayLike,
lb: npt.ArrayLike,
ub: npt.ArrayLike,
rtol: spt.AnyReal = 1e-10,
) -> npt.NDArray[np.int_]: ...
def make_strictly_feasible(
x: npt.NDArray[np.floating[Any]],
lb: npt.ArrayLike,
ub: npt.ArrayLike,
rstep: spt.AnyReal = 1e-10,
) -> npt.NDArray[np.floating[Any]]: ...
def CL_scaling_vector(
x: npt.NDArray[np.floating[Any]],
g: npt.NDArray[np.floating[Any]],
lb: npt.ArrayLike,
ub: npt.ArrayLike,
) -> tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]: ...
def reflective_transformation(
y: npt.NDArray[np.floating[Any]],
lb: npt.ArrayLike,
ub: npt.ArrayLike,
) -> tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]: ...
def print_header_nonlinear() -> None: ...
def print_iteration_nonlinear(
iteration: int,
nfev: int,
cost: float,
cost_reduction: float,
step_norm: float,
optimality: float,
) -> None: ...
def print_header_linear() -> None: ...
def print_iteration_linear(
iteration: int,
cost: float,
cost_reduction: float,
step_norm: float,
optimality: float,
) -> None: ...
def compute_grad(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
f: npt.NDArray[np.floating[Any]],
) -> npt.NDArray[np.floating[Any]] | _SparseArray: ...
def compute_jac_scale(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
scale_inv_old: npt.NDArray[np.floating[Any]] | spt.AnyReal | None = None,
) -> tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]: ...
def left_multiplied_operator(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
d: npt.NDArray[np.floating[Any]],
) -> LinearOperator: ...
def right_multiplied_operator(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
d: npt.NDArray[np.floating[Any]],
) -> LinearOperator: ...
def regularized_lsq_operator(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
diag: npt.NDArray[np.floating[Any]],
) -> LinearOperator: ...
def right_multiply(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
d: npt.NDArray[np.floating[Any]],
copy: bool = True,
) -> npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator: ...
def left_multiply(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
d: npt.NDArray[np.floating[Any]],
copy: bool = True,
) -> npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator: ...
def check_termination(
dF: spt.AnyReal,
F: spt.AnyReal,
dx_norm: spt.AnyReal,
x_norm: spt.AnyReal,
ratio: spt.AnyReal,
ftol: spt.AnyReal,
xtol: spt.AnyReal,
) -> Literal[2, 3, 4] | None: ...
def scale_for_robust_loss_function(
J: npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator,
f: spt.AnyReal,
rho: npt.NDArray[np.floating[Any]],
) -> tuple[npt.NDArray[np.floating[Any]] | _SparseArray | LinearOperator, spt.AnyReal]: ...
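
Several of these helpers now expose precise scalar return types that a type checker can propagate. A small illustrative sketch exercising a few of them:

import numpy as np
from scipy.optimize._lsq.common import check_termination, in_bounds, make_strictly_feasible

lb = np.array([0.0, 0.0])
ub = np.array([1.0, 1.0])
x = np.array([0.5, 1.0])

inside = in_bounds(x, lb, ub)                                # np.bool_
x_interior = make_strictly_feasible(x, lb, ub, rstep=1e-10)  # nudged strictly inside the bounds

# Returns 2 (ftol satisfied), 3 (xtol satisfied), 4 (both), or None if no criterion is met.
status = check_termination(1e-12, 1.0, 1e-12, 1.0, 0.5, ftol=1e-8, xtol=1e-8)
print(inside, x_interior, status)
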
99 changes: 76 additions & 23 deletions scipy-stubs/optimize/_lsq/dogbox.pyi
@@ -1,24 +1,77 @@
from scipy._typing import Untyped
from scipy.optimize import OptimizeResult as OptimizeResult
from scipy.sparse.linalg import LinearOperator as LinearOperator, aslinearoperator as aslinearoperator, lsmr as lsmr
from .common import (
build_quadratic_1d as build_quadratic_1d,
check_termination as check_termination,
compute_grad as compute_grad,
compute_jac_scale as compute_jac_scale,
evaluate_quadratic as evaluate_quadratic,
in_bounds as in_bounds,
minimize_quadratic_1d as minimize_quadratic_1d,
print_header_nonlinear as print_header_nonlinear,
print_iteration_nonlinear as print_iteration_nonlinear,
scale_for_robust_loss_function as scale_for_robust_loss_function,
step_size_to_bound as step_size_to_bound,
update_tr_radius as update_tr_radius,
)

def lsmr_operator(Jop, d, active_set) -> Untyped: ...
def find_intersection(x, tr_bounds, lb, ub) -> Untyped: ...
def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub) -> Untyped: ...
from collections.abc import Callable, Mapping
from typing import Any, Literal, TypeAlias
from typing_extensions import TypeVar

import numpy as np
import optype.numpy as onpt
from scipy.optimize import OptimizeResult
from scipy.optimize._typing import SolverLSQ
from scipy.sparse import sparray, spmatrix
from scipy.sparse.linalg import LinearOperator

# TODO: custom `OptimizeResult`

_SCT_i = TypeVar("_SCT_i", bound=np.integer[Any], default=np.int_)
_SCT_f = TypeVar("_SCT_f", bound=np.floating[Any], default=np.float64)

_N_x = TypeVar("_N_x", bound=int, default=int)
_N_f = TypeVar("_N_f", bound=int, default=int)

_ValueFloat: TypeAlias = float | _SCT_f

_VectorBool: TypeAlias = onpt.Array[tuple[_N_x], np.bool_]
_VectorInt: TypeAlias = onpt.Array[tuple[_N_x], _SCT_i]
_VectorFloat: TypeAlias = onpt.Array[tuple[_N_x], _SCT_f]
_MatrixFloat: TypeAlias = onpt.Array[tuple[_N_x, _N_f], _SCT_f] | sparray | spmatrix | LinearOperator

_FunResid: TypeAlias = Callable[[_VectorFloat[_N_x]], _VectorFloat[_N_f]]
# this type-alias is a workaround to get the correct order of type params
_FunJac: TypeAlias = Callable[[_VectorFloat[_N_x], _VectorFloat[_N_f]], _MatrixFloat[_N_f, _N_x]]
_FunLoss: TypeAlias = Callable[[_VectorFloat[_N_x]], _ValueFloat]

def lsmr_operator(
Jop: LinearOperator,
d: _VectorFloat[_N_x, _SCT_f],
active_set: _VectorBool[_N_x],
) -> LinearOperator: ...
def find_intersection(
x: _VectorFloat[_N_x],
tr_bounds: _VectorFloat[_N_x],
lb: _VectorFloat[_N_x],
ub: _VectorFloat[_N_x],
) -> tuple[
_VectorFloat[_N_x],
_VectorFloat[_N_x],
_VectorBool[_N_x],
_VectorBool[_N_x],
_VectorBool[_N_x],
_VectorBool[_N_x],
]: ...
def dogleg_step(
x: _VectorFloat[_N_x],
newton_step: _VectorFloat[_N_x],
g: _VectorFloat[_N_x],
a: _ValueFloat,
b: _ValueFloat,
tr_bounds: _VectorFloat[_N_x],
lb: _VectorFloat[_N_x],
ub: _VectorFloat[_N_x],
) -> tuple[_VectorFloat[_N_x], _VectorInt[_N_x], np.bool_]: ...
def dogbox(
fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose
) -> Untyped: ...
fun: _FunResid[_N_x, _N_f],
jac: _FunJac[_N_x, _N_f],
x0: _VectorFloat[_N_x],
f0: _VectorFloat[_N_f],
J0: _MatrixFloat[_N_f, _N_x],
lb: _VectorFloat[_N_x],
ub: _VectorFloat[_N_x],
ftol: _ValueFloat,
xtol: _ValueFloat,
gtol: _ValueFloat,
max_nfev: int,
x_scale: Literal["jac"] | _ValueFloat | _VectorFloat[_N_f],
loss_function: _FunLoss[_N_x],
tr_solver: SolverLSQ,
tr_options: Mapping[str, object],
verbose: bool,
) -> OptimizeResult: ...
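
`dogbox` itself is private; in practice it is reached through the public `scipy.optimize.least_squares` with `method="dogbox"`, which is also how the shape-parametrised callback aliases above are exercised. A minimal, illustrative sketch:

import numpy as np
from scipy.optimize import least_squares

def residuals(x: np.ndarray) -> np.ndarray:
    # Two residuals in two unknowns, matching the _FunResid[_N_x, _N_f] shape pattern.
    return np.array([x[0] ** 2 + x[1] - 11.0, x[0] + x[1] ** 2 - 7.0])

res = least_squares(residuals, x0=np.array([1.0, 1.0]), bounds=(0.0, 5.0), method="dogbox")
print(res.x, res.status)
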
42 changes: 30 additions & 12 deletions scipy-stubs/optimize/_lsq/lsq_linear.pyi
@@ -1,21 +1,39 @@
from typing import Final
from collections.abc import Sequence
from typing import Any, Final, Literal, TypeAlias

from scipy._typing import Untyped
import numpy as np
import optype.numpy as onpt
from scipy.optimize import OptimizeResult
from scipy.optimize._typing import Bound
from scipy.sparse import sparray, spmatrix
from scipy.sparse.linalg import LinearOperator

_ScalarB1: TypeAlias = bool | np.bool_
_ScalarF8: TypeAlias = float | np.float64
_VectorF8: TypeAlias = onpt.Array[tuple[int], np.float64]

_ScalarInt_co: TypeAlias = np.integer[Any]
_ScalarFloat_co: TypeAlias = np.floating[Any] | _ScalarInt_co

_ScalarLikeFloat_co: TypeAlias = float | _ScalarFloat_co
_VectorLikeFloat_co: TypeAlias = Sequence[_ScalarLikeFloat_co] | onpt.CanArray[tuple[int], np.dtype[_ScalarFloat_co]]
_MatrixLikeFloat_co: TypeAlias = Sequence[_VectorLikeFloat_co] | onpt.CanArray[tuple[int, int], np.dtype[_ScalarFloat_co]]

_SparseArray: TypeAlias = sparray | spmatrix

TERMINATION_MESSAGES: Final[dict[int, str]]

def prepare_bounds(bounds, n) -> Untyped: ...
def prepare_bounds(bounds: Bound, n: int) -> tuple[_ScalarF8, _ScalarF8] | tuple[_VectorF8, _VectorF8]: ...
def lsq_linear(
A,
b,
bounds=...,
method: str = "trf",
A: LinearOperator | _SparseArray | _MatrixLikeFloat_co,
b: _VectorLikeFloat_co,
bounds: Bound = ...,
method: Literal["trf", "bvls"] = "trf",
tol: float = 1e-10,
lsq_solver: Untyped | None = None,
lsmr_tol: Untyped | None = None,
max_iter: Untyped | None = None,
verbose: int = 0,
lsq_solver: Literal["exact", "lsmr"] | None = None,
lsmr_tol: Literal["auto"] | float | None = None,
max_iter: int | None = None,
verbose: Literal[0, 1] | _ScalarB1 = 0,
*,
lsmr_maxiter: Untyped | None = None,
lsmr_maxiter: int | None = None,
) -> OptimizeResult: ...
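
The public `lsq_linear` entry point now constrains its string options at type-check time: `method` is `Literal["trf", "bvls"]` and `lsq_solver` is `Literal["exact", "lsmr"] | None`. A minimal usage sketch with illustrative data:

import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 4))
b = rng.standard_normal(30)

# Box-constrained linear least squares; "bvls" requires a dense A.
res = lsq_linear(A, b, bounds=(0.0, 1.0), method="bvls", tol=1e-10, verbose=0)
print(res.x, res.status)

# lsq_linear(A, b, method="cg")  # rejected by the type checker: not in Literal["trf", "bvls"]
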