From 5b5b6af178005d5f5cc8082bc74a28ade0cadd04 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 13 Oct 2024 01:01:37 +0200 Subject: [PATCH] fix `scipy.stats.mstats` stubtests --- scipy-stubs/_typing.pyi | 5 +- scipy-stubs/stats/_mstats_basic.pyi | 62 +- scipy-stubs/stats/_mstats_extras.pyi | 16 +- scipy-stubs/stats/_stats_mstats_common.pyi | 59 +- scipy-stubs/stats/_stats_py.pyi | 997 ++++++++++++++++----- scipy-stubs/stats/_typing.pyi | 43 + tests/stubtest/allowlist.txt | 1 + 7 files changed, 895 insertions(+), 288 deletions(-) create mode 100644 scipy-stubs/stats/_typing.pyi diff --git a/scipy-stubs/_typing.pyi b/scipy-stubs/_typing.pyi index 9382b9f5..d818414a 100644 --- a/scipy-stubs/_typing.pyi +++ b/scipy-stubs/_typing.pyi @@ -1,4 +1,5 @@ -# Helper types for internal use (type-check only). +# NOTE(scipy-stubs): This module only exists `if typing.TYPE_CHECKING: ...` + from os import PathLike from collections.abc import Callable, Sequence from typing import IO, Any, Literal, Protocol, TypeAlias, type_check_only @@ -10,6 +11,7 @@ import optype.numpy as onpt __all__ = [ "RNG", + "Alternative", "AnyBool", "AnyChar", "AnyComplex", @@ -74,6 +76,7 @@ CorrelateMode: TypeAlias = Literal["valid", "same", "full"] # scipy literals NanPolicy: TypeAlias = Literal["raise", "propagate", "omit"] +Alternative: TypeAlias = Literal["two-sided", "less", "greater"] # used in `scipy.linalg.blas` and `scipy.linalg.lapack` @type_check_only diff --git a/scipy-stubs/stats/_mstats_basic.pyi b/scipy-stubs/stats/_mstats_basic.pyi index 784991be..ae5d2a3f 100644 --- a/scipy-stubs/stats/_mstats_basic.pyi +++ b/scipy-stubs/stats/_mstats_basic.pyi @@ -130,7 +130,7 @@ trimdoc: str def argstoarray(*args) -> Untyped: ... def find_repeats(arr) -> Untyped: ... def count_tied_groups(x, use_missing: bool = False) -> Untyped: ... -def rankdata(data, axis: Untyped | None = None, use_missing: bool = False) -> Untyped: ... 
+def rankdata(data, axis: int | None = None, use_missing: bool = False) -> Untyped: ... def mode(a, axis: int = 0) -> Untyped: ... def msign(x) -> Untyped: ... def pearsonr(x, y) -> Untyped: ... @@ -138,7 +138,7 @@ def spearmanr( x, y: Untyped | None = None, use_ties: bool = True, - axis: Untyped | None = None, + axis: int | None = None, nan_policy: str = "propagate", alternative: str = "two-sided", ) -> Untyped: ... @@ -164,63 +164,75 @@ def kruskal(*args) -> Untyped: ... def ks_1samp(x, cdf, args=(), alternative: str = "two-sided", method: str = "auto") -> Untyped: ... def ks_2samp(data1, data2, alternative: str = "two-sided", method: str = "auto") -> Untyped: ... def kstest(data1, data2, args=(), alternative: str = "two-sided", method: str = "auto") -> Untyped: ... -def trima(a, limits: Untyped | None = None, inclusive=(True, True)) -> Untyped: ... -def trimr(a, limits: Untyped | None = None, inclusive=(True, True), axis: Untyped | None = None) -> Untyped: ... +def trima(a, limits: Untyped | None = None, inclusive: tuple[bool, bool] = (True, True)) -> Untyped: ... +def trimr(a, limits: Untyped | None = None, inclusive: tuple[bool, bool] = (True, True), axis: int | None = None) -> Untyped: ... def trim( a, limits: Untyped | None = None, - inclusive=(True, True), + inclusive: tuple[bool, bool] = (True, True), relative: bool = False, - axis: Untyped | None = None, + axis: int | None = None, ) -> Untyped: ... def trimboth( data, proportiontocut: float = 0.2, - inclusive=(True, True), - axis: Untyped | None = None, + inclusive: tuple[bool, bool] = (True, True), + axis: int | None = None, ) -> Untyped: ... def trimtail( data, proportiontocut: float = 0.2, tail: str = "left", - inclusive=(True, True), - axis: Untyped | None = None, + inclusive: tuple[bool, bool] = (True, True), + axis: int | None = None, ) -> Untyped: ... 
def trimmed_mean( a, limits=(0.1, 0.1), - inclusive=(1, 1), + inclusive: tuple[bool, bool] = (1, 1), relative: bool = True, - axis: Untyped | None = None, + axis: int | None = None, ) -> Untyped: ... def trimmed_var( a, limits=(0.1, 0.1), - inclusive=(1, 1), + inclusive: tuple[bool, bool] = (1, 1), relative: bool = True, - axis: Untyped | None = None, + axis: int | None = None, ddof: int = 0, ) -> Untyped: ... def trimmed_std( a, limits=(0.1, 0.1), - inclusive=(1, 1), + inclusive: tuple[bool, bool] = (1, 1), relative: bool = True, - axis: Untyped | None = None, + axis: int | None = None, ddof: int = 0, ) -> Untyped: ... -def trimmed_stde(a, limits=(0.1, 0.1), inclusive=(1, 1), axis: Untyped | None = None) -> Untyped: ... -def tmean(a, limits: Untyped | None = None, inclusive=(True, True), axis: Untyped | None = None) -> Untyped: ... -def tvar(a, limits: Untyped | None = None, inclusive=(True, True), axis: int = 0, ddof: int = 1) -> Untyped: ... +def trimmed_stde(a, limits=(0.1, 0.1), inclusive: tuple[bool, bool] = (1, 1), axis: int | None = None) -> Untyped: ... +def tmean(a, limits: Untyped | None = None, inclusive: tuple[bool, bool] = (True, True), axis: int | None = None) -> Untyped: ... +def tvar( + a, + limits: Untyped | None = None, + inclusive: tuple[bool, bool] = (True, True), + axis: int = 0, + ddof: int = 1, +) -> Untyped: ... def tmin(a, lowerlimit: Untyped | None = None, axis: int = 0, inclusive: bool = True) -> Untyped: ... def tmax(a, upperlimit: Untyped | None = None, axis: int = 0, inclusive: bool = True) -> Untyped: ... -def tsem(a, limits: Untyped | None = None, inclusive=(True, True), axis: int = 0, ddof: int = 1) -> Untyped: ... +def tsem( + a, + limits: Untyped | None = None, + inclusive: tuple[bool, bool] = (True, True), + axis: int = 0, + ddof: int = 1, +) -> Untyped: ... 
def winsorize( a, limits: Untyped | None = None, - inclusive=(True, True), + inclusive: tuple[bool, bool] = (True, True), inplace: bool = False, - axis: Untyped | None = None, + axis: int | None = None, nan_policy: str = "propagate", ) -> Untyped: ... def moment(a, moment: int = 1, axis: int = 0) -> Untyped: ... @@ -228,16 +240,16 @@ def variation(a, axis: int = 0, ddof: int = 0) -> Untyped: ... def skew(a, axis: int = 0, bias: bool = True) -> Untyped: ... def kurtosis(a, axis: int = 0, fisher: bool = True, bias: bool = True) -> Untyped: ... def describe(a, axis: int = 0, ddof: int = 0, bias: bool = True) -> Untyped: ... -def stde_median(data, axis: Untyped | None = None) -> Untyped: ... +def stde_median(data, axis: int | None = None) -> Untyped: ... def skewtest(a, axis: int = 0, alternative: str = "two-sided") -> Untyped: ... def kurtosistest(a, axis: int = 0, alternative: str = "two-sided") -> Untyped: ... def normaltest(a, axis: int = 0) -> Untyped: ... def mquantiles( a, - prob=(0.25, 0.5, 0.75), + prob=[0.25, 0.5, 0.75], alphap: float = 0.4, betap: float = 0.4, - axis: Untyped | None = None, + axis: int | None = None, limit=(), ) -> Untyped: ... def scoreatpercentile(data, per, limit=(), alphap: float = 0.4, betap: float = 0.4) -> Untyped: ... diff --git a/scipy-stubs/stats/_mstats_extras.pyi b/scipy-stubs/stats/_mstats_extras.pyi index 4661c6d8..47510ef6 100644 --- a/scipy-stubs/stats/_mstats_extras.pyi +++ b/scipy-stubs/stats/_mstats_extras.pyi @@ -13,19 +13,19 @@ __all__ = [ "trimmed_mean_ci", ] -def hdquantiles(data, prob=(0.25, 0.5, 0.75), axis: Untyped | None = None, var: bool = False) -> Untyped: ... +def hdquantiles(data, prob=[0.25, 0.5, 0.75], axis: int | None = None, var: bool = False) -> Untyped: ... def hdmedian(data, axis: int = -1, var: bool = False) -> Untyped: ... -def hdquantiles_sd(data, prob=(0.25, 0.5, 0.75), axis: Untyped | None = None) -> Untyped: ... 
+def hdquantiles_sd(data, prob=[0.25, 0.5, 0.75], axis: int | None = None) -> Untyped: ... def trimmed_mean_ci( data, limits=(0.2, 0.2), inclusive=(True, True), alpha: float = 0.05, - axis: Untyped | None = None, + axis: int | None = None, ) -> Untyped: ... -def mjci(data, prob=(0.25, 0.5, 0.75), axis: Untyped | None = None) -> Untyped: ... -def mquantiles_cimj(data, prob=(0.25, 0.5, 0.75), alpha: float = 0.05, axis: Untyped | None = None) -> Untyped: ... -def median_cihs(data, alpha: float = 0.05, axis: Untyped | None = None) -> Untyped: ... -def compare_medians_ms(group_1, group_2, axis: Untyped | None = None) -> Untyped: ... -def idealfourths(data, axis: Untyped | None = None) -> Untyped: ... +def mjci(data, prob=[0.25, 0.5, 0.75], axis: int | None = None) -> Untyped: ... +def mquantiles_cimj(data, prob=[0.25, 0.5, 0.75], alpha: float = 0.05, axis: int | None = None) -> Untyped: ... +def median_cihs(data, alpha: float = 0.05, axis: int | None = None) -> Untyped: ... +def compare_medians_ms(group_1, group_2, axis: int | None = None) -> Untyped: ... +def idealfourths(data, axis: int | None = None) -> Untyped: ... def rsh(data, points: Untyped | None = None) -> Untyped: ... diff --git a/scipy-stubs/stats/_stats_mstats_common.pyi b/scipy-stubs/stats/_stats_mstats_common.pyi index df13816c..9490919c 100644 --- a/scipy-stubs/stats/_stats_mstats_common.pyi +++ b/scipy-stubs/stats/_stats_mstats_common.pyi @@ -1,8 +1,57 @@ -from scipy._typing import Untyped +from typing import Any, Literal, TypeAlias +from typing_extensions import Self + +import numpy as np +import numpy.typing as npt from . import distributions as distributions +from ._typing import BaseBunch + +__all__ = ["_find_repeats", "siegelslopes", "theilslopes"] + +_Method: TypeAlias = Literal["hierarchical", "separate"] + +class SiegelslopesResult(BaseBunch[float, float]): + def __new__(_cls, slope: float, intercept: float) -> Self: ... + def __init__(self, /, slope: float, intercept: float) -> None: ... 
+ @property + def slope(self, /) -> float: ... + @property + def intercept(self, /) -> float: ... -TheilslopesResult: Untyped -SiegelslopesResult: Untyped +class TheilslopesResult(BaseBunch[np.float64, np.float64, np.float64, np.float64]): + def __new__( + _cls, + slope: np.float64, + intercept: np.float64, + low_slope: np.float64, + high_slope: np.float64, + ) -> Self: ... + def __init__( + self, + /, + slope: np.float64, + intercept: np.float64, + low_slope: np.float64, + high_slope: np.float64, + ) -> None: ... + @property + def slope(self, /) -> np.float64: ... + @property + def intercept(self, /) -> np.float64: ... + @property + def low_slope(self, /) -> np.float64: ... + @property + def high_slope(self, /) -> np.float64: ... -def theilslopes(y, x: Untyped | None = None, alpha: float = 0.95, method: str = "separate") -> Untyped: ... -def siegelslopes(y, x: Untyped | None = None, method: str = "hierarchical") -> Untyped: ... +def _find_repeats(arr: npt.NDArray[np.number[Any]]) -> tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]: ... +def siegelslopes( + y: npt.ArrayLike, + x: npt.ArrayLike | None = None, + method: _Method = "hierarchical", +) -> SiegelslopesResult: ... +def theilslopes( + y: npt.ArrayLike, + x: npt.ArrayLike | None = None, + alpha: float | np.floating[Any] = 0.95, + method: _Method = "separate", +) -> TheilslopesResult: ... 
diff --git a/scipy-stubs/stats/_stats_py.pyi b/scipy-stubs/stats/_stats_py.pyi index 9d33956e..6af9a386 100644 --- a/scipy-stubs/stats/_stats_py.pyi +++ b/scipy-stubs/stats/_stats_py.pyi @@ -1,8 +1,17 @@ from dataclasses import dataclass -from typing import NamedTuple - -from scipy._typing import Untyped +from collections.abc import Callable, Sequence +from types import ModuleType +from typing import Any, Generic, Literal, Protocol, TypeAlias, overload, type_check_only +from typing_extensions import NamedTuple, Self, TypeVar + +import numpy as np +import numpy.typing as npt +import optype.numpy as onpt +from numpy._typing import _ArrayLikeBool_co, _ArrayLikeFloat_co, _ArrayLikeInt_co +from scipy._typing import Alternative, AnyReal, NanPolicy, Seed +from ._resampling import BootstrapMethod, ResamplingMethod from ._stats_mstats_common import siegelslopes, theilslopes +from ._typing import BaseBunch __all__ = [ "alexandergovern", @@ -74,291 +83,781 @@ __all__ = [ "zscore", ] -SignificanceResult: Untyped -KstestResult: Untyped -LinregressResult: Untyped - -class DescribeResult(NamedTuple): - nobs: Untyped - minmax: Untyped - mean: Untyped - variance: Untyped - skewness: Untyped - kurtosis: Untyped +_SCT = TypeVar("_SCT", bound=np.generic, default=np.generic) + +_Int1D: TypeAlias = np.integer[Any] +_Float1D: TypeAlias = np.floating[Any] +_Real1D: TypeAlias = _Int1D | _Float1D + +_SCT_int = TypeVar("_SCT_int", bound=_Int1D, default=_Int1D) +_SCT_float = TypeVar("_SCT_float", bound=_Float1D, default=_Float1D) +_SCT_real = TypeVar("_SCT_real", bound=_Real1D, default=_Real1D) + +_GenericND: TypeAlias = _SCT | npt.NDArray[_SCT] +_IntND: TypeAlias = _GenericND[_SCT_int] +_FloatND: TypeAlias = _GenericND[_SCT_float] +_RealND: TypeAlias = _GenericND[_SCT_real] + +_Interpolation: TypeAlias = Literal["linear", "lower", "higher", "nearest", "midpoint"] +_PowerDivergenceStatistic: TypeAlias = Literal[ + "pearson", + "log-likelihood", + "freeman-tukey", + "mod-log-likelihood", + 
"neyman", + "cressie-read", +] -class ModeResult(NamedTuple): - mode: Untyped - count: Untyped # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] +_NDT_int = TypeVar("_NDT_int", bound=int | _IntND, default=int | _IntND) +_NDT_float = TypeVar("_NDT_float", bound=float | _FloatND, default=float | _FloatND) +_NDT_real = TypeVar("_NDT_real", bound=float | _RealND, default=float | _RealND) -class SkewtestResult(NamedTuple): - statistic: Untyped - pvalue: Untyped +@type_check_only +class _RVSCallable(Protocol): + def __call__(self, /, *, size: int | tuple[int, ...]) -> npt.NDArray[np.floating[Any]]: ... -class KurtosistestResult(NamedTuple): - statistic: Untyped - pvalue: Untyped +class _SimpleNormal: + @overload + def cdf(self, /, x: np.bool_ | np.uint8 | np.int8 | np.uint16 | np.int16 | np.float16 | np.float32) -> np.float32: ... + @overload + def cdf(self, /, x: np.uint32 | np.int32 | np.uint64 | np.int64 | np.float64) -> np.float64: ... + @overload + def cdf(self, /, x: np.complex64) -> np.complex64: ... + @overload + def cdf(self, /, x: np.complex128 | np.clongdouble) -> np.complex64: ... + @overload + def cdf(self, /, x: bool) -> np.float32: ... + @overload + def cdf(self, /, x: float) -> np.float64 | np.float32: ... + @overload + def cdf(self, /, x: complex) -> np.complex128 | np.float64 | np.float32: ... + sf = cdf -class NormaltestResult(NamedTuple): - statistic: Untyped - pvalue: Untyped +class _SimpleChi2: + df: int + def __init__(self, /, df: int) -> None: ... + @overload + def sf(self, /, x: float | np.float64 | np.uint64 | np.int64 | np.uint32 | np.int32 | np.bool_) -> np.float64: ... + @overload + def sf(self, /, x: np.float32 | np.uint16 | np.int16 | np.uint8 | np.int8) -> np.float32: ... + +@type_check_only +class _TestResultTuple(NamedTuple, Generic[_NDT_float]): + statistic: _NDT_float + pvalue: _NDT_float + +class SkewtestResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... 
+class KurtosistestResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... +class NormaltestResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... +class Ttest_indResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... +class Power_divergenceResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... +class RanksumsResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... +class KruskalResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... +class FriedmanchisquareResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... +class BrunnerMunzelResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... +class F_onewayResult(_TestResultTuple[_NDT_float], Generic[_NDT_float]): ... + +class ConfidenceInterval(NamedTuple, Generic[_NDT_float]): + low: _NDT_float + high: _NDT_float + +class DescribeResult(NamedTuple, Generic[_NDT_real, _NDT_float]): + nobs: int + minmax: tuple[_NDT_real, _NDT_real] + mean: _NDT_float + variance: _NDT_float + skewness: _NDT_float + kurtosis: _NDT_float + +class ModeResult(NamedTuple, Generic[_NDT_real, _NDT_int]): + mode: _NDT_real + count: _NDT_int # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] class HistogramResult(NamedTuple): - count: Untyped # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] - lowerlimit: Untyped - binsize: Untyped - extrapoints: Untyped + count: onpt.Array[tuple[int], np.float64] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + lowerlimit: Literal[0] | np.floating[Any] + binsize: onpt.Array[tuple[int], np.float64] + extrapoints: int class CumfreqResult(NamedTuple): - cumcount: Untyped - lowerlimit: Untyped - binsize: Untyped - extrapoints: Untyped + cumcount: onpt.Array[tuple[int], np.float64] + lowerlimit: Literal[0] | np.floating[Any] + binsize: onpt.Array[tuple[int], np.float64] + extrapoints: int class RelfreqResult(NamedTuple): - frequency: Untyped - lowerlimit: Untyped - 
binsize: Untyped - extrapoints: Untyped + frequency: onpt.Array[tuple[int], np.float64] + lowerlimit: Literal[0] | np.floating[Any] + binsize: onpt.Array[tuple[int], np.float64] + extrapoints: int + +class SigmaclipResult(NamedTuple, Generic[_SCT_real, _SCT_float]): + clipped: onpt.Array[tuple[int], _SCT_real] + lower: _SCT_float + upper: _SCT_float -class SigmaclipResult(NamedTuple): - clipped: Untyped - lower: Untyped - upper: Untyped +class RepeatedResults(NamedTuple): + values: onpt.Array[tuple[int], np.float64] + counts: onpt.Array[tuple[int], np.intp] @dataclass class AlexanderGovernResult: statistic: float pvalue: float -class F_onewayResult(NamedTuple): - statistic: Untyped - pvalue: Untyped - -class ConfidenceInterval(NamedTuple): - low: Untyped - high: Untyped - -class PearsonRResultBase(NamedTuple): - statistic: Untyped - pvalue: Untyped - -class PearsonRResult(PearsonRResultBase): - correlation: Untyped - def __init__(self, statistic, pvalue, alternative, n, x, y, axis) -> None: ... - def confidence_interval(self, confidence_level: float = 0.95, method: Untyped | None = None) -> Untyped: ... - -class TtestResultBase(NamedTuple): - statistic: Untyped - pvalue: Untyped - @property - def df(self) -> Untyped: ... - -class TtestResult(TtestResultBase): - def __init__( - self, - statistic, - pvalue, - df, - alternative, - standard_error, - estimate, - statistic_np: Untyped | None = None, - xp: Untyped | None = None, - ) -> None: ... - def confidence_interval(self, confidence_level: float = 0.95) -> Untyped: ... 
- -class Ttest_indResult(NamedTuple): - statistic: Untyped - pvalue: Untyped - -class Power_divergenceResult(NamedTuple): - statistic: Untyped - pvalue: Untyped - -class RanksumsResult(NamedTuple): - statistic: Untyped - pvalue: Untyped - -class KruskalResult(NamedTuple): - statistic: Untyped - pvalue: Untyped - -class FriedmanchisquareResult(NamedTuple): - statistic: Untyped - pvalue: Untyped - -class BrunnerMunzelResult(NamedTuple): - statistic: Untyped - pvalue: Untyped - @dataclass class QuantileTestResult: statistic: float statistic_type: int pvalue: float - def confidence_interval(self, confidence_level: float = 0.95) -> Untyped: ... + _alternative: list[str] + _x: npt.NDArray[_Real1D] + _p: float + def confidence_interval(self, confidence_level: float = 0.95) -> float: ... -class RepeatedResults(NamedTuple): - values: Untyped - counts: Untyped +@type_check_only +class _TestResultBunch(BaseBunch[_NDT_float, _NDT_float], Generic[_NDT_float]): + @property + def statistic(self, /) -> _NDT_float: ... + @property + def pvalue(self, /) -> _NDT_float: ... + def __new__(_cls, statistic: _NDT_float, pvalue: _NDT_float) -> Self: ... + def __init__(self, /, statistic: _NDT_float, pvalue: _NDT_float) -> None: ... + +class SignificanceResult(_TestResultBunch[_NDT_float]): ... +class PearsonRResultBase(_TestResultBunch[_NDT_float], Generic[_NDT_float]): ... + +class PearsonRResult(PearsonRResultBase[_NDT_float], Generic[_NDT_float]): + _alternative: Alternative + _n: int + _x: npt.NDArray[_Real1D] + _y: npt.NDArray[_Real1D] + _axis: int + correlation: _NDT_float # alias for `statistic` + def __init__( # pyright: ignore[reportInconsistentConstructor] + self, + /, + statistic: _NDT_float, + pvalue: _NDT_float, + alternative: Alternative, + n: int, + x: npt.NDArray[_Real1D], + y: npt.NDArray[_Real1D], + axis: int, + ) -> None: ... 
+ def confidence_interval( + self, + /, + confidence_level: float = 0.95, + method: BootstrapMethod | None = None, + ) -> ConfidenceInterval[_NDT_float]: ... -class _SimpleNormal: - def cdf(self, x) -> Untyped: ... - def sf(self, x) -> Untyped: ... - def isf(self, x) -> Untyped: ... +class TtestResultBase(_TestResultBunch[_NDT_float], Generic[_NDT_float]): + @property + def df(self, /) -> _NDT_float: ... + def __new__(_cls, statistic: _NDT_float, pvalue: _NDT_float, *, df: _NDT_float) -> Self: ... + def __init__(self, /, statistic: _NDT_float, pvalue: _NDT_float, *, df: _NDT_float) -> None: ... + +class TtestResult(TtestResultBase[_NDT_float], Generic[_NDT_float]): + _alternative: Alternative + _standard_error: _NDT_float + _estimate: _NDT_float + _statistic_np: _NDT_float + _dtype: np.dtype[np.floating[Any]] + _xp: ModuleType + + def __init__( # pyright: ignore[reportInconsistentConstructor] + self, + /, + statistic: _NDT_float, + pvalue: _NDT_float, + df: _NDT_float, + alternative: Alternative, + standard_error: _NDT_float, + estimate: _NDT_float, + statistic_np: _NDT_float | None = None, + xp: ModuleType | None = None, + ) -> None: ... + def confidence_interval(self, /, confidence_level: float = 0.95) -> ConfidenceInterval[_NDT_float]: ... -class _SimpleChi2: - df: Untyped - def __init__(self, df) -> None: ... - def cdf(self, x) -> Untyped: ... - def sf(self, x) -> Untyped: ... - -class _SimpleBeta: - a: Untyped - b: Untyped - loc: Untyped - scale: Untyped - def __init__(self, a, b, *, loc: Untyped | None = None, scale: Untyped | None = None): ... - def cdf(self, x) -> Untyped: ... - def sf(self, x) -> Untyped: ... - -class _SimpleStudentT: - df: Untyped - def __init__(self, df) -> None: ... - def cdf(self, t) -> Untyped: ... - def sf(self, t) -> Untyped: ... +class KstestResult(_TestResultBunch[np.float64]): + @property + def statistic_location(self, /) -> np.float64: ... + @property + def statistic_sign(self, /) -> np.int8: ... 
+ def __new__( + _cls, + statistic: np.float64, + pvalue: np.float64, + *, + statistic_location: np.float64, + statistic_sign: np.int8, + ) -> Self: ... + def __init__( + self, + /, + statistic: np.float64, + pvalue: np.float64, + *, + statistic_location: np.float64, + statistic_sign: np.int8, + ) -> None: ... Ks_2sampResult = KstestResult -def gmean(a, axis: int = 0, dtype: Untyped | None = None, weights: Untyped | None = None) -> Untyped: ... -def hmean(a, axis: int = 0, dtype: Untyped | None = None, *, weights: Untyped | None = None) -> Untyped: ... -def pmean(a, p, *, axis: int = 0, dtype: Untyped | None = None, weights: Untyped | None = None) -> Untyped: ... -def mode(a, axis: int = 0, nan_policy: str = "propagate", keepdims: bool = False) -> Untyped: ... -def tmean(a, limits: Untyped | None = None, inclusive=(True, True), axis: Untyped | None = None) -> Untyped: ... -def tvar(a, limits: Untyped | None = None, inclusive=(True, True), axis: int = 0, ddof: int = 1) -> Untyped: ... +class LinregressResult(BaseBunch[np.float64, np.float64, np.float64, float | np.float64, float | np.float64]): + @property + def slope(self, /) -> np.float64: ... + @property + def intercept(self, /) -> np.float64: ... + @property + def rvalue(self, /) -> np.float64: ... + @property + def pvalue(self, /) -> float | np.float64: ... + @property + def stderr(self, /) -> float | np.float64: ... + @property + def intercept_stderr(self, /) -> float | np.float64: ... + def __new__( + _cls, + slope: np.float64, + intercept: np.float64, + rvalue: np.float64, + pvalue: float | np.float64, + stderr: float | np.float64, + *, + intercept_stderr: float | np.float64, + ) -> Self: ... + def __init__( + self, + /, + slope: np.float64, + intercept: np.float64, + rvalue: np.float64, + pvalue: float | np.float64, + stderr: float | np.float64, + *, + intercept_stderr: float | np.float64, + ) -> None: ... 
+ +def gmean( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + dtype: npt.DTypeLike | None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> _RealND: ... +def hmean( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + dtype: npt.DTypeLike | None = None, + *, + weights: _ArrayLikeFloat_co | None = None, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> _RealND: ... +def pmean( + a: _ArrayLikeFloat_co, + p: float | _Real1D, + *, + axis: int | None = 0, + dtype: npt.DTypeLike | None = None, + weights: _ArrayLikeFloat_co | None = None, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> _RealND: ... + +# +def mode( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> _RealND: ... + +# +def tmean( + a: _ArrayLikeFloat_co, + limits: tuple[float | _Real1D, float | _Real1D] | None = None, + inclusive: tuple[bool, bool] = (True, True), + axis: int | None = None, + *, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> _FloatND: ... +def tvar( + a: _ArrayLikeFloat_co, + limits: tuple[AnyReal, AnyReal] | None = None, + inclusive: tuple[bool, bool] = (True, True), + axis: int | None = 0, + ddof: int = 1, + *, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> _FloatND: ... def tmin( - a, - lowerlimit: Untyped | None = None, - axis: int = 0, + a: _ArrayLikeFloat_co, + lowerlimit: float | _Real1D | None = None, + axis: int | None = 0, inclusive: bool = True, - nan_policy: str = "propagate", -) -> Untyped: ... + nan_policy: NanPolicy = "propagate", + *, + keepdims: bool = False, +) -> _RealND: ... def tmax( - a, - upperlimit: Untyped | None = None, - axis: int = 0, + a: _ArrayLikeFloat_co, + upperlimit: float | _Real1D | None = None, + axis: int | None = 0, inclusive: bool = True, - nan_policy: str = "propagate", -) -> Untyped: ... 
-def tstd(a, limits: Untyped | None = None, inclusive=(True, True), axis: int = 0, ddof: int = 1) -> Untyped: ... -def tsem(a, limits: Untyped | None = None, inclusive=(True, True), axis: int = 0, ddof: int = 1) -> Untyped: ... -def moment(a, order: int = 1, axis: int = 0, nan_policy: str = "propagate", *, center: Untyped | None = None) -> Untyped: ... -def skew(a, axis: int = 0, bias: bool = True, nan_policy: str = "propagate") -> Untyped: ... -def kurtosis(a, axis: int = 0, fisher: bool = True, bias: bool = True, nan_policy: str = "propagate") -> Untyped: ... -def describe(a, axis: int = 0, ddof: int = 1, bias: bool = True, nan_policy: str = "propagate") -> Untyped: ... -def skewtest(a, axis: int = 0, nan_policy: str = "propagate", alternative: str = "two-sided") -> Untyped: ... -def kurtosistest(a, axis: int = 0, nan_policy: str = "propagate", alternative: str = "two-sided") -> Untyped: ... -def normaltest(a, axis: int = 0, nan_policy: str = "propagate") -> Untyped: ... -def jarque_bera(x, *, axis: Untyped | None = None) -> Untyped: ... -def scoreatpercentile(a, per, limit=(), interpolation_method: str = "fraction", axis: Untyped | None = None) -> Untyped: ... -def percentileofscore(a, score, kind: str = "rank", nan_policy: str = "propagate") -> Untyped: ... -def cumfreq(a, numbins: int = 10, defaultreallimits: Untyped | None = None, weights: Untyped | None = None) -> Untyped: ... -def relfreq(a, numbins: int = 10, defaultreallimits: Untyped | None = None, weights: Untyped | None = None) -> Untyped: ... -def obrientransform(*samples) -> Untyped: ... -def sem(a, axis: int = 0, ddof: int = 1, nan_policy: str = "propagate") -> Untyped: ... -def zscore(a, axis: int = 0, ddof: int = 0, nan_policy: str = "propagate") -> Untyped: ... -def gzscore(a, *, axis: int = 0, ddof: int = 0, nan_policy: str = "propagate") -> Untyped: ... -def zmap(scores, compare, axis: int = 0, ddof: int = 0, nan_policy: str = "propagate") -> Untyped: ... 
-def gstd(a, axis: int = 0, ddof: int = 1) -> Untyped: ... + nan_policy: NanPolicy = "propagate", + *, + keepdims: bool = False, +) -> _RealND: ... +def tstd( + a: _ArrayLikeFloat_co, + limits: tuple[float | _Real1D, float | _Real1D] | None = None, + inclusive: tuple[bool, bool] = (True, True), + axis: int | None = 0, + ddof: int = 1, + *, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> _FloatND: ... +def tsem( + a: _ArrayLikeFloat_co, + limits: tuple[float | _Real1D, float | _Real1D] | None = None, + inclusive: tuple[bool, bool] = (True, True), + axis: int | None = 0, + ddof: int = 1, + *, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> _FloatND: ... + +# +def moment( + a: _ArrayLikeFloat_co, + order: int = 1, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + *, + center: float | _Float1D | None = None, + keepdims: bool = False, +) -> _FloatND: ... +def skew( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + bias: bool = True, + nan_policy: NanPolicy = "propagate", + *, + keepdims: bool = False, +) -> _FloatND: ... +def kurtosis( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + fisher: bool = True, + bias: bool = True, + nan_policy: NanPolicy = "propagate", + *, + keepdims: bool = False, +) -> _FloatND: ... +def describe( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + ddof: int = 1, + bias: bool = True, + nan_policy: NanPolicy = "propagate", +) -> DescribeResult: ... + +# +def skewtest( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + alternative: Alternative = "two-sided", + *, + keepdims: bool = False, +) -> SkewtestResult: ... +def kurtosistest( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + alternative: Alternative = "two-sided", + *, + keepdims: bool = False, +) -> KurtosistestResult: ... 
+def normaltest( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + *, + keepdims: bool = False, +) -> NormaltestResult: ... +def jarque_bera( + x: _ArrayLikeFloat_co, + *, + axis: int | None = None, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> SignificanceResult: ... + +# +def scoreatpercentile( + a: _ArrayLikeFloat_co, + per: _ArrayLikeFloat_co, + limit: tuple[float | _Real1D, float | _Real1D] | tuple[()] = (), + interpolation_method: Literal["fraction", "lower", "higher"] = "fraction", + axis: int | None = None, +) -> _FloatND: ... +def percentileofscore( + a: _ArrayLikeFloat_co, + score: _ArrayLikeFloat_co, + kind: Literal["rank", "weak", "strict", "mean"] = "rank", + nan_policy: NanPolicy = "propagate", +) -> float | np.float64: ... + +# +def cumfreq( + a: _ArrayLikeFloat_co, + numbins: int = 10, + defaultreallimits: tuple[float | _Real1D, float | _Real1D] | None = None, + weights: _ArrayLikeFloat_co | None = None, +) -> CumfreqResult: ... +def relfreq( + a: _ArrayLikeFloat_co, + numbins: int = 10, + defaultreallimits: tuple[float | _Real1D, float | _Real1D] | None = None, + weights: _ArrayLikeFloat_co | None = None, +) -> RelfreqResult: ... + +# +def obrientransform( + *samples: _ArrayLikeFloat_co, +) -> onpt.Array[tuple[int, int], _Float1D] | onpt.Array[tuple[int], np.object_]: ... + +# +def sem( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + ddof: int = 1, + nan_policy: NanPolicy = "propagate", + *, + keepdims: bool = False, +) -> _FloatND: ... +def zscore( + a: _ArrayLikeFloat_co, + axis: int | None = 0, + ddof: int = 0, + nan_policy: NanPolicy = "propagate", +) -> npt.NDArray[_Float1D]: ... +def gzscore( + a: _ArrayLikeFloat_co, + *, + axis: int | None = 0, + ddof: int = 0, + nan_policy: NanPolicy = "propagate", +) -> npt.NDArray[_Float1D]: ... 
+def zmap( + scores: _ArrayLikeFloat_co, + compare: _ArrayLikeFloat_co, + axis: int | None = 0, + ddof: int = 0, + nan_policy: NanPolicy = "propagate", +) -> npt.NDArray[_Float1D]: ... + +# +def gstd(a: _ArrayLikeFloat_co, axis: int | None = 0, ddof: int = 1) -> _FloatND: ... def iqr( - x, - axis: Untyped | None = None, - rng=(25, 75), - scale: float = 1.0, - nan_policy: str = "propagate", - interpolation: str = "linear", - keepdims: bool = False, -) -> Untyped: ... -def median_abs_deviation(x, axis: int = 0, center=..., scale: float = 1.0, nan_policy: str = "propagate") -> Untyped: ... -def sigmaclip(a, low: float = 4.0, high: float = 4.0) -> Untyped: ... -def trimboth(a, proportiontocut, axis: int = 0) -> Untyped: ... -def trim1(a, proportiontocut, tail: str = "right", axis: int = 0) -> Untyped: ... -def trim_mean(a, proportiontocut, axis: int = 0) -> Untyped: ... -def f_oneway(*samples, axis: int = 0) -> Untyped: ... -def alexandergovern(*samples, nan_policy: str = "propagate", axis: int = 0) -> Untyped: ... -def pearsonr(x, y, *, alternative: str = "two-sided", method: Untyped | None = None, axis: int = 0) -> Untyped: ... -def fisher_exact(table, alternative: str = "two-sided") -> Untyped: ... + x: _ArrayLikeFloat_co, + axis: int | Sequence[int] | None = None, + rng: tuple[float, float] = (25, 75), + scale: Literal["normal"] | _ArrayLikeFloat_co = 1.0, + nan_policy: NanPolicy = "propagate", + interpolation: _Interpolation = "linear", + keepdims: bool = False, +) -> _FloatND: ... +def median_abs_deviation( + x: _ArrayLikeFloat_co, + axis: int | None = 0, + center: np.ufunc | Callable[[_NDT_float, int | None], _NDT_float] = ..., + scale: Literal["normal"] | float = 1.0, + nan_policy: NanPolicy = "propagate", +) -> _FloatND: ... + +# +def sigmaclip(a: _ArrayLikeFloat_co, low: float = 4.0, high: float = 4.0) -> SigmaclipResult: ... +def trimboth(a: _ArrayLikeFloat_co, proportiontocut: float, axis: int | None = 0) -> npt.NDArray[_Real1D]: ... 
+def trim1(a: _ArrayLikeFloat_co, proportiontocut: float, tail: str = "right", axis: int | None = 0) -> npt.NDArray[_Real1D]: ... +def trim_mean(a: _ArrayLikeFloat_co, proportiontocut: float, axis: int | None = 0) -> _FloatND: ... + +# +def f_oneway( + *samples: _ArrayLikeFloat_co, + nan_policy: NanPolicy = "propagate", + axis: int | None = 0, + keepdims: bool = False, +) -> F_onewayResult: ... +def alexandergovern( + *samples: _ArrayLikeFloat_co, + nan_policy: NanPolicy = "propagate", + axis: int | None = 0, + keepdims: bool = False, +) -> AlexanderGovernResult: ... +def pearsonr( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + *, + alternative: Alternative = "two-sided", + method: ResamplingMethod | None = None, + axis: int | None = 0, +) -> PearsonRResult: ... +def fisher_exact( + table: npt.NDArray[_Real1D], + alternative: Alternative = "two-sided", +) -> SignificanceResult[float]: ... + +# def spearmanr( - a, - b: Untyped | None = None, - axis: int = 0, - nan_policy: str = "propagate", - alternative: str = "two-sided", -) -> Untyped: ... -def pointbiserialr(x, y) -> Untyped: ... + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co | None = None, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + alternative: Alternative = "two-sided", +) -> SignificanceResult: ... +def pointbiserialr(x: _ArrayLikeBool_co, y: _ArrayLikeFloat_co) -> SignificanceResult[float]: ... def kendalltau( - x, - y, - *, - nan_policy: str = "propagate", - method: str = "auto", - variant: str = "b", - alternative: str = "two-sided", -) -> Untyped: ... -def weightedtau(x, y, rank: bool = True, weigher: Untyped | None = None, additive: bool = True) -> Untyped: ... -def pack_TtestResult(statistic, pvalue, df, alternative, standard_error, estimate) -> Untyped: ... -def unpack_TtestResult(res) -> Untyped: ... -def ttest_1samp(a, popmean, axis: int = 0, nan_policy: str = "propagate", alternative: str = "two-sided") -> Untyped: ... 
+ x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + *, + nan_policy: NanPolicy = "propagate", + method: Literal["auto", "asymptotic", "exact"] = "auto", + variant: Literal["b", "c"] = "b", + alternative: Alternative = "two-sided", +) -> SignificanceResult[float]: ... +def weightedtau( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + rank: bool | _ArrayLikeInt_co = True, + weigher: Callable[[int], float | _Real1D] | None = None, + additive: bool = True, +) -> SignificanceResult[float]: ... + +# +def pack_TtestResult( + statistic: _NDT_float, + pvalue: _NDT_float, + df: _NDT_float, + alternative: Alternative, + standard_error: _NDT_float, + estimate: _NDT_float, +) -> TtestResult[_NDT_float]: ... +def unpack_TtestResult( + res: TtestResult[_NDT_float], +) -> tuple[ + _NDT_float, + _NDT_float, + _NDT_float, + Alternative, + _NDT_float, + _NDT_float, +]: ... + +# +def ttest_1samp( + a: _ArrayLikeFloat_co, + popmean: _ArrayLikeFloat_co, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + alternative: Alternative = "two-sided", + *, + keepdims: bool = False, +) -> TtestResult: ... def ttest_ind_from_stats( - mean1, - std1, - nobs1, - mean2, - std2, - nobs2, + mean1: _ArrayLikeFloat_co, + std1: _ArrayLikeFloat_co, + nobs1: _ArrayLikeInt_co, + mean2: _ArrayLikeFloat_co, + std2: _ArrayLikeFloat_co, + nobs2: _ArrayLikeInt_co, equal_var: bool = True, - alternative: str = "two-sided", -) -> Untyped: ... + alternative: Alternative = "two-sided", +) -> Ttest_indResult: ... def ttest_ind( - a, - b, - axis: int = 0, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axis: int | None = 0, equal_var: bool = True, - nan_policy: str = "propagate", - permutations: Untyped | None = None, - random_state: Untyped | None = None, - alternative: str = "two-sided", - trim: int = 0, -) -> Untyped: ... -def ttest_rel(a, b, axis: int = 0, nan_policy: str = "propagate", alternative: str = "two-sided") -> Untyped: ... 
+ nan_policy: NanPolicy = "propagate", + permutations: float | None = None, + random_state: Seed | None = None, + alternative: Alternative = "two-sided", + trim: float = 0, + *, + keepdims: bool = False, +) -> TtestResult: ... +def ttest_rel( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + alternative: Alternative = "two-sided", + *, + keepdims: bool = False, +) -> TtestResult: ... + +# def power_divergence( - f_obs, - f_exp: Untyped | None = None, + f_obs: _ArrayLikeFloat_co, + f_exp: _ArrayLikeFloat_co | None = None, + ddof: int = 0, + axis: int | None = 0, + lambda_: _PowerDivergenceStatistic | float | None = None, +) -> Power_divergenceResult: ... +def chisquare( + f_obs: _ArrayLikeFloat_co, + f_exp: _ArrayLikeFloat_co | None = None, ddof: int = 0, - axis: int = 0, - lambda_: Untyped | None = None, -) -> Untyped: ... -def chisquare(f_obs, f_exp: Untyped | None = None, ddof: int = 0, axis: int = 0) -> Untyped: ... -def ks_1samp(x, cdf, args=(), alternative: str = "two-sided", method: str = "auto") -> Untyped: ... -def ks_2samp(data1, data2, alternative: str = "two-sided", method: str = "auto") -> Untyped: ... -def kstest(rvs, cdf, args=(), N: int = 20, alternative: str = "two-sided", method: str = "auto") -> Untyped: ... -def tiecorrect(rankvals) -> Untyped: ... -def ranksums(x, y, alternative: str = "two-sided") -> Untyped: ... -def kruskal(*samples, nan_policy: str = "propagate") -> Untyped: ... -def friedmanchisquare(*samples) -> Untyped: ... -def brunnermunzel(x, y, alternative: str = "two-sided", distribution: str = "t", nan_policy: str = "propagate") -> Untyped: ... -def combine_pvalues(pvalues, method: str = "fisher", weights: Untyped | None = None, *, axis: int = 0) -> Untyped: ... -def quantile_test_iv(x, q, p, alternative) -> Untyped: ... -def quantile_test(x, *, q: int = 0, p: float = 0.5, alternative: str = "two-sided") -> Untyped: ... 
+ axis: int | None = 0, +) -> Power_divergenceResult: ... + +# +def ks_1samp( + x: _ArrayLikeFloat_co, + cdf: Callable[[float], float | _Real1D], + args: tuple[object, ...] = (), + alternative: Alternative = "two-sided", + method: Literal["auto", "exact", "approx", "asymp"] = "auto", + *, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> KstestResult: ... +def ks_2samp( + data1: _ArrayLikeFloat_co, + data2: _ArrayLikeFloat_co, + alternative: Alternative = "two-sided", + method: Literal["auto", "exact", "asymp"] = "auto", + *, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> KstestResult: ... +def kstest( + rvs: str | _ArrayLikeFloat_co | _RVSCallable, + cdf: str | _ArrayLikeFloat_co | Callable[[float], float | _Float1D], + args: tuple[object, ...] = (), + N: int = 20, + alternative: Alternative = "two-sided", + method: Literal["auto", "exact", "approx", "asymp"] = "auto", + *, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> KstestResult: ... + +# +def tiecorrect(rankvals: _ArrayLikeInt_co) -> float | np.float64: ... + +# +def ranksums( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + alternative: Alternative = "two-sided", + *, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> RanksumsResult: ... + +# +def kruskal( + *samples: _ArrayLikeFloat_co, + nan_policy: NanPolicy = "propagate", + axis: int | None = 0, + keepdims: bool = False, +) -> KruskalResult: ... +def friedmanchisquare( + *samples: _ArrayLikeFloat_co, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> FriedmanchisquareResult: ... 
+def brunnermunzel( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + alternative: Alternative = "two-sided", + distribution: Literal["t", "normal"] = "t", + nan_policy: NanPolicy = "propagate", + *, + keepdims: bool = False, + axis: int | None = 0, +) -> BrunnerMunzelResult: ... + +# +def combine_pvalues( + pvalues: _ArrayLikeFloat_co, + method: Literal["fisher", "pearson", "tippett", "stouffer", "mudholkar_george"] = "fisher", + weights: _ArrayLikeFloat_co | None = None, + *, + axis: int | None = 0, + nan_policy: NanPolicy = "propagate", + keepdims: bool = False, +) -> SignificanceResult: ... + +# +def quantile_test_iv( # undocumented + x: _ArrayLikeFloat_co, + q: float | _Real1D, + p: float | _Float1D, + alternative: Alternative, +) -> tuple[npt.NDArray[_Real1D], _Real1D, np.floating[Any], Alternative]: ... +def quantile_test( + x: _ArrayLikeFloat_co, + *, + q: float | _Real1D = 0, + p: float | _Float1D = 0.5, + alternative: Alternative = "two-sided", +) -> QuantileTestResult: ... + +# def wasserstein_distance_nd( - u_values, - v_values, - u_weights: Untyped | None = None, - v_weights: Untyped | None = None, -) -> Untyped: ... -def wasserstein_distance(u_values, v_values, u_weights: Untyped | None = None, v_weights: Untyped | None = None) -> Untyped: ... -def energy_distance(u_values, v_values, u_weights: Untyped | None = None, v_weights: Untyped | None = None) -> Untyped: ... -def find_repeats(arr) -> Untyped: ... -def rankdata(a, method: str = "average", *, axis: Untyped | None = None, nan_policy: str = "propagate") -> Untyped: ... -def expectile(a, alpha: float = 0.5, *, weights: Untyped | None = None) -> Untyped: ... -def linregress(x, y: Untyped | None = None, alternative: str = "two-sided") -> Untyped: ... + u_values: _ArrayLikeFloat_co, + v_values: _ArrayLikeFloat_co, + u_weights: _ArrayLikeFloat_co | None = None, + v_weights: _ArrayLikeFloat_co | None = None, +) -> float | np.float64: ... 
+def wasserstein_distance(
+    u_values: _ArrayLikeFloat_co,
+    v_values: _ArrayLikeFloat_co,
+    u_weights: _ArrayLikeFloat_co | None = None,
+    v_weights: _ArrayLikeFloat_co | None = None,
+) -> np.float64: ...
+def energy_distance(
+    u_values: _ArrayLikeFloat_co,
+    v_values: _ArrayLikeFloat_co,
+    u_weights: _ArrayLikeFloat_co | None = None,
+    v_weights: _ArrayLikeFloat_co | None = None,
+) -> np.float64: ...
+
+#
+def find_repeats(arr: _ArrayLikeFloat_co) -> RepeatedResults: ...
+def rankdata(
+    a: _ArrayLikeFloat_co,
+    method: Literal["average", "min", "max", "dense", "ordinal"] = "average",
+    *,
+    axis: int | None = None,
+    nan_policy: NanPolicy = "propagate",
+) -> npt.NDArray[_Real1D]: ...
+def expectile(
+    a: _ArrayLikeFloat_co,
+    alpha: float = 0.5,
+    *,
+    weights: _ArrayLikeFloat_co | None = None,
+) -> np.float64: ...
+def linregress(
+    x: _ArrayLikeFloat_co,
+    y: _ArrayLikeFloat_co | None = None,
+    alternative: Alternative = "two-sided",
+) -> LinregressResult: ...
diff --git a/scipy-stubs/stats/_typing.pyi b/scipy-stubs/stats/_typing.pyi
new file mode 100644
index 00000000..a35f6e22
--- /dev/null
+++ b/scipy-stubs/stats/_typing.pyi
@@ -0,0 +1,43 @@
+# NOTE(scipy-stubs): This is a module that only exists `if typing.TYPE_CHECKING: ...`
+
+import abc
+from typing import final, type_check_only
+from typing_extensions import Self, TypeVarTuple, Unpack
+
+__all__ = ("BaseBunch",)
+
+_Ts = TypeVarTuple("_Ts")
+
+@type_check_only
+class BaseBunch(tuple[Unpack[_Ts]]):
+    # A helper baseclass for annotating the return type of a *specific*
+    # `scipy._lib.bunch._make_tuple_bunch` call.
+    #
+    # NOTE: Subtypes must implement:
+    #
+    # - `def __new__(_cls, {fields}, *, {extra_fields}) -> Self: ...`
+    # - `def __init__(self, /, {fields}, *, {extra_fields}) -> None: ...`
+    #
+    # NOTE: The `_cls` parameter in `__new__` must be kept as-is, and shouldn't
+    # be made positional only. 
+    #
+    # NOTE: Each field in `{fields}` and `{extra_fields}` must be implemented as
+    # a (read-only) `@property`
+    # NOTE: The (variadic) generic type parameters correspond to the types of
+    # `{fields}`, **not** `{extra_fields}`
+
+    @abc.abstractmethod
+    def __new__(_cls) -> Self: ...
+    @abc.abstractmethod
+    def __init__(self, /) -> None: ...
+    @final
+    def __getnewargs_ex__(self, /) -> tuple[tuple[Unpack[_Ts]], dict[str, object]]: ...
+
+    # NOTE: `._fields` and `._extra_fields` are mutually exclusive (disjoint)
+    @property
+    def _fields(self, /) -> tuple[str, ...]: ...
+    @property
+    def _extra_fields(self, /) -> tuple[str, ...]: ...
+
+    # NOTE: `._asdict()` includes both `{fields}` and `{extra_fields}`
+    def _asdict(self, /) -> dict[str, object]: ...
diff --git a/tests/stubtest/allowlist.txt b/tests/stubtest/allowlist.txt
index ead125e1..d0ba2b8c 100644
--- a/tests/stubtest/allowlist.txt
+++ b/tests/stubtest/allowlist.txt
@@ -3,6 +3,7 @@ scipy._typing
 scipy.integrate._typing
 scipy.ndimage._typing
 scipy.optimize._typing
+scipy.stats._typing
 # submodules
 scipy._lib.array_api_compat.*