
Use sklearn 1.4 or above #271

Open · wants to merge 9 commits into main
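This PR raises the scikit-learn floor to ^1.4 and brings the codebase along: Python 3.8 is dropped from CI and from the supported range in pyproject.toml, renamed private scikit-learn helpers are imported under their new names, multimetric scoring in the experimental emulator is adapted to the newer _score / _MultimetricScorer calling convention, and doctest and test lines that passed a LogSpaceObjectives transform to a strategy are removed.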
.github/workflows/ci.yml (4 changes: 2 additions & 2 deletions)
@@ -40,7 +40,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python: ["3.8", "3.9", "3.10"]
+        python: ["3.9", "3.10"]
         group: [1, 2, 3, 4, 5]

     steps:
@@ -149,4 +149,4 @@ jobs:
           git push origin $TAG_NAME
         else
           echo "If this was the main branch, I would push a new tag named $TAG_NAME"
-        fi
+        fi
pyproject.toml (4 changes: 2 additions & 2 deletions)
@@ -23,15 +23,15 @@ keywords = [

 [tool.poetry.dependencies]
 # Core dependencies
-python = "^3.8, <3.11"
+python = "^3.9, <3.11"
 pandas = "^1.1.0"
 scipy = ">=1.8.0"
 numpy = "^1.21.0"
 llvmlite = "^0.38.0"
 numba = "^0.55.0"
 fastprogress = "^0.2.3"
 matplotlib = "^3.2.2"
-scikit-learn = "^1.0"
+scikit-learn = "^1.4"
 torch = "^1.11.0"
 skorch = "^0.11.0"
 cython = "^0.29.21"
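For readers less familiar with Poetry constraints: "^3.9, <3.11" resolves to the window >=3.9,<3.11, which matches the trimmed CI matrix above. A minimal sketch of checking that window, assuming the third-party packaging library (not a dependency of this project):

    # Minimal sketch, assuming the third-party "packaging" library is available.
    # Poetry's "^3.9, <3.11" is equivalent to the specifier set below.
    from packaging.specifiers import SpecifierSet

    window = SpecifierSet(">=3.9,<3.11")
    print([v for v in ("3.8", "3.9", "3.10", "3.11") if v in window])
    # -> ['3.9', '3.10'], matching the CI matrix after this change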
summit/benchmarks/experimental_emulator.py (11 changes: 6 additions & 5 deletions)
@@ -37,11 +37,11 @@
     _deprecate_positional_args,
     indexable,
     check_is_fitted,
-    _check_fit_params,
+    _check_method_params,
 )
 from sklearn.utils import check_array, _safe_indexing
-from sklearn.utils.fixes import delayed
-from sklearn.metrics._scorer import _check_multimetric_scoring
+from sklearn.utils.parallel import delayed
+from sklearn.metrics._scorer import _check_multimetric_scoring, _MultimetricScorer

 from scipy.sparse import issparse
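These are renames inside scikit-learn: _check_fit_params became _check_method_params in scikit-learn 1.4, and delayed moved from sklearn.utils.fixes to sklearn.utils.parallel. If one wanted to tolerate both old and new scikit-learn instead of pinning ^1.4, a fallback import could look like the sketch below (hypothetical, not part of this PR; these are private, underscore-prefixed APIs with no stability guarantee):

    # Hypothetical version-tolerant imports, not part of this PR. Private
    # scikit-learn APIs may move or disappear between releases.
    try:
        from sklearn.utils.validation import _check_method_params  # scikit-learn >= 1.4
    except ImportError:
        from sklearn.utils.validation import _check_fit_params as _check_method_params

    try:
        from sklearn.utils.parallel import delayed  # newer releases
    except ImportError:
        from sklearn.utils.fixes import delayed  # older releases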
@@ -366,7 +366,8 @@ def test(self, **kwargs):
                 scorers = check_scoring(predictor, scoring)
             else:
                 scorers = _check_multimetric_scoring(predictor, scoring)
-            scores_list.append(_score(predictor, X_test, y_test, scorers))
+                scorers = _MultimetricScorer(scorers=scorers)
+            scores_list.append(_score(predictor, X_test, y_test, scorers, score_params=None))
             scores_dict = _aggregate_score_dicts(scores_list)
             for name in scoring:
                 scores = scores_dict.pop(name)
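The extra wrapping is needed because newer scikit-learn expects _score to receive a single callable scorer plus a score_params argument, while _check_multimetric_scoring returns a plain dict of scorers; _MultimetricScorer turns that dict into one callable. A rough, self-contained sketch of the same pattern (assumes scikit-learn >= 1.4; all underscore-prefixed names are private and may change):

    # Rough sketch of the sklearn >= 1.4 multimetric scoring pattern used above;
    # _check_multimetric_scoring, _MultimetricScorer, and _score are private APIs.
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
    from sklearn.model_selection._validation import _score

    X, y = make_classification(random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X, y)

    scorers = _check_multimetric_scoring(clf, ["accuracy", "f1"])  # dict of scorers
    scorer = _MultimetricScorer(scorers=scorers)                   # one callable
    print(_score(clf, X, y, scorer, score_params=None))            # {'accuracy': ..., 'f1': ...}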
@@ -1158,7 +1159,7 @@ def fit(self, X, y=None, *, groups=None, **fit_params):
         refit_metric = self.refit

         X, y, groups = indexable(X, y, groups)
-        fit_params = _check_fit_params(X, fit_params)
+        fit_params = _check_method_params(X, fit_params)

         cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
         n_splits = cv_orig.get_n_splits(X, y, groups)
summit/strategies/base.py (4 changes: 0 additions & 4 deletions)
@@ -527,10 +527,6 @@ class LogSpaceObjectives(Transform):
     >>> columns = [v.name for v in domain.variables]
     >>> values = {("temperature", "DATA"): 60,("flowrate_a", "DATA"): 0.5,("flowrate_b", "DATA"): 0.5,("yield_", "DATA"): 50,("de", "DATA"): 90}
     >>> previous_results = DataSet([values], columns=columns)
-    >>> # Multiobjective transform
-    >>> transform = LogSpaceObjectives(domain)
-    >>> strategy = SNOBFIT(domain, transform=transform)
-    >>> next_experiments = strategy.suggest_experiments(5, previous_results)

     """

summit/tests/test_strategies.py (3 changes: 0 additions & 3 deletions)
@@ -239,9 +239,6 @@ def reset(self):
         ("de", "DATA"): [90, 80],
     }
     previous_results = DataSet(values, columns=columns)
-    transform = LogSpaceObjectives(domain)
-    strategy = MockStrategy(domain, transform=transform)
-    strategy.suggest_experiments(5, previous_results)


 @pytest.mark.parametrize("num_experiments", [1, 2, 4])