diff --git a/autots/evaluator/auto_ts.py b/autots/evaluator/auto_ts.py
index dfe014d2..315c79bc 100644
--- a/autots/evaluator/auto_ts.py
+++ b/autots/evaluator/auto_ts.py
@@ -1548,6 +1548,11 @@ def fit(
         return self
 
     def validation_agg(self):
+        self.initial_results.model_results['Score'] = generate_score(
+            self.initial_results.model_results,
+            metric_weighting=self.metric_weighting,
+            prediction_interval=self.prediction_interval,
+        )
         self.validation_results = copy.copy(self.initial_results)
         self.validation_results = validation_aggregation(
             self.validation_results, df_train=self.df_wide_numeric
@@ -2194,11 +2199,19 @@ def export_template(
                     extra_mods.append(
                         export_template.nsmallest(1, columns=metric).copy()
                     )
+                    # and no ensemble version
+                    extra_mods.append(
+                        export_template[export_template['Ensemble'] == 0].nsmallest(1, columns=metric).copy()
+                    )
             if max_metrics is not None:
                 for metric in max_metrics:
                     extra_mods.append(
                         export_template.nlargest(1, columns=metric).copy()
                     )
+                    # and no ensemble version
+                    extra_mods.append(
+                        export_template[export_template['Ensemble'] == 0].nlargest(1, columns=metric).copy()
+                    )
         if str(max_per_model_class).isdigit():
             export_template = (
                 export_template.sort_values('Score', ascending=True)
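Note on the auto_ts.py hunks above: validation_agg() now recomputes the 'Score' column of initial_results.model_results with generate_score (using the configured metric_weighting and prediction_interval) before aggregating validation results, and export_template() now keeps the best non-ensemble row per requested metric in addition to the overall best. Below is a minimal pandas sketch of that selection logic only; the toy DataFrame, its values, and the 'smape' metric choice are illustrative assumptions, not part of the diff.

    import pandas as pd

    # Toy stand-in for the internal results table: an 'Ensemble' flag column
    # plus one column per error metric (lower smape is better).
    export_template = pd.DataFrame({
        "ID": ["model_a", "ensemble_b", "model_c"],
        "Ensemble": [0, 2, 0],
        "smape": [12.1, 9.8, 10.4],
    })

    extra_mods = []
    for metric in ["smape"]:
        # best row overall for this metric (may be an ensemble)
        extra_mods.append(export_template.nsmallest(1, columns=metric).copy())
        # and the best non-ensemble row, matching the added lines above
        extra_mods.append(
            export_template[export_template["Ensemble"] == 0]
            .nsmallest(1, columns=metric)
            .copy()
        )

    # ensemble_b wins overall; model_c is the best plain (non-ensemble) model
    print(pd.concat(extra_mods).drop_duplicates())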
diff --git a/docs/build/doctrees/environment.pickle b/docs/build/doctrees/environment.pickle
index 1eb5b8c8..9dfa73e4 100644
Binary files a/docs/build/doctrees/environment.pickle and b/docs/build/doctrees/environment.pickle differ
diff --git a/docs/build/doctrees/index.doctree b/docs/build/doctrees/index.doctree
index 1dbbefc0..2948de85 100644
Binary files a/docs/build/doctrees/index.doctree and b/docs/build/doctrees/index.doctree differ
diff --git a/docs/build/doctrees/source/autots.datasets.doctree b/docs/build/doctrees/source/autots.datasets.doctree
index 8d3aa18a..79fd9864 100644
Binary files a/docs/build/doctrees/source/autots.datasets.doctree and b/docs/build/doctrees/source/autots.datasets.doctree differ
diff --git a/docs/build/doctrees/source/autots.doctree b/docs/build/doctrees/source/autots.doctree
index e6653b46..51866332 100644
Binary files a/docs/build/doctrees/source/autots.doctree and b/docs/build/doctrees/source/autots.doctree differ
diff --git a/docs/build/doctrees/source/autots.evaluator.doctree b/docs/build/doctrees/source/autots.evaluator.doctree
index 7703b17c..5a327165 100644
Binary files a/docs/build/doctrees/source/autots.evaluator.doctree and b/docs/build/doctrees/source/autots.evaluator.doctree differ
diff --git a/docs/build/doctrees/source/autots.models.doctree b/docs/build/doctrees/source/autots.models.doctree
index 8c5dadb1..d89b41c4 100644
Binary files a/docs/build/doctrees/source/autots.models.doctree and b/docs/build/doctrees/source/autots.models.doctree differ
diff --git a/docs/build/doctrees/source/autots.templates.doctree b/docs/build/doctrees/source/autots.templates.doctree
index 5e75b3f5..15065bb0 100644
Binary files a/docs/build/doctrees/source/autots.templates.doctree and b/docs/build/doctrees/source/autots.templates.doctree differ
diff --git a/docs/build/doctrees/source/autots.tools.doctree b/docs/build/doctrees/source/autots.tools.doctree
index 922af681..d7c799ce 100644
Binary files a/docs/build/doctrees/source/autots.tools.doctree and b/docs/build/doctrees/source/autots.tools.doctree differ
diff --git a/docs/build/doctrees/source/intro.doctree b/docs/build/doctrees/source/intro.doctree
index dec7f886..ca24dd76 100644
Binary files a/docs/build/doctrees/source/intro.doctree and b/docs/build/doctrees/source/intro.doctree differ
diff --git a/docs/build/doctrees/source/modules.doctree b/docs/build/doctrees/source/modules.doctree
index aeb9a21b..d34662db 100644
Binary files a/docs/build/doctrees/source/modules.doctree and b/docs/build/doctrees/source/modules.doctree differ
diff --git a/docs/build/doctrees/source/tutorial.doctree b/docs/build/doctrees/source/tutorial.doctree
index ed1a5943..45f17f12 100644
Binary files a/docs/build/doctrees/source/tutorial.doctree and b/docs/build/doctrees/source/tutorial.doctree differ
diff --git a/docs/build/html/.buildinfo b/docs/build/html/.buildinfo
index 75514961..4a6d98a6 100644
--- a/docs/build/html/.buildinfo
+++ b/docs/build/html/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 1f7b3cdc5940e728de375b92d31d88b6
+config: cd7239cc7dc0c2f0136aaa8bd24d37d1
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/build/html/_static/basic.css b/docs/build/html/_static/basic.css
index 7577acb1..30fee9d0 100644
--- a/docs/build/html/_static/basic.css
+++ b/docs/build/html/_static/basic.css
@@ -237,6 +237,10 @@ a.headerlink {
     visibility: hidden;
 }
 
+a:visited {
+    color: #551A8B;
+}
+
 h1:hover > a.headerlink,
 h2:hover > a.headerlink,
 h3:hover > a.headerlink,
@@ -670,6 +674,16 @@ dd {
     margin-left: 30px;
 }
 
+.sig dd {
+    margin-top: 0px;
+    margin-bottom: 0px;
+}
+
+.sig dl {
+    margin-top: 0px;
+    margin-bottom: 0px;
+}
+
 dl > dd:last-child,
 dl > dd:last-child > :last-child {
     margin-bottom: 0;
@@ -738,6 +752,14 @@ abbr, acronym {
     cursor: help;
 }
 
+.translated {
+    background-color: rgba(207, 255, 207, 0.2)
+}
+
+.untranslated {
+    background-color: rgba(255, 207, 207, 0.2)
+}
+
 /* -- code displays --------------------------------------------------------- */
 
 pre {
diff --git a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js
index f04691fd..7ce78999 100644
--- a/docs/build/html/_static/documentation_options.js
+++ b/docs/build/html/_static/documentation_options.js
@@ -1,5 +1,4 @@
-var DOCUMENTATION_OPTIONS = {
-    URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
+const DOCUMENTATION_OPTIONS = {
     VERSION: '0.6.10',
     LANGUAGE: 'en',
     COLLAPSE_INDEX: false,
diff --git a/docs/build/html/_static/searchtools.js b/docs/build/html/_static/searchtools.js
index 97d56a74..7918c3fa 100644
--- a/docs/build/html/_static/searchtools.js
+++ b/docs/build/html/_static/searchtools.js
@@ -57,12 +57,12 @@ const _removeChildren = (element) => {
 const _escapeRegExp = (string) =>
   string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
 
-const _displayItem = (item, searchTerms) => {
+const _displayItem = (item, searchTerms, highlightTerms) => {
   const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
-  const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT;
   const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
   const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
   const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
+  const contentRoot = document.documentElement.dataset.content_root;
 
   const [docName, title, anchor, descr, score, _filename] = item;
 
@@ -75,20 +75,24 @@ const _displayItem = (item, searchTerms) => {
     if (dirname.match(/\/index\/$/))
       dirname = dirname.substring(0, dirname.length - 6);
     else if (dirname === "index/") dirname = "";
-    requestUrl = docUrlRoot + dirname;
+    requestUrl = contentRoot + dirname;
     linkUrl = requestUrl;
   } else {
     // normal html builders
-    requestUrl = docUrlRoot + docName + docFileSuffix;
+    requestUrl = contentRoot + docName + docFileSuffix;
     linkUrl = docName + docLinkSuffix;
   }
   let linkEl = listItem.appendChild(document.createElement("a"));
   linkEl.href = linkUrl + anchor;
   linkEl.dataset.score = score;
   linkEl.innerHTML = title;
-  if (descr)
+  if (descr) {
     listItem.appendChild(document.createElement("span")).innerHTML =
       " (" + descr + ")";
+    // highlight search terms in the description
+    if (SPHINX_HIGHLIGHT_ENABLED)  // set in sphinx_highlight.js
+      highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+  }
   else if (showSearchSummary)
     fetch(requestUrl)
       .then((responseData) => responseData.text())
@@ -97,6 +101,9 @@
         listItem.appendChild(
           Search.makeSearchSummary(data, searchTerms)
         );
+        // highlight search terms in the summary
+        if (SPHINX_HIGHLIGHT_ENABLED)  // set in sphinx_highlight.js
+          highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
       });
   Search.output.appendChild(listItem);
 };
@@ -115,14 +122,15 @@ const _finishSearch = (resultCount) => {
 const _displayNextItem = (
   results,
   resultCount,
-  searchTerms
+  searchTerms,
+  highlightTerms,
 ) => {
   // results left, load the summary and display it
   // this is intended to be dynamic (don't sub resultsCount)
   if (results.length) {
-    _displayItem(results.pop(), searchTerms);
+    _displayItem(results.pop(), searchTerms, highlightTerms);
     setTimeout(
-      () => _displayNextItem(results, resultCount, searchTerms),
+      () => _displayNextItem(results, resultCount, searchTerms, highlightTerms),
       5
     );
   }
@@ -360,7 +368,7 @@ const Search = {
     // console.info("search results:", Search.lastresults);
 
     // print the results
-    _displayNextItem(results, results.length, searchTerms);
+    _displayNextItem(results, results.length, searchTerms, highlightTerms);
   },
 
   /**
diff --git a/docs/build/html/_static/sphinx_highlight.js b/docs/build/html/_static/sphinx_highlight.js
index aae669d7..8a96c69a 100644
--- a/docs/build/html/_static/sphinx_highlight.js
+++ b/docs/build/html/_static/sphinx_highlight.js
@@ -29,14 +29,19 @@ const _highlight = (node, addItems, text, className) => {
     }
     span.appendChild(document.createTextNode(val.substr(pos, text.length)));
 
+    const rest = document.createTextNode(val.substr(pos + text.length));
     parent.insertBefore(
       span,
       parent.insertBefore(
-        document.createTextNode(val.substr(pos + text.length)),
+        rest,
        node.nextSibling
       )
     );
     node.nodeValue = val.substr(0, pos);
+    /* There may be more occurrences of search term in this node. So call this
+     * function recursively on the remaining fragment.
+     */
+    _highlight(rest, addItems, text, className);
 
     if (isInSVG) {
       const rect = document.createElementNS(
@@ -140,5 +145,10 @@
   },
 };
 
-_ready(SphinxHighlight.highlightSearchWords);
-_ready(SphinxHighlight.initEscapeListener);
+_ready(() => {
+  /* Do not call highlightSearchWords() when we are on the search page.
+   * It will highlight words from the *previous* search query.
+   */
+  if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
+  SphinxHighlight.initEscapeListener();
+});
diff --git a/docs/build/html/genindex.html b/docs/build/html/genindex.html
index fac1e00e..f8d31de9 100644
--- a/docs/build/html/genindex.html
+++ b/docs/build/html/genindex.html
diff --git a/docs/build/html/index.html b/docs/build/html/index.html
index c0bf0f78..fa094eb8 100644
--- a/docs/build/html/index.html
+++ b/docs/build/html/index.html
diff --git a/docs/build/html/py-modindex.html b/docs/build/html/py-modindex.html
index b359b1fc..70621795 100644
--- a/docs/build/html/py-modindex.html
+++ b/docs/build/html/py-modindex.html
diff --git a/docs/build/html/search.html b/docs/build/html/search.html
index 3df5ee7b..5174fc8f 100644
--- a/docs/build/html/search.html
+++ b/docs/build/html/search.html
diff --git a/docs/build/html/searchindex.js b/docs/build/html/searchindex.js
index 1dde861e..f5e5d72c 100644
--- a/docs/build/html/searchindex.js
+++ b/docs/build/html/searchindex.js
"horizontal_template_to_model_list": [1, 3], "model_forecast": [1, 3, 8, 9], "random_model": [1, 3], "remove_leading_zero": [1, 3, 9], "trans_dict_recomb": [1, 3], "unpack_ensemble_model": [1, 3, 5], "validation_aggreg": [1, 3], "auto_t": [1, 8, 9], "best_model": [1, 3, 5, 8, 9], "best_model_nam": [1, 3, 8, 9], "best_model_param": [1, 3, 8, 9], "best_model_transformation_param": [1, 3, 8, 9], "best_model_ensembl": [1, 3, 8, 9], "regression_check": [1, 3, 8], "df_wide_numer": [1, 3, 7, 8, 9], "score_per_seri": [1, 3, 4, 8], "best_model_per_series_map": [1, 3, 8], "best_model_per_series_scor": [1, 3, 8], "diagnose_param": [1, 3, 8], "expand_horizont": [1, 3, 8], "export_best_model": [1, 3, 8], "export_templ": [1, 3, 5, 8, 9], "failure_r": [1, 3, 8], "get_metric_corr": [1, 3, 8], "horizontal_per_gener": [1, 3, 8], "horizontal_to_df": [1, 3, 8], "import_best_model": [1, 3, 8], "import_result": [1, 3, 7, 8], "import_templ": [1, 3, 8, 9], "list_failed_model_typ": [1, 3, 8], "load_templ": [1, 3, 8], "mosaic_to_df": [1, 3, 8, 9], "parse_best_model": [1, 3, 8], "plot_back_forecast": [1, 3, 8], "plot_backforecast": [1, 3, 8, 9], "plot_generation_loss": [1, 3, 8, 9], "plot_horizont": [1, 3, 8, 9], "plot_horizontal_model_count": [1, 3, 8], "plot_horizontal_per_gener": [1, 3, 8, 9], "plot_horizontal_transform": [1, 3, 8, 9], "plot_metric_corr": [1, 3, 8], "plot_per_series_error": [1, 3, 8, 9], "plot_per_series_map": [1, 3, 8, 9], "plot_per_series_smap": [1, 3, 8], "plot_transformer_failure_r": [1, 3, 8], "plot_valid": [1, 3, 8], "result": [1, 2, 3, 4, 6, 7, 8, 9], "retrieve_validation_forecast": [1, 3, 8], "save_templ": [1, 3, 8], "validation_agg": [1, 3, 8], "initial_result": [1, 3, 4, 8], "model_result": [1, 3, 4, 5, 7, 8], "error_correl": [1, 3], "fake_regressor": [1, 3, 9], "benchmark": [1, 8], "run": [1, 2, 3, 4, 5, 6, 7], "event_forecast": [1, 8], "eventriskforecast": [1, 3, 8, 9], "predict_histor": [1, 3, 8, 9], "generate_result_window": [1, 3, 8], "generate_risk_arrai": [1, 3, 8], "generate_historic_risk_arrai": [1, 3, 8, 9], "set_limit": [1, 3, 8], "plot_ev": [1, 3, 8, 9], "extract_result_window": [1, 3], "extract_window_index": [1, 3], "set_limit_forecast": [1, 3], "set_limit_forecast_histor": [1, 3], "metric": [1, 2, 4, 7, 8], "array_last_v": [1, 3], "chi_squared_hist_distribution_loss": [1, 3], "contain": [1, 3, 4, 6, 9], "contour": [1, 3, 4, 9], "default_scal": [1, 3], "dwae": [1, 3], "full_metric_evalu": [1, 3], "kde": [1, 3], "kde_kl_dist": [1, 3], "kl_diverg": [1, 3], "linear": [1, 3, 4, 5, 6, 9], "mae": [1, 3, 4, 9], "mda": [1, 3, 9], "mean_absolute_differential_error": [1, 3], "mean_absolute_error": [1, 3], "meda": [1, 3], "median_absolute_error": [1, 3], "mlvb": [1, 3], "mqae": [1, 3, 4], "msle": [1, 3], "numpy_ffil": [1, 3], "oda": [1, 3], "pinball_loss": [1, 3], "precomp_wasserstein": [1, 3], "qae": [1, 3], "rmse": [1, 3, 4, 9], "root_mean_square_error": [1, 3], "rp": [1, 3], "scaled_pinball_loss": [1, 3], "smape": [1, 3, 4, 9], "smooth": [1, 3, 4, 6, 9], "spl": [1, 3, 4, 9], "symmetric_mean_absolute_percentage_error": [1, 3], "threshold_loss": [1, 3], "unsorted_wasserstein": [1, 3], "wasserstein": [1, 3], "valid": [1, 4, 7, 8], "extract_seasonal_val_period": [1, 3], "generate_validation_indic": [1, 3], "validate_num_valid": [1, 3], "arch": [1, 3, 8, 9], "get_param": [1, 4, 8], "base": [1, 3, 6, 8, 9], "modelobject": [1, 3, 4], "basic_profil": [1, 4], "create_forecast_index": [1, 4, 8], "predictionobject": [1, 3, 4], "model_nam": [1, 3, 4, 9], "model_paramet": [1, 4], 
"transformation_paramet": [1, 4], "upper_forecast": [1, 3, 4, 7, 9], "lower_forecast": [1, 3, 4, 7, 9], "long_form_result": [1, 4, 9], "total_runtim": [1, 4], "apply_constraint": [1, 4], "extract_ensemble_runtim": [1, 4], "plot_df": [1, 4], "plot_ensemble_runtim": [1, 4], "plot_grid": [1, 4], "calculate_peak_dens": [1, 4], "create_seaborn_palette_from_cmap": [1, 4], "extract_single_series_from_horz": [1, 4], "extract_single_transform": [1, 4], "plot_distribut": [1, 4], "averagevaluena": [1, 3, 4, 5, 9], "balltreemultivariatemotif": [1, 4, 9], "constantna": [1, 4, 9], "fft": [1, 4, 8, 9], "kalmanstatespac": [1, 4, 9], "cost_funct": [1, 4], "tune_observational_nois": [1, 4], "lastvaluena": [1, 3, 4, 9], "metricmotif": [1, 3, 4, 9], "motif": [1, 3, 4, 9], "motifsimul": [1, 4, 9], "nvar": [1, 4, 9], "seasonalna": [1, 3, 4, 9], "seasonalitymotif": [1, 3, 4, 5, 9], "sectionalmotif": [1, 3, 4, 9], "zeroesna": [1, 3, 4], "looped_motif": [1, 4], "predict_reservoir": [1, 4], "cassandra": [1, 5, 6, 8, 9], "bayesianmultioutputregress": [1, 4], "sample_posterior": [1, 4], "plot_forecast": [1, 4, 8], "plot_compon": [1, 4, 8], "plot_trend": [1, 4, 8], "return_compon": [1, 3, 4, 8], "analyze_trend": [1, 4, 8], "auto_fit": [1, 4, 8], "base_scal": [1, 4, 8], "compare_actual_compon": [1, 4, 8], "create_t": [1, 4, 8], "cross_valid": [1, 4, 8, 9], "feature_import": [1, 4, 8], "next_fit": [1, 4, 8], "plot_th": [1, 4, 8], "predict_new_product": [1, 4, 8], "process_compon": [1, 4, 6, 8], "rolling_trend": [1, 4, 8], "scale_data": [1, 4, 8], "to_origin_spac": [1, 4, 8], "treatment_causal_impact": [1, 4, 8], "holiday_detector": [1, 4, 8], "score": [1, 3, 4, 5, 6, 8, 9], "holiday_count": [1, 4, 8], "holidai": [1, 3, 4, 8, 9], "param": [1, 2, 3, 4, 6, 8, 9], "x_arrai": [1, 4, 8], "predict_x_arrai": [1, 4, 8], "trend_train": [1, 4, 8], "predicted_trend": [1, 4, 8], "clean_regressor": [1, 4], "cost_function_dwa": [1, 4], "cost_function_l1": [1, 4], "cost_function_l1_posit": [1, 4], "cost_function_l2": [1, 4], "cost_function_quantil": [1, 4], "fit_linear_model": [1, 4], "lstsq_minim": [1, 4], "lstsq_solv": [1, 4], "dnn": [1, 8], "kerasrnn": [1, 4], "transformer_build_model": [1, 4], "transformer_encod": [1, 4], "ensembl": [1, 3, 5, 7, 8], "bestnensembl": [1, 4], "distensembl": [1, 4], "ensembleforecast": [1, 4], "ensembletemplategener": [1, 4], "hdistensembl": [1, 4], "horizontalensembl": [1, 4], "horizontaltemplategener": [1, 4], "mosaicensembl": [1, 4], "find_pattern": [1, 4], "generalize_horizont": [1, 4], "generate_crosshair_scor": [1, 4], "generate_crosshair_score_list": [1, 4], "generate_mosaic_templ": [1, 4], "horizontal_classifi": [1, 4], "horizontal_xi": [1, 4], "is_horizont": [1, 4], "is_mosa": [1, 4], "mlens_help": [1, 4], "mosaic_classifi": [1, 4], "mosaic_or_horizont": [1, 4], "mosaic_to_horizont": [1, 4, 9], "mosaic_xi": [1, 4], "n_limited_horz": [1, 4], "parse_forecast_length": [1, 4], "parse_horizont": [1, 4], "parse_mosa": [1, 4], "process_mosaic_arrai": [1, 4], "summarize_seri": [1, 4], "gluont": [1, 3, 8, 9], "greykit": [1, 8, 9], "seek_the_oracl": [1, 4], "matrix_var": [1, 8], "latc": [1, 4, 9], "mar": [1, 4, 9], "rrvar": [1, 4, 9], "tmf": [1, 4, 9], "conj_grad_w": [1, 4], "conj_grad_x": [1, 4], "dmd": [1, 4], "dmd4cast": [1, 4], "ell_w": [1, 4], "ell_x": [1, 4], "generate_psi": [1, 4], "latc_imput": [1, 4], "latc_predictor": [1, 4], "mat2ten": [1, 4], "svt_tnn": [1, 4], "ten2mat": [1, 4], "update_cg": [1, 4], "var": [1, 4, 9], "var4cast": [1, 4], "mlensembl": [1, 8], "create_featur": [1, 4], 
"model_list": [1, 3, 7, 8, 9], "auto_model_list": [1, 4], "model_list_to_dict": [1, 4], "neural_forecast": [1, 8], "neuralforecast": [1, 4, 5, 9], "prophet": [1, 3, 6, 8, 9], "fbprophet": [1, 4, 9], "neuralprophet": [1, 4, 9], "pytorch": [1, 8, 9], "pytorchforecast": [1, 4, 9], "sklearn": [1, 6, 7, 8, 9], "componentanalysi": [1, 4, 9], "datepartregress": [1, 3, 4, 5, 6, 9], "multivariateregress": [1, 4, 9], "preprocessingregress": [1, 4, 9], "rollingregress": [1, 4, 9], "univariateregress": [1, 4, 9], "vectorizedmultioutputgpr": [1, 4], "predict_proba": [1, 4], "windowregress": [1, 4, 9], "generate_classifier_param": [1, 4], "generate_regressor_param": [1, 4], "retrieve_classifi": [1, 4], "retrieve_regressor": [1, 4], "rolling_x_regressor": [1, 4], "rolling_x_regressor_regressor": [1, 4], "ardl": [1, 4, 9], "arima": [1, 4, 5, 6, 9], "dynamicfactor": [1, 4, 9], "dynamicfactormq": [1, 4, 9], "et": [1, 3, 4, 6, 9], "glm": [1, 3, 4, 6, 9], "gl": [1, 3, 4, 6, 9], "theta": [1, 4, 9], "unobservedcompon": [1, 4, 9], "varmax": [1, 4, 9], "vecm": [1, 4, 6, 9], "arima_seek_the_oracl": [1, 4], "glm_forecast_by_column": [1, 4], "tfp": [1, 8], "tfpregress": [1, 4, 9], "tfpregressor": [1, 4], "tensorflowst": [1, 4, 9], "tide": [1, 5, 8, 9], "timecovari": [1, 4], "get_covari": [1, 4], "timeseriesdata": [1, 4], "test_val_gen": [1, 4], "tf_dataset": [1, 4], "train_gen": [1, 4], "get_holidai": [1, 4], "mae_loss": [1, 4], "mape": [1, 3, 4], "nrmse": [1, 4], "wape": [1, 4], "gener": [1, 2, 3, 4, 6, 7, 8, 9], "general_templ": [1, 5], "tool": [1, 2, 3, 4, 7, 8, 9], "anomaly_util": [1, 8], "anomaly_df_to_holidai": [1, 6], "anomaly_new_param": [1, 6], "create_dates_df": [1, 6], "detect_anomali": [1, 6], "holiday_new_param": [1, 6], "limits_to_anomali": [1, 6], "loop_sk_outli": [1, 6], "nonparametric_multivari": [1, 6], "sk_outlier": [1, 6], "values_to_anomali": [1, 6], "zscore_survival_funct": [1, 6], "calendar": [1, 3, 8], "gregorian_to_chines": [1, 6], "gregorian_to_christian_lunar": [1, 6], "gregorian_to_hebrew": [1, 6], "gregorian_to_islam": [1, 6], "heb_is_leap": [1, 6], "lunar_from_lunar": [1, 6], "lunar_from_lunar_ful": [1, 6], "to_jd": [1, 6], "cointegr": [1, 4, 8], "btcd_decompos": [1, 6], "coint_johansen": [1, 6], "fourier_seri": [1, 6], "lagmat": [1, 6], "cpu_count": [1, 8], "set_n_job": [1, 6], "fast_kalman": [1, 8], "usag": 1, "exampl": [1, 2, 3, 4, 7], "gaussian": [1, 4, 6], "empti": [1, 2, 3, 4, 6], "unvectorize_st": [1, 6], "unvectorize_var": [1, 6], "kalmanfilt": [1, 6], "comput": [1, 3, 4, 6], "em": [1, 6], "em_observation_nois": [1, 6], "em_process_nois": [1, 6], "predict_next": [1, 6], "predict_observ": [1, 6], "smooth_curr": [1, 6], "updat": [1, 4, 6, 9], "autoshap": [1, 6], "ddot": [1, 6], "ddot_t_right": [1, 6], "ddot_t_right_old": [1, 6], "dinv": [1, 6], "douter": [1, 6], "em_initial_st": [1, 6], "ensure_matrix": [1, 6], "holt_winters_damped_matric": [1, 6], "new_kalman_param": [1, 6], "priv_smooth": [1, 6], "priv_update_with_nan_check": [1, 6], "random_state_spac": [1, 6], "update_with_nan_check": [1, 6], "fourier_extrapol": [1, 6], "hierarchi": [1, 3, 8], "reconcil": [1, 6], "holiday_flag": [1, 6], "query_holidai": [1, 6], "imput": [1, 4, 8], "fillna": [1, 3, 5, 6, 9], "seasonalitymotifimput": [1, 6], "simpleseasonalitymotifimput": [1, 6], "biased_ffil": [1, 6], "fake_date_fil": [1, 6], "fake_date_fill_old": [1, 6], "fill_forward": [1, 6], "fill_forward_alt": [1, 6], "fill_mean": [1, 6], "fill_mean_old": [1, 6], "fill_median": [1, 6], "fill_median_old": [1, 6], "fill_zero": [1, 6], 
"fillna_np": [1, 6], "rolling_mean": [1, 6], "lunar": [1, 8], "dco": [1, 6], "dsin": [1, 6], "fixangl": [1, 6], "kepler": [1, 6], "moon_phas": [1, 6], "moon_phase_df": [1, 6], "phase_str": [1, 6], "todeg": [1, 6], "torad": [1, 6], "percentil": [1, 8], "nan_percentil": [1, 6], "nan_quantil": [1, 6], "trimmed_mean": [1, 6], "probabilist": [1, 3, 4, 7, 8, 9], "point_to_prob": [1, 6], "variable_point_to_prob": [1, 6], "historic_quantil": [1, 6], "inferred_norm": [1, 6], "percentileofscore_appli": [1, 6], "profil": [1, 8], "data_profil": [1, 6], "regressor": [1, 3, 4, 7, 8], "create_lagged_regressor": [1, 6, 8], "create_regressor": [1, 6, 8], "season": [1, 3, 4, 8, 9], "create_datepart_compon": [1, 6], "create_seasonality_featur": [1, 6], "date_part": [1, 6], "fourier_df": [1, 6], "random_datepart": [1, 6], "seasonal_independent_match": [1, 6], "seasonal_int": [1, 6], "seasonal_window_match": [1, 6], "shape": [1, 2, 3, 4, 7, 8, 9], "numerictransform": [1, 6], "fit_transform": [1, 6, 8, 9], "inverse_transform": [1, 6, 7, 8, 9], "clean_weight": [1, 6], "df_cleanup": [1, 6], "freq_to_timedelta": [1, 6], "infer_frequ": [1, 6, 8], "long_to_wid": [1, 6, 8, 9], "simple_train_test_split": [1, 6], "split_digits_and_non_digit": [1, 6], "subset_seri": [1, 6], "wide_to_3d": [1, 6], "threshold": [1, 3, 4, 8, 9], "nonparametricthreshold": [1, 6], "compare_to_epsilon": [1, 6], "find_epsilon": [1, 6], "prune_anom": [1, 6], "score_anomali": [1, 6], "consecutive_group": [1, 6], "nonparametr": [1, 3, 6], "alignlastdiff": [1, 6], "alignlastvalu": [1, 6], "find_centerpoint": [1, 6], "anomalyremov": [1, 6], "bkbandpassfilt": [1, 6], "btcd": [1, 6], "centerlastvalu": [1, 6], "centersplit": [1, 6], "clipoutli": [1, 6], "cumsumtransform": [1, 6], "datepartregressiontransform": [1, 6], "detrend": [1, 4, 6, 9], "diffsmooth": [1, 6], "differencedtransform": [1, 3, 6, 9], "discret": [1, 6], "ewmafilt": [1, 6], "emptytransform": [1, 6], "fftdecomposit": [1, 6], "fftfilter": [1, 6], "fastica": [1, 6], "generaltransform": [1, 6, 8, 9], "fill_na": [1, 6, 8], "retrieve_transform": [1, 6, 8], "hpfilter": [1, 6], "historicvalu": [1, 6], "holidaytransform": [1, 6], "intermittentoccurr": [1, 6], "kalmansmooth": [1, 6], "levelshiftmag": [1, 6], "levelshifttransform": [1, 6], "locallineartrend": [1, 6], "meandiffer": [1, 6], "pca": [1, 4, 6], "pctchangetransform": [1, 6], "positiveshift": [1, 6], "randomtransform": [1, 6, 8], "regressionfilt": [1, 6], "replaceconst": [1, 6], "rollingmeantransform": [1, 3, 6], "round": [1, 3, 6, 7], "stlfilter": [1, 6], "scipyfilt": [1, 6, 9], "seasonaldiffer": [1, 6], "sintrend": [1, 6], "fit_sin": [1, 6], "slice": [1, 3, 6, 9], "statsmodelsfilt": [1, 6], "bkfilter": [1, 6, 9], "cffilter": [1, 6], "convolution_filt": [1, 6], "bkfilter_st": [1, 6], "clip_outli": [1, 6], "exponential_decai": [1, 6], "get_transformer_param": [1, 6], "random_clean": [1, 6], "remove_outli": [1, 6], "simple_context_slic": [1, 6], "transformer_list_to_dict": [1, 6], "window_funct": [1, 8], "chunk_reshap": [1, 6], "last_window": [1, 6], "np_2d_arang": [1, 6], "retrieve_closest_indic": [1, 6], "rolling_window_view": [1, 6], "sliding_window_view": [1, 6], "window_id_mak": [1, 6], "window_lin_reg": [1, 6], "window_lin_reg_mean": [1, 6], "window_lin_reg_mean_no_nan": [1, 6], "window_mak": [1, 6], "window_maker_2": [1, 6], "window_maker_3": [1, 6], "window_sum_mean": [1, 6], "window_sum_mean_nan_tail": [1, 6], "window_sum_nan_mean": [1, 6], "select": [1, 4, 6, 7, 9], "http": [1, 2, 3, 4, 6, 9], "github": [1, 4, 6, 7, 9], "com": 
[1, 2, 4, 6, 9], "winedarksea": 1, "class": [1, 3, 4, 6, 7, 9], "output": [1, 2, 3, 4, 6, 7, 9], "multivari": [1, 3, 4, 6, 7, 9], "method": [1, 3, 4, 5, 6, 7, 9], "zscore": [1, 3, 6], "transform_dict": [1, 3, 6], "transformation_param": [1, 3, 4, 6, 9], "0": [1, 2, 3, 4, 5, 6, 7, 9], "datepart_method": [1, 3, 4, 6], "simple_3": [1, 3, 6], "regression_model": [1, 3, 4, 5, 6], "elasticnet": [1, 3, 6], "model_param": [1, 3, 4, 6, 9], "forecast_param": [1, 3, 6, 9], "none": [1, 2, 3, 4, 6, 7, 9], "method_param": [1, 3, 6], "eval_period": [1, 3, 6, 9], "isolated_onli": [1, 3, 6], "fals": [1, 2, 3, 4, 5, 6, 7, 9], "n_job": [1, 3, 4, 6, 7, 9], "1": [1, 2, 3, 4, 5, 6, 7, 9], "object": [1, 2, 3, 4, 6, 7, 9], "df": [1, 2, 3, 4, 6, 7, 9], "all": [1, 2, 3, 4, 6, 7], "return": [1, 2, 3, 4, 6], "paramet": [1, 2, 3, 4, 6, 7], "pd": [1, 3, 4, 5, 6, 9], "datafram": [1, 2, 3, 4, 6, 7, 9], "wide": [1, 2, 3, 4, 6, 7], "style": [1, 2, 3, 4, 6, 7, 9], "classif": [1, 3, 6], "outlier": [1, 3, 6, 9], "": [1, 3, 4, 6, 7, 9], "static": [1, 3, 4, 6], "random": [1, 2, 3, 4, 6, 9], "new": [1, 3, 4, 6, 9], "combin": [1, 3, 4, 6, 7, 9], "str": [1, 2, 3, 4, 6, 9], "fast": [1, 3, 4, 5, 6, 7, 9], "deep": [1, 3, 7, 9], "default": [1, 2, 3, 4, 6, 7, 9], "ani": [1, 3, 4, 6, 7, 9], "name": [1, 2, 3, 4, 6, 7], "ie": [1, 2, 3, 4, 6, 7, 9], "iqr": [1, 3], "specifi": [1, 3, 4, 6, 9], "onli": [1, 3, 4, 6, 7, 9], "series_nam": [1, 3], "titl": [1, 3, 4], "plot_kwarg": [1, 3], "A": [1, 3, 4, 6, 7], "decisiontre": [1, 3, 4, 5, 6], "ar": [1, 2, 3, 4, 6, 7, 9], "nonstandard": [1, 3, 6], "forecast_length": [1, 3, 4, 6, 7, 9], "int": [1, 2, 3, 4, 6], "14": [1, 3, 4, 9], "frequenc": [1, 2, 3, 4, 6, 7], "infer": [1, 3, 4, 6, 7, 9], "prediction_interv": [1, 3, 4, 6, 7, 9], "float": [1, 2, 3, 4, 6, 9], "9": [1, 3, 4, 6, 7, 9], "max_gener": [1, 3, 7, 9], "20": [1, 2, 3, 4, 6, 9], "no_neg": [1, 3, 9], "bool": [1, 2, 3, 4, 6], "constraint": [1, 3, 4, 9], "initial_templ": [1, 3, 9], "random_se": [1, 2, 3, 4, 6, 9], "2022": [1, 3, 4, 6], "holiday_countri": [1, 3, 4, 6], "u": [1, 2, 3, 4, 6, 9], "subset": [1, 3, 4, 7, 9], "aggfunc": [1, 3, 6, 7, 9], "first": [1, 2, 3, 4, 6, 7, 9], "na_toler": [1, 3, 6], "metric_weight": [1, 3, 7, 9], "dict": [1, 2, 3, 4, 6, 7], "containment_weight": [1, 3, 9], "contour_weight": [1, 3, 9], "01": [1, 2, 3, 4, 6, 7, 9], "imle_weight": [1, 3, 9], "made_weight": [1, 3, 9], "05": [1, 2, 3, 4, 6, 9], "mae_weight": [1, 3, 9], "2": [1, 2, 3, 4, 6, 7, 9], "mage_weight": [1, 3, 9], "mle_weight": [1, 3, 9], "oda_weight": [1, 3], "001": [1, 3, 4, 6], "rmse_weight": [1, 3, 9], "runtime_weight": [1, 3, 7, 9], "smape_weight": [1, 3, 9], "5": [1, 2, 3, 4, 5, 6, 9], "spl_weight": [1, 3, 9], "wasserstein_weight": [1, 3], "drop_most_rec": [1, 3, 6, 7, 9], "drop_data_older_than_period": [1, 3, 6, 9], "transformer_list": [1, 3, 5, 6, 7, 9], "auto": [1, 3, 4, 6, 7, 9], "transformer_max_depth": [1, 3, 5, 6, 7], "models_mod": [1, 3, 9], "num_valid": [1, 3, 4, 5, 7, 9], "models_to_valid": [1, 3, 7, 9], "15": [1, 3, 4, 6, 9], "max_per_model_class": [1, 3, 5, 9], "validation_method": [1, 3, 4, 7, 9], "backward": [1, 3, 4, 6, 7, 9], "min_allowed_train_perc": [1, 3, 4, 6], "prefill_na": [1, 3, 6, 9], "introduce_na": [1, 3], "preclean": [1, 3], "model_interrupt": [1, 3, 7], "true": [1, 2, 3, 4, 5, 6, 7, 9], "generation_timeout": [1, 3], "current_model_fil": [1, 3], "force_gc": [1, 3], "verbos": [1, 3, 4, 6, 9], "genet": [1, 3, 7, 9], "algorithm": [1, 3, 4, 6, 7, 9], "number": [1, 2, 3, 4, 6, 7, 9], "period": [1, 2, 3, 4, 6, 9], "over": [1, 3, 4, 
6, 7, 9], "which": [1, 2, 3, 4, 6, 7, 9], "can": [1, 2, 3, 4, 6, 7], "overriden": [1, 3], "later": [1, 3, 6], "when": [1, 3, 4, 6, 7, 9], "you": [1, 3, 4, 6, 7], "don": [1, 3, 4, 6, 9], "t": [1, 2, 3, 4, 5, 6], "have": [1, 2, 3, 4, 6, 7, 9], "much": [1, 2, 3, 6, 9], "histor": [1, 3, 4, 6, 9], "small": [1, 3, 4, 6, 9], "length": [1, 2, 3, 4, 6, 9], "full": [1, 3, 6, 9], "desir": [1, 3, 4, 6, 9], "lenght": [1, 3], "usual": [1, 2, 3, 4, 6, 7, 9], "best": [1, 3, 4, 6, 7, 9], "possibl": [1, 3, 4, 6, 7, 9], "approach": [1, 3, 4, 6, 9], "given": [1, 3, 4, 6, 7, 9], "limit": [1, 3, 4, 6, 7, 9], "specif": [1, 2, 3, 4, 6, 7, 9], "datetim": [1, 2, 3, 4, 6, 7, 9], "offset": [1, 3, 6, 9], "forc": [1, 3, 4, 9], "rollup": [1, 3, 9], "daili": [1, 2, 3, 4, 6, 7, 9], "input": [1, 3, 4, 6, 7, 9], "m": [1, 2, 3, 4, 5, 6, 9], "monthli": [1, 2, 3, 6, 7, 9], "uncertainti": [1, 3, 4, 6], "rang": [1, 3, 4, 6, 9], "upper": [1, 3, 4, 6, 7, 9], "lower": [1, 3, 4, 6, 7, 9], "adjust": [1, 3, 4, 6, 7, 9], "rare": [1, 3, 4, 9], "match": [1, 2, 3, 4, 6, 9], "actual": [1, 3, 4, 6, 9], "more": [1, 2, 3, 4, 6, 7], "longer": [1, 3, 9], "runtim": [1, 3, 4, 7, 9], "better": [1, 2, 3, 4, 9], "accuraci": [1, 3, 4, 7, 9], "It": [1, 3, 4, 6, 7, 9], "call": [1, 2, 3, 4, 6, 9], "max": [1, 2, 3, 4, 6, 7, 9], "becaus": [1, 3, 4, 6, 7, 9], "somedai": [1, 3], "earli": [1, 3], "stop": [1, 3, 6, 7], "option": [1, 3, 4, 6, 7], "now": [1, 3, 4, 6, 9], "thi": [1, 2, 3, 4, 6, 7, 9], "just": [1, 2, 3, 4, 6], "exact": [1, 3, 6], "neg": [1, 3, 4], "up": [1, 2, 3, 6, 9], "valu": [1, 2, 3, 4, 6, 7, 9], "st": [1, 2, 3, 4, 6, 9], "dev": [1, 3, 4, 6, 9], "abov": [1, 3, 4, 6, 9], "below": [1, 2, 3, 6, 9], "min": [1, 3, 4, 9], "constrain": [1, 3, 6, 9], "also": [1, 3, 4, 6, 7], "instead": [1, 2, 3, 4, 6], "accept": [1, 3, 6, 9], "dictionari": [1, 3, 4, 6, 9], "follow": [1, 3, 4, 6, 9], "kei": [1, 2, 3, 4, 9], "constraint_method": [1, 3, 4], "one": [1, 3, 4, 6, 9], "stdev_min": [1, 3, 4], "stdev": [1, 3, 4], "mean": [1, 3, 4, 5, 6, 9], "absolut": [1, 3, 4, 9], "arrai": [1, 3, 4, 6, 9], "final": [1, 3, 4, 6, 9], "each": [1, 2, 3, 4, 6, 7, 9], "quantil": [1, 3, 4, 6, 9], "constraint_regular": [1, 3, 4], "where": [1, 3, 4, 6, 7, 9], "hard": [1, 3, 4, 9], "cutoff": [1, 3, 4, 6], "between": [1, 2, 3, 4, 6, 7, 9], "penalti": [1, 3, 4], "term": [1, 3, 4], "upper_constraint": [1, 3, 4], "unus": [1, 3, 4, 6], "lower_constraint": [1, 3, 4], "bound": [1, 3, 4, 6, 7, 9], "appli": [1, 3, 4, 6, 7, 9], "otherwis": [1, 2, 3, 4, 6], "list": [1, 2, 3, 4, 6, 7], "comma": [1, 3, 9], "separ": [1, 3, 4, 6, 9], "string": [1, 3, 4, 6, 9], "simpl": [1, 3, 4, 6, 7], "distanc": [1, 3, 4, 6, 7, 9], "horizont": [1, 3, 4, 7, 9], "mosaic": [1, 3, 4, 7, 9], "subsampl": [1, 3], "randomli": [1, 3, 6], "start": [1, 2, 3, 4, 5, 6, 7, 9], "includ": [1, 3, 4, 6, 7, 9], "both": [1, 3, 6, 9], "previou": [1, 3, 6], "self": [1, 3, 4], "seed": [1, 2, 3, 6], "allow": [1, 3, 4, 6, 7, 9], "slightli": [1, 3, 6], "consist": [1, 3, 6, 9], "pass": [1, 2, 3, 4, 6, 7], "through": [1, 3, 4, 6, 7, 9], "some": [1, 2, 3, 4, 6, 7, 9], "maximum": [1, 3, 6, 9], "onc": [1, 3, 4], "mani": [1, 3, 4, 6, 7, 9], "take": [1, 3, 4, 6, 7, 9], "column": [1, 2, 3, 4, 5, 6, 7], "unless": [1, 3, 4, 9], "case": [1, 2, 3, 4, 6, 9], "same": [1, 2, 3, 4, 6, 9], "roll": [1, 3, 4, 6, 9], "higher": [1, 3, 4, 6, 7, 9], "duplic": [1, 3, 6], "timestamp": [1, 3, 4, 6], "remov": [1, 3, 4, 6, 9], "try": [1, 2, 3, 6, 9], "np": [1, 3, 4, 6, 9], "sum": [1, 3, 6, 9], "bewar": [1, 3, 6, 9], "numer": [1, 3, 4, 6, 9], "aggreg": [1, 3, 6, 7, 9], 
"like": [1, 2, 3, 4, 6, 9], "work": [1, 2, 3, 4, 6, 9], "non": [1, 3, 4, 6, 9], "chang": [1, 3, 6, 9], "nan": [1, 3, 4, 6, 7, 9], "drop": [1, 3, 5, 6, 9], "thei": [1, 3, 4, 6, 7, 9], "than": [1, 3, 4, 6, 9], "percent": [1, 2, 3, 6, 9], "95": [1, 3, 6, 9], "here": [1, 3, 4, 6, 9], "would": [1, 3, 4, 9], "weight": [1, 3, 4, 6, 7, 9], "assign": [1, 3], "effect": [1, 3, 4, 6, 9], "rank": [1, 3, 4, 6], "n": [1, 3, 4, 5, 6, 9], "most": [1, 2, 3, 4, 6, 7, 9], "recent": [1, 2, 3, 4, 6, 9], "point": [1, 3, 4, 6, 7, 9], "sai": [1, 3, 7, 9], "sale": [1, 3, 6, 9], "current": [1, 2, 3, 4, 6, 7, 9], "unfinish": [1, 3], "month": [1, 3, 6, 7, 9], "occur": [1, 3, 6, 9], "after": [1, 3, 4, 6, 7, 9], "aggregr": [1, 3], "so": [1, 2, 3, 4, 6, 7, 9], "whatev": [1, 3, 4], "alia": [1, 3, 4, 6], "prob": [1, 3], "affect": [1, 3, 4, 6], "algorithim": [1, 3], "from": [1, 2, 3, 4, 5, 6, 7, 9], "probabl": [1, 2, 3, 4, 6, 7, 9], "note": [1, 2, 3, 4, 6], "doe": [1, 3, 4, 6, 9], "initi": [1, 3, 4, 6, 9], "alias": [1, 3, 4, 6], "superfast": [1, 3, 7, 9], "scalabl": [1, 3, 7], "should": [1, 3, 4, 6, 9], "fewer": [1, 2, 3, 9], "memori": [1, 3, 4, 6, 9], "issu": [1, 3, 4, 7, 9], "scale": [1, 3, 4, 6, 7, 9], "sequenti": [1, 3], "faster": [1, 2, 3, 4, 6, 7], "newli": [1, 3], "sporad": [1, 3], "util": [1, 3, 4, 6, 7, 9], "slower": [1, 3, 7, 9], "user": [1, 3, 4, 6, 7, 9], "mode": [1, 3, 4, 7], "capabl": [1, 3, 9], "gradient_boost": [1, 3], "neuralnet": [1, 3, 4], "regress": [1, 3, 4, 6], "cross": [1, 3, 4, 7], "perform": [1, 3, 6, 7, 9], "train": [1, 3, 4, 6, 7], "test": [1, 2, 3, 4, 6, 9], "split": [1, 3, 4, 6, 9], "confus": [1, 3, 4, 6, 7, 9], "eval": [1, 3], "segment": [1, 3, 6, 9], "total": [1, 3, 4, 6], "avail": [1, 3, 4, 6, 7], "out": [1, 3, 4, 7, 9], "50": [1, 3, 4], "top": [1, 3, 6, 7, 9], "Or": [1, 3], "tri": [1, 3, 7, 9], "99": [1, 3, 4], "100": [1, 3, 4, 6, 7, 9], "If": [1, 3, 4, 6, 7, 9], "addit": [1, 3, 4, 6, 9], "per_seri": [1, 3, 4], "ad": [1, 3, 4, 6, 7], "what": [1, 2, 3, 4], "famili": [1, 3, 4], "even": [1, 3, 4, 7, 9], "integ": [1, 3, 6], "recenc": [1, 3], "shorter": [1, 3, 6], "set": [1, 2, 3, 4, 6, 7, 9], "equal": [1, 3, 4, 6, 9], "size": [1, 3, 4, 6, 9], "poetic": [1, 3], "less": [1, 3, 4, 6, 9], "strategi": [1, 3], "other": [1, 2, 3, 4, 6, 7], "similar": [1, 3, 4, 6, 7, 9], "364": [1, 3, 6, 9], "year": [1, 3, 6], "immedi": [1, 3, 4, 6, 9], "automat": [1, 3, 6, 7, 9], "find": [1, 3, 4, 6, 7, 9], "section": [1, 3, 7, 9], "custom": [1, 3, 4, 6], "need": [1, 2, 3, 4, 6, 7], "validation_index": [1, 3, 9], "datetimeindex": [1, 3, 4, 6, 7, 9], "tail": [1, 3, 6, 9], "els": [1, 2, 3, 4, 6, 7, 9], "rais": [1, 3, 6], "error": [1, 3, 4, 6, 7, 9], "10": [1, 3, 4, 6, 9], "mandat": [1, 3], "unrecommend": [1, 3], "replac": [1, 3, 6], "lead": [1, 3, 7, 9], "zero": [1, 2, 3, 4, 6, 9], "collect": [1, 3, 4, 6, 7], "hasn": [1, 3], "yet": [1, 3, 4, 6, 9], "fill": [1, 3, 4, 6, 7], "leav": [1, 3, 9], "interpol": [1, 3, 4, 6], "recommend": [1, 3, 6, 7, 9], "median": [1, 3, 4, 6], "mai": [1, 2, 3, 4, 6, 7, 9], "assum": [1, 3, 6, 9], "whether": [1, 2, 3, 4, 6], "last": [1, 3, 4, 6, 9], "help": [1, 3, 4, 6, 7, 9], "make": [1, 2, 3, 4, 6, 7, 9], "robust": [1, 3, 4, 6], "introduc": [1, 3], "row": [1, 2, 3, 5, 6], "Will": [1, 3, 4, 6], "keyboardinterrupt": [1, 3, 7], "quit": [1, 3, 6, 9], "entir": [1, 3, 6, 7, 9], "program": [1, 3], "attempt": [1, 3, 6, 9], "conjunct": [1, 3], "result_fil": [1, 3, 7], "accident": [1, 3], "complet": [1, 3, 4, 6], "termin": [1, 3], "end_gener": [1, 3], "end": [1, 2, 3, 6], "skip": [1, 2, 3, 4, 6], 
"again": [1, 3, 9], "minut": [1, 3], "proceed": [1, 3], "check": [1, 3, 6, 7, 9], "offer": [1, 3, 9], "approxim": [1, 3, 6], "timeout": [1, 2, 3], "overal": [1, 3, 6, 9], "cap": [1, 3, 6], "per": [1, 3, 4, 6, 9], "file": [1, 3, 9], "path": [1, 3], "write": [1, 3, 4, 5], "disk": [1, 3], "debug": [1, 3], "crash": [1, 3, 4], "json": [1, 3, 4, 5, 9], "append": [1, 3], "gc": [1, 3], "won": [1, 2, 3, 4, 6, 7, 9], "differ": [1, 3, 4, 6, 7, 9], "reduc": [1, 2, 3, 4, 7, 9], "give": [1, 3, 6, 7], "core": [1, 3, 4, 6, 7], "parallel": [1, 3, 4, 7, 9], "process": [1, 3, 4, 6], "joblib": [1, 3, 4, 9], "context": [1, 3, 4], "manag": [1, 3, 4, 6, 9], "type": [1, 2, 3, 4, 6, 7, 9], "id": [1, 2, 3, 4, 6, 7], "future_regressor": [1, 3, 4, 6, 9], "n_split": [1, 3, 9], "creat": [1, 2, 3, 4, 6, 9], "backcast": [1, 3, 6], "back": [1, 3, 4, 6, 9], "OF": [1, 3], "sampl": [1, 2, 3, 4, 6, 7, 9], "often": [1, 3, 6, 7, 9], "As": [1, 3, 6, 9], "repres": [1, 3, 4, 6, 9], "real": [1, 3, 4, 9], "world": [1, 3, 4, 9], "There": [1, 3, 7, 9], "jump": [1, 3, 9], "chunk": [1, 3, 9], "arg": [1, 3, 4, 6], "except": [1, 3, 4], "piec": [1, 3, 9], "fastest": [1, 3], "observ": [1, 3, 4, 6], "level": [1, 3, 4, 6, 7, 9], "function": [1, 3, 4, 6, 7, 9], "standard": [1, 3, 4, 6], "access": [1, 3, 9], "isn": [1, 3, 4, 6, 9], "classic": [1, 3], "percentag": [1, 3, 9], "intend": [1, 3, 9], "quick": [1, 3, 9], "visual": [1, 3, 9], "statist": [1, 3, 4, 6, 7], "see": [1, 3, 4, 6, 7, 9], "target": [1, 3, 4, 6, 9], "waterfall_plot": [1, 3], "explain": [1, 3, 4], "caus": [1, 3, 4, 9], "measur": [1, 2, 3, 6, 9], "outcom": [1, 3, 4, 9], "shap": [1, 3], "coeffici": [1, 3], "correl": [1, 3], "show": [1, 3, 4, 9], "waterfal": [1, 3], "enabl": [1, 3], "expand": [1, 3, 4, 6], "rerun": [1, 3, 9], "filenam": [1, 3], "kwarg": [1, 2, 3, 4, 6], "ever": [1, 3, 6], "40": [1, 3, 6], "include_result": [1, 3], "unpack_ensembl": [1, 3], "min_metr": [1, 3], "max_metr": [1, 3], "reusabl": [1, 3], "csv": [1, 3, 5, 9], "slowest": [1, 3, 6, 9], "diagnost": [1, 3, 4], "compon": [1, 3, 4, 6], "larger": [1, 3, 4, 6, 9], "count": [1, 3, 4, 6], "lowest": [1, 3, 4, 6], "wai": [1, 3, 4, 6], "major": [1, 3, 9], "part": [1, 3, 4, 6, 9], "addon": [1, 3], "result_set": [1, 3], "fraction": [1, 3, 9], "date_col": [1, 3, 6, 7, 9], "value_col": [1, 3, 6, 7, 9], "id_col": [1, 3, 6, 7, 9], "grouping_id": [1, 3, 6], "suppli": [1, 3, 4, 6, 9], "three": [1, 3, 7, 9], "identifi": [1, 3, 4, 6, 9], "singl": [1, 3, 4, 6, 7, 9], "extern": [1, 3, 9], "colname1": [1, 3], "colname2": [1, 3], "increas": [1, 2, 3, 4, 7, 9], "left": [1, 3, 6, 9], "blank": [1, 3], "its": [1, 3, 4, 9], "tabl": [1, 3, 4], "pickl": [1, 3], "inform": [1, 3, 4, 6], "series_id": [1, 3, 4, 6, 7, 9], "group_id": [1, 3, 6], "map": [1, 3, 4], "x": [1, 3, 4, 5, 6, 9], "retain": [1, 3], "potenti": [1, 3, 6, 9], "futur": [1, 3, 4, 6, 9], "setup": [1, 3], "involv": [1, 3], "percent_best": [1, 3], "among": [1, 3, 9], "across": [1, 3, 4, 7, 9], "helper": [1, 3], "import_target": [1, 3], "enforce_model_list": [1, 3], "include_ensembl": [1, 3], "overrid": [1, 3], "exist": [1, 3, 4, 6, 9], "add": [1, 3, 4, 6, 9], "anoth": [1, 3, 6], "add_on": [1, 3], "include_horizont": [1, 3], "force_valid": [1, 3], "previous": [1, 3, 6], "must": [1, 2, 3, 4, 6, 9], "done": [1, 3, 7, 9], "befor": [1, 3, 4, 6, 7, 9], "locat": [1, 3], "alreadi": [1, 3, 4, 6, 7, 9], "keep": [1, 3, 4, 6], "init": [1, 3, 4], "anywai": [1, 3], "unpack": [1, 3], "kept": [1, 3], "overridden": [1, 3], "keep_ensembl": [1, 3, 5], "get": [1, 2, 3, 4, 6, 7, 9], "sent": [1, 3], 
"regardless": [1, 3, 4], "weird": [1, 3], "behavior": [1, 3, 6], "wtih": [1, 3], "In": [1, 3, 4, 6, 7, 9], "validate_import": [1, 3], "eras": [1, 3], "fail": [1, 3, 4, 9], "had": [1, 3, 4], "least": [1, 3, 6, 9], "success": [1, 3, 6], "funciton": [1, 3], "readabl": [1, 3, 9], "start_dat": [1, 2, 3, 4, 7, 9], "alpha": [1, 3, 4, 6], "25": [1, 3, 4, 6], "facecolor": [1, 3, 4], "black": [1, 3, 4], "loc": [1, 3, 4], "accur": [1, 3, 7, 9], "gain": [1, 3, 6, 9], "improv": [1, 3, 6, 7, 9], "doesn": [1, 3, 6, 9], "account": [1, 3, 6], "benefit": [1, 3, 9], "seen": [1, 3, 9], "max_seri": [1, 3], "chosen": [1, 3, 7, 9], "common": [1, 3, 6, 9], "model_id": [1, 3, 4], "color_list": [1, 3], "top_n": [1, 3], "frequent": [1, 3], "factor": [1, 3, 4], "nest": [1, 3, 9], "well": [1, 3, 4, 6, 7, 9], "do": [1, 3, 4, 6, 9], "slow": [1, 2, 3, 4, 6, 9], "captur": [1, 3, 4, 9], "hex": [1, 3], "color": [1, 3, 4], "bar": [1, 3, 6], "col": [1, 3, 4, 6], "The": [1, 3, 4, 6, 7, 9], "highli": [1, 3, 4, 9], "those": [1, 3, 4, 6, 9], "mostli": [1, 3, 4, 6, 9], "unscal": [1, 3, 9], "ones": [1, 3, 9], "max_name_char": [1, 3], "ff9912": [1, 3], "figsiz": [1, 3, 4], "12": [1, 3, 4, 5, 6, 7, 9], "4": [1, 3, 4, 5, 6, 7, 9], "kind": [1, 3, 6, 9], "upper_clip": [1, 3], "1000": [1, 3, 4, 6, 9], "avg": [1, 3, 4, 6], "sort": [1, 3, 6], "chop": [1, 3], "tupl": [1, 2, 3, 4, 6], "axi": [1, 3, 4, 6, 9], "pie": [1, 3, 9], "prevent": [1, 3, 4, 9], "unnecessari": [1, 3], "distort": [1, 3], "To": [1, 3, 9], "compat": [1, 3], "necessarili": [1, 3, 9], "maintain": [1, 3, 6, 7, 9], "prefer": [1, 3], "failur": [1, 2, 3], "rate": [1, 3, 4], "ignor": [1, 2, 3, 4, 6], "due": [1, 2, 3, 6, 9], "df_wide": [1, 3, 4, 6, 9], "end_dat": [1, 3], "compare_horizont": [1, 3], "include_bound": [1, 3, 4], "35": [1, 3, 9], "start_color": [1, 3], "darkr": [1, 3], "end_color": [1, 3], "a2ad9c": [1, 3], "reforecast": [1, 3], "validation_forecast": [1, 3], "cach": [1, 3], "store": [1, 3, 4, 6, 9], "refer": [1, 3, 9], "best_model_id": [1, 3], "overlap": [1, 3, 9], "graph": [1, 3], "reader": [1, 3], "compar": [1, 3, 4, 6, 9], "place": [1, 3, 6, 9], "begin": [1, 3, 4, 6, 9], "either": [1, 3, 4, 6, 7, 9], "worst": [1, 3], "versu": [1, 3], "vline": [1, 3, 4], "val": [1, 3, 4], "marker": [1, 3], "just_point_forecast": [1, 3, 4], "fail_on_forecast_nan": [1, 3], "date": [1, 2, 3, 4, 6, 7, 9], "update_fit": [1, 3], "underli": [1, 3, 4, 9], "retrain": [1, 3], "interv": [1, 3, 4, 6], "design": [1, 3, 6, 7, 9], "high": [1, 3, 6, 7, 9], "suffici": [1, 3, 9], "without": [1, 3, 6, 7, 9], "ahead": [1, 3, 4, 6, 9], "__init__": [1, 3, 4], "prediction_object": [1, 3], "Not": [1, 2, 3, 4, 6], "implement": [1, 3, 4, 6, 9], "present": [1, 2, 3, 4, 6, 9], "strongli": [1, 3], "ha": [1, 3, 4, 6, 7, 9], "metadata": [1, 3, 4], "conveni": [1, 3, 6, 9], "id_nam": [1, 3, 4], "seriesid": [1, 2, 3, 4], "value_nam": [1, 3, 4], "interval_nam": [1, 3, 4], "predictioninterv": [1, 3, 4], "preprocessing_transform": [1, 4, 5], "basescal": [1, 4], "past_impacts_intervent": [1, 4], "common_fouri": [1, 4, 6], "ar_lag": [1, 4], "ar_interaction_season": [1, 4], "anomaly_detector_param": [1, 3, 4, 6], "anomaly_intervent": [1, 4], "holiday_detector_param": [1, 4, 6], "holiday_countries_us": [1, 4, 6], "multivariate_featur": [1, 4], "multivariate_transform": [1, 4], "regressor_transform": [1, 4], "regressors_us": [1, 4], "linear_model": [1, 4], "randomwalk_n": [1, 4], "trend_window": [1, 4], "30": [1, 3, 4, 6, 7], "trend_standin": [1, 4], "trend_anomaly_detector_param": [1, 4], "trend_transform": [1, 4], 
"trend_model": [1, 4], "modelparamet": [1, 3, 4, 5, 9], "trend_phi": [1, 4], "max_colinear": [1, 4], "998": [1, 4], "max_multicolinear": [1, 4], "decomposit": [1, 4, 6], "advanc": [1, 3, 4], "trend": [1, 4, 6], "preprocess": [1, 4, 6, 7, 9], "tunc": [1, 4], "etiam": [1, 4], "fati": [1, 4], "aperit": [1, 4], "futuri": [1, 4], "ora": [1, 4], "dei": [1, 4], "iussu": [1, 4], "umquam": [1, 4], "credita": [1, 4], "teucri": [1, 4], "Nos": [1, 4], "delubra": [1, 4], "deum": [1, 4], "miseri": [1, 4], "quibu": [1, 4], "ultimu": [1, 4], "esset": [1, 4], "ill": [1, 4], "di": [1, 4], "festa": [1, 4], "velamu": [1, 4], "frond": [1, 4], "urbem": [1, 4], "aeneid": [1, 4], "246": [1, 4], "249": [1, 4], "impact": [1, 3, 4, 6, 9], "uniqu": [1, 3, 4, 6], "past": [1, 4, 6, 9], "outsid": [1, 4, 9], "unforecast": [1, 4, 6], "accordingli": [1, 4, 9], "origin": [1, 3, 4, 6, 9], "product": [1, 4, 6, 7, 9], "goal": [1, 4], "temporari": [1, 4], "whose": [1, 4, 6], "rel": [1, 3, 4, 6, 7, 9], "known": [1, 3, 4, 7, 9], "essenti": [1, 3, 4, 9], "estim": [1, 4, 6, 9], "raw": [1, 4, 6], "presenc": [1, 4], "warn": [1, 3, 4, 6], "about": [1, 3, 4, 6], "remove_excess_anomali": [1, 4, 6], "detector": [1, 3, 4, 6], "reli": [1, 4, 9], "alwai": [1, 3, 4, 6, 9], "element": [1, 2, 4, 6], "histori": [1, 2, 3, 4, 6], "intern": [1, 3, 4, 6, 7, 9], "attribut": [1, 3, 4, 9], "figur": [1, 3, 4], "expect": [1, 3, 4, 6, 7, 9], "latest": [1, 4], "code": [1, 3, 4, 5, 6, 7], "dai": [1, 2, 3, 4, 6, 9], "7": [1, 3, 4, 6, 9], "weekli": [1, 2, 4], "For": [1, 2, 3, 4, 7, 9], "slope": [1, 4], "analysi": [1, 4, 6], "posit": [1, 3, 4, 6, 9], "sign": [1, 4], "exactli": [1, 4, 6], "regression_typ": [1, 4, 5, 6, 9], "pattern": [1, 3, 4, 6, 9], "inaccur": [1, 4], "flag": [1, 3, 4, 6, 9], "keep_col": [1, 4], "keep_cols_idx": [1, 4], "dtindex": [1, 4, 6], "regressor_per_seri": [1, 4], "flag_regressor": [1, 4], "categorical_group": [1, 4], "past_impact": [1, 4], "future_impact": [1, 4], "regressor_forecast_model": [1, 4], "regressor_forecast_model_param": [1, 4], "regressor_forecast_transform": [1, 4], "include_histori": [1, 4], "tune": [1, 4], "16": [1, 3, 4], "anomaly_color": [1, 4], "darkslateblu": [1, 4], "holiday_color": [1, 4], "darkgreen": [1, 4], "trend_anomaly_color": [1, 4], "slategrai": [1, 4], "point_siz": [1, 4], "know": [1, 4, 9], "d4f74f": [1, 4], "82ab5a": [1, 4], "ff6c05": [1, 4], "c12600": [1, 4], "new_df": [1, 4], "include_organ": [1, 4], "step": [1, 3, 4, 6, 9], "equival": [1, 4, 6, 9], "include_impact": [1, 4], "multipl": [1, 3, 4, 6, 7, 9], "trend_residu": [1, 4], "trans_method": [1, 4, 6, 9], "featur": [1, 4, 6, 7, 9], "space": [1, 2, 4, 6, 9], "intervention_d": [1, 4], "df_train": [1, 3, 4, 6, 9], "lower_limit": [1, 3, 6, 9], "upper_limit": [1, 3, 6, 9], "univariatemotif": [1, 3], "model_param_dict": [1, 3, 9], "distance_metr": [1, 3, 4, 6], "euclidean": [1, 3], "k": [1, 3, 4, 6], "pointed_method": [1, 3], "return_result_window": [1, 3, 4], "window": [1, 3, 4, 5, 6, 9], "model_transform_dict": [1, 3, 9], "pchip": [1, 3], "fix": [1, 3, 6, 9], "maxabsscal": [1, 3, 6], "model_forecast_kwarg": [1, 3], "321": [1, 3, 9], "future_regressor_train": [1, 3, 4, 9], "future_regressor_forecast": [1, 3, 4, 9], "close": [1, 3, 4, 6, 7, 9], "exceed": [1, 3, 6, 9], "four": [1, 3, 9], "calcul": [1, 3, 4, 6, 9], "direct": [1, 3, 4, 6, 9], "edg": [1, 2, 3, 6, 9], "y": [1, 2, 3, 4, 6, 9], "z": [1, 3, 4, 9], "primarili": [1, 3, 9], "num_seri": [1, 3, 4, 6, 9], "middl": [1, 3, 6], "too": [1, 2, 3, 6, 9], "flip": [1, 3], "ab": [1, 3, 4, 6], "l": [1, 3], 
"timestep": [1, 3, 6, 9], "two": [1, 3, 6, 9], "neighbor": [1, 3, 4], "resolut": [1, 3], "greater": [1, 3, 6, 9], "class_method": [1, 3], "standalon": [1, 3], "item": [1, 3, 6], "generaet_result_window": [1, 3], "fit_forecast": [1, 3], "result_window": [1, 3, 4], "forecast_df": [1, 3], "up_forecast_df": [1, 3], "low_forecast_df": [1, 3], "lower_limit_2d": [1, 3, 9], "upper_limit_2d": [1, 3, 9], "upper_risk_arrai": [1, 3, 9], "lower_risk_arrai": [1, 3, 9], "event_risk": [1, 3], "multivariatemotif": [1, 3, 9], "autots_kwarg": [1, 3], "shortcut": [1, 3], "suggest": [1, 3, 9], "normal": [1, 3, 4, 6], "model_method": [1, 3], "wa": [1, 3, 4, 6, 9], "num_sampl": [1, 3], "column_idx": [1, 3], "grai": [1, 3], "838996": [1, 3], "c0c0c0": [1, 3], "dcdcdc": [1, 3], "a9a9a9": [1, 3], "808080": [1, 3], "989898": [1, 3], "757575": [1, 3], "696969": [1, 3], "c9c0bb": [1, 3], "c8c8c8": [1, 3], "323232": [1, 3], "e5e4e2": [1, 3], "778899": [1, 3], "4f666a": [1, 3], "848482": [1, 3], "414a4c": [1, 3], "8a7f80": [1, 3], "c4c3d0": [1, 3], "bebeb": [1, 3], "dbd7d2": [1, 3], "up_low_color": [1, 3], "ff4500": [1, 3], "ff5349": [1, 3], "bar_color": [1, 3], "6495ed": [1, 3], "bar_ylim": [1, 3], "8": [1, 3, 4, 6, 9], "ylim": [1, 3], "barplot": [1, 3], "df_test": [1, 3, 9], "actuals_color": [1, 3], "00bfff": [1, 3], "v": [1, 3], "dt": [1, 2, 3, 6], "line": [1, 3, 4, 9], "manual": [1, 3, 9], "appropri": [1, 3, 4, 6, 7, 9], "assess": [1, 3, 9], "target_shap": [1, 3], "handl": [1, 3, 4, 9], "overview": [1, 3], "defin": [1, 3, 4, 6, 7, 9], "group": [1, 3, 4, 6], "reconcili": [1, 6, 9], "2020": [1, 3, 4, 6, 9], "mathemat": [1, 6], "chronolog": [1, 6], "fulli": [1, 4, 6], "under": [1, 6, 9], "condit": [1, 6], "primari": [1, 6], "intent": [1, 6], "invers": [1, 4, 6, 9], "na": [1, 4, 6], "filter": [1, 3, 4, 6, 9], "cannot": [1, 6, 9], "rollingmean": [1, 6], "pctchang": [1, 6], "cumsum": [1, 6], "ffill": [1, 5, 6], "forward": [1, 3, 6, 9], "until": [1, 6, 9], "reach": [1, 6], "miss": [1, 6, 9], "averag": [1, 3, 4, 6, 9], "rolling_mean_24": [1, 5, 6], "24": [1, 4, 6, 9], "ffill_mean_bias": [1, 6], "fake_d": [1, 5, 6], "shift": [1, 4, 6], "thu": [1, 3, 6, 9], "incorrect": [1, 6], "iterativeimput": [1, 6, 9], "iter": [1, 6], "minmaxscal": [1, 6], "powertransform": [1, 6], "quantiletransform": [1, 6], "standardscal": [1, 6], "robustscal": [1, 6], "worth": [1, 6], "n_compon": [1, 4, 6], "receiv": [1, 6, 7], "second_transform": [1, 6], "fixedrollingmean": [1, 6], "disabl": [1, 6], "rollingmean10": [1, 6], "rollingmean100thn": [1, 6], "len": [1, 3, 4, 6], "minimum": [1, 4, 6, 9], "convert": [1, 4, 6, 9], "pct_chang": [1, 6], "lot": [1, 4, 6, 9], "sin": [1, 6], "log": [1, 3, 6, 9], "necessari": [1, 4, 6, 7, 9], "lag": [1, 4, 6], "seasonaldifferencemean": [1, 6], "seasonaldifference7": [1, 6], "28": [1, 3, 4, 6], "parameter": [1, 6], "center": [1, 6], "around": [1, 4, 6], "record": [1, 2, 3, 5, 6, 7], "bin": [1, 3, 6], "move": [1, 3, 4, 6], "lose": [1, 6], "smoother": [1, 6], "scipi": [1, 4, 6, 9], "hp_filter": [1, 6], "decompos": [1, 6], "exponenti": [1, 4, 6, 9], "joint": [1, 6], "differenc": [1, 4, 6], "vector": [1, 3, 4, 6], "box": [1, 6], "tiao": [1, 6], "align": [1, 6], "tailor": [1, 6], "wish": [1, 6], "good": [1, 6, 9], "cheer": [1, 6], "local": [1, 4, 6], "state": [1, 4, 6], "clip": [1, 6], "std": [1, 6], "awai": [1, 6], "compens": [1, 6], "croston": [1, 6], "inspir": [1, 6, 9], "magnitud": [1, 2, 4, 6, 9], "occurr": [1, 6, 9], "intermitt": [1, 6], "fourier": [1, 6], "harmon": [1, 6], "reintroduc": [1, 6], "within": [1, 6], 
"diff": [1, 3, 6], "overwrit": [1, 6, 9], "baxter": [1, 6], "king": [1, 4, 6], "bandpass": [1, 6], "poisson": [1, 6], "applic": [1, 6], "techniqu": [1, 6], "directli": [1, 6, 7, 9], "fillzero": [1, 6], "undo": [1, 6], "mad": [1, 6], "classmethod": [1, 6], "retriev": [1, 2, 6], "legaci": [1, 6], "min_occurr": [1, 3, 6], "splash_threshold": [1, 3, 6], "65": [1, 3, 6], "use_dayofmonth_holidai": [1, 3, 6], "use_wkdom_holidai": [1, 3, 6], "use_wkdeom_holidai": [1, 3, 6], "use_lunar_holidai": [1, 3, 6], "use_lunar_weekdai": [1, 3, 6], "use_islamic_holidai": [1, 3, 6], "use_hebrew_holidai": [1, 3, 6], "holiday_impact": [1, 3, 6], "popul": [1, 3, 6], "day_holidai": [1, 3, 6], "long": [1, 2, 3, 4, 6, 7, 9], "join": [1, 3, 6], "rather": [1, 3, 6, 9], "format": [1, 2, 3, 4, 6, 7, 9], "series_flag": [1, 3, 6], "contan": [1, 3, 6], "holiday_nam": [1, 3, 6], "anomaly_scor": [1, 3, 6], "include_anomali": [1, 3], "03": [1, 4, 6], "04": [1, 6], "02": [1, 6], "06": [1, 4, 6], "002": [1, 6], "005": [1, 6], "na_prob_dict": [1, 6], "025": [1, 6], "knnimput": [1, 6], "iterativeimputerextratre": [1, 6], "0001": [1, 4, 6], "seasonalitymotifimputerlinmix": [1, 5, 6], "seasonalitymotifimputer1k": [1, 6], "datepartregressionimput": [1, 6], "fast_param": [1, 6], "superfast_param": [1, 6], "traditional_ord": [1, 6], "transformer_min_depth": [1, 6], "allow_non": [1, 6], "no_nan_fil": [1, 6], "choosen": [1, 6, 9], "signal": [1, 6, 9], "transformt": [1, 8], "summar": [1, 4, 6, 9], "backfil": [1, 6], "bfill": [1, 6], "head": [1, 3, 5, 6, 9], "regressor_train": [1, 6], "iloc": [1, 6, 9], "thing": [1, 4, 6, 9], "feature_agglomer": [1, 6], "gaussian_random_project": [1, 6], "deal": [1, 6, 9], "prefil": [1, 6], "elsewher": [1, 6], "regressor_forecast": [1, 6], "simple_binar": [1, 6], "encode_holiday_typ": [1, 6], "distribut": [1, 2, 3, 6, 7], "gamma": [1, 2, 4, 6], "univari": [1, 4, 6, 9], "holiday_regr_styl": [1, 6], "preprocessing_param": [1, 6], "datepart": [1, 4, 6], "been": [1, 3, 6, 9], "peopl": [1, 6], "NOT": [1, 3, 4, 6, 9], "machin": [1, 6, 7], "elabor": [1, 6], "build": [1, 6, 9], "And": [1, 4, 6, 7], "post": [1, 6, 7, 9], "hoc": [1, 6], "want": [1, 6, 9], "easili": [1, 6, 9], "categor": [1, 2, 6], "discard": [1, 6], "annoi": [1, 6], "countri": [1, 6], "pull": [1, 2, 4, 6], "req": [1, 3, 6], "pkg": [1, 6], "subdiv": [1, 6], "subdivis": [1, 6], "func": [1, 6], "resampl": [1, 6], "creation": [1, 4, 6], "swappabl": [1, 6], "infer_freq": [1, 6], "date_start": [1, 2], "date_end": [1, 2], "artif": [1, 2, 9], "wiki": [1, 2, 3], "germani": [1, 2], "thanksgiv": [1, 2, 9], "microsoft": [1, 2], "procter_": [1, 2], "26_gambl": [1, 2], "youtub": [1, 2], "united_st": [1, 2], "elizabeth_ii": [1, 2], "william_shakespear": [1, 2], "cleopatra": [1, 2], "george_washington": [1, 2], "chinese_new_year": [1, 2], "standard_devi": [1, 2, 9], "christma": [1, 2, 9], "list_of_highest": [1, 2], "grossing_film": [1, 2], "list_of_countries_that_have_gained_independence_from_the_united_kingdom": [1, 2], "periodic_t": [1, 2], "sourc": [1, 2, 6, 9], "wikimedia": [1, 2], "foundat": [1, 2], "traffic": [1, 2, 9], "mn": [1, 2], "dot": [1, 2], "via": [1, 2], "uci": [1, 2], "repositori": [1, 2], "2021": [1, 2, 3, 4, 9], "introduce_nan": [1, 2], "introduce_random": [1, 2], "123": [1, 2, 3, 6], "null": [1, 2], "observation_start": [1, 2], "observation_end": [1, 2], "fred_kei": [1, 2], "fred_seri": [1, 2, 9], "dgs10": [1, 2], "t5yie": [1, 2], "sp500": [1, 2], "dcoilwtico": [1, 2], "dexuseu": [1, 2], "wpu0911": [1, 2], "ticker": [1, 2, 9], "msft": [1, 2], 
"trends_list": [1, 2, 9], "cycl": [1, 2, 4], "trends_geo": [1, 2], "weather_data_typ": [1, 2], "awnd": [1, 2], "wsf2": [1, 2], "tavg": [1, 2], "weather_st": [1, 2, 9], "usw00094846": [1, 2], "usw00014925": [1, 2], "weather_year": [1, 2], "london_air_st": [1, 2, 9], "ct3": [1, 2], "sk8": [1, 2], "london_air_speci": [1, 2], "pm25": [1, 2], "london_air_dai": [1, 2], "180": [1, 2], "earthquake_dai": [1, 2], "earthquake_min_magnitud": [1, 2, 9], "gsa_kei": [1, 2], "gov_domain_list": [1, 2, 9], "nasa": [1, 2], "gov": [1, 2], "gov_domain_limit": [1, 2], "600": [1, 2], "wikipedia_pag": [1, 2, 9], "microsoft_offic": [1, 2], "wiki_languag": [1, 2], "en": [1, 2, 3, 6, 9], "weather_event_typ": [1, 2, 9], "28z": [1, 2], "29": [1, 2], "winter": [1, 2, 9], "weather": [1, 2, 9], "storm": [1, 2], "caiso_queri": [1, 2], "ene_slr": [1, 2], "300": [1, 2, 4], "sleep_second": [1, 2, 9], "activ": [1, 2, 4, 9], "internet": [1, 2, 9], "connect": [1, 2, 9], "respect": [1, 2, 6, 9], "free": [1, 2, 7], "heavili": [1, 2, 4, 6, 9], "exclud": [1, 2, 6], "d": [1, 2, 3, 4, 5, 6, 9], "earliest": [1, 2], "get_seri": [1, 2], "yfinanc": [1, 2, 9], "api": [1, 2, 7, 9], "restrict": [1, 2, 4], "stlouisf": [1, 2], "org": [1, 2, 3, 4, 6, 9], "doc": [1, 2, 4, 6, 7, 9], "api_kei": [1, 2], "html": [1, 2, 4, 6, 9], "fredapi": [1, 2, 9], "stock": [1, 2, 7, 9], "pypi": [1, 2], "keyword": [1, 2], "pytrend": [1, 2, 9], "ncei": [1, 2], "noaa": [1, 2], "ghcn": [1, 2], "prcp": [1, 2], "snow": [1, 2], "tmax": [1, 2], "tmin": [1, 2], "wsf1": [1, 2], "wsf5": [1, 2], "wsfg": [1, 2], "station": [1, 2], "londonair": [1, 2], "uk": [1, 2], "london_speci": [1, 2], "london": [1, 2], "air": [1, 2], "smallest": [1, 2, 3], "earthquak": [1, 2], "usg": [1, 2], "open": [1, 2, 5, 9], "gsa": [1, 2], "dap": [1, 2], "dist": [1, 2, 4, 9], "govern": [1, 2], "domain": [1, 2], "veri": [1, 2, 4, 6, 9], "usp": [1, 2], "ncbi": [1, 2], "nlm": [1, 2], "nih": [1, 2], "cdc": [1, 2], "ir": [1, 2], "usajob": [1, 2], "studentaid": [1, 2], "usembassi": [1, 2], "tsunami": [1, 2], "smaller": [1, 2, 3, 4, 6, 9], "10000": [1, 2], "wikipedia": [1, 2, 3], "encod": [1, 2, 3, 9], "underscor": [1, 2], "sever": [1, 2, 7, 9], "www1": [1, 2], "ncdc": [1, 2], "pub": [1, 2, 6], "swdi": [1, 2], "stormev": [1, 2], "csvfile": [1, 2], "pdf": [1, 2, 6], "hardcod": [1, 2], "queri": [1, 2, 6], "server": [1, 2], "download": [1, 2, 9], "feder": [1, 2], "reserv": [1, 2], "loui": [1, 2], "econom": [1, 2], "indic": [1, 2, 3, 6], "week": [1, 2], "petroleum": [1, 2], "industri": [1, 2], "eia": [1, 2], "annual": [1, 2], "cleaner": [1, 6], "pivot_t": [1, 6], "determin": [1, 4, 6], "provid": [1, 3, 4, 6, 9], "starttimestamp": [1, 3], "template_col": [1, 3], "transformationparamet": [1, 3, 4, 5], "horizontal_subset": [1, 3], "return_model": [1, 3], "model_count": [1, 3], "albeit": [1, 3, 9], "she": [1, 3], "turn": [1, 3], "me": [1, 3], "newt": [1, 3], "got": [1, 3, 4], "width": [1, 3, 6], "ask": [1, 3], "few": [1, 3], "cpu": [1, 3, 4, 6, 7, 9], "meant": [1, 3], "tranform": [1, 3], "instal": [2, 4, 6], "fredkei": 2, "seriesnamedict": 2, "simplest": [2, 9], "sure": [2, 6, 7, 9], "request": [2, 6, 7, 9], "pair": 2, "seriesnam": 2, "anyth": [2, 6], "second": [2, 4, 6, 9], "sleep": 2, "chanc": 2, "mon": [3, 6], "jul": [3, 6], "18": [3, 4], "19": [3, 4], "55": 3, "author": [3, 4, 6], "colin": [3, 4, 6, 9], "mid": [3, 6], "transformation_dict": [3, 4], "model_str": 3, "parameter_dict": 3, "feed": 3, "pipelin": 3, "submitted_paramet": 3, "sort_column": 3, "sort_ascend": 3, "max_result": 3, "recursive_count": 3, 
"old": [3, 9], "No": [3, 4, 6, 7], "mate": 3, "sanderson": 3, "submitted_paramt": 3, "hyperparamet": 3, "per_timestamp_smap": 3, "per_series_metr": [3, 4], "per_series_ma": 3, "per_series_rms": 3, "per_series_mad": 3, "per_series_contour": 3, "per_series_spl": 3, "per_series_ml": 3, "per_series_iml": 3, "per_series_max": 3, "per_series_oda": 3, "per_series_mqa": 3, "per_series_dwa": 3, "per_series_ewma": 3, "per_series_uwms": 3, "per_series_smooth": 3, "per_series_m": 3, "per_series_mats": 3, "per_series_wasserstein": 3, "per_series_dwd": 3, "correspond": [3, 4, 6], "order": [3, 4, 6, 9], "another_ev": 3, "merg": 3, "onto": 3, "validation_round": 3, "current_gener": 3, "traceback": 3, "mosaic_us": 3, "additional_msg": 3, "who": [3, 4], "tim": 3, "hyperparamt": 3, "prepar": 3, "info": [3, 6], "print": [3, 5, 6, 7, 9], "statement": 3, "keyboard": 3, "interrupt": [3, 7], "caught": [3, 4], "break": 3, "tracebook": 3, "represent": 3, "everi": [3, 4, 6, 9], "existing_templ": 3, "new_poss": 3, "selection_col": 3, "new_possibl": 3, "namess": 3, "judg": [3, 9], "hash": 3, "b": [3, 6], "recombin": 3, "ident": [3, 4], "made": [3, 4, 6, 9], "mle": [3, 9], "mage": [3, 9], "bigger": 3, "results_object": 3, "total_valid": 3, "models_to_us": [3, 4], "model_prob": 3, "counter": [3, 6], "n_model": 3, "keyword_format": 3, "preceed": [3, 9], "dict_arrai": 3, "recurs": [3, 5, 9], "unnest": 3, "validation_result": [3, 5, 7], "groupby_col": 3, "all_result": 3, "corr": 3, "onehot": 3, "poli": 3, "100000": [3, 6], "dimens": [3, 4, 6, 9], "fake": [3, 6], "purpos": [3, 6, 9], "fri": [3, 6], "nov": 3, "13": [3, 4, 9], "45": [3, 4], "base_models_onli": 3, "tensorflow": [3, 4, 9], "jan": [3, 4], "27": [3, 6], "36": [3, 4], "lag_1": [3, 4, 6], "lag_2": [3, 4], "nearest": [3, 4, 5, 6], "ndim": 3, "f": [3, 5, 9], "ae": 3, "precalcul": 3, "arr": [3, 6], "loss": [3, 4, 9], "chi": 3, "squar": [3, 6, 9], "histogram": 3, "unchang": 3, "flat": [3, 9], "concern": [3, 9], "bluff": 3, "river": 3, "elev": 3, "equiavel": 3, "last_of_arrai": [3, 4], "direciton": 3, "growth": [3, 4], "declin": 3, "scaler": [3, 4], "cumsum_a": [3, 4], "diff_a": [3, 4], "extra": [3, 9], "precomput": [3, 4], "effici": [3, 4, 6, 9], "loop": [3, 4], "worri": 3, "them": [3, 9], "detail": [3, 4, 6, 7, 9], "bandwidth": 3, "kl": 3, "diverg": 3, "p": [3, 4, 5, 6, 9], "q": [3, 4, 5, 6, 9], "epsilon": [3, 4, 6], "1e": [3, 6], "perecentag": 3, "progress": [3, 7, 9], "along": [3, 9], "differenti": [3, 9], "sole": 3, "optim": [3, 4, 7, 9], "unanchor": 3, "1d": [3, 6], "nan_flag": [3, 6], "baselin": 3, "naiv": [3, 4, 7, 9], "poorli": [3, 6, 9], "85": 3, "largest": [3, 9], "full_error": 3, "le": 3, "y_pred": [3, 4], "y_true": [3, 4], "penal": [3, 9], "underestim": [3, 9], "overestim": [3, 9], "avoid": [3, 6, 9], "divid": 3, "aren": [3, 4], "down": [3, 6, 9], "bad": [3, 9], "er": 3, "push": 3, "exclus": 3, "sqe": 3, "catlin": [3, 6, 7], "syllepsi": 3, "live": [3, 7], "22": [3, 4, 6], "categori": 3, "OR": 3, "being": [3, 4, 6, 7, 9], "pinbal": [3, 9], "gradient": 3, "volatil": [3, 9], "precomputed_spl": 3, "unmatch": 3, "poor": [3, 9], "penalty_threshold": 3, "view": [3, 6, 9], "2d": [3, 6], "strength": [3, 6], "earth": 3, "perhap": [3, 6], "relev": [3, 6], "unsort": 3, "extract": [3, 4], "py": [3, 7, 9], "amfm": 3, "possibli": [3, 4, 6], "modif": 3, "structur": [3, 4, 6], "11": [3, 9], "2023": [3, 4, 6, 7], "validation_param": 3, "etc": [3, 6, 9], "clean": [3, 6, 9], "beyond": [3, 4, 6], "constant": [4, 6], "vol": 4, "garch": 4, "o": [4, 6], "power": [4, 9], "rescal": 
4, "maxit": 4, "200": [4, 6], "linux": [4, 6, 9], "distro": 4, "confid": [4, 6], "multiprocess": [4, 6, 9], "uniniti": 4, "fit_runtim": 4, "timedelta": 4, "hold": 4, "timeseri": [4, 6, 9], "last_dat": 4, "forecast_index": 4, "forecast_column": 4, "predict_runtim": 4, "transformation_runtim": 4, "per_timestamp": 4, "avg_metr": 4, "avg_metrics_weight": 4, "form": [4, 6, 9], "twice": [4, 6], "series_weight": 4, "per_timestamp_error": 4, "evalut": 4, "against": 4, "suboptim": 4, "update_datetime_nam": 4, "datetime_column": 4, "tell": [4, 9], "remove_zero": [4, 9], "right": [4, 6, 7], "title_substr": 4, "ax": [4, 6], "matplotlib": [4, 9], "dash": 4, "vertic": 4, "intens": 4, "shade": 4, "region": [4, 6], "xlim_right": 4, "grid": [4, 7], "group_col": 4, "y_col": 4, "totalruntimesecond": 4, "train_last_d": 4, "cmap_nam": 4, "gist_rainbow": 4, "runtimes_data": 4, "xlim": 4, "title_suffix": 4, "point_method": [4, 5], "canberra": [4, 6], "sample_fract": [4, 6], "adapt": 4, "struggl": 4, "short": 4, "max_window": [4, 6], "weighted_mean": 4, "midhing": [4, 6], "cdist": [4, 9], "closest": [4, 6, 9], "consid": [4, 9], "n_harmon": [4, 6], "state_transit": [4, 6], "process_nois": [4, 6], "observation_model": [4, 6], "observation_nois": [4, 6], "em_it": [4, 6], "undefin": 4, "solv": [4, 6, 9], "kalman": [4, 6, 9], "comparison_transform": 4, "combination_transform": 4, "comparison": [4, 6], "mse": [4, 9], "minkowski": 4, "5000": [4, 6], "tradeoff": [4, 6], "own": [4, 9], "gather": 4, "phrase_len": 4, "magnitude_pct_change_sign": 4, "share": 4, "l2": 4, "max_motif": 4, "recency_weight": 4, "cutoff_threshold": 4, "cutoff_minimum": 4, "dark": [4, 6], "magic": [4, 6], "evil": 4, "mastermind": 4, "project": [4, 7], "knn": 4, "interest": [4, 9], "togeth": [4, 6, 9], "pairwise_dist": 4, "amount": [4, 6, 9], "choos": [4, 9], "sign_biased_mean": 4, "ridge_param": 4, "5e": 4, "warmup_pt": [4, 6], "seed_pt": 4, "seed_weight": 4, "batch_siz": 4, "batch_method": 4, "input_ord": 4, "nonlinear": 4, "variabl": [4, 6, 9], "autoregress": 4, "next": [4, 6, 9], "reservoir": 4, "quantinfo": 4, "ng": 4, "rc": 4, "paper": [4, 7], "gauthier": 4, "j": [4, 6], "bollt": 4, "e": [4, 6], "griffith": 4, "al": 4, "nat": 4, "commun": [4, 9], "5564": 4, "doi": 4, "1038": 4, "s41467": 4, "021": 4, "25801": 4, "pointless": 4, "lambda": [4, 6], "ridg": 4, "realiti": 4, "warmup": 4, "fine": [4, 9], "linearli": 4, "batch": [4, 7], "lastvalu": [4, 6], "concerto": 4, "g": [4, 6], "minor": 4, "op": 4, "rv": 4, "315": 4, "produc": [4, 9], "nan_euclidean": [4, 6, 9], "include_differenc": [4, 6], "stride_s": [4, 6], "covari": [4, 6], "ratio": 4, "num_regressor_seri": 4, "ob": [4, 6], "xa": 4, "xb": 4, "r_arr": 4, "inner": 4, "hungri": 4, "big": 4, "linpack": [4, 9], "seem": [4, 9], "sensit": [4, 6, 9], "address": 4, "tue": 4, "sep": 4, "57": 4, "assist": 4, "crgillespie22": 4, "gaussian_prior_mean": 4, "wishart_prior_scal": 4, "wishart_dof_excess": 4, "bayesian": [4, 6], "conjug": 4, "prior": [4, 6], "encourag": [4, 9], "coef": 4, "regular": [4, 9], "peak": 4, "matrix": [4, 6], "varianc": 4, "nois": [4, 6], "while": [4, 7, 9], "return_std": 4, "n_sampl": 4, "in_d": 4, "prefix": 4, "regr_": 4, "15000": 4, "l1": 4, "cost": 4, "lin": 4, "reg": 4, "lamb": [4, 6], "identity_matrix": 4, "neural": 4, "net": 4, "rnn_type": 4, "lstm": 4, "kernel_initi": 4, "lecun_uniform": 4, "hidden_layer_s": 4, "32": [4, 6], "adam": 4, "huber": 4, "epoch": [4, 6], "wrapper": [4, 6], "kera": 4, "rnn": 4, "cell": 4, "gru": 4, "layer": 4, "compil": [4, 9], "tf": 4, "set_se": 
4, "head_siz": 4, "256": 4, "num_head": 4, "ff_dim": 4, "num_transformer_block": 4, "mlp_unit": 4, "128": 4, "mlp_dropout": 4, "dropout": 4, "io": [4, 6], "timeseries_transformer_classif": 4, "input_shap": 4, "output_shap": [4, 6], "ensemble_param": 4, "forecasts_runtim": 4, "model_weight": 4, "incompat": [4, 9], "bestn": [4, 9], "forecast_id": 4, "forecast_runtim": 4, "forecasts_list": 4, "ensemble_str": 4, "prematched_seri": 4, "use_valid": 4, "subset_flag": 4, "per_series2": 4, "only_specifi": 4, "outer": [4, 6], "known_match": 4, "available_model": 4, "full_model": 4, "error_matrix": 4, "error_list": 4, "col_nam": 4, "smoothing_window": 4, "metric_nam": 4, "classifier_param": 4, "classifi": 4, "unknown": 4, "construct": [4, 5, 6, 9], "x_predict": 4, "ensemble_list": 4, "models_sourc": 4, "all_seri": 4, "forecast_period": [4, 9], "datestamp": 4, "retur": 4, "safety_model": 4, "local_result": 4, "total_v": 4, "describ": [4, 9], "releas": 4, "amazon": 4, "realli": [4, 6], "mxnet": [4, 9], "gui": 4, "sorta": 4, "mayb": 4, "deprec": [4, 6, 9], "sad": 4, "excel": [4, 9], "routin": 4, "stabil": 4, "strong": 4, "suit": 4, "gluon_model": 4, "deepar": 4, "learning_r": [4, 5], "context_length": 4, "npt": 4, "deepstat": 4, "wavenet": 4, "deepfactor": 4, "sff": 4, "mqcnn": 4, "deepvar": 4, "gpvar": 4, "nbeat": 4, "network": 4, "2forecastlength": [4, 6], "nforecastlength": 4, "unlik": [4, 6, 9], "df_index": 4, "freq": [4, 6, 9], "model_templ": 4, "silverkit": 4, "unitedst": 4, "inner_n_job": 4, "relat": [4, 9], "borrow": 4, "xinyu": 4, "chen": 4, "xinychen": 4, "transdim": 4, "medium": [4, 9], "articl": 4, "thrown": 4, "nan_to_num": 4, "pinv": 4, "On": [4, 9], "entri": 4, "dlascl": 4, "illeg": 4, "time_horizon": 4, "time_lag": 4, "lambda0": 4, "33333333": 4, "low": [4, 6, 9], "tensor": 4, "arxiv": [4, 6], "2104": 4, "14936": 4, "blob": 4, "master": 4, "mat": 4, "predictor": 4, "ipynb": 4, "rho": 4, "inner_maxit": 4, "tempor": 4, "sparse_mat": 4, "ind": 4, "w": [4, 5, 6], "psi": 4, "r": [4, 5, 6], "dynam": [4, 6, 9], "pred_step": 4, "sparse_tensor": 4, "rho0": 4, "recogn": [4, 7], "pred_time_step": 4, "time_interv": 4, "kernel": 4, "dim": [4, 6], "tau": 4, "aq": 4, "rold": 4, "delta": 4, "sun": 4, "expanded_binar": [4, 6], "ml": [4, 9], "aspect": 4, "n_seri": [4, 6], "variou": [4, 6], "nixtla": 4, "Be": [4, 7], "commerci": 4, "mqloss": 4, "input_s": 4, "max_step": [4, 6], "early_stop_patience_step": 4, "relu": 4, "scaler_typ": [4, 5], "model_arg": 4, "point_quantil": 4, "document": [4, 7, 9], "temp": 4, "za": 4, "static_regressor": 4, "facebook": 4, "sinc": [4, 9], "finicki": [4, 9], "yearly_season": 4, "weekly_season": 4, "daily_season": 4, "n_changepoint": 4, "changepoint_prior_scal": 4, "seasonality_mod": 4, "changepoint_rang": 4, "seasonality_prior_scal": 4, "holidays_prior_scal": 4, "thou": 4, "shall": 4, "neither": 4, "prece": 4, "off": [4, 6, 9], "changepoints_rang": 4, "trend_reg": 4, "trend_reg_threshold": 4, "ar_spars": 4, "seasonality_reg": 4, "n_lag": 4, "num_hidden_lay": 4, "d_hidden": 4, "loss_func": 4, "train_spe": 4, "90": [4, 6], "max_epoch": 4, "max_encoder_length": 4, "hidden_s": 4, "n_layer": 4, "add_target_scal": 4, "target_norm": 4, "encodernorm": 4, "temporalfusiontransform": 4, "64": [4, 6], "78": 4, "model_kwarg": 4, "trainer_kwarg": 4, "callback": 4, "obsess": 4, "go": [4, 9], "pt": 4, "lightn": [4, 9], "trainer": 4, "quantileloss": 4, "lesser": 4, "decis": [4, 7, 9], "tree": 4, "elast": 4, "forest": 4, "mlpregressor": 4, "adaboost": 4, "principl": 4, "nthn": 4, "max_depth": 
[4, 6], "min_samples_split": [4, 6], "polynomial_degre": [4, 6], "randomforest": 4, "mean_rolling_period": 4, "macd_period": 4, "std_rolling_period": 4, "max_rolling_period": 4, "min_rolling_period": 4, "ewm_var_alpha": 4, "quantile90_rolling_period": 4, "quantile10_rolling_period": 4, "ewm_alpha": 4, "additional_lag_period": 4, "abs_energi": 4, "rolling_autocorr_period": 4, "nonzero_last_n": 4, "scale_full_x": 4, "quantile_param": 4, "min_samples_leaf": 4, "n_estim": 4, "250": 4, "cointegration_lag": 4, "series_hash": 4, "frame": [4, 6], "multiari": 4, "window_s": [4, 6], "max_histori": 4, "one_step": 4, "processed_i": 4, "normalize_window": [4, 6], "basi": 4, "extratre": 4, "add_date_part": 4, "x_transform": 4, "wise": [4, 9], "scienc": 4, "am": 4, "arthur": 4, "briton": 4, "ve": 4, "think": 4, "your": [4, 7, 9], "selv": 4, "re": 4, "individu": [4, 9], "ye": [4, 9], "we": [4, 9], "rbf": 4, "noise_var": 4, "lambda_prim": 4, "polynomi": [4, 6], "locally_period": 4, "littl": [4, 9], "flexibl": [4, 6, 9], "toler": [4, 9], "\u03b3": 4, "lambda_": 4, "reason": [4, 6, 9], "might": [4, 9], "365": [4, 6], "input_dim": [4, 6], "output_dim": [4, 6], "shuffl": [4, 6], "model_dict": 4, "bootstrap": 4, "verbose_bool": 4, "multioutput": 4, "framework": [4, 6, 7], "mean_rol": 4, "bit": 4, "exog": 4, "exog_oo": 4, "exog_fc": 4, "sometim": 4, "c": [4, 5, 6, 7, 9], "causal": 4, "ct": 4, "stationar": 4, "hour": [4, 6, 9], "k_factor": 4, "factor_ord": 4, "mamodel": 4, "mapr": 4, "factor_multipl": 4, "idiosyncratic_ar1": 4, "damped_trend": 4, "seasonal_period": 4, "formerli": 4, "damp": 4, "deseason": 4, "use_test": 4, "use_ml": 4, "damped_cycl": 4, "irregular": 4, "stochastic_cycl": 4, "stochastic_trend": 4, "stochastic_level": 4, "cov_typ": 4, "opg": 4, "lbfg": 4, "maxlag": [4, 6], "ic": 4, "fpe": 4, "determinist": 4, "k_ar_diff": [4, 6], "coint_rank": 4, "current_seri": 4, "xf": 4, "negloglik": 4, "conf_int": 4, "ar_ord": 4, "fit_method": 4, "hmc": 4, "num_step": 4, "tensorflowprob": 4, "42": 4, "0009999": 4, "layer_norm": 4, "dropout_r": 4, "512": 4, "num_lay": 4, "hist_len": 4, "720": 4, "decoder_output_dim": 4, "final_decoder_hidden": 4, "num_split": 4, "min_num_epoch": 4, "train_epoch": 4, "patienc": 4, "epoch_len": 4, "permut": 4, "gpu_index": 4, "googl": 4, "research": 4, "mlp": [4, 5], "num_cov_col": 4, "cat_cov_col": 4, "ts_col": 4, "train_rang": 4, "val_rang": 4, "test_rang": 4, "pred_len": 4, "loader": 4, "cubic": 5, "68": 5, "69": 5, "000999999": 5, "70": 5, "71": 5, "pad": 5, "ew": 5, "72": 5, "minmax": 5, "lo": 5, "sort_valu": 5, "ascend": [5, 9], "groupbi": [5, 6], "reset_index": 5, "export2": 5, "export_fin": 5, "to_json": 5, "orient": [5, 6], "pprint": 5, "read_csv": 5, "autots_forecast_template_gen": 5, "jsn": 5, "json_temp": 5, "read": 5, "txt": 5, "dump": 5, "indent": 5, "sort_kei": 5, "41": 6, "21": [6, 7], "contextu": 6, "fall": [6, 7, 9], "densiti": 6, "sequenc": [6, 9], "anomal": 6, "itself": 6, "regard": 6, "1802": 6, "04431": 6, "anomaly_df": 6, "df_col": 6, "wkdom_holidai": 6, "wkdeom_holidai": 6, "lunar_holidai": 6, "lunar_weekdai": 6, "islamic_holidai": 6, "hebrew_holidai": 6, "max_featur": 6, "predict_interv": 6, "job": 6, "threshold_method": 6, "norm": 6, "rolling_period": 6, "surviv": 6, "outlieri": 6, "dataframm": 6, "rolling_zscor": 6, "sf": 6, "rolliing_zscor": 6, "convers": [6, 7], "chines": 6, "arab": 6, "datetime_index": 6, "christian": 6, "aspir": 6, "hebrew": 6, "pyluach": 6, "simlist": 6, "epoch_adjust": 6, "islam": 6, "convertd": 6, "fitnr": 6, "timezon": 6, 
"new_moon": 6, "continu": 6, "pre": 6, "full_moon": 6, "julian": 6, "johansen": 6, "barba": 6, "towardsdatasci": 6, "canon": 6, "forgotten": 6, "4d1213396da1": 6, "p_mat": 6, "ndarrai": 6, "max_lag": 6, "return_eigenvalu": 6, "endog": 6, "det_ord": 6, "abbrevi": 6, "series_ord": 6, "trim": 6, "ex": 6, "modifi": 6, "multiproces": 6, "conserv": 6, "intel": 6, "hyperthread": 6, "logic": 6, "psutil": [6, 9], "fallsback": 6, "mkl": [6, 9], "simd": 6, "2017": 6, "otto": 6, "seiskari": 6, "mit": 6, "licens": 6, "resourc": [6, 9], "found": [6, 9], "kevinkotz": 6, "www": [6, 9], "notebook": 6, "statespace_dfm_coincid": 6, "introduct": 6, "commandeur": 6, "koopman": 6, "chp": 6, "andrew": 6, "harvei": 6, "notat": 6, "transit": 6, "x_k": 6, "x_": 6, "q_": 6, "qquad": 6, "sim": 6, "y_k": 6, "h": 6, "r_k": 6, "hidden": 6, "system": [6, 9], "matric": 6, "suitabl": 6, "definit": 6, "simo": 6, "sarkk\u00e4": 6, "2013": 6, "cambridg": 6, "univers": 6, "press": [6, 7], "aalto": 6, "fi": 6, "ssarkka": 6, "cup_book_online_20131111": 6, "simdkalman": 6, "kf": 6, "diag": 6, "denot": 6, "uniform": 6, "initial_valu": 6, "initial_covari": 6, "ey": 6, "third": [6, 9], "cov": 6, "29311384": 6, "06948961": 6, "19959416": 6, "00777587": 6, "02528967": 6, "pred_mean": 6, "pred_stdev": 6, "sqrt": 6, "71543": 6, "65322": 6, "multi": 6, "dimension": 6, "howev": [6, 9], "flexibli": 6, "vari": [6, 9], "broadcast": 6, "rule": 6, "oper": 6, "n_state": 6, "n_var": 6, "n_measur": 6, "main": 6, "interfac": 6, "accord": 6, "natur": [6, 9], "scalar": 6, "3d": 6, "lock": 6, "n_test": 6, "likelihood": 6, "log_likelihood": 6, "explan": 6, "With": [6, 9], "boolean": 6, "pairwis": [6, 9], "member": 6, "subresult": 6, "field": 6, "pairwise_covari": 6, "n_iter": 6, "interpret": 6, "mathbb": 6, "x_0": 6, "rm": 6, "prior_mean": 6, "prior_cov": 6, "x_j": 6, "simgl": 6, "y_1": 6, "ldot": 6, "y_j": 6, "y_t": 6, "smooth_mean": 6, "smooth_covari": 6, "smoothing_gain": 6, "y_": 6, "posterior_mean": 6, "posterior_covari": 6, "posterior": 6, "argument": 6, "operand": 6, "transpos": 6, "initial_mean": 6, "beta": 6, "phi": 6, "correct": 6, "allow_auto": 6, "next_smooth_mean": 6, "next_smooth_covari": 6, "prior_covari": 6, "statespac": 6, "oct": 6, "07": 6, "37": 6, "colincatlin": 6, "n_harm": 6, "freq_rang": 6, "grouping_method": 6, "tile": 6, "n_group": 6, "hier_id": 6, "bottom": 6, "holidays_subdiv": 6, "fallback": 6, "unavail": 6, "bias": 6, "simple_2": 6, "linear_mix": 6, "max_it": 6, "mean_weight": 6, "back_method": 6, "half": [6, 9], "remaind": 6, "slice_al": 6, "keepna": 6, "phase": 6, "moon": 6, "stackoverflow": 6, "2531541": 6, "9492254": 6, "keturn": 6, "earlier": 6, "john": 6, "walker": 6, "ecc": 6, "016718": 6, "equat": 6, "2444237": 6, "905": 6, "ecliptic_longitude_epoch": 6, "278": 6, "83354": 6, "ecliptic_longitude_perige": 6, "282": 6, "596403": 6, "eccentr": 6, "moon_mean_longitude_epoch": 6, "975464": 6, "moon_mean_perigee_epoch": 6, "349": 6, "383063": 6, "illumin": 6, "zone": 6, "2444238": 6, "asia": 6, "matter": 6, "central": 6, "precis": 6, "75": 6, "nextnew": 6, "krstn": 6, "eu": 6, "nanpercentil": 6, "in_arr": 6, "rollov": 6, "support": [6, 7, 9], "driven": 6, "placehold": 6, "mixtur": 6, "gum": 6, "diseas": 6, "credibl": 6, "spell": 6, "cast": 6, "variable_pct_chang": 6, "upon": 6, "upper_error": 6, "lower_error": 6, "errorrang": 6, "cum": 6, "qtp": 6, "xn": 6, "broaden": 6, "although": [6, 7, 9], "corrupt": 6, "bay": 6, "theorem": 6, "hot": 6, "history_dai": 6, "set_index": 6, "recur": 6, "weekdai": 6, "commonli": [6, 9], 
"repeat": [6, 9], "ag": 6, "degre": 6, "dtindex_futur": 6, "full_sort": 6, "nan_arrai": 6, "include_on": 6, "very_smal": 6, "typic": [6, 9], "reshap": [6, 9], "na_str": 6, "categorical_fillna": 6, "handle_unknown": [6, 9], "use_encoded_valu": 6, "downcast": 6, "unalt": 6, "missing_valu": 6, "ordinalencod": [6, 9], "to_numer": 6, "messag": [6, 9], "convert_dtyp": 6, "polish": 6, "999": 6, "dateoffset": [6, 9], "somewher": 6, "pydata": [6, 9], "stabl": [6, 9], "user_guid": [6, 9], "still": [6, 7, 9], "cut": 6, "older": [6, 9], "eventu": 6, "incomplet": [6, 9], "appear": [6, 9], "upsampl": [6, 7], "silenc": 6, "rest": 6, "configur": 6, "random_st": 6, "wide_arr": 6, "gst": 6, "sgt": 6, "46": 6, "error_buff": 6, "z_init": 6, "z_limit": 6, "z_step": 6, "max_contamin": 6, "sd_weight": 6, "anomaly_count_weight": 6, "consecut": 6, "errors_al": 6, "obj": 6, "maxim": 6, "reduct": 6, "invert": 6, "meet": [6, 9], "yield": 6, "itertool": 6, "more_itertool": 6, "descript": [6, 9], "circa": 6, "decay_span": 6, "displacement_row": 6, "span": 6, "decai": 6, "soften": 6, "first_value_onli": 6, "lanczos_factor": 6, "return_diff": 6, "implent": 6, "somewhat": 6, "statmodelsfilt": 6, "linearregress": 6, "suffix": 6, "_mdfcrst": 6, "vagu": 6, "gap": 6, "std_threshold": 6, "purg": 6, "THE": 6, "cumul": 6, "imprecis": 6, "missing": 6, "scatter": 6, "dure": 6, "reverse_align": 6, "n_bin": 6, "kmean": 6, "kbin": 6, "irrevers": 6, "exponeti": 6, "extrapol": 6, "n_harmnon": 6, "quadrat": 6, "revers": [6, 9], "highest": [6, 7, 9], "But": 6, "1600": 6, "upstream": 6, "regression_param": 6, "grouping_forward_limit": 6, "max_level_shift": 6, "serious": 6, "alter": 6, "rolling_window": 6, "n_futur": 6, "macro_micro": 6, "_lltmicro": 6, "horizon": [6, 9], "simpli": [6, 9], "residu": 6, "plai": 6, "center_on": 6, "assur": [6, 9], "sigma": 6, "run_ord": 6, "season_first": 6, "holiday_param": [6, 9], "dv": 6, "reintroduction_model": 6, "reintroducion": 6, "built": 6, "decim": 6, "on_transform": 6, "on_invers": 6, "force_int": 6, "ceil": 6, "floor": 6, "decomp_typ": 6, "stl": 6, "seaonal": 6, "seaonsal": 6, "hilbert": 6, "method_arg": 6, "wiener": 6, "savgol_filt": 6, "butter": 6, "cheby1": 6, "cheby2": 6, "ellip": 6, "bessel": 6, "oh": 6, "nice": 6, "ash": 6, "my": 6, "tomato": 6, "pippin": 6, "lm": 6, "tt": 6, "yy": 6, "amp": 6, "omega": 6, "fitfunc": 6, "unsym": 6, "question": 6, "16716302": 6, "sine": 6, "curv": 6, "pylab": 6, "deviat": [6, 9], "halflif": 6, "23199796": 6, "condens": 6, "context_slic": 6, "halfmax": 6, "forecastlength": 6, "chunk_siz": 6, "7734": 6, "dtype": 6, "float32": 6, "n_record": 6, "num_column": 6, "num_indic": 6, "braycurti": 6, "start_index": 6, "include_last": 6, "indici": 6, "include_differ": 6, "window_shap": 6, "writeabl": 6, "neighbourhood": 6, "gist": 6, "seberg": 6, "3866040": 6, "newer": 6, "toggl": 6, "__version__": 6, "skip_siz": 6, "downsampl": 6, "num": 6, "window_length": 6, "70296498": 6, "numba": 6, "70304475": 6, "1234": 6, "1step": 6, "num_ob": 6, "stride": 6, "trick": 6, "lib": [6, 9], "stride_trick": 6, "rapidli": 7, "deploi": 7, "m6": 7, "competit": 7, "deliv": 7, "invest": 7, "market": 7, "dozen": 7, "usabl": [7, 9], "These": [7, 9], "addition": [7, 9], "proprietari": 7, "readili": 7, "ten": 7, "hundr": 7, "thousand": [7, 9], "exogen": 7, "integr": 7, "automl": 7, "flagship": 7, "abil": [7, 9], "additon": 7, "advis": 7, "come": [7, 9], "distinct": [7, 9], "ideal": [7, 9], "_hourli": [7, 9], "_monthli": 7, "_weekli": [7, 9], "_yearli": [7, 9], "_live_daili": 7, 
"fast_parallel": 7, "2019": [7, 9], "forecasts_df": [7, 9], "forecasts_up": 7, "forecasts_low": 7, "particular": [7, 9], "extended_tutori": 7, "md": 7, "guid": 7, "look": [7, 9], "production_exampl": [7, 9], "especi": [7, 9], "predefin": 7, "complex": 7, "pretti": [7, 9], "environ": [7, 9], "toward": [7, 9], "prioriti": 7, "ram": 7, "instanc": 7, "pretrain": 7, "crtl": 7, "recov": 7, "udf": 7, "obvious": [7, 9], "2x": 7, "3x": 7, "5x": 7, "no_shared_fast": 7, "decreas": 7, "poorer": 7, "satisfactori": [7, 9], "expens": 7, "feedback": 7, "report": 7, "feel": 7, "favorit": 7, "cours": 7, "codebas": 7, "cat": 7, "henc": 7, "logo": 7, "subpackag": 8, "modul": 8, "_daili": 9, "autot": 9, "df_long": 9, "transact": 9, "altern": 9, "coerc": 9, "minim": 9, "handi": 9, "unit": 9, "side": 9, "oldest": 9, "advantag": 9, "interg": 9, "troubl": 9, "sudden": 9, "overs": 9, "misrepres": 9, "promot": 9, "critic": 9, "tricki": 9, "necess": 9, "leakag": 9, "firstli": 9, "resembl": 9, "enough": 9, "taken": 9, "variat": 9, "valdat": 9, "june": 9, "choic": 9, "messi": 9, "act": 9, "treat": 9, "suspect": 9, "fairli": 9, "whole": 9, "idea": 9, "suffer": 9, "interst": 9, "94": 9, "minneapoli": 9, "paul": 9, "minnesota": 9, "great": 9, "demonstr": 9, "road": 9, "influenc": 9, "alongsid": 9, "volum": 9, "carri": 9, "care": 9, "weights_hourli": 9, "traffic_volum": 9, "49": 9, "168": 9, "lieu": 9, "upper_forecasts_df": 9, "lower_forecasts_df": 9, "By": 9, "impract": 9, "engin": 9, "simplic": 9, "fault": 9, "switch": 9, "evolv": 9, "develop": 9, "example_filenam": 9, "example_export": 9, "deeper": 9, "subsidiari": 9, "df_forecast": 9, "future_regressor_train2d": 9, "future_regressor_forecast2d": 9, "consider": 9, "overfit": 9, "secondli": 9, "composit": 9, "balanc": 9, "qualiti": 9, "iml": 9, "favor": 9, "translat": 9, "insid": 9, "symmetr": 9, "versatil": 9, "human": 9, "coverage_fract": 9, "logarithm": 9, "hiearchial": 9, "went": 9, "wavi": 9, "seriou": 9, "holdout": 9, "pyplot": 9, "plt": 9, "2018": 9, "09": 9, "26": 9, "mosaic_df": 9, "situat": 9, "demand": 9, "tradition": 9, "problem": 9, "exagger": 9, "unfortun": 9, "inher": 9, "sub": 9, "unstabl": 9, "reassign": 9, "wrong": 9, "drive": 9, "label": 9, "recogniz": 9, "usal": 9, "splice": 9, "latter": 9, "depth": 9, "happen": 9, "no_shar": 9, "possbl": 9, "horizontal_gener": 9, "enembl": 9, "extens": 9, "theoret": 9, "studio": 9, "apt": 9, "yum": 9, "sudo": 9, "openbla": 9, "show_config": 9, "doubl": 9, "haven": 9, "broken": 9, "slide": 9, "23": 9, "poissonreg": 9, "squared_error": 9, "histgradientboostingregressor": 9, "uecm": 9, "uniform_filter1d": 9, "stat": 9, "spatial": 9, "Of": 9, "tend": 9, "cu91": 9, "cu101mkl": 9, "lightgbm": 9, "xgboost": 9, "bring": 9, "venv": 9, "anaconda": 9, "miniforg": 9, "numexpr": 9, "bottleneck": 9, "action": 9, "pystan": 9, "forg": 9, "dep": 9, "ext": 9, "pmdarima": 9, "dill": 9, "upgrad": 9, "pointlessli": 9, "mamba": 9, "tqdm": 9, "intelex": 9, "spyder": 9, "torchvis": 9, "torchaudio": 9, "cpuonli": 9, "gpu": 9, "cuda": 9, "mix": 9, "session": 9, "nvidia": 9, "smi": 9, "cudatoolkit": 9, "cudnn": 9, "nccl": 9, "ld_library_path": 9, "conda_prefix": 9, "perman": 9, "bashrc": 9, "env": 9, "mine": 9, "home": 9, "mambaforg": 9, "torch": 9, "url": 9, "whl": 9, "cu113": 9, "cu112": 9, "command": 9, "interchang": 9, "env_nam": 9, "softwar": 9, "oneapi": 9, "ai": 9, "analyt": 9, "toolkit": 9, "aikit37": 9, "aikit": 9, "modin": 9, "dpctl": 9, "config": 9, "omp_num_thread": 9, "use_daal4py_sklearn": 9, "bench": 9, "hang": 9, "clear": 9, 
"overload": 9, "consumpt": 9, "acceler": 9, "persist": 9, "discuss": 9, "reboot": 9, "heavi": 9, "odd": 9, "shouldn": 9, "greatli": 9, "proper": 9, "future_": 9, "certaini": 9, "Such": 9, "plan": 9, "organ": 9, "inorgan": 9, "busi": 9, "control": 9, "anticp": 9, "hand": 9, "confusingli": 9, "why": 9, "harm": 9, "experi": 9, "scenario": 9, "examin": 9, "enforc": 9, "could": 9, "future_regressor_forecast_2": 9, "prediction_2": 9, "forecasts_df_2": 9, "respons": 9, "multilabel_confusion_matrix": 9, "classification_report": 9, "df_full": 9, "historic_lower_limit": 9, "risk_df_upp": 9, "risk_df_low": 9, "historic_upper_risk_df": 9, "historic_lower_risk_df": 9, "eval_low": 9, "eval_upp": 9, "pred_low": 9, "pred_upp": 9, "zero_divis": 9, "target_nam": 9, "effectiv": 9, "far": 9, "tighter": 9, "extrem": 9, "portion": 9, "analyz": 9, "pick": 9, "anti": 9, "signific": 9, "wiki_pag": 9, "mod": 9, "ll": 9, "full_dat": 9, "date_rang": 9, "2014": 9, "2024": 9, "prophet_holidai": 9, "familiar": 9, "manuali": 9, "clarifi": 9, "text": 9, "editor": 9, "guarante": 9, "incorpor": 9, "crude": 9, "meaning": 9, "properli": 9, "coercibl": 9, "unconnect": 9, "transformer_dict": 9, "tran": 9, "df_tran": 9, "df_inv_return": 9, "tradit": 9, "draw": 9, "pool": 9, "massiv": 9, "global": 9, "pars": 9, "gradientboostingregressor": 9, "experiment": 9, "bla": 9, "lapack": 9, "nyi": 9, "_": 9}, "objects": {"": [[1, 0, 0, "-", "autots"]], "autots": [[1, 1, 1, "", "AnomalyDetector"], [1, 1, 1, "", "AutoTS"], [1, 1, 1, "", "Cassandra"], [1, 1, 1, "", "EventRiskForecast"], [1, 1, 1, "", "GeneralTransformer"], [1, 1, 1, "", "HolidayDetector"], [1, 4, 1, "", "RandomTransform"], [1, 3, 1, "", "TransformTS"], [1, 4, 1, "", "create_lagged_regressor"], [1, 4, 1, "", "create_regressor"], [2, 0, 0, "-", "datasets"], [3, 0, 0, "-", "evaluator"], [1, 4, 1, "", "infer_frequency"], [1, 4, 1, "", "load_artificial"], [1, 4, 1, "", "load_daily"], [1, 4, 1, "", "load_hourly"], [1, 4, 1, "", "load_linear"], [1, 4, 1, "", "load_live_daily"], [1, 4, 1, "", "load_monthly"], [1, 4, 1, "", "load_sine"], [1, 4, 1, "", "load_weekdays"], [1, 4, 1, "", "load_weekly"], [1, 4, 1, "", "load_yearly"], [1, 4, 1, "", "long_to_wide"], [1, 4, 1, "", "model_forecast"], [4, 0, 0, "-", "models"], [5, 0, 0, "-", "templates"], [6, 0, 0, "-", "tools"]], "autots.AnomalyDetector": [[1, 2, 1, "", "detect"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "fit_anomaly_classifier"], [1, 2, 1, "", "get_new_params"], [1, 2, 1, "", "plot"], [1, 2, 1, "", "score_to_anomaly"]], "autots.AutoTS": [[1, 2, 1, "", "back_forecast"], [1, 3, 1, "", "best_model"], [1, 3, 1, "", "best_model_ensemble"], [1, 3, 1, "", "best_model_name"], [1, 3, 1, "", "best_model_params"], [1, 2, 1, "", "best_model_per_series_mape"], [1, 2, 1, "", "best_model_per_series_score"], [1, 3, 1, "", "best_model_transformation_params"], [1, 3, 1, "", "df_wide_numeric"], [1, 2, 1, "", "diagnose_params"], [1, 2, 1, "", "expand_horizontal"], [1, 2, 1, "", "export_best_model"], [1, 2, 1, "", "export_template"], [1, 2, 1, "", "failure_rate"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "fit_data"], [1, 2, 1, "", "get_metric_corr"], [1, 2, 1, "", "get_new_params"], [1, 2, 1, "", "horizontal_per_generation"], [1, 2, 1, "", "horizontal_to_df"], [1, 2, 1, "", "import_best_model"], [1, 2, 1, "", "import_results"], [1, 2, 1, "", "import_template"], [1, 2, 1, "", "list_failed_model_types"], [1, 2, 1, "", "load_template"], [1, 2, 1, "", "mosaic_to_df"], [1, 2, 1, "", "parse_best_model"], [1, 2, 1, "", "plot_back_forecast"], [1, 2, 1, "", 
"plot_backforecast"], [1, 2, 1, "", "plot_generation_loss"], [1, 2, 1, "", "plot_horizontal"], [1, 2, 1, "", "plot_horizontal_model_count"], [1, 2, 1, "", "plot_horizontal_per_generation"], [1, 2, 1, "", "plot_horizontal_transformers"], [1, 2, 1, "", "plot_metric_corr"], [1, 2, 1, "", "plot_per_series_error"], [1, 2, 1, "", "plot_per_series_mape"], [1, 2, 1, "", "plot_per_series_smape"], [1, 2, 1, "", "plot_transformer_failure_rate"], [1, 2, 1, "", "plot_validations"], [1, 2, 1, "", "predict"], [1, 3, 1, "", "regression_check"], [1, 2, 1, "", "results"], [1, 2, 1, "", "retrieve_validation_forecasts"], [1, 2, 1, "", "save_template"], [1, 3, 1, "", "score_per_series"], [1, 2, 1, "", "validation_agg"]], "autots.AutoTS.initial_results": [[1, 3, 1, "", "model_results"]], "autots.Cassandra..anomaly_detector": [[1, 3, 1, "", "anomalies"], [1, 3, 1, "", "scores"]], "autots.Cassandra.": [[1, 3, 1, "", "holiday_count"], [1, 3, 1, "", "holidays"], [1, 3, 1, "", "params"], [1, 3, 1, "", "predict_x_array"], [1, 3, 1, "", "predicted_trend"], [1, 3, 1, "", "trend_train"], [1, 3, 1, "", "x_array"]], "autots.Cassandra": [[1, 2, 1, "", "analyze_trend"], [1, 2, 1, "", "auto_fit"], [1, 2, 1, "", "base_scaler"], [1, 2, 1, "", "compare_actual_components"], [1, 2, 1, "", "create_forecast_index"], [1, 2, 1, "", "create_t"], [1, 2, 1, "", "cross_validate"], [1, 2, 1, "", "feature_importance"], [1, 2, 1, "id0", "fit"], [1, 2, 1, "", "fit_data"], [1, 2, 1, "id1", "get_new_params"], [1, 2, 1, "", "get_params"], [1, 2, 1, "", "next_fit"], [1, 2, 1, "id2", "plot_components"], [1, 2, 1, "id3", "plot_forecast"], [1, 2, 1, "", "plot_things"], [1, 2, 1, "id4", "plot_trend"], [1, 2, 1, "id5", "predict"], [1, 2, 1, "", "predict_new_product"], [1, 2, 1, "", "process_components"], [1, 2, 1, "id6", "return_components"], [1, 2, 1, "", "rolling_trend"], [1, 2, 1, "", "scale_data"], [1, 2, 1, "", "to_origin_space"], [1, 2, 1, "", "treatment_causal_impact"]], "autots.Cassandra.holiday_detector": [[1, 2, 1, "", "dates_to_holidays"]], "autots.EventRiskForecast": [[1, 2, 1, "id9", "fit"], [1, 2, 1, "id10", "generate_historic_risk_array"], [1, 2, 1, "id11", "generate_result_windows"], [1, 2, 1, "id12", "generate_risk_array"], [1, 2, 1, "id13", "plot"], [1, 2, 1, "", "plot_eval"], [1, 2, 1, "id14", "predict"], [1, 2, 1, "id15", "predict_historic"], [1, 2, 1, "id16", "set_limit"]], "autots.GeneralTransformer": [[1, 2, 1, "", "fill_na"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "fit_transform"], [1, 2, 1, "", "get_new_params"], [1, 2, 1, "", "inverse_transform"], [1, 2, 1, "", "retrieve_transformer"], [1, 2, 1, "", "transform"]], "autots.HolidayDetector": [[1, 2, 1, "", "dates_to_holidays"], [1, 2, 1, "", "detect"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "get_new_params"], [1, 2, 1, "", "plot"], [1, 2, 1, "", "plot_anomaly"]], "autots.datasets": [[2, 0, 0, "-", "fred"], [2, 4, 1, "", "load_artificial"], [2, 4, 1, "", "load_daily"], [2, 4, 1, "", "load_hourly"], [2, 4, 1, "", "load_linear"], [2, 4, 1, "", "load_live_daily"], [2, 4, 1, "", "load_monthly"], [2, 4, 1, "", "load_sine"], [2, 4, 1, "", "load_weekdays"], [2, 4, 1, "", "load_weekly"], [2, 4, 1, "", "load_yearly"], [2, 4, 1, "", "load_zeroes"]], "autots.datasets.fred": [[2, 4, 1, "", "get_fred_data"]], "autots.evaluator": [[3, 0, 0, "-", "anomaly_detector"], [3, 0, 0, "-", "auto_model"], [3, 0, 0, "-", "auto_ts"], [3, 0, 0, "-", "benchmark"], [3, 0, 0, "-", "event_forecasting"], [3, 0, 0, "-", "metrics"], [3, 0, 0, "-", "validation"]], "autots.evaluator.anomaly_detector": [[3, 1, 1, "", 
"AnomalyDetector"], [3, 1, 1, "", "HolidayDetector"]], "autots.evaluator.anomaly_detector.AnomalyDetector": [[3, 2, 1, "", "detect"], [3, 2, 1, "", "fit"], [3, 2, 1, "", "fit_anomaly_classifier"], [3, 2, 1, "", "get_new_params"], [3, 2, 1, "", "plot"], [3, 2, 1, "", "score_to_anomaly"]], "autots.evaluator.anomaly_detector.HolidayDetector": [[3, 2, 1, "", "dates_to_holidays"], [3, 2, 1, "", "detect"], [3, 2, 1, "", "fit"], [3, 2, 1, "", "get_new_params"], [3, 2, 1, "", "plot"], [3, 2, 1, "", "plot_anomaly"]], "autots.evaluator.auto_model": [[3, 4, 1, "", "ModelMonster"], [3, 1, 1, "", "ModelPrediction"], [3, 4, 1, "", "NewGeneticTemplate"], [3, 4, 1, "", "RandomTemplate"], [3, 1, 1, "", "TemplateEvalObject"], [3, 4, 1, "", "TemplateWizard"], [3, 4, 1, "", "UniqueTemplates"], [3, 4, 1, "", "back_forecast"], [3, 4, 1, "", "create_model_id"], [3, 4, 1, "", "dict_recombination"], [3, 4, 1, "", "generate_score"], [3, 4, 1, "", "generate_score_per_series"], [3, 4, 1, "", "horizontal_template_to_model_list"], [3, 4, 1, "", "model_forecast"], [3, 4, 1, "", "random_model"], [3, 4, 1, "", "remove_leading_zeros"], [3, 4, 1, "", "trans_dict_recomb"], [3, 4, 1, "", "unpack_ensemble_models"], [3, 4, 1, "", "validation_aggregation"]], "autots.evaluator.auto_model.ModelPrediction": [[3, 2, 1, "", "fit"], [3, 2, 1, "", "fit_data"], [3, 2, 1, "", "predict"]], "autots.evaluator.auto_model.TemplateEvalObject": [[3, 2, 1, "", "concat"], [3, 3, 1, "", "full_mae_errors"], [3, 3, 1, "", "full_mae_ids"], [3, 2, 1, "", "load"], [3, 2, 1, "", "save"]], "autots.evaluator.auto_ts": [[3, 1, 1, "", "AutoTS"], [3, 4, 1, "", "error_correlations"], [3, 4, 1, "", "fake_regressor"]], "autots.evaluator.auto_ts.AutoTS": [[3, 2, 1, "", "back_forecast"], [3, 3, 1, "", "best_model"], [3, 3, 1, "", "best_model_ensemble"], [3, 3, 1, "", "best_model_name"], [3, 3, 1, "", "best_model_params"], [3, 2, 1, "", "best_model_per_series_mape"], [3, 2, 1, "", "best_model_per_series_score"], [3, 3, 1, "", "best_model_transformation_params"], [3, 3, 1, "", "df_wide_numeric"], [3, 2, 1, "", "diagnose_params"], [3, 2, 1, "", "expand_horizontal"], [3, 2, 1, "", "export_best_model"], [3, 2, 1, "", "export_template"], [3, 2, 1, "", "failure_rate"], [3, 2, 1, "", "fit"], [3, 2, 1, "", "fit_data"], [3, 2, 1, "", "get_metric_corr"], [3, 2, 1, "", "get_new_params"], [3, 2, 1, "", "horizontal_per_generation"], [3, 2, 1, "", "horizontal_to_df"], [3, 2, 1, "", "import_best_model"], [3, 2, 1, "", "import_results"], [3, 2, 1, "", "import_template"], [3, 2, 1, "", "list_failed_model_types"], [3, 2, 1, "", "load_template"], [3, 2, 1, "", "mosaic_to_df"], [3, 2, 1, "", "parse_best_model"], [3, 2, 1, "", "plot_back_forecast"], [3, 2, 1, "", "plot_backforecast"], [3, 2, 1, "", "plot_generation_loss"], [3, 2, 1, "", "plot_horizontal"], [3, 2, 1, "", "plot_horizontal_model_count"], [3, 2, 1, "", "plot_horizontal_per_generation"], [3, 2, 1, "", "plot_horizontal_transformers"], [3, 2, 1, "", "plot_metric_corr"], [3, 2, 1, "", "plot_per_series_error"], [3, 2, 1, "", "plot_per_series_mape"], [3, 2, 1, "", "plot_per_series_smape"], [3, 2, 1, "", "plot_transformer_failure_rate"], [3, 2, 1, "", "plot_validations"], [3, 2, 1, "", "predict"], [3, 3, 1, "", "regression_check"], [3, 2, 1, "", "results"], [3, 2, 1, "", "retrieve_validation_forecasts"], [3, 2, 1, "", "save_template"], [3, 3, 1, "", "score_per_series"], [3, 2, 1, "", "validation_agg"]], "autots.evaluator.auto_ts.AutoTS.initial_results": [[3, 3, 1, "", "model_results"]], "autots.evaluator.benchmark": [[3, 1, 1, 
"", "Benchmark"]], "autots.evaluator.benchmark.Benchmark": [[3, 2, 1, "", "run"]], "autots.evaluator.event_forecasting": [[3, 1, 1, "", "EventRiskForecast"], [3, 4, 1, "", "extract_result_windows"], [3, 4, 1, "", "extract_window_index"], [3, 4, 1, "", "set_limit_forecast"], [3, 4, 1, "", "set_limit_forecast_historic"]], "autots.evaluator.event_forecasting.EventRiskForecast": [[3, 2, 1, "id0", "fit"], [3, 2, 1, "id7", "generate_historic_risk_array"], [3, 2, 1, "id8", "generate_result_windows"], [3, 2, 1, "id9", "generate_risk_array"], [3, 2, 1, "id10", "plot"], [3, 2, 1, "", "plot_eval"], [3, 2, 1, "id11", "predict"], [3, 2, 1, "id12", "predict_historic"], [3, 2, 1, "id13", "set_limit"]], "autots.evaluator.metrics": [[3, 4, 1, "", "array_last_val"], [3, 4, 1, "", "chi_squared_hist_distribution_loss"], [3, 4, 1, "", "containment"], [3, 4, 1, "", "contour"], [3, 4, 1, "", "default_scaler"], [3, 4, 1, "", "dwae"], [3, 4, 1, "", "full_metric_evaluation"], [3, 4, 1, "", "kde"], [3, 4, 1, "", "kde_kl_distance"], [3, 4, 1, "", "kl_divergence"], [3, 4, 1, "", "linearity"], [3, 4, 1, "", "mae"], [3, 4, 1, "", "mda"], [3, 4, 1, "", "mean_absolute_differential_error"], [3, 4, 1, "", "mean_absolute_error"], [3, 4, 1, "", "medae"], [3, 4, 1, "", "median_absolute_error"], [3, 4, 1, "", "mlvb"], [3, 4, 1, "", "mqae"], [3, 4, 1, "", "msle"], [3, 4, 1, "", "numpy_ffill"], [3, 4, 1, "", "oda"], [3, 4, 1, "", "pinball_loss"], [3, 4, 1, "", "precomp_wasserstein"], [3, 4, 1, "", "qae"], [3, 4, 1, "", "rmse"], [3, 4, 1, "", "root_mean_square_error"], [3, 4, 1, "", "rps"], [3, 4, 1, "", "scaled_pinball_loss"], [3, 4, 1, "", "smape"], [3, 4, 1, "", "smoothness"], [3, 4, 1, "", "spl"], [3, 4, 1, "", "symmetric_mean_absolute_percentage_error"], [3, 4, 1, "", "threshold_loss"], [3, 4, 1, "", "unsorted_wasserstein"], [3, 4, 1, "", "wasserstein"]], "autots.evaluator.validation": [[3, 4, 1, "", "extract_seasonal_val_periods"], [3, 4, 1, "", "generate_validation_indices"], [3, 4, 1, "", "validate_num_validations"]], "autots.models": [[4, 0, 0, "-", "arch"], [4, 0, 0, "-", "base"], [4, 0, 0, "-", "basics"], [4, 0, 0, "-", "cassandra"], [4, 0, 0, "-", "dnn"], [4, 0, 0, "-", "ensemble"], [4, 0, 0, "-", "gluonts"], [4, 0, 0, "-", "greykite"], [4, 0, 0, "-", "matrix_var"], [4, 0, 0, "-", "mlensemble"], [4, 0, 0, "-", "model_list"], [4, 0, 0, "-", "neural_forecast"], [4, 0, 0, "-", "prophet"], [4, 0, 0, "-", "pytorch"], [4, 0, 0, "-", "sklearn"], [4, 0, 0, "-", "statsmodels"], [4, 0, 0, "-", "tfp"], [4, 0, 0, "-", "tide"]], "autots.models.arch": [[4, 1, 1, "", "ARCH"]], "autots.models.arch.ARCH": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.base": [[4, 1, 1, "", "ModelObject"], [4, 1, 1, "", "PredictionObject"], [4, 4, 1, "", "apply_constraints"], [4, 4, 1, "", "calculate_peak_density"], [4, 4, 1, "", "create_forecast_index"], [4, 4, 1, "", "create_seaborn_palette_from_cmap"], [4, 4, 1, "", "extract_single_series_from_horz"], [4, 4, 1, "", "extract_single_transformer"], [4, 4, 1, "", "plot_distributions"]], "autots.models.base.ModelObject": [[4, 2, 1, "", "basic_profile"], [4, 2, 1, "", "create_forecast_index"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "time"]], "autots.models.base.PredictionObject": [[4, 2, 1, "id0", "apply_constraints"], [4, 2, 1, "id1", "evaluate"], [4, 2, 1, "", "extract_ensemble_runtimes"], [4, 3, 1, "", "forecast"], [4, 2, 1, "id2", "long_form_results"], [4, 3, 
1, "", "lower_forecast"], [4, 3, 1, "", "model_name"], [4, 3, 1, "", "model_parameters"], [4, 2, 1, "id3", "plot"], [4, 2, 1, "", "plot_df"], [4, 2, 1, "", "plot_ensemble_runtimes"], [4, 2, 1, "", "plot_grid"], [4, 2, 1, "id4", "total_runtime"], [4, 3, 1, "", "transformation_parameters"], [4, 3, 1, "", "upper_forecast"]], "autots.models.basics": [[4, 1, 1, "", "AverageValueNaive"], [4, 1, 1, "", "BallTreeMultivariateMotif"], [4, 1, 1, "", "ConstantNaive"], [4, 1, 1, "", "FFT"], [4, 1, 1, "", "KalmanStateSpace"], [4, 1, 1, "", "LastValueNaive"], [4, 1, 1, "", "MetricMotif"], [4, 1, 1, "", "Motif"], [4, 1, 1, "", "MotifSimulation"], [4, 1, 1, "", "NVAR"], [4, 1, 1, "", "SeasonalNaive"], [4, 1, 1, "", "SeasonalityMotif"], [4, 1, 1, "", "SectionalMotif"], [4, 3, 1, "", "ZeroesNaive"], [4, 4, 1, "", "looped_motif"], [4, 4, 1, "", "predict_reservoir"]], "autots.models.basics.AverageValueNaive": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.BallTreeMultivariateMotif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.ConstantNaive": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.FFT": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.KalmanStateSpace": [[4, 2, 1, "", "cost_function"], [4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"], [4, 2, 1, "", "tune_observational_noise"]], "autots.models.basics.LastValueNaive": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.MetricMotif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.Motif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.MotifSimulation": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.NVAR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.SeasonalNaive": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.SeasonalityMotif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.SectionalMotif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.cassandra": [[4, 1, 1, "", "BayesianMultiOutputRegression"], [4, 1, 1, "", "Cassandra"], [4, 4, 1, "", "clean_regressor"], [4, 4, 1, "", "cost_function_dwae"], [4, 4, 1, "", "cost_function_l1"], [4, 4, 1, "", "cost_function_l1_positive"], [4, 4, 1, "", "cost_function_l2"], [4, 4, 1, "", "cost_function_quantile"], [4, 4, 1, "", "create_t"], [4, 4, 1, "", "fit_linear_model"], [4, 4, 1, "", "lstsq_minimize"], [4, 4, 1, "", "lstsq_solve"]], "autots.models.cassandra.BayesianMultiOutputRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"], [4, 2, 1, "", "sample_posterior"]], 
"autots.models.cassandra.Cassandra..anomaly_detector": [[4, 3, 1, "", "anomalies"], [4, 3, 1, "", "scores"]], "autots.models.cassandra.Cassandra.": [[4, 3, 1, "", "holiday_count"], [4, 3, 1, "", "holidays"], [4, 3, 1, "", "params"], [4, 3, 1, "", "predict_x_array"], [4, 3, 1, "", "predicted_trend"], [4, 3, 1, "", "trend_train"], [4, 3, 1, "", "x_array"]], "autots.models.cassandra.Cassandra": [[4, 2, 1, "", "analyze_trend"], [4, 2, 1, "", "auto_fit"], [4, 2, 1, "", "base_scaler"], [4, 2, 1, "", "compare_actual_components"], [4, 2, 1, "", "create_forecast_index"], [4, 2, 1, "", "create_t"], [4, 2, 1, "", "cross_validate"], [4, 2, 1, "", "feature_importance"], [4, 2, 1, "id5", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "id6", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "next_fit"], [4, 2, 1, "id7", "plot_components"], [4, 2, 1, "id8", "plot_forecast"], [4, 2, 1, "", "plot_things"], [4, 2, 1, "id9", "plot_trend"], [4, 2, 1, "id10", "predict"], [4, 2, 1, "", "predict_new_product"], [4, 2, 1, "", "process_components"], [4, 2, 1, "id11", "return_components"], [4, 2, 1, "", "rolling_trend"], [4, 2, 1, "", "scale_data"], [4, 2, 1, "", "to_origin_space"], [4, 2, 1, "", "treatment_causal_impact"]], "autots.models.cassandra.Cassandra.holiday_detector": [[4, 2, 1, "", "dates_to_holidays"]], "autots.models.dnn": [[4, 1, 1, "", "KerasRNN"], [4, 1, 1, "", "Transformer"], [4, 4, 1, "", "transformer_build_model"], [4, 4, 1, "", "transformer_encoder"]], "autots.models.dnn.KerasRNN": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"]], "autots.models.dnn.Transformer": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"]], "autots.models.ensemble": [[4, 4, 1, "", "BestNEnsemble"], [4, 4, 1, "", "DistEnsemble"], [4, 4, 1, "", "EnsembleForecast"], [4, 4, 1, "", "EnsembleTemplateGenerator"], [4, 4, 1, "", "HDistEnsemble"], [4, 4, 1, "", "HorizontalEnsemble"], [4, 4, 1, "", "HorizontalTemplateGenerator"], [4, 4, 1, "", "MosaicEnsemble"], [4, 4, 1, "", "find_pattern"], [4, 4, 1, "", "generalize_horizontal"], [4, 4, 1, "", "generate_crosshair_score"], [4, 4, 1, "", "generate_crosshair_score_list"], [4, 4, 1, "", "generate_mosaic_template"], [4, 4, 1, "", "horizontal_classifier"], [4, 4, 1, "", "horizontal_xy"], [4, 4, 1, "", "is_horizontal"], [4, 4, 1, "", "is_mosaic"], [4, 4, 1, "", "mlens_helper"], [4, 4, 1, "", "mosaic_classifier"], [4, 4, 1, "", "mosaic_or_horizontal"], [4, 4, 1, "", "mosaic_to_horizontal"], [4, 4, 1, "", "mosaic_xy"], [4, 4, 1, "", "n_limited_horz"], [4, 4, 1, "", "parse_forecast_length"], [4, 4, 1, "", "parse_horizontal"], [4, 4, 1, "", "parse_mosaic"], [4, 4, 1, "", "process_mosaic_arrays"], [4, 4, 1, "", "summarize_series"]], "autots.models.gluonts": [[4, 1, 1, "", "GluonTS"]], "autots.models.gluonts.GluonTS": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.greykite": [[4, 1, 1, "", "Greykite"], [4, 4, 1, "", "seek_the_oracle"]], "autots.models.greykite.Greykite": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.matrix_var": [[4, 1, 1, "", "LATC"], [4, 1, 1, "", "MAR"], [4, 1, 1, "", "RRVAR"], [4, 1, 1, "", "TMF"], [4, 4, 1, "", "conj_grad_w"], [4, 4, 1, "", "conj_grad_x"], [4, 4, 1, "", "dmd"], [4, 4, 1, "", "dmd4cast"], [4, 4, 1, "", "ell_w"], [4, 4, 1, "", "ell_x"], [4, 4, 1, "", "generate_Psi"], [4, 4, 1, "", "latc_imputer"], [4, 4, 1, "", "latc_predictor"], [4, 4, 1, "", "mar"], [4, 4, 1, "", 
"mat2ten"], [4, 4, 1, "", "rrvar"], [4, 4, 1, "", "svt_tnn"], [4, 4, 1, "", "ten2mat"], [4, 4, 1, "", "tmf"], [4, 4, 1, "", "update_cg"], [4, 4, 1, "", "var"], [4, 4, 1, "", "var4cast"]], "autots.models.matrix_var.LATC": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.matrix_var.MAR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.matrix_var.RRVAR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.matrix_var.TMF": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.mlensemble": [[4, 1, 1, "", "MLEnsemble"], [4, 4, 1, "", "create_feature"]], "autots.models.mlensemble.MLEnsemble": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.model_list": [[4, 4, 1, "", "auto_model_list"], [4, 4, 1, "", "model_list_to_dict"]], "autots.models.neural_forecast": [[4, 1, 1, "", "NeuralForecast"]], "autots.models.neural_forecast.NeuralForecast": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.prophet": [[4, 1, 1, "", "FBProphet"], [4, 1, 1, "", "NeuralProphet"]], "autots.models.prophet.FBProphet": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.prophet.NeuralProphet": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.pytorch": [[4, 1, 1, "", "PytorchForecasting"]], "autots.models.pytorch.PytorchForecasting": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn": [[4, 1, 1, "", "ComponentAnalysis"], [4, 1, 1, "", "DatepartRegression"], [4, 1, 1, "", "MultivariateRegression"], [4, 1, 1, "", "PreprocessingRegression"], [4, 1, 1, "", "RollingRegression"], [4, 1, 1, "", "UnivariateRegression"], [4, 1, 1, "", "VectorizedMultiOutputGPR"], [4, 1, 1, "", "WindowRegression"], [4, 4, 1, "", "generate_classifier_params"], [4, 4, 1, "", "generate_regressor_params"], [4, 4, 1, "", "retrieve_classifier"], [4, 4, 1, "", "retrieve_regressor"], [4, 4, 1, "", "rolling_x_regressor"], [4, 4, 1, "", "rolling_x_regressor_regressor"]], "autots.models.sklearn.ComponentAnalysis": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.DatepartRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.MultivariateRegression": [[4, 2, 1, "", "base_scaler"], [4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"], [4, 2, 1, "", "scale_data"], [4, 2, 1, "", "to_origin_space"]], "autots.models.sklearn.PreprocessingRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.RollingRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.UnivariateRegression": [[4, 2, 1, "", 
"fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.VectorizedMultiOutputGPR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"], [4, 2, 1, "", "predict_proba"]], "autots.models.sklearn.WindowRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels": [[4, 1, 1, "", "ARDL"], [4, 1, 1, "", "ARIMA"], [4, 1, 1, "", "DynamicFactor"], [4, 1, 1, "", "DynamicFactorMQ"], [4, 1, 1, "", "ETS"], [4, 1, 1, "", "GLM"], [4, 1, 1, "", "GLS"], [4, 1, 1, "", "Theta"], [4, 1, 1, "", "UnobservedComponents"], [4, 1, 1, "", "VAR"], [4, 1, 1, "", "VARMAX"], [4, 1, 1, "", "VECM"], [4, 4, 1, "", "arima_seek_the_oracle"], [4, 4, 1, "", "glm_forecast_by_column"]], "autots.models.statsmodels.ARDL": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.ARIMA": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.DynamicFactor": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.DynamicFactorMQ": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.ETS": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.GLM": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.GLS": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.Theta": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.UnobservedComponents": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.VAR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.VARMAX": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.VECM": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.tfp": [[4, 1, 1, "", "TFPRegression"], [4, 1, 1, "", "TFPRegressor"], [4, 1, 1, "", "TensorflowSTS"]], "autots.models.tfp.TFPRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.tfp.TFPRegressor": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"]], "autots.models.tfp.TensorflowSTS": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.tide": [[4, 1, 1, "", "TiDE"], [4, 1, 1, "", "TimeCovariates"], [4, 1, 1, "", "TimeSeriesdata"], [4, 4, 1, "", "get_HOLIDAYS"], [4, 4, 1, "", "mae_loss"], [4, 4, 1, "", "mape"], [4, 4, 1, "", "nrmse"], [4, 4, 1, "", "rmse"], [4, 4, 1, "", "smape"], [4, 4, 1, "", "wape"]], "autots.models.tide.TiDE": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], 
"autots.models.tide.TimeCovariates": [[4, 2, 1, "", "get_covariates"]], "autots.models.tide.TimeSeriesdata": [[4, 2, 1, "", "test_val_gen"], [4, 2, 1, "", "tf_dataset"], [4, 2, 1, "", "train_gen"]], "autots.templates": [[5, 0, 0, "-", "general"]], "autots.templates.general": [[5, 5, 1, "", "general_template"]], "autots.tools": [[6, 0, 0, "-", "anomaly_utils"], [6, 0, 0, "-", "calendar"], [6, 0, 0, "-", "cointegration"], [6, 0, 0, "-", "cpu_count"], [6, 0, 0, "-", "fast_kalman"], [6, 0, 0, "-", "fft"], [6, 0, 0, "-", "hierarchial"], [6, 0, 0, "-", "holiday"], [6, 0, 0, "-", "impute"], [6, 0, 0, "-", "lunar"], [6, 0, 0, "-", "percentile"], [6, 0, 0, "-", "probabilistic"], [6, 0, 0, "-", "profile"], [6, 0, 0, "-", "regressor"], [6, 0, 0, "-", "seasonal"], [6, 0, 0, "-", "shaping"], [6, 0, 0, "-", "thresholding"], [6, 0, 0, "-", "transform"], [6, 0, 0, "-", "window_functions"]], "autots.tools.anomaly_utils": [[6, 4, 1, "", "anomaly_df_to_holidays"], [6, 4, 1, "", "anomaly_new_params"], [6, 4, 1, "", "create_dates_df"], [6, 4, 1, "", "dates_to_holidays"], [6, 4, 1, "", "detect_anomalies"], [6, 4, 1, "", "holiday_new_params"], [6, 4, 1, "", "limits_to_anomalies"], [6, 4, 1, "", "loop_sk_outliers"], [6, 4, 1, "", "nonparametric_multivariate"], [6, 4, 1, "", "sk_outliers"], [6, 4, 1, "", "values_to_anomalies"], [6, 4, 1, "", "zscore_survival_function"]], "autots.tools.calendar": [[6, 4, 1, "", "gregorian_to_chinese"], [6, 4, 1, "", "gregorian_to_christian_lunar"], [6, 4, 1, "", "gregorian_to_hebrew"], [6, 4, 1, "", "gregorian_to_islamic"], [6, 4, 1, "", "heb_is_leap"], [6, 4, 1, "", "lunar_from_lunar"], [6, 4, 1, "", "lunar_from_lunar_full"], [6, 4, 1, "", "to_jd"]], "autots.tools.cointegration": [[6, 4, 1, "", "btcd_decompose"], [6, 4, 1, "", "coint_johansen"], [6, 4, 1, "", "fourier_series"], [6, 4, 1, "", "lagmat"]], "autots.tools.cpu_count": [[6, 4, 1, "", "cpu_count"], [6, 4, 1, "", "set_n_jobs"]], "autots.tools.fast_kalman": [[6, 1, 1, "", "Gaussian"], [6, 1, 1, "", "KalmanFilter"], [6, 4, 1, "", "autoshape"], [6, 4, 1, "", "ddot"], [6, 4, 1, "", "ddot_t_right"], [6, 4, 1, "", "ddot_t_right_old"], [6, 4, 1, "", "dinv"], [6, 4, 1, "", "douter"], [6, 4, 1, "", "em_initial_state"], [6, 4, 1, "", "ensure_matrix"], [6, 4, 1, "", "holt_winters_damped_matrices"], [6, 4, 1, "", "new_kalman_params"], [6, 4, 1, "", "predict"], [6, 4, 1, "", "predict_observation"], [6, 4, 1, "", "priv_smooth"], [6, 4, 1, "", "priv_update_with_nan_check"], [6, 4, 1, "", "random_state_space"], [6, 4, 1, "", "smooth"], [6, 4, 1, "", "update"], [6, 4, 1, "", "update_with_nan_check"]], "autots.tools.fast_kalman.Gaussian": [[6, 2, 1, "", "empty"], [6, 2, 1, "", "unvectorize_state"], [6, 2, 1, "", "unvectorize_vars"]], "autots.tools.fast_kalman.KalmanFilter": [[6, 1, 1, "", "Result"], [6, 2, 1, "", "compute"], [6, 2, 1, "", "em"], [6, 2, 1, "", "em_observation_noise"], [6, 2, 1, "", "em_process_noise"], [6, 2, 1, "", "predict"], [6, 2, 1, "", "predict_next"], [6, 2, 1, "", "predict_observation"], [6, 2, 1, "", "smooth"], [6, 2, 1, "", "smooth_current"], [6, 2, 1, "", "update"]], "autots.tools.fft": [[6, 1, 1, "", "FFT"], [6, 4, 1, "", "fourier_extrapolation"]], "autots.tools.fft.FFT": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "predict"]], "autots.tools.hierarchial": [[6, 1, 1, "", "hierarchial"]], "autots.tools.hierarchial.hierarchial": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "reconcile"], [6, 2, 1, "", "transform"]], "autots.tools.holiday": [[6, 4, 1, "", "holiday_flag"], [6, 4, 1, "", "query_holidays"]], "autots.tools.impute": [[6, 
4, 1, "", "FillNA"], [6, 1, 1, "", "SeasonalityMotifImputer"], [6, 1, 1, "", "SimpleSeasonalityMotifImputer"], [6, 4, 1, "", "biased_ffill"], [6, 4, 1, "", "fake_date_fill"], [6, 4, 1, "", "fake_date_fill_old"], [6, 4, 1, "", "fill_forward"], [6, 4, 1, "", "fill_forward_alt"], [6, 4, 1, "", "fill_mean"], [6, 4, 1, "", "fill_mean_old"], [6, 4, 1, "", "fill_median"], [6, 4, 1, "", "fill_median_old"], [6, 4, 1, "", "fill_zero"], [6, 4, 1, "", "fillna_np"], [6, 4, 1, "", "rolling_mean"]], "autots.tools.impute.SeasonalityMotifImputer": [[6, 2, 1, "", "impute"]], "autots.tools.impute.SimpleSeasonalityMotifImputer": [[6, 2, 1, "", "impute"]], "autots.tools.lunar": [[6, 4, 1, "", "dcos"], [6, 4, 1, "", "dsin"], [6, 4, 1, "", "fixangle"], [6, 4, 1, "", "kepler"], [6, 4, 1, "", "moon_phase"], [6, 4, 1, "", "moon_phase_df"], [6, 4, 1, "", "phase_string"], [6, 4, 1, "", "todeg"], [6, 4, 1, "", "torad"]], "autots.tools.percentile": [[6, 4, 1, "", "nan_percentile"], [6, 4, 1, "", "nan_quantile"], [6, 4, 1, "", "trimmed_mean"]], "autots.tools.probabilistic": [[6, 4, 1, "", "Point_to_Probability"], [6, 4, 1, "", "Variable_Point_to_Probability"], [6, 4, 1, "", "historic_quantile"], [6, 4, 1, "", "inferred_normal"], [6, 4, 1, "", "percentileofscore_appliable"]], "autots.tools.profile": [[6, 4, 1, "", "data_profile"]], "autots.tools.regressor": [[6, 4, 1, "", "create_lagged_regressor"], [6, 4, 1, "", "create_regressor"]], "autots.tools.seasonal": [[6, 4, 1, "", "create_datepart_components"], [6, 4, 1, "", "create_seasonality_feature"], [6, 4, 1, "", "date_part"], [6, 4, 1, "", "fourier_df"], [6, 4, 1, "", "fourier_series"], [6, 4, 1, "", "random_datepart"], [6, 4, 1, "", "seasonal_independent_match"], [6, 4, 1, "", "seasonal_int"], [6, 4, 1, "", "seasonal_window_match"]], "autots.tools.shaping": [[6, 1, 1, "", "NumericTransformer"], [6, 4, 1, "", "clean_weights"], [6, 4, 1, "", "df_cleanup"], [6, 4, 1, "", "freq_to_timedelta"], [6, 4, 1, "", "infer_frequency"], [6, 4, 1, "", "long_to_wide"], [6, 4, 1, "", "simple_train_test_split"], [6, 4, 1, "", "split_digits_and_non_digits"], [6, 4, 1, "", "subset_series"], [6, 4, 1, "", "wide_to_3d"]], "autots.tools.shaping.NumericTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.thresholding": [[6, 1, 1, "", "NonparametricThreshold"], [6, 4, 1, "", "consecutive_groups"], [6, 4, 1, "", "nonparametric"]], "autots.tools.thresholding.NonparametricThreshold": [[6, 2, 1, "", "compare_to_epsilon"], [6, 2, 1, "", "find_epsilon"], [6, 2, 1, "", "prune_anoms"], [6, 2, 1, "", "score_anomalies"]], "autots.tools.transform": [[6, 1, 1, "", "AlignLastDiff"], [6, 1, 1, "", "AlignLastValue"], [6, 1, 1, "", "AnomalyRemoval"], [6, 1, 1, "", "BKBandpassFilter"], [6, 1, 1, "", "BTCD"], [6, 1, 1, "", "CenterLastValue"], [6, 1, 1, "", "CenterSplit"], [6, 1, 1, "", "ClipOutliers"], [6, 1, 1, "", "Cointegration"], [6, 1, 1, "", "CumSumTransformer"], [6, 3, 1, "", "DatepartRegression"], [6, 1, 1, "", "DatepartRegressionTransformer"], [6, 1, 1, "", "Detrend"], [6, 1, 1, "", "DiffSmoother"], [6, 1, 1, "", "DifferencedTransformer"], [6, 1, 1, "", "Discretize"], [6, 1, 1, "", "EWMAFilter"], [6, 1, 1, "", "EmptyTransformer"], [6, 1, 1, "", "FFTDecomposition"], [6, 1, 1, "", "FFTFilter"], [6, 1, 1, "", "FastICA"], [6, 1, 1, "", "GeneralTransformer"], [6, 1, 1, "", "HPFilter"], [6, 1, 1, "", "HistoricValues"], [6, 1, 1, "", "HolidayTransformer"], [6, 1, 1, "", "IntermittentOccurrence"], [6, 1, 1, "", 
"KalmanSmoothing"], [6, 1, 1, "", "LevelShiftMagic"], [6, 3, 1, "", "LevelShiftTransformer"], [6, 1, 1, "", "LocalLinearTrend"], [6, 1, 1, "", "MeanDifference"], [6, 1, 1, "", "PCA"], [6, 1, 1, "", "PctChangeTransformer"], [6, 1, 1, "", "PositiveShift"], [6, 4, 1, "", "RandomTransform"], [6, 1, 1, "", "RegressionFilter"], [6, 1, 1, "", "ReplaceConstant"], [6, 1, 1, "", "RollingMeanTransformer"], [6, 1, 1, "", "Round"], [6, 1, 1, "", "STLFilter"], [6, 1, 1, "", "ScipyFilter"], [6, 1, 1, "", "SeasonalDifference"], [6, 1, 1, "", "SinTrend"], [6, 1, 1, "", "Slice"], [6, 1, 1, "", "StatsmodelsFilter"], [6, 4, 1, "", "bkfilter_st"], [6, 4, 1, "", "clip_outliers"], [6, 4, 1, "", "exponential_decay"], [6, 4, 1, "", "get_transformer_params"], [6, 4, 1, "", "random_cleaners"], [6, 4, 1, "", "remove_outliers"], [6, 4, 1, "", "simple_context_slicer"], [6, 4, 1, "", "transformer_list_to_dict"]], "autots.tools.transform.AlignLastDiff": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.AlignLastValue": [[6, 2, 1, "", "find_centerpoint"], [6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.AnomalyRemoval": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_anomaly_classifier"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "score_to_anomaly"], [6, 2, 1, "", "transform"]], "autots.tools.transform.BKBandpassFilter": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.BTCD": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.CenterLastValue": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.CenterSplit": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.ClipOutliers": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Cointegration": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.CumSumTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.DatepartRegressionTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "impute"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Detrend": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.DiffSmoother": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "transform"]], "autots.tools.transform.DifferencedTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 
1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Discretize": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.EWMAFilter": [[6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "transform"]], "autots.tools.transform.EmptyTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.FFTDecomposition": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.FFTFilter": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.FastICA": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.GeneralTransformer": [[6, 2, 1, "", "fill_na"], [6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "retrieve_transformer"], [6, 2, 1, "", "transform"]], "autots.tools.transform.HPFilter": [[6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "transform"]], "autots.tools.transform.HistoricValues": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.HolidayTransformer": [[6, 2, 1, "", "dates_to_holidays"], [6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.IntermittentOccurrence": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.KalmanSmoothing": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.LevelShiftMagic": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.LocalLinearTrend": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.MeanDifference": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.PCA": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.PctChangeTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.PositiveShift": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.RegressionFilter": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", 
"get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.ReplaceConstant": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.RollingMeanTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Round": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.STLFilter": [[6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "transform"]], "autots.tools.transform.ScipyFilter": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.SeasonalDifference": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.SinTrend": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_sin"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Slice": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.StatsmodelsFilter": [[6, 2, 1, "", "bkfilter"], [6, 2, 1, "", "cffilter"], [6, 2, 1, "", "convolution_filter"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "transform"]], "autots.tools.window_functions": [[6, 4, 1, "", "chunk_reshape"], [6, 4, 1, "", "last_window"], [6, 4, 1, "", "np_2d_arange"], [6, 4, 1, "", "retrieve_closest_indices"], [6, 4, 1, "", "rolling_window_view"], [6, 4, 1, "", "sliding_window_view"], [6, 4, 1, "", "window_id_maker"], [6, 4, 1, "", "window_lin_reg"], [6, 4, 1, "", "window_lin_reg_mean"], [6, 4, 1, "", "window_lin_reg_mean_no_nan"], [6, 4, 1, "", "window_maker"], [6, 4, 1, "", "window_maker_2"], [6, 4, 1, "", "window_maker_3"], [6, 4, 1, "", "window_sum_mean"], [6, 4, 1, "", "window_sum_mean_nan_tail"], [6, 4, 1, "", "window_sum_nan_mean"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:attribute", "4": "py:function", "5": "py:data"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "attribute", "Python attribute"], "4": ["py", "function", "Python function"], "5": ["py", "data", "Python data"]}, "titleterms": {"autot": [0, 1, 2, 3, 4, 5, 6, 7, 8], "instal": [0, 7, 9], "get": 0, "start": 0, "modul": [0, 1, 2, 3, 4, 5, 6], "api": 0, "indic": 0, "tabl": [0, 7, 9], "packag": [1, 2, 3, 4, 5, 6, 9], "subpackag": 1, "content": [1, 2, 3, 4, 5, 6, 7, 9], "dataset": 2, "submodul": [2, 3, 4, 5, 6], "fred": 2, "evalu": 3, "anomaly_detector": 3, "auto_model": 3, "auto_t": 3, "benchmark": [3, 9], "event_forecast": 3, "metric": [3, 9], "valid": [3, 9], "model": [4, 9], "arch": 4, "base": 4, "basic": [4, 7], "cassandra": 4, "dnn": 4, "ensembl": [4, 9], "gluont": 4, "greykit": 4, "matrix_var": 4, "mlensembl": 4, "model_list": 4, "neural_forecast": 4, "prophet": 4, "pytorch": 4, "sklearn": 4, "statsmodel": 4, "tfp": 4, "tide": 4, "templat": [5, 9], "gener": 5, "tool": 6, 
"anomaly_util": 6, "calendar": 6, "cointegr": 6, "cpu_count": 6, "fast_kalman": 6, "usag": 6, "exampl": [6, 9], "fft": 6, "hierarchi": [6, 9], "holidai": 6, "imput": 6, "lunar": 6, "percentil": 6, "probabilist": 6, "profil": 6, "regressor": [6, 9], "season": 6, "shape": 6, "threshold": 6, "transform": [6, 9], "window_funct": 6, "intro": 7, "us": [7, 9], "tip": 7, "speed": [7, 9], "larg": 7, "data": [7, 9], "how": 7, "contribut": 7, "tutori": 9, "extend": 9, "A": 9, "simpl": 9, "import": 9, "you": 9, "can": 9, "tailor": 9, "process": 9, "few": 9, "wai": 9, "what": 9, "worri": 9, "about": 9, "cross": 9, "anoth": 9, "list": 9, "deploy": 9, "export": 9, "run": 9, "just": 9, "One": 9, "group": 9, "forecast": 9, "depend": 9, "version": 9, "requir": 9, "option": 9, "safest": 9, "bet": 9, "intel": 9, "conda": 9, "channel": 9, "sometim": 9, "faster": 9, "also": 9, "more": 9, "prone": 9, "bug": 9, "caveat": 9, "advic": 9, "mysteri": 9, "crash": 9, "seri": 9, "id": 9, "realli": 9, "need": 9, "uniqu": 9, "column": 9, "name": 9, "all": 9, "wide": 9, "short": 9, "train": 9, "histori": 9, "ad": 9, "other": 9, "inform": 9, "simul": 9, "event": 9, "risk": 9, "anomali": 9, "detect": 9, "hack": 9, "pass": 9, "paramet": 9, "aren": 9, "t": 9, "otherwis": 9, "avail": 9, "categor": 9, "custom": 9, "unusu": 9, "frequenc": 9, "independ": 9, "note": 9, "regress": 9}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"AutoTS": [[0, "autots"], [7, "autots"]], "Installation": [[0, "installation"], [7, "id1"]], "Getting Started": [[0, "getting-started"]], "Modules API": [[0, "modules-api"]], "Indices and tables": [[0, "indices-and-tables"]], "autots package": [[1, "autots-package"]], "Subpackages": [[1, "subpackages"]], "Module contents": [[1, "module-autots"], [2, "module-autots.datasets"], [3, "module-autots.evaluator"], [4, "module-autots.models"], [5, "module-autots.templates"], [6, "module-autots.tools"]], "autots.datasets package": [[2, "autots-datasets-package"]], "Submodules": [[2, "submodules"], [3, "submodules"], [4, "submodules"], [5, "submodules"], [6, "submodules"]], "autots.datasets.fred module": [[2, "module-autots.datasets.fred"]], "autots.evaluator package": [[3, "autots-evaluator-package"]], "autots.evaluator.anomaly_detector module": [[3, "module-autots.evaluator.anomaly_detector"]], "autots.evaluator.auto_model module": [[3, "module-autots.evaluator.auto_model"]], "autots.evaluator.auto_ts module": [[3, "module-autots.evaluator.auto_ts"]], "autots.evaluator.benchmark module": [[3, "module-autots.evaluator.benchmark"]], "autots.evaluator.event_forecasting module": [[3, "module-autots.evaluator.event_forecasting"]], "autots.evaluator.metrics module": [[3, "module-autots.evaluator.metrics"]], "autots.evaluator.validation module": [[3, "module-autots.evaluator.validation"]], "autots.models package": [[4, "autots-models-package"]], "autots.models.arch module": [[4, "module-autots.models.arch"]], "autots.models.base module": [[4, "module-autots.models.base"]], "autots.models.basics module": [[4, "module-autots.models.basics"]], "autots.models.cassandra module": [[4, "module-autots.models.cassandra"]], "autots.models.dnn module": [[4, "module-autots.models.dnn"]], "autots.models.ensemble module": [[4, "module-autots.models.ensemble"]], 
"autots.models.gluonts module": [[4, "module-autots.models.gluonts"]], "autots.models.greykite module": [[4, "module-autots.models.greykite"]], "autots.models.matrix_var module": [[4, "module-autots.models.matrix_var"]], "autots.models.mlensemble module": [[4, "module-autots.models.mlensemble"]], "autots.models.model_list module": [[4, "module-autots.models.model_list"]], "autots.models.neural_forecast module": [[4, "module-autots.models.neural_forecast"]], "autots.models.prophet module": [[4, "module-autots.models.prophet"]], "autots.models.pytorch module": [[4, "module-autots.models.pytorch"]], "autots.models.sklearn module": [[4, "module-autots.models.sklearn"]], "autots.models.statsmodels module": [[4, "module-autots.models.statsmodels"]], "autots.models.tfp module": [[4, "module-autots.models.tfp"]], "autots.models.tide module": [[4, "module-autots.models.tide"]], "autots.templates package": [[5, "autots-templates-package"]], "autots.templates.general module": [[5, "module-autots.templates.general"]], "autots.tools package": [[6, "autots-tools-package"]], "autots.tools.anomaly_utils module": [[6, "module-autots.tools.anomaly_utils"]], "autots.tools.calendar module": [[6, "module-autots.tools.calendar"]], "autots.tools.cointegration module": [[6, "module-autots.tools.cointegration"]], "autots.tools.cpu_count module": [[6, "module-autots.tools.cpu_count"]], "autots.tools.fast_kalman module": [[6, "module-autots.tools.fast_kalman"]], "Usage example": [[6, "usage-example"]], "autots.tools.fft module": [[6, "module-autots.tools.fft"]], "autots.tools.hierarchial module": [[6, "module-autots.tools.hierarchial"]], "autots.tools.holiday module": [[6, "module-autots.tools.holiday"]], "autots.tools.impute module": [[6, "module-autots.tools.impute"]], "autots.tools.lunar module": [[6, "module-autots.tools.lunar"]], "autots.tools.percentile module": [[6, "module-autots.tools.percentile"]], "autots.tools.probabilistic module": [[6, "module-autots.tools.probabilistic"]], "autots.tools.profile module": [[6, "module-autots.tools.profile"]], "autots.tools.regressor module": [[6, "module-autots.tools.regressor"]], "autots.tools.seasonal module": [[6, "module-autots.tools.seasonal"]], "autots.tools.shaping module": [[6, "module-autots.tools.shaping"]], "autots.tools.thresholding module": [[6, "module-autots.tools.thresholding"]], "autots.tools.transform module": [[6, "module-autots.tools.transform"]], "autots.tools.window_functions module": [[6, "module-autots.tools.window_functions"]], "Intro": [[7, "intro"]], "Table of Contents": [[7, "table-of-contents"], [9, "table-of-contents"]], "Basic Use": [[7, "id2"]], "Tips for Speed and Large Data:": [[7, "id3"]], "How to Contribute:": [[7, "how-to-contribute"]], "autots": [[8, "autots"]], "Tutorial": [[9, "tutorial"]], "Extended Tutorial": [[9, "extended-tutorial"]], "A simple example": [[9, "id1"]], "Import of data": [[9, "import-of-data"]], "You can tailor the process in a few ways\u2026": [[9, "you-can-tailor-the-process-in-a-few-ways"]], "What to Worry About": [[9, "what-to-worry-about"]], "Validation and Cross Validation": [[9, "id2"]], "Another Example:": [[9, "id3"]], "Model Lists": [[9, "id4"]], "Deployment and Template Import/Export": [[9, "deployment-and-template-import-export"]], "Running Just One Model": [[9, "id5"]], "Metrics": [[9, "id6"]], "Hierarchial and Grouped Forecasts": [[9, "hierarchial-and-grouped-forecasts"]], "Ensembles": [[9, "id7"]], "Installation and Dependency Versioning": [[9, "installation-and-dependency-versioning"]], 
"Requirements:": [[9, "requirements"]], "Optional Packages": [[9, "optional-packages"]], "Safest bet for installation:": [[9, "safest-bet-for-installation"]], "Intel conda channel installation (sometime faster, also, more prone to bugs)": [[9, "intel-conda-channel-installation-sometime-faster-also-more-prone-to-bugs"]], "Speed Benchmark": [[9, "speed-benchmark"]], "Caveats and Advice": [[9, "caveats-and-advice"]], "Mysterious crashes": [[9, "mysterious-crashes"]], "Series IDs really need to be unique (or column names need to be all unique in wide data)": [[9, "series-ids-really-need-to-be-unique-or-column-names-need-to-be-all-unique-in-wide-data"]], "Short Training History": [[9, "short-training-history"]], "Adding regressors and other information": [[9, "adding-regressors-and-other-information"]], "Simulation Forecasting": [[9, "id8"]], "Event Risk Forecasting and Anomaly Detection": [[9, "event-risk-forecasting-and-anomaly-detection"]], "A Hack for Passing in Parameters (that aren\u2019t otherwise available)": [[9, "a-hack-for-passing-in-parameters-that-aren-t-otherwise-available"]], "Categorical Data": [[9, "categorical-data"]], "Custom and Unusual Frequencies": [[9, "custom-and-unusual-frequencies"]], "Using the Transformers independently": [[9, "using-the-transformers-independently"]], "Note on ~Regression Models": [[9, "note-on-regression-models"]], "Models": [[9, "id9"]]}, "indexentries": {"anomalydetector (class in autots)": [[1, "autots.AnomalyDetector"]], "autots (class in autots)": [[1, "autots.AutoTS"]], "cassandra (class in autots)": [[1, "autots.Cassandra"]], "eventriskforecast (class in autots)": [[1, "autots.EventRiskForecast"]], "generaltransformer (class in autots)": [[1, "autots.GeneralTransformer"]], "holidaydetector (class in autots)": [[1, "autots.HolidayDetector"]], "randomtransform() (in module autots)": [[1, "autots.RandomTransform"]], "transformts (in module autots)": [[1, "autots.TransformTS"]], "analyze_trend() (autots.cassandra method)": [[1, "autots.Cassandra.analyze_trend"]], "anomalies (autots.cassandra..anomaly_detector attribute)": [[1, "autots.Cassandra..anomaly_detector.anomalies"]], "auto_fit() (autots.cassandra method)": [[1, "autots.Cassandra.auto_fit"]], "autots": [[1, "module-autots"]], "back_forecast() (autots.autots method)": [[1, "autots.AutoTS.back_forecast"]], "base_scaler() (autots.cassandra method)": [[1, "autots.Cassandra.base_scaler"]], "best_model (autots.autots attribute)": [[1, "autots.AutoTS.best_model"]], "best_model_ensemble (autots.autots attribute)": [[1, "autots.AutoTS.best_model_ensemble"]], "best_model_name (autots.autots attribute)": [[1, "autots.AutoTS.best_model_name"]], "best_model_params (autots.autots attribute)": [[1, "autots.AutoTS.best_model_params"]], "best_model_per_series_mape() (autots.autots method)": [[1, "autots.AutoTS.best_model_per_series_mape"]], "best_model_per_series_score() (autots.autots method)": [[1, "autots.AutoTS.best_model_per_series_score"]], "best_model_transformation_params (autots.autots attribute)": [[1, "autots.AutoTS.best_model_transformation_params"]], "compare_actual_components() (autots.cassandra method)": [[1, "autots.Cassandra.compare_actual_components"]], "create_forecast_index() (autots.cassandra method)": [[1, "autots.Cassandra.create_forecast_index"]], "create_lagged_regressor() (in module autots)": [[1, "autots.create_lagged_regressor"]], "create_regressor() (in module autots)": [[1, "autots.create_regressor"]], "create_t() (autots.cassandra method)": [[1, 
"autots.Cassandra.create_t"]], "cross_validate() (autots.cassandra method)": [[1, "autots.Cassandra.cross_validate"]], "dates_to_holidays() (autots.cassandra.holiday_detector method)": [[1, "autots.Cassandra.holiday_detector.dates_to_holidays"]], "dates_to_holidays() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.dates_to_holidays"]], "detect() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.detect"]], "detect() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.detect"]], "df_wide_numeric (autots.autots attribute)": [[1, "autots.AutoTS.df_wide_numeric"]], "diagnose_params() (autots.autots method)": [[1, "autots.AutoTS.diagnose_params"]], "expand_horizontal() (autots.autots method)": [[1, "autots.AutoTS.expand_horizontal"]], "export_best_model() (autots.autots method)": [[1, "autots.AutoTS.export_best_model"]], "export_template() (autots.autots method)": [[1, "autots.AutoTS.export_template"]], "failure_rate() (autots.autots method)": [[1, "autots.AutoTS.failure_rate"]], "feature_importance() (autots.cassandra method)": [[1, "autots.Cassandra.feature_importance"]], "fill_na() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.fill_na"]], "fit() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.fit"]], "fit() (autots.autots method)": [[1, "autots.AutoTS.fit"]], "fit() (autots.cassandra method)": [[1, "autots.Cassandra.fit"], [1, "id0"]], "fit() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.fit"], [1, "id9"]], "fit() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.fit"]], "fit() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.fit"]], "fit_anomaly_classifier() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.fit_anomaly_classifier"]], "fit_data() (autots.autots method)": [[1, "autots.AutoTS.fit_data"]], "fit_data() (autots.cassandra method)": [[1, "autots.Cassandra.fit_data"]], "fit_transform() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.fit_transform"]], "generate_historic_risk_array() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.generate_historic_risk_array"]], "generate_historic_risk_array() (autots.eventriskforecast static method)": [[1, "id10"]], "generate_result_windows() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.generate_result_windows"], [1, "id11"]], "generate_risk_array() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.generate_risk_array"]], "generate_risk_array() (autots.eventriskforecast static method)": [[1, "id12"]], "get_metric_corr() (autots.autots method)": [[1, "autots.AutoTS.get_metric_corr"]], "get_new_params() (autots.anomalydetector static method)": [[1, "autots.AnomalyDetector.get_new_params"]], "get_new_params() (autots.autots static method)": [[1, "autots.AutoTS.get_new_params"]], "get_new_params() (autots.cassandra method)": [[1, "autots.Cassandra.get_new_params"], [1, "id1"]], "get_new_params() (autots.generaltransformer static method)": [[1, "autots.GeneralTransformer.get_new_params"]], "get_new_params() (autots.holidaydetector static method)": [[1, "autots.HolidayDetector.get_new_params"]], "get_params() (autots.cassandra method)": [[1, "autots.Cassandra.get_params"]], "holiday_count (autots.cassandra. attribute)": [[1, "autots.Cassandra..holiday_count"]], "holidays (autots.cassandra. 
attribute)": [[1, "autots.Cassandra..holidays"]], "horizontal_per_generation() (autots.autots method)": [[1, "autots.AutoTS.horizontal_per_generation"]], "horizontal_to_df() (autots.autots method)": [[1, "autots.AutoTS.horizontal_to_df"]], "import_best_model() (autots.autots method)": [[1, "autots.AutoTS.import_best_model"]], "import_results() (autots.autots method)": [[1, "autots.AutoTS.import_results"]], "import_template() (autots.autots method)": [[1, "autots.AutoTS.import_template"]], "infer_frequency() (in module autots)": [[1, "autots.infer_frequency"]], "inverse_transform() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.inverse_transform"]], "list_failed_model_types() (autots.autots method)": [[1, "autots.AutoTS.list_failed_model_types"]], "load_artificial() (in module autots)": [[1, "autots.load_artificial"]], "load_daily() (in module autots)": [[1, "autots.load_daily"]], "load_hourly() (in module autots)": [[1, "autots.load_hourly"]], "load_linear() (in module autots)": [[1, "autots.load_linear"]], "load_live_daily() (in module autots)": [[1, "autots.load_live_daily"]], "load_monthly() (in module autots)": [[1, "autots.load_monthly"]], "load_sine() (in module autots)": [[1, "autots.load_sine"]], "load_template() (autots.autots method)": [[1, "autots.AutoTS.load_template"]], "load_weekdays() (in module autots)": [[1, "autots.load_weekdays"]], "load_weekly() (in module autots)": [[1, "autots.load_weekly"]], "load_yearly() (in module autots)": [[1, "autots.load_yearly"]], "long_to_wide() (in module autots)": [[1, "autots.long_to_wide"]], "model_forecast() (in module autots)": [[1, "autots.model_forecast"]], "model_results (autots.autots.initial_results attribute)": [[1, "autots.AutoTS.initial_results.model_results"]], "module": [[1, "module-autots"], [2, "module-autots.datasets"], [2, "module-autots.datasets.fred"], [3, "module-autots.evaluator"], [3, "module-autots.evaluator.anomaly_detector"], [3, "module-autots.evaluator.auto_model"], [3, "module-autots.evaluator.auto_ts"], [3, "module-autots.evaluator.benchmark"], [3, "module-autots.evaluator.event_forecasting"], [3, "module-autots.evaluator.metrics"], [3, "module-autots.evaluator.validation"], [4, "module-autots.models"], [4, "module-autots.models.arch"], [4, "module-autots.models.base"], [4, "module-autots.models.basics"], [4, "module-autots.models.cassandra"], [4, "module-autots.models.dnn"], [4, "module-autots.models.ensemble"], [4, "module-autots.models.gluonts"], [4, "module-autots.models.greykite"], [4, "module-autots.models.matrix_var"], [4, "module-autots.models.mlensemble"], [4, "module-autots.models.model_list"], [4, "module-autots.models.neural_forecast"], [4, "module-autots.models.prophet"], [4, "module-autots.models.pytorch"], [4, "module-autots.models.sklearn"], [4, "module-autots.models.statsmodels"], [4, "module-autots.models.tfp"], [4, "module-autots.models.tide"], [5, "module-autots.templates"], [5, "module-autots.templates.general"], [6, "module-autots.tools"], [6, "module-autots.tools.anomaly_utils"], [6, "module-autots.tools.calendar"], [6, "module-autots.tools.cointegration"], [6, "module-autots.tools.cpu_count"], [6, "module-autots.tools.fast_kalman"], [6, "module-autots.tools.fft"], [6, "module-autots.tools.hierarchial"], [6, "module-autots.tools.holiday"], [6, "module-autots.tools.impute"], [6, "module-autots.tools.lunar"], [6, "module-autots.tools.percentile"], [6, "module-autots.tools.probabilistic"], [6, "module-autots.tools.profile"], [6, "module-autots.tools.regressor"], [6, 
"module-autots.tools.seasonal"], [6, "module-autots.tools.shaping"], [6, "module-autots.tools.thresholding"], [6, "module-autots.tools.transform"], [6, "module-autots.tools.window_functions"]], "mosaic_to_df() (autots.autots method)": [[1, "autots.AutoTS.mosaic_to_df"]], "next_fit() (autots.cassandra method)": [[1, "autots.Cassandra.next_fit"]], "params (autots.cassandra. attribute)": [[1, "autots.Cassandra..params"]], "parse_best_model() (autots.autots method)": [[1, "autots.AutoTS.parse_best_model"]], "plot() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.plot"]], "plot() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.plot"], [1, "id13"]], "plot() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.plot"]], "plot_anomaly() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.plot_anomaly"]], "plot_back_forecast() (autots.autots method)": [[1, "autots.AutoTS.plot_back_forecast"]], "plot_backforecast() (autots.autots method)": [[1, "autots.AutoTS.plot_backforecast"]], "plot_components() (autots.cassandra method)": [[1, "autots.Cassandra.plot_components"], [1, "id2"]], "plot_eval() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.plot_eval"]], "plot_forecast() (autots.cassandra method)": [[1, "autots.Cassandra.plot_forecast"], [1, "id3"]], "plot_generation_loss() (autots.autots method)": [[1, "autots.AutoTS.plot_generation_loss"]], "plot_horizontal() (autots.autots method)": [[1, "autots.AutoTS.plot_horizontal"]], "plot_horizontal_model_count() (autots.autots method)": [[1, "autots.AutoTS.plot_horizontal_model_count"]], "plot_horizontal_per_generation() (autots.autots method)": [[1, "autots.AutoTS.plot_horizontal_per_generation"]], "plot_horizontal_transformers() (autots.autots method)": [[1, "autots.AutoTS.plot_horizontal_transformers"]], "plot_metric_corr() (autots.autots method)": [[1, "autots.AutoTS.plot_metric_corr"]], "plot_per_series_error() (autots.autots method)": [[1, "autots.AutoTS.plot_per_series_error"]], "plot_per_series_mape() (autots.autots method)": [[1, "autots.AutoTS.plot_per_series_mape"]], "plot_per_series_smape() (autots.autots method)": [[1, "autots.AutoTS.plot_per_series_smape"]], "plot_things() (autots.cassandra method)": [[1, "autots.Cassandra.plot_things"]], "plot_transformer_failure_rate() (autots.autots method)": [[1, "autots.AutoTS.plot_transformer_failure_rate"]], "plot_trend() (autots.cassandra method)": [[1, "autots.Cassandra.plot_trend"], [1, "id4"]], "plot_validations() (autots.autots method)": [[1, "autots.AutoTS.plot_validations"]], "predict() (autots.autots method)": [[1, "autots.AutoTS.predict"]], "predict() (autots.cassandra method)": [[1, "autots.Cassandra.predict"], [1, "id5"]], "predict() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.predict"], [1, "id14"]], "predict_historic() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.predict_historic"], [1, "id15"]], "predict_new_product() (autots.cassandra method)": [[1, "autots.Cassandra.predict_new_product"]], "predict_x_array (autots.cassandra. attribute)": [[1, "autots.Cassandra..predict_x_array"]], "predicted_trend (autots.cassandra. 
attribute)": [[1, "autots.Cassandra..predicted_trend"]], "process_components() (autots.cassandra method)": [[1, "autots.Cassandra.process_components"]], "regression_check (autots.autots attribute)": [[1, "autots.AutoTS.regression_check"]], "results() (autots.autots method)": [[1, "autots.AutoTS.results"]], "retrieve_transformer() (autots.generaltransformer class method)": [[1, "autots.GeneralTransformer.retrieve_transformer"]], "retrieve_validation_forecasts() (autots.autots method)": [[1, "autots.AutoTS.retrieve_validation_forecasts"]], "return_components() (autots.cassandra method)": [[1, "autots.Cassandra.return_components"], [1, "id6"]], "rolling_trend() (autots.cassandra method)": [[1, "autots.Cassandra.rolling_trend"]], "save_template() (autots.autots method)": [[1, "autots.AutoTS.save_template"]], "scale_data() (autots.cassandra method)": [[1, "autots.Cassandra.scale_data"]], "score_per_series (autots.autots attribute)": [[1, "autots.AutoTS.score_per_series"]], "score_to_anomaly() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.score_to_anomaly"]], "scores (autots.cassandra..anomaly_detector attribute)": [[1, "autots.Cassandra..anomaly_detector.scores"]], "set_limit() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.set_limit"]], "set_limit() (autots.eventriskforecast static method)": [[1, "id16"]], "to_origin_space() (autots.cassandra method)": [[1, "autots.Cassandra.to_origin_space"]], "transform() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.transform"]], "treatment_causal_impact() (autots.cassandra method)": [[1, "autots.Cassandra.treatment_causal_impact"]], "trend_train (autots.cassandra. attribute)": [[1, "autots.Cassandra..trend_train"]], "validation_agg() (autots.autots method)": [[1, "autots.AutoTS.validation_agg"]], "x_array (autots.cassandra. 
attribute)": [[1, "autots.Cassandra..x_array"]], "autots.datasets": [[2, "module-autots.datasets"]], "autots.datasets.fred": [[2, "module-autots.datasets.fred"]], "get_fred_data() (in module autots.datasets.fred)": [[2, "autots.datasets.fred.get_fred_data"]], "load_artificial() (in module autots.datasets)": [[2, "autots.datasets.load_artificial"]], "load_daily() (in module autots.datasets)": [[2, "autots.datasets.load_daily"]], "load_hourly() (in module autots.datasets)": [[2, "autots.datasets.load_hourly"]], "load_linear() (in module autots.datasets)": [[2, "autots.datasets.load_linear"]], "load_live_daily() (in module autots.datasets)": [[2, "autots.datasets.load_live_daily"]], "load_monthly() (in module autots.datasets)": [[2, "autots.datasets.load_monthly"]], "load_sine() (in module autots.datasets)": [[2, "autots.datasets.load_sine"]], "load_weekdays() (in module autots.datasets)": [[2, "autots.datasets.load_weekdays"]], "load_weekly() (in module autots.datasets)": [[2, "autots.datasets.load_weekly"]], "load_yearly() (in module autots.datasets)": [[2, "autots.datasets.load_yearly"]], "load_zeroes() (in module autots.datasets)": [[2, "autots.datasets.load_zeroes"]], "anomalydetector (class in autots.evaluator.anomaly_detector)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector"]], "autots (class in autots.evaluator.auto_ts)": [[3, "autots.evaluator.auto_ts.AutoTS"]], "benchmark (class in autots.evaluator.benchmark)": [[3, "autots.evaluator.benchmark.Benchmark"]], "eventriskforecast (class in autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast"]], "holidaydetector (class in autots.evaluator.anomaly_detector)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector"]], "modelmonster() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.ModelMonster"]], "modelprediction (class in autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.ModelPrediction"]], "newgenetictemplate() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.NewGeneticTemplate"]], "randomtemplate() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.RandomTemplate"]], "templateevalobject (class in autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.TemplateEvalObject"]], "templatewizard() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.TemplateWizard"]], "uniquetemplates() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.UniqueTemplates"]], "array_last_val() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.array_last_val"]], "autots.evaluator": [[3, "module-autots.evaluator"]], "autots.evaluator.anomaly_detector": [[3, "module-autots.evaluator.anomaly_detector"]], "autots.evaluator.auto_model": [[3, "module-autots.evaluator.auto_model"]], "autots.evaluator.auto_ts": [[3, "module-autots.evaluator.auto_ts"]], "autots.evaluator.benchmark": [[3, "module-autots.evaluator.benchmark"]], "autots.evaluator.event_forecasting": [[3, "module-autots.evaluator.event_forecasting"]], "autots.evaluator.metrics": [[3, "module-autots.evaluator.metrics"]], "autots.evaluator.validation": [[3, "module-autots.evaluator.validation"]], "back_forecast() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.back_forecast"]], "back_forecast() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.back_forecast"]], "best_model (autots.evaluator.auto_ts.autots attribute)": [[3, 
"autots.evaluator.auto_ts.AutoTS.best_model"]], "best_model_ensemble (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_ensemble"]], "best_model_name (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_name"]], "best_model_params (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_params"]], "best_model_per_series_mape() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_per_series_mape"]], "best_model_per_series_score() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_per_series_score"]], "best_model_transformation_params (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_transformation_params"]], "chi_squared_hist_distribution_loss() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.chi_squared_hist_distribution_loss"]], "concat() (autots.evaluator.auto_model.templateevalobject method)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.concat"]], "containment() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.containment"]], "contour() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.contour"]], "create_model_id() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.create_model_id"]], "dates_to_holidays() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.dates_to_holidays"]], "default_scaler() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.default_scaler"]], "detect() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.detect"]], "detect() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.detect"]], "df_wide_numeric (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.df_wide_numeric"]], "diagnose_params() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.diagnose_params"]], "dict_recombination() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.dict_recombination"]], "dwae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.dwae"]], "error_correlations() (in module autots.evaluator.auto_ts)": [[3, "autots.evaluator.auto_ts.error_correlations"]], "expand_horizontal() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.expand_horizontal"]], "export_best_model() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.export_best_model"]], "export_template() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.export_template"]], "extract_result_windows() (in module autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.extract_result_windows"]], "extract_seasonal_val_periods() (in module autots.evaluator.validation)": [[3, "autots.evaluator.validation.extract_seasonal_val_periods"]], "extract_window_index() (in module autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.extract_window_index"]], "failure_rate() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.failure_rate"]], "fake_regressor() (in module autots.evaluator.auto_ts)": [[3, 
"autots.evaluator.auto_ts.fake_regressor"]], "fit() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.fit"]], "fit() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.fit"]], "fit() (autots.evaluator.auto_model.modelprediction method)": [[3, "autots.evaluator.auto_model.ModelPrediction.fit"]], "fit() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.fit"]], "fit() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.fit"], [3, "id0"]], "fit_anomaly_classifier() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.fit_anomaly_classifier"]], "fit_data() (autots.evaluator.auto_model.modelprediction method)": [[3, "autots.evaluator.auto_model.ModelPrediction.fit_data"]], "fit_data() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.fit_data"]], "full_mae_errors (autots.evaluator.auto_model.templateevalobject attribute)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.full_mae_errors"]], "full_mae_ids (autots.evaluator.auto_model.templateevalobject attribute)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.full_mae_ids"]], "full_metric_evaluation() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.full_metric_evaluation"]], "generate_historic_risk_array() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.generate_historic_risk_array"]], "generate_historic_risk_array() (autots.evaluator.event_forecasting.eventriskforecast static method)": [[3, "id7"]], "generate_result_windows() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.generate_result_windows"], [3, "id8"]], "generate_risk_array() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.generate_risk_array"]], "generate_risk_array() (autots.evaluator.event_forecasting.eventriskforecast static method)": [[3, "id9"]], "generate_score() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.generate_score"]], "generate_score_per_series() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.generate_score_per_series"]], "generate_validation_indices() (in module autots.evaluator.validation)": [[3, "autots.evaluator.validation.generate_validation_indices"]], "get_metric_corr() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.get_metric_corr"]], "get_new_params() (autots.evaluator.anomaly_detector.anomalydetector static method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.get_new_params"]], "get_new_params() (autots.evaluator.anomaly_detector.holidaydetector static method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.get_new_params"]], "get_new_params() (autots.evaluator.auto_ts.autots static method)": [[3, "autots.evaluator.auto_ts.AutoTS.get_new_params"]], "horizontal_per_generation() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.horizontal_per_generation"]], "horizontal_template_to_model_list() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.horizontal_template_to_model_list"]], "horizontal_to_df() 
(autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.horizontal_to_df"]], "import_best_model() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.import_best_model"]], "import_results() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.import_results"]], "import_template() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.import_template"]], "kde() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.kde"]], "kde_kl_distance() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.kde_kl_distance"]], "kl_divergence() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.kl_divergence"]], "linearity() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.linearity"]], "list_failed_model_types() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.list_failed_model_types"]], "load() (autots.evaluator.auto_model.templateevalobject method)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.load"]], "load_template() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.load_template"]], "mae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mae"]], "mda() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mda"]], "mean_absolute_differential_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mean_absolute_differential_error"]], "mean_absolute_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mean_absolute_error"]], "medae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.medae"]], "median_absolute_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.median_absolute_error"]], "mlvb() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mlvb"]], "model_forecast() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.model_forecast"]], "model_results (autots.evaluator.auto_ts.autots.initial_results attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.initial_results.model_results"]], "mosaic_to_df() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.mosaic_to_df"]], "mqae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mqae"]], "msle() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.msle"]], "numpy_ffill() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.numpy_ffill"]], "oda() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.oda"]], "parse_best_model() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.parse_best_model"]], "pinball_loss() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.pinball_loss"]], "plot() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.plot"]], "plot() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.plot"]], "plot() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.plot"], [3, "id10"]], "plot_anomaly() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.plot_anomaly"]], "plot_back_forecast() (autots.evaluator.auto_ts.autots 
method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_back_forecast"]], "plot_backforecast() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_backforecast"]], "plot_eval() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.plot_eval"]], "plot_generation_loss() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_generation_loss"]], "plot_horizontal() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_horizontal"]], "plot_horizontal_model_count() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_horizontal_model_count"]], "plot_horizontal_per_generation() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_horizontal_per_generation"]], "plot_horizontal_transformers() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_horizontal_transformers"]], "plot_metric_corr() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_metric_corr"]], "plot_per_series_error() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_per_series_error"]], "plot_per_series_mape() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_per_series_mape"]], "plot_per_series_smape() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_per_series_smape"]], "plot_transformer_failure_rate() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_transformer_failure_rate"]], "plot_validations() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_validations"]], "precomp_wasserstein() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.precomp_wasserstein"]], "predict() (autots.evaluator.auto_model.modelprediction method)": [[3, "autots.evaluator.auto_model.ModelPrediction.predict"]], "predict() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.predict"]], "predict() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.predict"], [3, "id11"]], "predict_historic() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.predict_historic"], [3, "id12"]], "qae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.qae"]], "random_model() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.random_model"]], "regression_check (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.regression_check"]], "remove_leading_zeros() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.remove_leading_zeros"]], "results() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.results"]], "retrieve_validation_forecasts() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.retrieve_validation_forecasts"]], "rmse() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.rmse"]], "root_mean_square_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.root_mean_square_error"]], "rps() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.rps"]], "run() (autots.evaluator.benchmark.benchmark method)": [[3, 
"autots.evaluator.benchmark.Benchmark.run"]], "save() (autots.evaluator.auto_model.templateevalobject method)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.save"]], "save_template() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.save_template"]], "scaled_pinball_loss() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.scaled_pinball_loss"]], "score_per_series (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.score_per_series"]], "score_to_anomaly() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.score_to_anomaly"]], "set_limit() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.set_limit"]], "set_limit() (autots.evaluator.event_forecasting.eventriskforecast static method)": [[3, "id13"]], "set_limit_forecast() (in module autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.set_limit_forecast"]], "set_limit_forecast_historic() (in module autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.set_limit_forecast_historic"]], "smape() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.smape"]], "smoothness() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.smoothness"]], "spl() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.spl"]], "symmetric_mean_absolute_percentage_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.symmetric_mean_absolute_percentage_error"]], "threshold_loss() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.threshold_loss"]], "trans_dict_recomb() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.trans_dict_recomb"]], "unpack_ensemble_models() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.unpack_ensemble_models"]], "unsorted_wasserstein() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.unsorted_wasserstein"]], "validate_num_validations() (in module autots.evaluator.validation)": [[3, "autots.evaluator.validation.validate_num_validations"]], "validation_agg() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.validation_agg"]], "validation_aggregation() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.validation_aggregation"]], "wasserstein() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.wasserstein"]], "arch (class in autots.models.arch)": [[4, "autots.models.arch.ARCH"]], "ardl (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.ARDL"]], "arima (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.ARIMA"]], "averagevaluenaive (class in autots.models.basics)": [[4, "autots.models.basics.AverageValueNaive"]], "balltreemultivariatemotif (class in autots.models.basics)": [[4, "autots.models.basics.BallTreeMultivariateMotif"]], "bayesianmultioutputregression (class in autots.models.cassandra)": [[4, "autots.models.cassandra.BayesianMultiOutputRegression"]], "bestnensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.BestNEnsemble"]], "cassandra (class in autots.models.cassandra)": [[4, "autots.models.cassandra.Cassandra"]], "componentanalysis (class in autots.models.sklearn)": [[4, "autots.models.sklearn.ComponentAnalysis"]], "constantnaive (class in 
autots.models.basics)": [[4, "autots.models.basics.ConstantNaive"]], "datepartregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.DatepartRegression"]], "distensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.DistEnsemble"]], "dynamicfactor (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.DynamicFactor"]], "dynamicfactormq (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.DynamicFactorMQ"]], "ets (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.ETS"]], "ensembleforecast() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.EnsembleForecast"]], "ensembletemplategenerator() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.EnsembleTemplateGenerator"]], "fbprophet (class in autots.models.prophet)": [[4, "autots.models.prophet.FBProphet"]], "fft (class in autots.models.basics)": [[4, "autots.models.basics.FFT"]], "glm (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.GLM"]], "gls (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.GLS"]], "gluonts (class in autots.models.gluonts)": [[4, "autots.models.gluonts.GluonTS"]], "greykite (class in autots.models.greykite)": [[4, "autots.models.greykite.Greykite"]], "hdistensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.HDistEnsemble"]], "horizontalensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.HorizontalEnsemble"]], "horizontaltemplategenerator() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.HorizontalTemplateGenerator"]], "kalmanstatespace (class in autots.models.basics)": [[4, "autots.models.basics.KalmanStateSpace"]], "kerasrnn (class in autots.models.dnn)": [[4, "autots.models.dnn.KerasRNN"]], "latc (class in autots.models.matrix_var)": [[4, "autots.models.matrix_var.LATC"]], "lastvaluenaive (class in autots.models.basics)": [[4, "autots.models.basics.LastValueNaive"]], "mar (class in autots.models.matrix_var)": [[4, "autots.models.matrix_var.MAR"]], "mlensemble (class in autots.models.mlensemble)": [[4, "autots.models.mlensemble.MLEnsemble"]], "metricmotif (class in autots.models.basics)": [[4, "autots.models.basics.MetricMotif"]], "modelobject (class in autots.models.base)": [[4, "autots.models.base.ModelObject"]], "mosaicensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.MosaicEnsemble"]], "motif (class in autots.models.basics)": [[4, "autots.models.basics.Motif"]], "motifsimulation (class in autots.models.basics)": [[4, "autots.models.basics.MotifSimulation"]], "multivariateregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.MultivariateRegression"]], "nvar (class in autots.models.basics)": [[4, "autots.models.basics.NVAR"]], "neuralforecast (class in autots.models.neural_forecast)": [[4, "autots.models.neural_forecast.NeuralForecast"]], "neuralprophet (class in autots.models.prophet)": [[4, "autots.models.prophet.NeuralProphet"]], "predictionobject (class in autots.models.base)": [[4, "autots.models.base.PredictionObject"]], "preprocessingregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.PreprocessingRegression"]], "pytorchforecasting (class in autots.models.pytorch)": [[4, "autots.models.pytorch.PytorchForecasting"]], "rrvar (class in autots.models.matrix_var)": [[4, "autots.models.matrix_var.RRVAR"]], "rollingregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.RollingRegression"]], 
"seasonalnaive (class in autots.models.basics)": [[4, "autots.models.basics.SeasonalNaive"]], "seasonalitymotif (class in autots.models.basics)": [[4, "autots.models.basics.SeasonalityMotif"]], "sectionalmotif (class in autots.models.basics)": [[4, "autots.models.basics.SectionalMotif"]], "tfpregression (class in autots.models.tfp)": [[4, "autots.models.tfp.TFPRegression"]], "tfpregressor (class in autots.models.tfp)": [[4, "autots.models.tfp.TFPRegressor"]], "tmf (class in autots.models.matrix_var)": [[4, "autots.models.matrix_var.TMF"]], "tensorflowsts (class in autots.models.tfp)": [[4, "autots.models.tfp.TensorflowSTS"]], "theta (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.Theta"]], "tide (class in autots.models.tide)": [[4, "autots.models.tide.TiDE"]], "timecovariates (class in autots.models.tide)": [[4, "autots.models.tide.TimeCovariates"]], "timeseriesdata (class in autots.models.tide)": [[4, "autots.models.tide.TimeSeriesdata"]], "transformer (class in autots.models.dnn)": [[4, "autots.models.dnn.Transformer"]], "univariateregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.UnivariateRegression"]], "unobservedcomponents (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.UnobservedComponents"]], "var (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.VAR"]], "varmax (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.VARMAX"]], "vecm (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.VECM"]], "vectorizedmultioutputgpr (class in autots.models.sklearn)": [[4, "autots.models.sklearn.VectorizedMultiOutputGPR"]], "windowregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.WindowRegression"]], "zeroesnaive (in module autots.models.basics)": [[4, "autots.models.basics.ZeroesNaive"]], "analyze_trend() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.analyze_trend"]], "anomalies (autots.models.cassandra.cassandra..anomaly_detector attribute)": [[4, "autots.models.cassandra.Cassandra..anomaly_detector.anomalies"]], "apply_constraints() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.apply_constraints"], [4, "id0"]], "apply_constraints() (in module autots.models.base)": [[4, "autots.models.base.apply_constraints"]], "arima_seek_the_oracle() (in module autots.models.statsmodels)": [[4, "autots.models.statsmodels.arima_seek_the_oracle"]], "auto_fit() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.auto_fit"]], "auto_model_list() (in module autots.models.model_list)": [[4, "autots.models.model_list.auto_model_list"]], "autots.models": [[4, "module-autots.models"]], "autots.models.arch": [[4, "module-autots.models.arch"]], "autots.models.base": [[4, "module-autots.models.base"]], "autots.models.basics": [[4, "module-autots.models.basics"]], "autots.models.cassandra": [[4, "module-autots.models.cassandra"]], "autots.models.dnn": [[4, "module-autots.models.dnn"]], "autots.models.ensemble": [[4, "module-autots.models.ensemble"]], "autots.models.gluonts": [[4, "module-autots.models.gluonts"]], "autots.models.greykite": [[4, "module-autots.models.greykite"]], "autots.models.matrix_var": [[4, "module-autots.models.matrix_var"]], "autots.models.mlensemble": [[4, "module-autots.models.mlensemble"]], "autots.models.model_list": [[4, "module-autots.models.model_list"]], "autots.models.neural_forecast": [[4, "module-autots.models.neural_forecast"]], 
"autots.models.prophet": [[4, "module-autots.models.prophet"]], "autots.models.pytorch": [[4, "module-autots.models.pytorch"]], "autots.models.sklearn": [[4, "module-autots.models.sklearn"]], "autots.models.statsmodels": [[4, "module-autots.models.statsmodels"]], "autots.models.tfp": [[4, "module-autots.models.tfp"]], "autots.models.tide": [[4, "module-autots.models.tide"]], "base_scaler() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.base_scaler"]], "base_scaler() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.base_scaler"]], "basic_profile() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.basic_profile"]], "calculate_peak_density() (in module autots.models.base)": [[4, "autots.models.base.calculate_peak_density"]], "clean_regressor() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.clean_regressor"]], "compare_actual_components() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.compare_actual_components"]], "conj_grad_w() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.conj_grad_w"]], "conj_grad_x() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.conj_grad_x"]], "cost_function() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.cost_function"]], "cost_function_dwae() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_dwae"]], "cost_function_l1() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_l1"]], "cost_function_l1_positive() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_l1_positive"]], "cost_function_l2() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_l2"]], "cost_function_quantile() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_quantile"]], "create_feature() (in module autots.models.mlensemble)": [[4, "autots.models.mlensemble.create_feature"]], "create_forecast_index() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.create_forecast_index"]], "create_forecast_index() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.create_forecast_index"]], "create_forecast_index() (in module autots.models.base)": [[4, "autots.models.base.create_forecast_index"]], "create_seaborn_palette_from_cmap() (in module autots.models.base)": [[4, "autots.models.base.create_seaborn_palette_from_cmap"]], "create_t() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.create_t"]], "create_t() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.create_t"]], "cross_validate() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.cross_validate"]], "dates_to_holidays() (autots.models.cassandra.cassandra.holiday_detector method)": [[4, "autots.models.cassandra.Cassandra.holiday_detector.dates_to_holidays"]], "dmd() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.dmd"]], "dmd4cast() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.dmd4cast"]], "ell_w() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.ell_w"]], "ell_x() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.ell_x"]], "evaluate() (autots.models.base.predictionobject method)": [[4, 
"autots.models.base.PredictionObject.evaluate"], [4, "id1"]], "extract_ensemble_runtimes() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.extract_ensemble_runtimes"]], "extract_single_series_from_horz() (in module autots.models.base)": [[4, "autots.models.base.extract_single_series_from_horz"]], "extract_single_transformer() (in module autots.models.base)": [[4, "autots.models.base.extract_single_transformer"]], "feature_importance() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.feature_importance"]], "find_pattern() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.find_pattern"]], "fit() (autots.models.arch.arch method)": [[4, "autots.models.arch.ARCH.fit"]], "fit() (autots.models.basics.averagevaluenaive method)": [[4, "autots.models.basics.AverageValueNaive.fit"]], "fit() (autots.models.basics.balltreemultivariatemotif method)": [[4, "autots.models.basics.BallTreeMultivariateMotif.fit"]], "fit() (autots.models.basics.constantnaive method)": [[4, "autots.models.basics.ConstantNaive.fit"]], "fit() (autots.models.basics.fft method)": [[4, "autots.models.basics.FFT.fit"]], "fit() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.fit"]], "fit() (autots.models.basics.lastvaluenaive method)": [[4, "autots.models.basics.LastValueNaive.fit"]], "fit() (autots.models.basics.metricmotif method)": [[4, "autots.models.basics.MetricMotif.fit"]], "fit() (autots.models.basics.motif method)": [[4, "autots.models.basics.Motif.fit"]], "fit() (autots.models.basics.motifsimulation method)": [[4, "autots.models.basics.MotifSimulation.fit"]], "fit() (autots.models.basics.nvar method)": [[4, "autots.models.basics.NVAR.fit"]], "fit() (autots.models.basics.seasonalnaive method)": [[4, "autots.models.basics.SeasonalNaive.fit"]], "fit() (autots.models.basics.seasonalitymotif method)": [[4, "autots.models.basics.SeasonalityMotif.fit"]], "fit() (autots.models.basics.sectionalmotif method)": [[4, "autots.models.basics.SectionalMotif.fit"]], "fit() (autots.models.cassandra.bayesianmultioutputregression method)": [[4, "autots.models.cassandra.BayesianMultiOutputRegression.fit"]], "fit() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.fit"], [4, "id5"]], "fit() (autots.models.dnn.kerasrnn method)": [[4, "autots.models.dnn.KerasRNN.fit"]], "fit() (autots.models.dnn.transformer method)": [[4, "autots.models.dnn.Transformer.fit"]], "fit() (autots.models.gluonts.gluonts method)": [[4, "autots.models.gluonts.GluonTS.fit"]], "fit() (autots.models.greykite.greykite method)": [[4, "autots.models.greykite.Greykite.fit"]], "fit() (autots.models.matrix_var.latc method)": [[4, "autots.models.matrix_var.LATC.fit"]], "fit() (autots.models.matrix_var.mar method)": [[4, "autots.models.matrix_var.MAR.fit"]], "fit() (autots.models.matrix_var.rrvar method)": [[4, "autots.models.matrix_var.RRVAR.fit"]], "fit() (autots.models.matrix_var.tmf method)": [[4, "autots.models.matrix_var.TMF.fit"]], "fit() (autots.models.mlensemble.mlensemble method)": [[4, "autots.models.mlensemble.MLEnsemble.fit"]], "fit() (autots.models.neural_forecast.neuralforecast method)": [[4, "autots.models.neural_forecast.NeuralForecast.fit"]], "fit() (autots.models.prophet.fbprophet method)": [[4, "autots.models.prophet.FBProphet.fit"]], "fit() (autots.models.prophet.neuralprophet method)": [[4, "autots.models.prophet.NeuralProphet.fit"]], "fit() (autots.models.pytorch.pytorchforecasting 
method)": [[4, "autots.models.pytorch.PytorchForecasting.fit"]], "fit() (autots.models.sklearn.componentanalysis method)": [[4, "autots.models.sklearn.ComponentAnalysis.fit"]], "fit() (autots.models.sklearn.datepartregression method)": [[4, "autots.models.sklearn.DatepartRegression.fit"]], "fit() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.fit"]], "fit() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.fit"]], "fit() (autots.models.sklearn.rollingregression method)": [[4, "autots.models.sklearn.RollingRegression.fit"]], "fit() (autots.models.sklearn.univariateregression method)": [[4, "autots.models.sklearn.UnivariateRegression.fit"]], "fit() (autots.models.sklearn.vectorizedmultioutputgpr method)": [[4, "autots.models.sklearn.VectorizedMultiOutputGPR.fit"]], "fit() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.fit"]], "fit() (autots.models.statsmodels.ardl method)": [[4, "autots.models.statsmodels.ARDL.fit"]], "fit() (autots.models.statsmodels.arima method)": [[4, "autots.models.statsmodels.ARIMA.fit"]], "fit() (autots.models.statsmodels.dynamicfactor method)": [[4, "autots.models.statsmodels.DynamicFactor.fit"]], "fit() (autots.models.statsmodels.dynamicfactormq method)": [[4, "autots.models.statsmodels.DynamicFactorMQ.fit"]], "fit() (autots.models.statsmodels.ets method)": [[4, "autots.models.statsmodels.ETS.fit"]], "fit() (autots.models.statsmodels.glm method)": [[4, "autots.models.statsmodels.GLM.fit"]], "fit() (autots.models.statsmodels.gls method)": [[4, "autots.models.statsmodels.GLS.fit"]], "fit() (autots.models.statsmodels.theta method)": [[4, "autots.models.statsmodels.Theta.fit"]], "fit() (autots.models.statsmodels.unobservedcomponents method)": [[4, "autots.models.statsmodels.UnobservedComponents.fit"]], "fit() (autots.models.statsmodels.var method)": [[4, "autots.models.statsmodels.VAR.fit"]], "fit() (autots.models.statsmodels.varmax method)": [[4, "autots.models.statsmodels.VARMAX.fit"]], "fit() (autots.models.statsmodels.vecm method)": [[4, "autots.models.statsmodels.VECM.fit"]], "fit() (autots.models.tfp.tfpregression method)": [[4, "autots.models.tfp.TFPRegression.fit"]], "fit() (autots.models.tfp.tfpregressor method)": [[4, "autots.models.tfp.TFPRegressor.fit"]], "fit() (autots.models.tfp.tensorflowsts method)": [[4, "autots.models.tfp.TensorflowSTS.fit"]], "fit() (autots.models.tide.tide method)": [[4, "autots.models.tide.TiDE.fit"]], "fit_data() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.fit_data"]], "fit_data() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.fit_data"]], "fit_data() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.fit_data"]], "fit_data() (autots.models.gluonts.gluonts method)": [[4, "autots.models.gluonts.GluonTS.fit_data"]], "fit_data() (autots.models.sklearn.datepartregression method)": [[4, "autots.models.sklearn.DatepartRegression.fit_data"]], "fit_data() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.fit_data"]], "fit_data() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.fit_data"]], "fit_data() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.fit_data"]], "fit_linear_model() (in module 
autots.models.cassandra)": [[4, "autots.models.cassandra.fit_linear_model"]], "forecast (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.forecast"]], "generalize_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.generalize_horizontal"]], "generate_psi() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.generate_Psi"]], "generate_classifier_params() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.generate_classifier_params"]], "generate_crosshair_score() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.generate_crosshair_score"]], "generate_crosshair_score_list() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.generate_crosshair_score_list"]], "generate_mosaic_template() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.generate_mosaic_template"]], "generate_regressor_params() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.generate_regressor_params"]], "get_holidays() (in module autots.models.tide)": [[4, "autots.models.tide.get_HOLIDAYS"]], "get_covariates() (autots.models.tide.timecovariates method)": [[4, "autots.models.tide.TimeCovariates.get_covariates"]], "get_new_params() (autots.models.arch.arch method)": [[4, "autots.models.arch.ARCH.get_new_params"]], "get_new_params() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.get_new_params"]], "get_new_params() (autots.models.basics.averagevaluenaive method)": [[4, "autots.models.basics.AverageValueNaive.get_new_params"]], "get_new_params() (autots.models.basics.balltreemultivariatemotif method)": [[4, "autots.models.basics.BallTreeMultivariateMotif.get_new_params"]], "get_new_params() (autots.models.basics.constantnaive method)": [[4, "autots.models.basics.ConstantNaive.get_new_params"]], "get_new_params() (autots.models.basics.fft method)": [[4, "autots.models.basics.FFT.get_new_params"]], "get_new_params() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.get_new_params"]], "get_new_params() (autots.models.basics.lastvaluenaive method)": [[4, "autots.models.basics.LastValueNaive.get_new_params"]], "get_new_params() (autots.models.basics.metricmotif method)": [[4, "autots.models.basics.MetricMotif.get_new_params"]], "get_new_params() (autots.models.basics.motif method)": [[4, "autots.models.basics.Motif.get_new_params"]], "get_new_params() (autots.models.basics.motifsimulation method)": [[4, "autots.models.basics.MotifSimulation.get_new_params"]], "get_new_params() (autots.models.basics.nvar method)": [[4, "autots.models.basics.NVAR.get_new_params"]], "get_new_params() (autots.models.basics.seasonalnaive method)": [[4, "autots.models.basics.SeasonalNaive.get_new_params"]], "get_new_params() (autots.models.basics.seasonalitymotif method)": [[4, "autots.models.basics.SeasonalityMotif.get_new_params"]], "get_new_params() (autots.models.basics.sectionalmotif method)": [[4, "autots.models.basics.SectionalMotif.get_new_params"]], "get_new_params() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.get_new_params"], [4, "id6"]], "get_new_params() (autots.models.gluonts.gluonts method)": [[4, "autots.models.gluonts.GluonTS.get_new_params"]], "get_new_params() (autots.models.greykite.greykite method)": [[4, "autots.models.greykite.Greykite.get_new_params"]], "get_new_params() (autots.models.matrix_var.latc method)": [[4, 
"autots.models.matrix_var.LATC.get_new_params"]], "get_new_params() (autots.models.matrix_var.mar method)": [[4, "autots.models.matrix_var.MAR.get_new_params"]], "get_new_params() (autots.models.matrix_var.rrvar method)": [[4, "autots.models.matrix_var.RRVAR.get_new_params"]], "get_new_params() (autots.models.matrix_var.tmf method)": [[4, "autots.models.matrix_var.TMF.get_new_params"]], "get_new_params() (autots.models.mlensemble.mlensemble method)": [[4, "autots.models.mlensemble.MLEnsemble.get_new_params"]], "get_new_params() (autots.models.neural_forecast.neuralforecast method)": [[4, "autots.models.neural_forecast.NeuralForecast.get_new_params"]], "get_new_params() (autots.models.prophet.fbprophet method)": [[4, "autots.models.prophet.FBProphet.get_new_params"]], "get_new_params() (autots.models.prophet.neuralprophet method)": [[4, "autots.models.prophet.NeuralProphet.get_new_params"]], "get_new_params() (autots.models.pytorch.pytorchforecasting method)": [[4, "autots.models.pytorch.PytorchForecasting.get_new_params"]], "get_new_params() (autots.models.sklearn.componentanalysis method)": [[4, "autots.models.sklearn.ComponentAnalysis.get_new_params"]], "get_new_params() (autots.models.sklearn.datepartregression method)": [[4, "autots.models.sklearn.DatepartRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.rollingregression method)": [[4, "autots.models.sklearn.RollingRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.univariateregression method)": [[4, "autots.models.sklearn.UnivariateRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.get_new_params"]], "get_new_params() (autots.models.statsmodels.ardl method)": [[4, "autots.models.statsmodels.ARDL.get_new_params"]], "get_new_params() (autots.models.statsmodels.arima method)": [[4, "autots.models.statsmodels.ARIMA.get_new_params"]], "get_new_params() (autots.models.statsmodels.dynamicfactor method)": [[4, "autots.models.statsmodels.DynamicFactor.get_new_params"]], "get_new_params() (autots.models.statsmodels.dynamicfactormq method)": [[4, "autots.models.statsmodels.DynamicFactorMQ.get_new_params"]], "get_new_params() (autots.models.statsmodels.ets method)": [[4, "autots.models.statsmodels.ETS.get_new_params"]], "get_new_params() (autots.models.statsmodels.glm method)": [[4, "autots.models.statsmodels.GLM.get_new_params"]], "get_new_params() (autots.models.statsmodels.gls method)": [[4, "autots.models.statsmodels.GLS.get_new_params"]], "get_new_params() (autots.models.statsmodels.theta method)": [[4, "autots.models.statsmodels.Theta.get_new_params"]], "get_new_params() (autots.models.statsmodels.unobservedcomponents method)": [[4, "autots.models.statsmodels.UnobservedComponents.get_new_params"]], "get_new_params() (autots.models.statsmodels.var method)": [[4, "autots.models.statsmodels.VAR.get_new_params"]], "get_new_params() (autots.models.statsmodels.varmax method)": [[4, "autots.models.statsmodels.VARMAX.get_new_params"]], "get_new_params() (autots.models.statsmodels.vecm method)": [[4, "autots.models.statsmodels.VECM.get_new_params"]], "get_new_params() (autots.models.tfp.tfpregression method)": [[4, 
"autots.models.tfp.TFPRegression.get_new_params"]], "get_new_params() (autots.models.tfp.tensorflowsts method)": [[4, "autots.models.tfp.TensorflowSTS.get_new_params"]], "get_new_params() (autots.models.tide.tide method)": [[4, "autots.models.tide.TiDE.get_new_params"]], "get_params() (autots.models.arch.arch method)": [[4, "autots.models.arch.ARCH.get_params"]], "get_params() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.get_params"]], "get_params() (autots.models.basics.averagevaluenaive method)": [[4, "autots.models.basics.AverageValueNaive.get_params"]], "get_params() (autots.models.basics.balltreemultivariatemotif method)": [[4, "autots.models.basics.BallTreeMultivariateMotif.get_params"]], "get_params() (autots.models.basics.constantnaive method)": [[4, "autots.models.basics.ConstantNaive.get_params"]], "get_params() (autots.models.basics.fft method)": [[4, "autots.models.basics.FFT.get_params"]], "get_params() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.get_params"]], "get_params() (autots.models.basics.lastvaluenaive method)": [[4, "autots.models.basics.LastValueNaive.get_params"]], "get_params() (autots.models.basics.metricmotif method)": [[4, "autots.models.basics.MetricMotif.get_params"]], "get_params() (autots.models.basics.motif method)": [[4, "autots.models.basics.Motif.get_params"]], "get_params() (autots.models.basics.motifsimulation method)": [[4, "autots.models.basics.MotifSimulation.get_params"]], "get_params() (autots.models.basics.nvar method)": [[4, "autots.models.basics.NVAR.get_params"]], "get_params() (autots.models.basics.seasonalnaive method)": [[4, "autots.models.basics.SeasonalNaive.get_params"]], "get_params() (autots.models.basics.seasonalitymotif method)": [[4, "autots.models.basics.SeasonalityMotif.get_params"]], "get_params() (autots.models.basics.sectionalmotif method)": [[4, "autots.models.basics.SectionalMotif.get_params"]], "get_params() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.get_params"]], "get_params() (autots.models.gluonts.gluonts method)": [[4, "autots.models.gluonts.GluonTS.get_params"]], "get_params() (autots.models.greykite.greykite method)": [[4, "autots.models.greykite.Greykite.get_params"]], "get_params() (autots.models.matrix_var.latc method)": [[4, "autots.models.matrix_var.LATC.get_params"]], "get_params() (autots.models.matrix_var.mar method)": [[4, "autots.models.matrix_var.MAR.get_params"]], "get_params() (autots.models.matrix_var.rrvar method)": [[4, "autots.models.matrix_var.RRVAR.get_params"]], "get_params() (autots.models.matrix_var.tmf method)": [[4, "autots.models.matrix_var.TMF.get_params"]], "get_params() (autots.models.mlensemble.mlensemble method)": [[4, "autots.models.mlensemble.MLEnsemble.get_params"]], "get_params() (autots.models.neural_forecast.neuralforecast method)": [[4, "autots.models.neural_forecast.NeuralForecast.get_params"]], "get_params() (autots.models.prophet.fbprophet method)": [[4, "autots.models.prophet.FBProphet.get_params"]], "get_params() (autots.models.prophet.neuralprophet method)": [[4, "autots.models.prophet.NeuralProphet.get_params"]], "get_params() (autots.models.pytorch.pytorchforecasting method)": [[4, "autots.models.pytorch.PytorchForecasting.get_params"]], "get_params() (autots.models.sklearn.componentanalysis method)": [[4, "autots.models.sklearn.ComponentAnalysis.get_params"]], "get_params() (autots.models.sklearn.datepartregression method)": [[4, 
"autots.models.sklearn.DatepartRegression.get_params"]], "get_params() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.get_params"]], "get_params() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.get_params"]], "get_params() (autots.models.sklearn.rollingregression method)": [[4, "autots.models.sklearn.RollingRegression.get_params"]], "get_params() (autots.models.sklearn.univariateregression method)": [[4, "autots.models.sklearn.UnivariateRegression.get_params"]], "get_params() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.get_params"]], "get_params() (autots.models.statsmodels.ardl method)": [[4, "autots.models.statsmodels.ARDL.get_params"]], "get_params() (autots.models.statsmodels.arima method)": [[4, "autots.models.statsmodels.ARIMA.get_params"]], "get_params() (autots.models.statsmodels.dynamicfactor method)": [[4, "autots.models.statsmodels.DynamicFactor.get_params"]], "get_params() (autots.models.statsmodels.dynamicfactormq method)": [[4, "autots.models.statsmodels.DynamicFactorMQ.get_params"]], "get_params() (autots.models.statsmodels.ets method)": [[4, "autots.models.statsmodels.ETS.get_params"]], "get_params() (autots.models.statsmodels.glm method)": [[4, "autots.models.statsmodels.GLM.get_params"]], "get_params() (autots.models.statsmodels.gls method)": [[4, "autots.models.statsmodels.GLS.get_params"]], "get_params() (autots.models.statsmodels.theta method)": [[4, "autots.models.statsmodels.Theta.get_params"]], "get_params() (autots.models.statsmodels.unobservedcomponents method)": [[4, "autots.models.statsmodels.UnobservedComponents.get_params"]], "get_params() (autots.models.statsmodels.var method)": [[4, "autots.models.statsmodels.VAR.get_params"]], "get_params() (autots.models.statsmodels.varmax method)": [[4, "autots.models.statsmodels.VARMAX.get_params"]], "get_params() (autots.models.statsmodels.vecm method)": [[4, "autots.models.statsmodels.VECM.get_params"]], "get_params() (autots.models.tfp.tfpregression method)": [[4, "autots.models.tfp.TFPRegression.get_params"]], "get_params() (autots.models.tfp.tensorflowsts method)": [[4, "autots.models.tfp.TensorflowSTS.get_params"]], "get_params() (autots.models.tide.tide method)": [[4, "autots.models.tide.TiDE.get_params"]], "glm_forecast_by_column() (in module autots.models.statsmodels)": [[4, "autots.models.statsmodels.glm_forecast_by_column"]], "holiday_count (autots.models.cassandra.cassandra. attribute)": [[4, "autots.models.cassandra.Cassandra..holiday_count"]], "holidays (autots.models.cassandra.cassandra. 
attribute)": [[4, "autots.models.cassandra.Cassandra..holidays"]], "horizontal_classifier() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.horizontal_classifier"]], "horizontal_xy() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.horizontal_xy"]], "is_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.is_horizontal"]], "is_mosaic() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.is_mosaic"]], "latc_imputer() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.latc_imputer"]], "latc_predictor() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.latc_predictor"]], "long_form_results() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.long_form_results"], [4, "id2"]], "looped_motif() (in module autots.models.basics)": [[4, "autots.models.basics.looped_motif"]], "lower_forecast (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.lower_forecast"]], "lstsq_minimize() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.lstsq_minimize"]], "lstsq_solve() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.lstsq_solve"]], "mae_loss() (in module autots.models.tide)": [[4, "autots.models.tide.mae_loss"]], "mape() (in module autots.models.tide)": [[4, "autots.models.tide.mape"]], "mar() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.mar"]], "mat2ten() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.mat2ten"]], "mlens_helper() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mlens_helper"]], "model_list_to_dict() (in module autots.models.model_list)": [[4, "autots.models.model_list.model_list_to_dict"]], "model_name (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.model_name"]], "model_parameters (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.model_parameters"]], "mosaic_classifier() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mosaic_classifier"]], "mosaic_or_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mosaic_or_horizontal"]], "mosaic_to_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mosaic_to_horizontal"]], "mosaic_xy() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mosaic_xy"]], "n_limited_horz() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.n_limited_horz"]], "next_fit() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.next_fit"]], "nrmse() (in module autots.models.tide)": [[4, "autots.models.tide.nrmse"]], "params (autots.models.cassandra.cassandra. 
attribute)": [[4, "autots.models.cassandra.Cassandra..params"]], "parse_forecast_length() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.parse_forecast_length"]], "parse_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.parse_horizontal"]], "parse_mosaic() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.parse_mosaic"]], "plot() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.plot"], [4, "id3"]], "plot_components() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.plot_components"], [4, "id7"]], "plot_df() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.plot_df"]], "plot_distributions() (in module autots.models.base)": [[4, "autots.models.base.plot_distributions"]], "plot_ensemble_runtimes() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.plot_ensemble_runtimes"]], "plot_forecast() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.plot_forecast"], [4, "id8"]], "plot_grid() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.plot_grid"]], "plot_things() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.plot_things"]], "plot_trend() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.plot_trend"], [4, "id9"]], "predict() (autots.models.arch.arch method)": [[4, "autots.models.arch.ARCH.predict"]], "predict() (autots.models.basics.averagevaluenaive method)": [[4, "autots.models.basics.AverageValueNaive.predict"]], "predict() (autots.models.basics.balltreemultivariatemotif method)": [[4, "autots.models.basics.BallTreeMultivariateMotif.predict"]], "predict() (autots.models.basics.constantnaive method)": [[4, "autots.models.basics.ConstantNaive.predict"]], "predict() (autots.models.basics.fft method)": [[4, "autots.models.basics.FFT.predict"]], "predict() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.predict"]], "predict() (autots.models.basics.lastvaluenaive method)": [[4, "autots.models.basics.LastValueNaive.predict"]], "predict() (autots.models.basics.metricmotif method)": [[4, "autots.models.basics.MetricMotif.predict"]], "predict() (autots.models.basics.motif method)": [[4, "autots.models.basics.Motif.predict"]], "predict() (autots.models.basics.motifsimulation method)": [[4, "autots.models.basics.MotifSimulation.predict"]], "predict() (autots.models.basics.nvar method)": [[4, "autots.models.basics.NVAR.predict"]], "predict() (autots.models.basics.seasonalnaive method)": [[4, "autots.models.basics.SeasonalNaive.predict"]], "predict() (autots.models.basics.seasonalitymotif method)": [[4, "autots.models.basics.SeasonalityMotif.predict"]], "predict() (autots.models.basics.sectionalmotif method)": [[4, "autots.models.basics.SectionalMotif.predict"]], "predict() (autots.models.cassandra.bayesianmultioutputregression method)": [[4, "autots.models.cassandra.BayesianMultiOutputRegression.predict"]], "predict() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.predict"], [4, "id10"]], "predict() (autots.models.dnn.kerasrnn method)": [[4, "autots.models.dnn.KerasRNN.predict"]], "predict() (autots.models.dnn.transformer method)": [[4, "autots.models.dnn.Transformer.predict"]], "predict() (autots.models.gluonts.gluonts method)": [[4, 
"autots.models.gluonts.GluonTS.predict"]], "predict() (autots.models.greykite.greykite method)": [[4, "autots.models.greykite.Greykite.predict"]], "predict() (autots.models.matrix_var.latc method)": [[4, "autots.models.matrix_var.LATC.predict"]], "predict() (autots.models.matrix_var.mar method)": [[4, "autots.models.matrix_var.MAR.predict"]], "predict() (autots.models.matrix_var.rrvar method)": [[4, "autots.models.matrix_var.RRVAR.predict"]], "predict() (autots.models.matrix_var.tmf method)": [[4, "autots.models.matrix_var.TMF.predict"]], "predict() (autots.models.mlensemble.mlensemble method)": [[4, "autots.models.mlensemble.MLEnsemble.predict"]], "predict() (autots.models.neural_forecast.neuralforecast method)": [[4, "autots.models.neural_forecast.NeuralForecast.predict"]], "predict() (autots.models.prophet.fbprophet method)": [[4, "autots.models.prophet.FBProphet.predict"]], "predict() (autots.models.prophet.neuralprophet method)": [[4, "autots.models.prophet.NeuralProphet.predict"]], "predict() (autots.models.pytorch.pytorchforecasting method)": [[4, "autots.models.pytorch.PytorchForecasting.predict"]], "predict() (autots.models.sklearn.componentanalysis method)": [[4, "autots.models.sklearn.ComponentAnalysis.predict"]], "predict() (autots.models.sklearn.datepartregression method)": [[4, "autots.models.sklearn.DatepartRegression.predict"]], "predict() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.predict"]], "predict() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.predict"]], "predict() (autots.models.sklearn.rollingregression method)": [[4, "autots.models.sklearn.RollingRegression.predict"]], "predict() (autots.models.sklearn.univariateregression method)": [[4, "autots.models.sklearn.UnivariateRegression.predict"]], "predict() (autots.models.sklearn.vectorizedmultioutputgpr method)": [[4, "autots.models.sklearn.VectorizedMultiOutputGPR.predict"]], "predict() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.predict"]], "predict() (autots.models.statsmodels.ardl method)": [[4, "autots.models.statsmodels.ARDL.predict"]], "predict() (autots.models.statsmodels.arima method)": [[4, "autots.models.statsmodels.ARIMA.predict"]], "predict() (autots.models.statsmodels.dynamicfactor method)": [[4, "autots.models.statsmodels.DynamicFactor.predict"]], "predict() (autots.models.statsmodels.dynamicfactormq method)": [[4, "autots.models.statsmodels.DynamicFactorMQ.predict"]], "predict() (autots.models.statsmodels.ets method)": [[4, "autots.models.statsmodels.ETS.predict"]], "predict() (autots.models.statsmodels.glm method)": [[4, "autots.models.statsmodels.GLM.predict"]], "predict() (autots.models.statsmodels.gls method)": [[4, "autots.models.statsmodels.GLS.predict"]], "predict() (autots.models.statsmodels.theta method)": [[4, "autots.models.statsmodels.Theta.predict"]], "predict() (autots.models.statsmodels.unobservedcomponents method)": [[4, "autots.models.statsmodels.UnobservedComponents.predict"]], "predict() (autots.models.statsmodels.var method)": [[4, "autots.models.statsmodels.VAR.predict"]], "predict() (autots.models.statsmodels.varmax method)": [[4, "autots.models.statsmodels.VARMAX.predict"]], "predict() (autots.models.statsmodels.vecm method)": [[4, "autots.models.statsmodels.VECM.predict"]], "predict() (autots.models.tfp.tfpregression method)": [[4, "autots.models.tfp.TFPRegression.predict"]], "predict() 
(autots.models.tfp.tfpregressor method)": [[4, "autots.models.tfp.TFPRegressor.predict"]], "predict() (autots.models.tfp.tensorflowsts method)": [[4, "autots.models.tfp.TensorflowSTS.predict"]], "predict() (autots.models.tide.tide method)": [[4, "autots.models.tide.TiDE.predict"]], "predict_new_product() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.predict_new_product"]], "predict_proba() (autots.models.sklearn.vectorizedmultioutputgpr method)": [[4, "autots.models.sklearn.VectorizedMultiOutputGPR.predict_proba"]], "predict_reservoir() (in module autots.models.basics)": [[4, "autots.models.basics.predict_reservoir"]], "predict_x_array (autots.models.cassandra.cassandra. attribute)": [[4, "autots.models.cassandra.Cassandra..predict_x_array"]], "predicted_trend (autots.models.cassandra.cassandra. attribute)": [[4, "autots.models.cassandra.Cassandra..predicted_trend"]], "process_components() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.process_components"]], "process_mosaic_arrays() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.process_mosaic_arrays"]], "retrieve_classifier() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.retrieve_classifier"]], "retrieve_regressor() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.retrieve_regressor"]], "return_components() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.return_components"], [4, "id11"]], "rmse() (in module autots.models.tide)": [[4, "autots.models.tide.rmse"]], "rolling_trend() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.rolling_trend"]], "rolling_x_regressor() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.rolling_x_regressor"]], "rolling_x_regressor_regressor() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.rolling_x_regressor_regressor"]], "rrvar() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.rrvar"]], "sample_posterior() (autots.models.cassandra.bayesianmultioutputregression method)": [[4, "autots.models.cassandra.BayesianMultiOutputRegression.sample_posterior"]], "scale_data() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.scale_data"]], "scale_data() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.scale_data"]], "scores (autots.models.cassandra.cassandra..anomaly_detector attribute)": [[4, "autots.models.cassandra.Cassandra..anomaly_detector.scores"]], "seek_the_oracle() (in module autots.models.greykite)": [[4, "autots.models.greykite.seek_the_oracle"]], "smape() (in module autots.models.tide)": [[4, "autots.models.tide.smape"]], "summarize_series() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.summarize_series"]], "svt_tnn() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.svt_tnn"]], "ten2mat() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.ten2mat"]], "test_val_gen() (autots.models.tide.timeseriesdata method)": [[4, "autots.models.tide.TimeSeriesdata.test_val_gen"]], "tf_dataset() (autots.models.tide.timeseriesdata method)": [[4, "autots.models.tide.TimeSeriesdata.tf_dataset"]], "time() (autots.models.base.modelobject static method)": [[4, "autots.models.base.ModelObject.time"]], "tmf() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.tmf"]], "to_origin_space() 
(autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.to_origin_space"]], "to_origin_space() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.to_origin_space"]], "total_runtime() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.total_runtime"], [4, "id4"]], "train_gen() (autots.models.tide.timeseriesdata method)": [[4, "autots.models.tide.TimeSeriesdata.train_gen"]], "transformation_parameters (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.transformation_parameters"]], "transformer_build_model() (in module autots.models.dnn)": [[4, "autots.models.dnn.transformer_build_model"]], "transformer_encoder() (in module autots.models.dnn)": [[4, "autots.models.dnn.transformer_encoder"]], "treatment_causal_impact() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.treatment_causal_impact"]], "trend_train (autots.models.cassandra.cassandra. attribute)": [[4, "autots.models.cassandra.Cassandra..trend_train"]], "tune_observational_noise() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.tune_observational_noise"]], "update_cg() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.update_cg"]], "upper_forecast (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.upper_forecast"]], "var() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.var"]], "var4cast() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.var4cast"]], "wape() (in module autots.models.tide)": [[4, "autots.models.tide.wape"]], "x_array (autots.models.cassandra.cassandra. 
attribute)": [[4, "autots.models.cassandra.Cassandra..x_array"]], "autots.templates": [[5, "module-autots.templates"]], "autots.templates.general": [[5, "module-autots.templates.general"]], "general_template (in module autots.templates.general)": [[5, "autots.templates.general.general_template"]], "alignlastdiff (class in autots.tools.transform)": [[6, "autots.tools.transform.AlignLastDiff"]], "alignlastvalue (class in autots.tools.transform)": [[6, "autots.tools.transform.AlignLastValue"]], "anomalyremoval (class in autots.tools.transform)": [[6, "autots.tools.transform.AnomalyRemoval"]], "bkbandpassfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.BKBandpassFilter"]], "btcd (class in autots.tools.transform)": [[6, "autots.tools.transform.BTCD"]], "centerlastvalue (class in autots.tools.transform)": [[6, "autots.tools.transform.CenterLastValue"]], "centersplit (class in autots.tools.transform)": [[6, "autots.tools.transform.CenterSplit"]], "clipoutliers (class in autots.tools.transform)": [[6, "autots.tools.transform.ClipOutliers"]], "cointegration (class in autots.tools.transform)": [[6, "autots.tools.transform.Cointegration"]], "cumsumtransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.CumSumTransformer"]], "datepartregression (in module autots.tools.transform)": [[6, "autots.tools.transform.DatepartRegression"]], "datepartregressiontransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.DatepartRegressionTransformer"]], "detrend (class in autots.tools.transform)": [[6, "autots.tools.transform.Detrend"]], "diffsmoother (class in autots.tools.transform)": [[6, "autots.tools.transform.DiffSmoother"]], "differencedtransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.DifferencedTransformer"]], "discretize (class in autots.tools.transform)": [[6, "autots.tools.transform.Discretize"]], "ewmafilter (class in autots.tools.transform)": [[6, "autots.tools.transform.EWMAFilter"]], "emptytransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.EmptyTransformer"]], "fft (class in autots.tools.fft)": [[6, "autots.tools.fft.FFT"]], "fftdecomposition (class in autots.tools.transform)": [[6, "autots.tools.transform.FFTDecomposition"]], "fftfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.FFTFilter"]], "fastica (class in autots.tools.transform)": [[6, "autots.tools.transform.FastICA"]], "fillna() (in module autots.tools.impute)": [[6, "autots.tools.impute.FillNA"]], "gaussian (class in autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.Gaussian"]], "generaltransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.GeneralTransformer"]], "hpfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.HPFilter"]], "historicvalues (class in autots.tools.transform)": [[6, "autots.tools.transform.HistoricValues"]], "holidaytransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.HolidayTransformer"]], "intermittentoccurrence (class in autots.tools.transform)": [[6, "autots.tools.transform.IntermittentOccurrence"]], "kalmanfilter (class in autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.KalmanFilter"]], "kalmanfilter.result (class in autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.KalmanFilter.Result"]], "kalmansmoothing (class in autots.tools.transform)": [[6, "autots.tools.transform.KalmanSmoothing"]], "levelshiftmagic (class in autots.tools.transform)": [[6, 
"autots.tools.transform.LevelShiftMagic"]], "levelshifttransformer (in module autots.tools.transform)": [[6, "autots.tools.transform.LevelShiftTransformer"]], "locallineartrend (class in autots.tools.transform)": [[6, "autots.tools.transform.LocalLinearTrend"]], "meandifference (class in autots.tools.transform)": [[6, "autots.tools.transform.MeanDifference"]], "nonparametricthreshold (class in autots.tools.thresholding)": [[6, "autots.tools.thresholding.NonparametricThreshold"]], "numerictransformer (class in autots.tools.shaping)": [[6, "autots.tools.shaping.NumericTransformer"]], "pca (class in autots.tools.transform)": [[6, "autots.tools.transform.PCA"]], "pctchangetransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.PctChangeTransformer"]], "point_to_probability() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.Point_to_Probability"]], "positiveshift (class in autots.tools.transform)": [[6, "autots.tools.transform.PositiveShift"]], "randomtransform() (in module autots.tools.transform)": [[6, "autots.tools.transform.RandomTransform"]], "regressionfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.RegressionFilter"]], "replaceconstant (class in autots.tools.transform)": [[6, "autots.tools.transform.ReplaceConstant"]], "rollingmeantransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.RollingMeanTransformer"]], "round (class in autots.tools.transform)": [[6, "autots.tools.transform.Round"]], "stlfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.STLFilter"]], "scipyfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.ScipyFilter"]], "seasonaldifference (class in autots.tools.transform)": [[6, "autots.tools.transform.SeasonalDifference"]], "seasonalitymotifimputer (class in autots.tools.impute)": [[6, "autots.tools.impute.SeasonalityMotifImputer"]], "simpleseasonalitymotifimputer (class in autots.tools.impute)": [[6, "autots.tools.impute.SimpleSeasonalityMotifImputer"]], "sintrend (class in autots.tools.transform)": [[6, "autots.tools.transform.SinTrend"]], "slice (class in autots.tools.transform)": [[6, "autots.tools.transform.Slice"]], "statsmodelsfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.StatsmodelsFilter"]], "variable_point_to_probability() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.Variable_Point_to_Probability"]], "anomaly_df_to_holidays() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.anomaly_df_to_holidays"]], "anomaly_new_params() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.anomaly_new_params"]], "autoshape() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.autoshape"]], "autots.tools": [[6, "module-autots.tools"]], "autots.tools.anomaly_utils": [[6, "module-autots.tools.anomaly_utils"]], "autots.tools.calendar": [[6, "module-autots.tools.calendar"]], "autots.tools.cointegration": [[6, "module-autots.tools.cointegration"]], "autots.tools.cpu_count": [[6, "module-autots.tools.cpu_count"]], "autots.tools.fast_kalman": [[6, "module-autots.tools.fast_kalman"]], "autots.tools.fft": [[6, "module-autots.tools.fft"]], "autots.tools.hierarchial": [[6, "module-autots.tools.hierarchial"]], "autots.tools.holiday": [[6, "module-autots.tools.holiday"]], "autots.tools.impute": [[6, "module-autots.tools.impute"]], "autots.tools.lunar": [[6, "module-autots.tools.lunar"]], "autots.tools.percentile": [[6, 
"module-autots.tools.percentile"]], "autots.tools.probabilistic": [[6, "module-autots.tools.probabilistic"]], "autots.tools.profile": [[6, "module-autots.tools.profile"]], "autots.tools.regressor": [[6, "module-autots.tools.regressor"]], "autots.tools.seasonal": [[6, "module-autots.tools.seasonal"]], "autots.tools.shaping": [[6, "module-autots.tools.shaping"]], "autots.tools.thresholding": [[6, "module-autots.tools.thresholding"]], "autots.tools.transform": [[6, "module-autots.tools.transform"]], "autots.tools.window_functions": [[6, "module-autots.tools.window_functions"]], "biased_ffill() (in module autots.tools.impute)": [[6, "autots.tools.impute.biased_ffill"]], "bkfilter() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.bkfilter"]], "bkfilter_st() (in module autots.tools.transform)": [[6, "autots.tools.transform.bkfilter_st"]], "btcd_decompose() (in module autots.tools.cointegration)": [[6, "autots.tools.cointegration.btcd_decompose"]], "cffilter() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.cffilter"]], "chunk_reshape() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.chunk_reshape"]], "clean_weights() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.clean_weights"]], "clip_outliers() (in module autots.tools.transform)": [[6, "autots.tools.transform.clip_outliers"]], "coint_johansen() (in module autots.tools.cointegration)": [[6, "autots.tools.cointegration.coint_johansen"]], "compare_to_epsilon() (autots.tools.thresholding.nonparametricthreshold method)": [[6, "autots.tools.thresholding.NonparametricThreshold.compare_to_epsilon"]], "compute() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.compute"]], "consecutive_groups() (in module autots.tools.thresholding)": [[6, "autots.tools.thresholding.consecutive_groups"]], "convolution_filter() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.convolution_filter"]], "cpu_count() (in module autots.tools.cpu_count)": [[6, "autots.tools.cpu_count.cpu_count"]], "create_datepart_components() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.create_datepart_components"]], "create_dates_df() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.create_dates_df"]], "create_lagged_regressor() (in module autots.tools.regressor)": [[6, "autots.tools.regressor.create_lagged_regressor"]], "create_regressor() (in module autots.tools.regressor)": [[6, "autots.tools.regressor.create_regressor"]], "create_seasonality_feature() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.create_seasonality_feature"]], "data_profile() (in module autots.tools.profile)": [[6, "autots.tools.profile.data_profile"]], "date_part() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.date_part"]], "dates_to_holidays() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.dates_to_holidays"]], "dates_to_holidays() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.dates_to_holidays"]], "dcos() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.dcos"]], "ddot() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.ddot"]], "ddot_t_right() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.ddot_t_right"]], "ddot_t_right_old() (in module autots.tools.fast_kalman)": [[6, 
"autots.tools.fast_kalman.ddot_t_right_old"]], "detect_anomalies() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.detect_anomalies"]], "df_cleanup() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.df_cleanup"]], "dinv() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.dinv"]], "douter() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.douter"]], "dsin() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.dsin"]], "em() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.em"]], "em_initial_state() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.em_initial_state"]], "em_observation_noise() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.em_observation_noise"]], "em_process_noise() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.em_process_noise"]], "empty() (autots.tools.fast_kalman.gaussian static method)": [[6, "autots.tools.fast_kalman.Gaussian.empty"]], "ensure_matrix() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.ensure_matrix"]], "exponential_decay() (in module autots.tools.transform)": [[6, "autots.tools.transform.exponential_decay"]], "fake_date_fill() (in module autots.tools.impute)": [[6, "autots.tools.impute.fake_date_fill"]], "fake_date_fill_old() (in module autots.tools.impute)": [[6, "autots.tools.impute.fake_date_fill_old"]], "fill_forward() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_forward"]], "fill_forward_alt() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_forward_alt"]], "fill_mean() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_mean"]], "fill_mean_old() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_mean_old"]], "fill_median() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_median"]], "fill_median_old() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_median_old"]], "fill_na() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.fill_na"]], "fill_zero() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_zero"]], "fillna_np() (in module autots.tools.impute)": [[6, "autots.tools.impute.fillna_np"]], "find_centerpoint() (autots.tools.transform.alignlastvalue static method)": [[6, "autots.tools.transform.AlignLastValue.find_centerpoint"]], "find_epsilon() (autots.tools.thresholding.nonparametricthreshold method)": [[6, "autots.tools.thresholding.NonparametricThreshold.find_epsilon"]], "fit() (autots.tools.fft.fft method)": [[6, "autots.tools.fft.FFT.fit"]], "fit() (autots.tools.hierarchial.hierarchial method)": [[6, "autots.tools.hierarchial.hierarchial.fit"]], "fit() (autots.tools.shaping.numerictransformer method)": [[6, "autots.tools.shaping.NumericTransformer.fit"]], "fit() (autots.tools.transform.alignlastdiff method)": [[6, "autots.tools.transform.AlignLastDiff.fit"]], "fit() (autots.tools.transform.alignlastvalue method)": [[6, "autots.tools.transform.AlignLastValue.fit"]], "fit() (autots.tools.transform.anomalyremoval method)": [[6, "autots.tools.transform.AnomalyRemoval.fit"]], "fit() (autots.tools.transform.bkbandpassfilter method)": [[6, "autots.tools.transform.BKBandpassFilter.fit"]], "fit() (autots.tools.transform.btcd method)": [[6, "autots.tools.transform.BTCD.fit"]], "fit() (autots.tools.transform.centerlastvalue method)": [[6, 
"autots.tools.transform.CenterLastValue.fit"]], "fit() (autots.tools.transform.centersplit method)": [[6, "autots.tools.transform.CenterSplit.fit"]], "fit() (autots.tools.transform.clipoutliers method)": [[6, "autots.tools.transform.ClipOutliers.fit"]], "fit() (autots.tools.transform.cointegration method)": [[6, "autots.tools.transform.Cointegration.fit"]], "fit() (autots.tools.transform.cumsumtransformer method)": [[6, "autots.tools.transform.CumSumTransformer.fit"]], "fit() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.fit"]], "fit() (autots.tools.transform.detrend method)": [[6, "autots.tools.transform.Detrend.fit"]], "fit() (autots.tools.transform.diffsmoother method)": [[6, "autots.tools.transform.DiffSmoother.fit"]], "fit() (autots.tools.transform.differencedtransformer method)": [[6, "autots.tools.transform.DifferencedTransformer.fit"]], "fit() (autots.tools.transform.discretize method)": [[6, "autots.tools.transform.Discretize.fit"]], "fit() (autots.tools.transform.emptytransformer method)": [[6, "autots.tools.transform.EmptyTransformer.fit"]], "fit() (autots.tools.transform.fftdecomposition method)": [[6, "autots.tools.transform.FFTDecomposition.fit"]], "fit() (autots.tools.transform.fftfilter method)": [[6, "autots.tools.transform.FFTFilter.fit"]], "fit() (autots.tools.transform.fastica method)": [[6, "autots.tools.transform.FastICA.fit"]], "fit() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.fit"]], "fit() (autots.tools.transform.historicvalues method)": [[6, "autots.tools.transform.HistoricValues.fit"]], "fit() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.fit"]], "fit() (autots.tools.transform.intermittentoccurrence method)": [[6, "autots.tools.transform.IntermittentOccurrence.fit"]], "fit() (autots.tools.transform.kalmansmoothing method)": [[6, "autots.tools.transform.KalmanSmoothing.fit"]], "fit() (autots.tools.transform.levelshiftmagic method)": [[6, "autots.tools.transform.LevelShiftMagic.fit"]], "fit() (autots.tools.transform.locallineartrend method)": [[6, "autots.tools.transform.LocalLinearTrend.fit"]], "fit() (autots.tools.transform.meandifference method)": [[6, "autots.tools.transform.MeanDifference.fit"]], "fit() (autots.tools.transform.pca method)": [[6, "autots.tools.transform.PCA.fit"]], "fit() (autots.tools.transform.pctchangetransformer method)": [[6, "autots.tools.transform.PctChangeTransformer.fit"]], "fit() (autots.tools.transform.positiveshift method)": [[6, "autots.tools.transform.PositiveShift.fit"]], "fit() (autots.tools.transform.regressionfilter method)": [[6, "autots.tools.transform.RegressionFilter.fit"]], "fit() (autots.tools.transform.replaceconstant method)": [[6, "autots.tools.transform.ReplaceConstant.fit"]], "fit() (autots.tools.transform.rollingmeantransformer method)": [[6, "autots.tools.transform.RollingMeanTransformer.fit"]], "fit() (autots.tools.transform.round method)": [[6, "autots.tools.transform.Round.fit"]], "fit() (autots.tools.transform.scipyfilter method)": [[6, "autots.tools.transform.ScipyFilter.fit"]], "fit() (autots.tools.transform.seasonaldifference method)": [[6, "autots.tools.transform.SeasonalDifference.fit"]], "fit() (autots.tools.transform.sintrend method)": [[6, "autots.tools.transform.SinTrend.fit"]], "fit() (autots.tools.transform.slice method)": [[6, "autots.tools.transform.Slice.fit"]], "fit_anomaly_classifier() 
(autots.tools.transform.anomalyremoval method)": [[6, "autots.tools.transform.AnomalyRemoval.fit_anomaly_classifier"]], "fit_sin() (autots.tools.transform.sintrend static method)": [[6, "autots.tools.transform.SinTrend.fit_sin"]], "fit_transform() (autots.tools.shaping.numerictransformer method)": [[6, "autots.tools.shaping.NumericTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.alignlastdiff method)": [[6, "autots.tools.transform.AlignLastDiff.fit_transform"]], "fit_transform() (autots.tools.transform.alignlastvalue method)": [[6, "autots.tools.transform.AlignLastValue.fit_transform"]], "fit_transform() (autots.tools.transform.anomalyremoval method)": [[6, "autots.tools.transform.AnomalyRemoval.fit_transform"]], "fit_transform() (autots.tools.transform.bkbandpassfilter method)": [[6, "autots.tools.transform.BKBandpassFilter.fit_transform"]], "fit_transform() (autots.tools.transform.btcd method)": [[6, "autots.tools.transform.BTCD.fit_transform"]], "fit_transform() (autots.tools.transform.centerlastvalue method)": [[6, "autots.tools.transform.CenterLastValue.fit_transform"]], "fit_transform() (autots.tools.transform.centersplit method)": [[6, "autots.tools.transform.CenterSplit.fit_transform"]], "fit_transform() (autots.tools.transform.clipoutliers method)": [[6, "autots.tools.transform.ClipOutliers.fit_transform"]], "fit_transform() (autots.tools.transform.cointegration method)": [[6, "autots.tools.transform.Cointegration.fit_transform"]], "fit_transform() (autots.tools.transform.cumsumtransformer method)": [[6, "autots.tools.transform.CumSumTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.detrend method)": [[6, "autots.tools.transform.Detrend.fit_transform"]], "fit_transform() (autots.tools.transform.diffsmoother method)": [[6, "autots.tools.transform.DiffSmoother.fit_transform"]], "fit_transform() (autots.tools.transform.differencedtransformer method)": [[6, "autots.tools.transform.DifferencedTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.discretize method)": [[6, "autots.tools.transform.Discretize.fit_transform"]], "fit_transform() (autots.tools.transform.ewmafilter method)": [[6, "autots.tools.transform.EWMAFilter.fit_transform"]], "fit_transform() (autots.tools.transform.emptytransformer method)": [[6, "autots.tools.transform.EmptyTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.fftdecomposition method)": [[6, "autots.tools.transform.FFTDecomposition.fit_transform"]], "fit_transform() (autots.tools.transform.fftfilter method)": [[6, "autots.tools.transform.FFTFilter.fit_transform"]], "fit_transform() (autots.tools.transform.fastica method)": [[6, "autots.tools.transform.FastICA.fit_transform"]], "fit_transform() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.hpfilter method)": [[6, "autots.tools.transform.HPFilter.fit_transform"]], "fit_transform() (autots.tools.transform.historicvalues method)": [[6, "autots.tools.transform.HistoricValues.fit_transform"]], "fit_transform() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.intermittentoccurrence method)": [[6, 
"autots.tools.transform.IntermittentOccurrence.fit_transform"]], "fit_transform() (autots.tools.transform.kalmansmoothing method)": [[6, "autots.tools.transform.KalmanSmoothing.fit_transform"]], "fit_transform() (autots.tools.transform.levelshiftmagic method)": [[6, "autots.tools.transform.LevelShiftMagic.fit_transform"]], "fit_transform() (autots.tools.transform.locallineartrend method)": [[6, "autots.tools.transform.LocalLinearTrend.fit_transform"]], "fit_transform() (autots.tools.transform.meandifference method)": [[6, "autots.tools.transform.MeanDifference.fit_transform"]], "fit_transform() (autots.tools.transform.pca method)": [[6, "autots.tools.transform.PCA.fit_transform"]], "fit_transform() (autots.tools.transform.pctchangetransformer method)": [[6, "autots.tools.transform.PctChangeTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.positiveshift method)": [[6, "autots.tools.transform.PositiveShift.fit_transform"]], "fit_transform() (autots.tools.transform.regressionfilter method)": [[6, "autots.tools.transform.RegressionFilter.fit_transform"]], "fit_transform() (autots.tools.transform.replaceconstant method)": [[6, "autots.tools.transform.ReplaceConstant.fit_transform"]], "fit_transform() (autots.tools.transform.rollingmeantransformer method)": [[6, "autots.tools.transform.RollingMeanTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.round method)": [[6, "autots.tools.transform.Round.fit_transform"]], "fit_transform() (autots.tools.transform.stlfilter method)": [[6, "autots.tools.transform.STLFilter.fit_transform"]], "fit_transform() (autots.tools.transform.scipyfilter method)": [[6, "autots.tools.transform.ScipyFilter.fit_transform"]], "fit_transform() (autots.tools.transform.seasonaldifference method)": [[6, "autots.tools.transform.SeasonalDifference.fit_transform"]], "fit_transform() (autots.tools.transform.sintrend method)": [[6, "autots.tools.transform.SinTrend.fit_transform"]], "fit_transform() (autots.tools.transform.slice method)": [[6, "autots.tools.transform.Slice.fit_transform"]], "fit_transform() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.fit_transform"]], "fixangle() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.fixangle"]], "fourier_df() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.fourier_df"]], "fourier_extrapolation() (in module autots.tools.fft)": [[6, "autots.tools.fft.fourier_extrapolation"]], "fourier_series() (in module autots.tools.cointegration)": [[6, "autots.tools.cointegration.fourier_series"]], "fourier_series() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.fourier_series"]], "freq_to_timedelta() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.freq_to_timedelta"]], "get_new_params() (autots.tools.transform.alignlastdiff static method)": [[6, "autots.tools.transform.AlignLastDiff.get_new_params"]], "get_new_params() (autots.tools.transform.alignlastvalue static method)": [[6, "autots.tools.transform.AlignLastValue.get_new_params"]], "get_new_params() (autots.tools.transform.anomalyremoval static method)": [[6, "autots.tools.transform.AnomalyRemoval.get_new_params"]], "get_new_params() (autots.tools.transform.bkbandpassfilter static method)": [[6, "autots.tools.transform.BKBandpassFilter.get_new_params"]], "get_new_params() (autots.tools.transform.btcd static method)": [[6, "autots.tools.transform.BTCD.get_new_params"]], "get_new_params() (autots.tools.transform.centerlastvalue static method)": [[6, 
"autots.tools.transform.CenterLastValue.get_new_params"]], "get_new_params() (autots.tools.transform.centersplit static method)": [[6, "autots.tools.transform.CenterSplit.get_new_params"]], "get_new_params() (autots.tools.transform.clipoutliers static method)": [[6, "autots.tools.transform.ClipOutliers.get_new_params"]], "get_new_params() (autots.tools.transform.cointegration static method)": [[6, "autots.tools.transform.Cointegration.get_new_params"]], "get_new_params() (autots.tools.transform.datepartregressiontransformer static method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.detrend static method)": [[6, "autots.tools.transform.Detrend.get_new_params"]], "get_new_params() (autots.tools.transform.diffsmoother static method)": [[6, "autots.tools.transform.DiffSmoother.get_new_params"]], "get_new_params() (autots.tools.transform.discretize static method)": [[6, "autots.tools.transform.Discretize.get_new_params"]], "get_new_params() (autots.tools.transform.ewmafilter static method)": [[6, "autots.tools.transform.EWMAFilter.get_new_params"]], "get_new_params() (autots.tools.transform.emptytransformer static method)": [[6, "autots.tools.transform.EmptyTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.fftdecomposition static method)": [[6, "autots.tools.transform.FFTDecomposition.get_new_params"]], "get_new_params() (autots.tools.transform.fftfilter static method)": [[6, "autots.tools.transform.FFTFilter.get_new_params"]], "get_new_params() (autots.tools.transform.fastica static method)": [[6, "autots.tools.transform.FastICA.get_new_params"]], "get_new_params() (autots.tools.transform.generaltransformer static method)": [[6, "autots.tools.transform.GeneralTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.hpfilter static method)": [[6, "autots.tools.transform.HPFilter.get_new_params"]], "get_new_params() (autots.tools.transform.historicvalues static method)": [[6, "autots.tools.transform.HistoricValues.get_new_params"]], "get_new_params() (autots.tools.transform.holidaytransformer static method)": [[6, "autots.tools.transform.HolidayTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.intermittentoccurrence static method)": [[6, "autots.tools.transform.IntermittentOccurrence.get_new_params"]], "get_new_params() (autots.tools.transform.kalmansmoothing static method)": [[6, "autots.tools.transform.KalmanSmoothing.get_new_params"]], "get_new_params() (autots.tools.transform.levelshiftmagic static method)": [[6, "autots.tools.transform.LevelShiftMagic.get_new_params"]], "get_new_params() (autots.tools.transform.locallineartrend static method)": [[6, "autots.tools.transform.LocalLinearTrend.get_new_params"]], "get_new_params() (autots.tools.transform.pca static method)": [[6, "autots.tools.transform.PCA.get_new_params"]], "get_new_params() (autots.tools.transform.regressionfilter static method)": [[6, "autots.tools.transform.RegressionFilter.get_new_params"]], "get_new_params() (autots.tools.transform.replaceconstant static method)": [[6, "autots.tools.transform.ReplaceConstant.get_new_params"]], "get_new_params() (autots.tools.transform.rollingmeantransformer static method)": [[6, "autots.tools.transform.RollingMeanTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.round static method)": [[6, "autots.tools.transform.Round.get_new_params"]], "get_new_params() (autots.tools.transform.stlfilter static method)": [[6, 
"autots.tools.transform.STLFilter.get_new_params"]], "get_new_params() (autots.tools.transform.scipyfilter static method)": [[6, "autots.tools.transform.ScipyFilter.get_new_params"]], "get_new_params() (autots.tools.transform.seasonaldifference static method)": [[6, "autots.tools.transform.SeasonalDifference.get_new_params"]], "get_new_params() (autots.tools.transform.sintrend static method)": [[6, "autots.tools.transform.SinTrend.get_new_params"]], "get_new_params() (autots.tools.transform.slice static method)": [[6, "autots.tools.transform.Slice.get_new_params"]], "get_transformer_params() (in module autots.tools.transform)": [[6, "autots.tools.transform.get_transformer_params"]], "gregorian_to_chinese() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.gregorian_to_chinese"]], "gregorian_to_christian_lunar() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.gregorian_to_christian_lunar"]], "gregorian_to_hebrew() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.gregorian_to_hebrew"]], "gregorian_to_islamic() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.gregorian_to_islamic"]], "heb_is_leap() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.heb_is_leap"]], "hierarchial (class in autots.tools.hierarchial)": [[6, "autots.tools.hierarchial.hierarchial"]], "historic_quantile() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.historic_quantile"]], "holiday_flag() (in module autots.tools.holiday)": [[6, "autots.tools.holiday.holiday_flag"]], "holiday_new_params() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.holiday_new_params"]], "holt_winters_damped_matrices() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.holt_winters_damped_matrices"]], "impute() (autots.tools.impute.seasonalitymotifimputer method)": [[6, "autots.tools.impute.SeasonalityMotifImputer.impute"]], "impute() (autots.tools.impute.simpleseasonalitymotifimputer method)": [[6, "autots.tools.impute.SimpleSeasonalityMotifImputer.impute"]], "impute() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.impute"]], "infer_frequency() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.infer_frequency"]], "inferred_normal() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.inferred_normal"]], "inverse_transform() (autots.tools.shaping.numerictransformer method)": [[6, "autots.tools.shaping.NumericTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.alignlastdiff method)": [[6, "autots.tools.transform.AlignLastDiff.inverse_transform"]], "inverse_transform() (autots.tools.transform.alignlastvalue method)": [[6, "autots.tools.transform.AlignLastValue.inverse_transform"]], "inverse_transform() (autots.tools.transform.bkbandpassfilter method)": [[6, "autots.tools.transform.BKBandpassFilter.inverse_transform"]], "inverse_transform() (autots.tools.transform.btcd method)": [[6, "autots.tools.transform.BTCD.inverse_transform"]], "inverse_transform() (autots.tools.transform.centerlastvalue method)": [[6, "autots.tools.transform.CenterLastValue.inverse_transform"]], "inverse_transform() (autots.tools.transform.centersplit method)": [[6, "autots.tools.transform.CenterSplit.inverse_transform"]], "inverse_transform() (autots.tools.transform.clipoutliers method)": [[6, "autots.tools.transform.ClipOutliers.inverse_transform"]], "inverse_transform() 
(autots.tools.transform.cointegration method)": [[6, "autots.tools.transform.Cointegration.inverse_transform"]], "inverse_transform() (autots.tools.transform.cumsumtransformer method)": [[6, "autots.tools.transform.CumSumTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.detrend method)": [[6, "autots.tools.transform.Detrend.inverse_transform"]], "inverse_transform() (autots.tools.transform.differencedtransformer method)": [[6, "autots.tools.transform.DifferencedTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.discretize method)": [[6, "autots.tools.transform.Discretize.inverse_transform"]], "inverse_transform() (autots.tools.transform.emptytransformer method)": [[6, "autots.tools.transform.EmptyTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.fftdecomposition method)": [[6, "autots.tools.transform.FFTDecomposition.inverse_transform"]], "inverse_transform() (autots.tools.transform.fftfilter method)": [[6, "autots.tools.transform.FFTFilter.inverse_transform"]], "inverse_transform() (autots.tools.transform.fastica method)": [[6, "autots.tools.transform.FastICA.inverse_transform"]], "inverse_transform() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.historicvalues method)": [[6, "autots.tools.transform.HistoricValues.inverse_transform"]], "inverse_transform() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.intermittentoccurrence method)": [[6, "autots.tools.transform.IntermittentOccurrence.inverse_transform"]], "inverse_transform() (autots.tools.transform.kalmansmoothing method)": [[6, "autots.tools.transform.KalmanSmoothing.inverse_transform"]], "inverse_transform() (autots.tools.transform.levelshiftmagic method)": [[6, "autots.tools.transform.LevelShiftMagic.inverse_transform"]], "inverse_transform() (autots.tools.transform.locallineartrend method)": [[6, "autots.tools.transform.LocalLinearTrend.inverse_transform"]], "inverse_transform() (autots.tools.transform.meandifference method)": [[6, "autots.tools.transform.MeanDifference.inverse_transform"]], "inverse_transform() (autots.tools.transform.pca method)": [[6, "autots.tools.transform.PCA.inverse_transform"]], "inverse_transform() (autots.tools.transform.pctchangetransformer method)": [[6, "autots.tools.transform.PctChangeTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.positiveshift method)": [[6, "autots.tools.transform.PositiveShift.inverse_transform"]], "inverse_transform() (autots.tools.transform.regressionfilter method)": [[6, "autots.tools.transform.RegressionFilter.inverse_transform"]], "inverse_transform() (autots.tools.transform.replaceconstant method)": [[6, "autots.tools.transform.ReplaceConstant.inverse_transform"]], "inverse_transform() (autots.tools.transform.rollingmeantransformer method)": [[6, "autots.tools.transform.RollingMeanTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.round method)": [[6, "autots.tools.transform.Round.inverse_transform"]], "inverse_transform() (autots.tools.transform.scipyfilter method)": [[6, "autots.tools.transform.ScipyFilter.inverse_transform"]], 
"inverse_transform() (autots.tools.transform.seasonaldifference method)": [[6, "autots.tools.transform.SeasonalDifference.inverse_transform"]], "inverse_transform() (autots.tools.transform.sintrend method)": [[6, "autots.tools.transform.SinTrend.inverse_transform"]], "inverse_transform() (autots.tools.transform.slice method)": [[6, "autots.tools.transform.Slice.inverse_transform"]], "kepler() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.kepler"]], "lagmat() (in module autots.tools.cointegration)": [[6, "autots.tools.cointegration.lagmat"]], "last_window() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.last_window"]], "limits_to_anomalies() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.limits_to_anomalies"]], "long_to_wide() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.long_to_wide"]], "loop_sk_outliers() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.loop_sk_outliers"]], "lunar_from_lunar() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.lunar_from_lunar"]], "lunar_from_lunar_full() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.lunar_from_lunar_full"]], "moon_phase() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.moon_phase"]], "moon_phase_df() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.moon_phase_df"]], "nan_percentile() (in module autots.tools.percentile)": [[6, "autots.tools.percentile.nan_percentile"]], "nan_quantile() (in module autots.tools.percentile)": [[6, "autots.tools.percentile.nan_quantile"]], "new_kalman_params() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.new_kalman_params"]], "nonparametric() (in module autots.tools.thresholding)": [[6, "autots.tools.thresholding.nonparametric"]], "nonparametric_multivariate() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.nonparametric_multivariate"]], "np_2d_arange() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.np_2d_arange"]], "percentileofscore_appliable() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.percentileofscore_appliable"]], "phase_string() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.phase_string"]], "predict() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.predict"]], "predict() (autots.tools.fft.fft method)": [[6, "autots.tools.fft.FFT.predict"]], "predict() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.predict"]], "predict_next() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.predict_next"]], "predict_observation() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.predict_observation"]], "predict_observation() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.predict_observation"]], "priv_smooth() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.priv_smooth"]], "priv_update_with_nan_check() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.priv_update_with_nan_check"]], "prune_anoms() (autots.tools.thresholding.nonparametricthreshold method)": [[6, "autots.tools.thresholding.NonparametricThreshold.prune_anoms"]], "query_holidays() (in module autots.tools.holiday)": [[6, "autots.tools.holiday.query_holidays"]], "random_cleaners() (in module autots.tools.transform)": [[6, 
"autots.tools.transform.random_cleaners"]], "random_datepart() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.random_datepart"]], "random_state_space() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.random_state_space"]], "reconcile() (autots.tools.hierarchial.hierarchial method)": [[6, "autots.tools.hierarchial.hierarchial.reconcile"]], "remove_outliers() (in module autots.tools.transform)": [[6, "autots.tools.transform.remove_outliers"]], "retrieve_closest_indices() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.retrieve_closest_indices"]], "retrieve_transformer() (autots.tools.transform.generaltransformer class method)": [[6, "autots.tools.transform.GeneralTransformer.retrieve_transformer"]], "rolling_mean() (in module autots.tools.impute)": [[6, "autots.tools.impute.rolling_mean"]], "rolling_window_view() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.rolling_window_view"]], "score_anomalies() (autots.tools.thresholding.nonparametricthreshold method)": [[6, "autots.tools.thresholding.NonparametricThreshold.score_anomalies"]], "score_to_anomaly() (autots.tools.transform.anomalyremoval method)": [[6, "autots.tools.transform.AnomalyRemoval.score_to_anomaly"]], "seasonal_independent_match() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.seasonal_independent_match"]], "seasonal_int() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.seasonal_int"]], "seasonal_window_match() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.seasonal_window_match"]], "set_n_jobs() (in module autots.tools.cpu_count)": [[6, "autots.tools.cpu_count.set_n_jobs"]], "simple_context_slicer() (in module autots.tools.transform)": [[6, "autots.tools.transform.simple_context_slicer"]], "simple_train_test_split() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.simple_train_test_split"]], "sk_outliers() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.sk_outliers"]], "sliding_window_view() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.sliding_window_view"]], "smooth() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.smooth"]], "smooth() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.smooth"]], "smooth_current() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.smooth_current"]], "split_digits_and_non_digits() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.split_digits_and_non_digits"]], "subset_series() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.subset_series"]], "to_jd() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.to_jd"]], "todeg() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.todeg"]], "torad() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.torad"]], "transform() (autots.tools.hierarchial.hierarchial method)": [[6, "autots.tools.hierarchial.hierarchial.transform"]], "transform() (autots.tools.shaping.numerictransformer method)": [[6, "autots.tools.shaping.NumericTransformer.transform"]], "transform() (autots.tools.transform.alignlastdiff method)": [[6, "autots.tools.transform.AlignLastDiff.transform"]], "transform() (autots.tools.transform.alignlastvalue method)": [[6, "autots.tools.transform.AlignLastValue.transform"]], "transform() (autots.tools.transform.anomalyremoval method)": [[6, 
"autots.tools.transform.AnomalyRemoval.transform"]], "transform() (autots.tools.transform.bkbandpassfilter method)": [[6, "autots.tools.transform.BKBandpassFilter.transform"]], "transform() (autots.tools.transform.btcd method)": [[6, "autots.tools.transform.BTCD.transform"]], "transform() (autots.tools.transform.centerlastvalue method)": [[6, "autots.tools.transform.CenterLastValue.transform"]], "transform() (autots.tools.transform.centersplit method)": [[6, "autots.tools.transform.CenterSplit.transform"]], "transform() (autots.tools.transform.clipoutliers method)": [[6, "autots.tools.transform.ClipOutliers.transform"]], "transform() (autots.tools.transform.cointegration method)": [[6, "autots.tools.transform.Cointegration.transform"]], "transform() (autots.tools.transform.cumsumtransformer method)": [[6, "autots.tools.transform.CumSumTransformer.transform"]], "transform() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.transform"]], "transform() (autots.tools.transform.detrend method)": [[6, "autots.tools.transform.Detrend.transform"]], "transform() (autots.tools.transform.diffsmoother method)": [[6, "autots.tools.transform.DiffSmoother.transform"]], "transform() (autots.tools.transform.differencedtransformer method)": [[6, "autots.tools.transform.DifferencedTransformer.transform"]], "transform() (autots.tools.transform.discretize method)": [[6, "autots.tools.transform.Discretize.transform"]], "transform() (autots.tools.transform.ewmafilter method)": [[6, "autots.tools.transform.EWMAFilter.transform"]], "transform() (autots.tools.transform.emptytransformer method)": [[6, "autots.tools.transform.EmptyTransformer.transform"]], "transform() (autots.tools.transform.fftdecomposition method)": [[6, "autots.tools.transform.FFTDecomposition.transform"]], "transform() (autots.tools.transform.fftfilter method)": [[6, "autots.tools.transform.FFTFilter.transform"]], "transform() (autots.tools.transform.fastica method)": [[6, "autots.tools.transform.FastICA.transform"]], "transform() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.transform"]], "transform() (autots.tools.transform.hpfilter method)": [[6, "autots.tools.transform.HPFilter.transform"]], "transform() (autots.tools.transform.historicvalues method)": [[6, "autots.tools.transform.HistoricValues.transform"]], "transform() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.transform"]], "transform() (autots.tools.transform.intermittentoccurrence method)": [[6, "autots.tools.transform.IntermittentOccurrence.transform"]], "transform() (autots.tools.transform.kalmansmoothing method)": [[6, "autots.tools.transform.KalmanSmoothing.transform"]], "transform() (autots.tools.transform.levelshiftmagic method)": [[6, "autots.tools.transform.LevelShiftMagic.transform"]], "transform() (autots.tools.transform.locallineartrend method)": [[6, "autots.tools.transform.LocalLinearTrend.transform"]], "transform() (autots.tools.transform.meandifference method)": [[6, "autots.tools.transform.MeanDifference.transform"]], "transform() (autots.tools.transform.pca method)": [[6, "autots.tools.transform.PCA.transform"]], "transform() (autots.tools.transform.pctchangetransformer method)": [[6, "autots.tools.transform.PctChangeTransformer.transform"]], "transform() (autots.tools.transform.positiveshift method)": [[6, "autots.tools.transform.PositiveShift.transform"]], "transform() 
(autots.tools.transform.regressionfilter method)": [[6, "autots.tools.transform.RegressionFilter.transform"]], "transform() (autots.tools.transform.replaceconstant method)": [[6, "autots.tools.transform.ReplaceConstant.transform"]], "transform() (autots.tools.transform.rollingmeantransformer method)": [[6, "autots.tools.transform.RollingMeanTransformer.transform"]], "transform() (autots.tools.transform.round method)": [[6, "autots.tools.transform.Round.transform"]], "transform() (autots.tools.transform.stlfilter method)": [[6, "autots.tools.transform.STLFilter.transform"]], "transform() (autots.tools.transform.scipyfilter method)": [[6, "autots.tools.transform.ScipyFilter.transform"]], "transform() (autots.tools.transform.seasonaldifference method)": [[6, "autots.tools.transform.SeasonalDifference.transform"]], "transform() (autots.tools.transform.sintrend method)": [[6, "autots.tools.transform.SinTrend.transform"]], "transform() (autots.tools.transform.slice method)": [[6, "autots.tools.transform.Slice.transform"]], "transform() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.transform"]], "transformer_list_to_dict() (in module autots.tools.transform)": [[6, "autots.tools.transform.transformer_list_to_dict"]], "trimmed_mean() (in module autots.tools.percentile)": [[6, "autots.tools.percentile.trimmed_mean"]], "unvectorize_state() (autots.tools.fast_kalman.gaussian method)": [[6, "autots.tools.fast_kalman.Gaussian.unvectorize_state"]], "unvectorize_vars() (autots.tools.fast_kalman.gaussian method)": [[6, "autots.tools.fast_kalman.Gaussian.unvectorize_vars"]], "update() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.update"]], "update() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.update"]], "update_with_nan_check() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.update_with_nan_check"]], "values_to_anomalies() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.values_to_anomalies"]], "wide_to_3d() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.wide_to_3d"]], "window_id_maker() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_id_maker"]], "window_lin_reg() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_lin_reg"]], "window_lin_reg_mean() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_lin_reg_mean"]], "window_lin_reg_mean_no_nan() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_lin_reg_mean_no_nan"]], "window_maker() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_maker"]], "window_maker_2() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_maker_2"]], "window_maker_3() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_maker_3"]], "window_sum_mean() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_sum_mean"]], "window_sum_mean_nan_tail() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_sum_mean_nan_tail"]], "window_sum_nan_mean() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_sum_nan_mean"]], "zscore_survival_function() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.zscore_survival_function"]]}}) \ No newline 
at end of file +Search.setIndex({"docnames": ["index", "source/autots", "source/autots.datasets", "source/autots.evaluator", "source/autots.models", "source/autots.templates", "source/autots.tools", "source/intro", "source/modules", "source/tutorial"], "filenames": ["index.rst", "source/autots.rst", "source/autots.datasets.rst", "source/autots.evaluator.rst", "source/autots.models.rst", "source/autots.templates.rst", "source/autots.tools.rst", "source/intro.rst", "source/modules.rst", "source/tutorial.rst"], "titles": ["AutoTS", "autots package", "autots.datasets package", "autots.evaluator package", "autots.models package", "autots.templates package", "autots.tools package", "Intro", "autots", "Tutorial"], "terms": {"i": [0, 1, 2, 3, 4, 6, 7, 9], "an": [0, 1, 2, 3, 4, 6, 7, 9], "autom": [0, 1, 3, 7, 9], "time": [0, 1, 3, 4, 6, 7, 9], "seri": [0, 1, 2, 3, 4, 6, 7], "forecast": [0, 1, 2, 3, 4, 6, 7], "packag": [0, 7, 8], "python": [0, 1, 3, 4, 6, 7, 9], "pip": [0, 2, 7, 9], "requir": [0, 1, 2, 3, 4, 6, 7], "3": [0, 1, 3, 4, 5, 6, 9], "6": [0, 1, 3, 5, 6, 9], "numpi": [0, 1, 3, 4, 6, 9], "panda": [0, 1, 3, 4, 6, 7, 9], "statsmodel": [0, 1, 6, 8, 9], "scikit": [0, 4, 6, 7, 9], "learn": [0, 1, 4, 6, 7, 9], "intro": 0, "content": [0, 8], "basic": [0, 1, 3, 5, 6, 8, 9], "us": [0, 1, 2, 3, 4, 6], "tip": [0, 9], "speed": [0, 1, 3, 4], "larg": [0, 1, 4, 6, 9], "data": [0, 1, 2, 3, 4, 6], "how": [0, 1, 3, 4, 6, 9], "contribut": [0, 1, 3, 9], "tutori": [0, 7], "extend": [0, 6, 7], "deploy": 0, "templat": [0, 1, 3, 4, 7, 8], "import": [0, 1, 2, 3, 5, 6, 7], "export": [0, 1, 2, 3, 4, 5, 7], "depend": [0, 1, 3, 4, 6, 7], "version": [0, 1, 3, 4, 6], "caveat": 0, "advic": 0, "simul": [0, 4, 7], "event": [0, 1, 2, 3, 7], "risk": [0, 1, 3, 7], "anomali": [0, 1, 3, 4, 6, 8], "detect": [0, 1, 3, 6, 8], "transform": [0, 1, 3, 4, 7, 8], "independ": [0, 4, 6, 7], "model": [0, 1, 3, 5, 6, 7, 8], "index": [0, 1, 2, 3, 4, 5, 6, 9], "search": [0, 1, 2, 3, 4, 7, 9], "page": [0, 1, 2], "dataset": [1, 3, 4, 6, 7, 8, 9], "submodul": [1, 8], "fred": [1, 8], "get_fred_data": [1, 2], "load_artifici": [1, 2, 8], "load_daili": [1, 2, 7, 8, 9], "load_hourli": [1, 2, 8, 9], "load_linear": [1, 2, 8], "load_live_daili": [1, 2, 8, 9], "load_monthli": [1, 2, 8, 9], "load_sin": [1, 2, 8], "load_weekdai": [1, 2, 8], "load_weekli": [1, 2, 8], "load_yearli": [1, 2, 8], "load_zero": [1, 2], "evalu": [1, 4, 5, 8, 9], "anomaly_detector": [1, 4, 8, 9], "anomalydetector": [1, 3, 8, 9], "fit": [1, 3, 4, 6, 7, 8, 9], "fit_anomaly_classifi": [1, 3, 6, 8], "get_new_param": [1, 3, 4, 6, 8, 9], "plot": [1, 3, 4, 7, 8, 9], "score_to_anomali": [1, 3, 6, 8], "holidaydetector": [1, 3, 6, 8, 9], "dates_to_holidai": [1, 3, 4, 6, 8, 9], "plot_anomali": [1, 3, 8], "auto_model": [1, 5, 8], "modelmonst": [1, 3], "modelpredict": [1, 3], "fit_data": [1, 3, 4, 8], "predict": [1, 3, 4, 6, 7, 8, 9], "newgenetictempl": [1, 3], "randomtempl": [1, 3], "templateevalobject": [1, 3], "full_mae_id": [1, 3, 4], "full_mae_error": [1, 3, 4], "concat": [1, 3, 5, 6], "load": [1, 2, 3, 4, 5, 7, 9], "save": [1, 3, 4, 6, 7], "templatewizard": [1, 3], "uniquetempl": [1, 3], "back_forecast": [1, 3, 8], "create_model_id": [1, 3], "dict_recombin": [1, 3], "generate_scor": [1, 3], "generate_score_per_seri": [1, 3], "horizontal_template_to_model_list": [1, 3], "model_forecast": [1, 3, 8, 9], "random_model": [1, 3], "remove_leading_zero": [1, 3, 9], "trans_dict_recomb": [1, 3], "unpack_ensemble_model": [1, 3, 5], "validation_aggreg": [1, 3], "auto_t": [1, 8, 9], "best_model": [1, 
3, 5, 8, 9], "best_model_nam": [1, 3, 8, 9], "best_model_param": [1, 3, 8, 9], "best_model_transformation_param": [1, 3, 8, 9], "best_model_ensembl": [1, 3, 8, 9], "regression_check": [1, 3, 8], "df_wide_numer": [1, 3, 7, 8, 9], "score_per_seri": [1, 3, 4, 8], "best_model_per_series_map": [1, 3, 8], "best_model_per_series_scor": [1, 3, 8], "diagnose_param": [1, 3, 8], "expand_horizont": [1, 3, 8], "export_best_model": [1, 3, 8], "export_templ": [1, 3, 5, 8, 9], "failure_r": [1, 3, 8], "get_metric_corr": [1, 3, 8], "horizontal_per_gener": [1, 3, 8], "horizontal_to_df": [1, 3, 8], "import_best_model": [1, 3, 8], "import_result": [1, 3, 7, 8], "import_templ": [1, 3, 8, 9], "list_failed_model_typ": [1, 3, 8], "load_templ": [1, 3, 8], "mosaic_to_df": [1, 3, 8, 9], "parse_best_model": [1, 3, 8], "plot_back_forecast": [1, 3, 8], "plot_backforecast": [1, 3, 8, 9], "plot_generation_loss": [1, 3, 8, 9], "plot_horizont": [1, 3, 8, 9], "plot_horizontal_model_count": [1, 3, 8], "plot_horizontal_per_gener": [1, 3, 8, 9], "plot_horizontal_transform": [1, 3, 8, 9], "plot_metric_corr": [1, 3, 8], "plot_per_series_error": [1, 3, 8, 9], "plot_per_series_map": [1, 3, 8, 9], "plot_per_series_smap": [1, 3, 8], "plot_transformer_failure_r": [1, 3, 8], "plot_valid": [1, 3, 8], "result": [1, 2, 3, 4, 6, 7, 8, 9], "retrieve_validation_forecast": [1, 3, 8], "save_templ": [1, 3, 8], "validation_agg": [1, 3, 8], "initial_result": [1, 3, 4, 8], "model_result": [1, 3, 4, 5, 7, 8], "error_correl": [1, 3], "fake_regressor": [1, 3, 9], "benchmark": [1, 8], "run": [1, 2, 3, 4, 5, 6, 7], "event_forecast": [1, 8], "eventriskforecast": [1, 3, 8, 9], "predict_histor": [1, 3, 8, 9], "generate_result_window": [1, 3, 8], "generate_risk_arrai": [1, 3, 8], "generate_historic_risk_arrai": [1, 3, 8, 9], "set_limit": [1, 3, 8], "plot_ev": [1, 3, 8, 9], "extract_result_window": [1, 3], "extract_window_index": [1, 3], "set_limit_forecast": [1, 3], "set_limit_forecast_histor": [1, 3], "metric": [1, 2, 4, 7, 8], "array_last_v": [1, 3], "chi_squared_hist_distribution_loss": [1, 3], "contain": [1, 3, 4, 6, 9], "contour": [1, 3, 4, 9], "default_scal": [1, 3], "dwae": [1, 3], "full_metric_evalu": [1, 3], "kde": [1, 3], "kde_kl_dist": [1, 3], "kl_diverg": [1, 3], "linear": [1, 3, 4, 6, 9], "mae": [1, 3, 4, 9], "mda": [1, 3, 9], "mean_absolute_differential_error": [1, 3], "mean_absolute_error": [1, 3], "meda": [1, 3], "median_absolute_error": [1, 3], "mlvb": [1, 3], "mqae": [1, 3, 4], "msle": [1, 3], "numpy_ffil": [1, 3], "oda": [1, 3], "pinball_loss": [1, 3], "precomp_wasserstein": [1, 3], "qae": [1, 3], "rmse": [1, 3, 4, 9], "root_mean_square_error": [1, 3], "rp": [1, 3], "scaled_pinball_loss": [1, 3], "smape": [1, 3, 4, 9], "smooth": [1, 3, 4, 6, 9], "spl": [1, 3, 4, 9], "symmetric_mean_absolute_percentage_error": [1, 3], "threshold_loss": [1, 3], "unsorted_wasserstein": [1, 3], "wasserstein": [1, 3], "valid": [1, 4, 7, 8], "extract_seasonal_val_period": [1, 3], "generate_validation_indic": [1, 3], "validate_num_valid": [1, 3], "arch": [1, 3, 8, 9], "get_param": [1, 4, 8], "base": [1, 3, 6, 8, 9], "modelobject": [1, 3, 4], "basic_profil": [1, 4], "create_forecast_index": [1, 4, 8], "predictionobject": [1, 3, 4], "model_nam": [1, 3, 4, 9], "model_paramet": [1, 4], "transformation_paramet": [1, 4], "upper_forecast": [1, 3, 4, 7, 9], "lower_forecast": [1, 3, 4, 7, 9], "long_form_result": [1, 4, 9], "total_runtim": [1, 4], "apply_constraint": [1, 4], "extract_ensemble_runtim": [1, 4], "plot_df": [1, 4], "plot_ensemble_runtim": [1, 4], 
"plot_grid": [1, 4], "calculate_peak_dens": [1, 4], "create_seaborn_palette_from_cmap": [1, 4], "extract_single_series_from_horz": [1, 4], "extract_single_transform": [1, 4], "plot_distribut": [1, 4], "averagevaluena": [1, 3, 4, 5, 9], "balltreemultivariatemotif": [1, 4, 9], "constantna": [1, 4, 9], "fft": [1, 4, 8, 9], "kalmanstatespac": [1, 4, 9], "cost_funct": [1, 4], "tune_observational_nois": [1, 4], "lastvaluena": [1, 3, 4, 9], "metricmotif": [1, 3, 4, 9], "motif": [1, 3, 4, 9], "motifsimul": [1, 4, 9], "nvar": [1, 4, 9], "seasonalna": [1, 3, 4, 9], "seasonalitymotif": [1, 3, 4, 5, 9], "sectionalmotif": [1, 3, 4, 9], "zeroesna": [1, 3, 4], "looped_motif": [1, 4], "predict_reservoir": [1, 4], "cassandra": [1, 5, 6, 8, 9], "bayesianmultioutputregress": [1, 4], "sample_posterior": [1, 4], "plot_forecast": [1, 4, 8], "plot_compon": [1, 4, 8], "plot_trend": [1, 4, 8], "return_compon": [1, 3, 4, 8], "analyze_trend": [1, 4, 8], "auto_fit": [1, 4, 8], "base_scal": [1, 4, 8], "compare_actual_compon": [1, 4, 8], "create_t": [1, 4, 8], "cross_valid": [1, 4, 8, 9], "feature_import": [1, 4, 8], "next_fit": [1, 4, 8], "plot_th": [1, 4, 8], "predict_new_product": [1, 4, 8], "process_compon": [1, 4, 6, 8], "rolling_trend": [1, 4, 8], "scale_data": [1, 4, 8], "to_origin_spac": [1, 4, 8], "treatment_causal_impact": [1, 4, 8], "holiday_detector": [1, 4, 8], "score": [1, 3, 4, 5, 6, 8, 9], "holiday_count": [1, 4, 8], "holidai": [1, 3, 4, 8, 9], "param": [1, 2, 3, 4, 6, 8, 9], "x_arrai": [1, 4, 8], "predict_x_arrai": [1, 4, 8], "trend_train": [1, 4, 8], "predicted_trend": [1, 4, 8], "clean_regressor": [1, 4], "cost_function_dwa": [1, 4], "cost_function_l1": [1, 4], "cost_function_l1_posit": [1, 4], "cost_function_l2": [1, 4], "cost_function_quantil": [1, 4], "fit_linear_model": [1, 4], "lstsq_minim": [1, 4], "lstsq_solv": [1, 4], "dnn": [1, 8], "kerasrnn": [1, 4], "transformer_build_model": [1, 4], "transformer_encod": [1, 4], "ensembl": [1, 3, 5, 7, 8], "bestnensembl": [1, 4], "distensembl": [1, 4], "ensembleforecast": [1, 4], "ensembletemplategener": [1, 4], "hdistensembl": [1, 4], "horizontalensembl": [1, 4], "horizontaltemplategener": [1, 4], "mosaicensembl": [1, 4], "find_pattern": [1, 4], "generalize_horizont": [1, 4], "generate_crosshair_scor": [1, 4], "generate_crosshair_score_list": [1, 4], "generate_mosaic_templ": [1, 4], "horizontal_classifi": [1, 4], "horizontal_xi": [1, 4], "is_horizont": [1, 4], "is_mosa": [1, 4], "mlens_help": [1, 4], "mosaic_classifi": [1, 4], "mosaic_or_horizont": [1, 4], "mosaic_to_horizont": [1, 4, 9], "mosaic_xi": [1, 4], "n_limited_horz": [1, 4], "parse_forecast_length": [1, 4], "parse_horizont": [1, 4], "parse_mosa": [1, 4], "process_mosaic_arrai": [1, 4], "summarize_seri": [1, 4], "gluont": [1, 3, 8, 9], "greykit": [1, 8, 9], "seek_the_oracl": [1, 4], "matrix_var": [1, 8], "latc": [1, 4, 9], "mar": [1, 4, 9], "rrvar": [1, 4, 9], "tmf": [1, 4, 9], "conj_grad_w": [1, 4], "conj_grad_x": [1, 4], "dmd": [1, 4], "dmd4cast": [1, 4], "ell_w": [1, 4], "ell_x": [1, 4], "generate_psi": [1, 4], "latc_imput": [1, 4], "latc_predictor": [1, 4], "mat2ten": [1, 4], "svt_tnn": [1, 4], "ten2mat": [1, 4], "update_cg": [1, 4], "var": [1, 4, 9], "var4cast": [1, 4], "mlensembl": [1, 8], "create_featur": [1, 4], "model_list": [1, 3, 7, 8, 9], "auto_model_list": [1, 4], "model_list_to_dict": [1, 4], "neural_forecast": [1, 8], "neuralforecast": [1, 4, 5, 9], "prophet": [1, 3, 6, 8, 9], "fbprophet": [1, 4, 9], "neuralprophet": [1, 4, 9], "pytorch": [1, 8, 9], "pytorchforecast": [1, 4, 9], 
"sklearn": [1, 6, 7, 8, 9], "componentanalysi": [1, 4, 9], "datepartregress": [1, 3, 4, 5, 6, 9], "multivariateregress": [1, 4, 9], "preprocessingregress": [1, 4, 9], "rollingregress": [1, 4, 9], "univariateregress": [1, 4, 9], "vectorizedmultioutputgpr": [1, 4], "predict_proba": [1, 4], "windowregress": [1, 4, 9], "generate_classifier_param": [1, 4], "generate_regressor_param": [1, 4], "retrieve_classifi": [1, 4], "retrieve_regressor": [1, 4], "rolling_x_regressor": [1, 4], "rolling_x_regressor_regressor": [1, 4], "ardl": [1, 4, 9], "arima": [1, 4, 5, 6, 9], "dynamicfactor": [1, 4, 9], "dynamicfactormq": [1, 4, 9], "et": [1, 3, 4, 6, 9], "glm": [1, 3, 4, 6, 9], "gl": [1, 3, 4, 6, 9], "theta": [1, 4, 9], "unobservedcompon": [1, 4, 9], "varmax": [1, 4, 9], "vecm": [1, 4, 6, 9], "arima_seek_the_oracl": [1, 4], "glm_forecast_by_column": [1, 4], "tfp": [1, 8], "tfpregress": [1, 4, 9], "tfpregressor": [1, 4], "tensorflowst": [1, 4, 9], "tide": [1, 5, 8, 9], "timecovari": [1, 4], "get_covari": [1, 4], "timeseriesdata": [1, 4], "test_val_gen": [1, 4], "tf_dataset": [1, 4], "train_gen": [1, 4], "get_holidai": [1, 4], "mae_loss": [1, 4], "mape": [1, 3, 4], "nrmse": [1, 4], "wape": [1, 4], "gener": [1, 2, 3, 4, 6, 7, 8, 9], "general_templ": [1, 5], "tool": [1, 2, 3, 4, 7, 8, 9], "anomaly_util": [1, 8], "anomaly_df_to_holidai": [1, 6], "anomaly_new_param": [1, 6], "create_dates_df": [1, 6], "detect_anomali": [1, 6], "holiday_new_param": [1, 6], "limits_to_anomali": [1, 6], "loop_sk_outli": [1, 6], "nonparametric_multivari": [1, 6], "sk_outlier": [1, 6], "values_to_anomali": [1, 6], "zscore_survival_funct": [1, 6], "calendar": [1, 3, 8], "gregorian_to_chines": [1, 6], "gregorian_to_christian_lunar": [1, 6], "gregorian_to_hebrew": [1, 6], "gregorian_to_islam": [1, 6], "heb_is_leap": [1, 6], "lunar_from_lunar": [1, 6], "lunar_from_lunar_ful": [1, 6], "to_jd": [1, 6], "cointegr": [1, 4, 8], "btcd_decompos": [1, 6], "coint_johansen": [1, 6], "fourier_seri": [1, 6], "lagmat": [1, 6], "cpu_count": [1, 8], "set_n_job": [1, 6], "fast_kalman": [1, 8], "usag": 1, "exampl": [1, 2, 3, 4, 7], "gaussian": [1, 4, 6], "empti": [1, 2, 3, 4, 6], "unvectorize_st": [1, 6], "unvectorize_var": [1, 6], "kalmanfilt": [1, 6], "comput": [1, 3, 4, 6], "em": [1, 6], "em_observation_nois": [1, 6], "em_process_nois": [1, 6], "predict_next": [1, 6], "predict_observ": [1, 6], "smooth_curr": [1, 6], "updat": [1, 4, 6, 9], "autoshap": [1, 6], "ddot": [1, 6], "ddot_t_right": [1, 6], "ddot_t_right_old": [1, 6], "dinv": [1, 6], "douter": [1, 6], "em_initial_st": [1, 6], "ensure_matrix": [1, 6], "holt_winters_damped_matric": [1, 6], "new_kalman_param": [1, 6], "priv_smooth": [1, 6], "priv_update_with_nan_check": [1, 6], "random_state_spac": [1, 6], "update_with_nan_check": [1, 6], "fourier_extrapol": [1, 6], "hierarchi": [1, 3, 8], "reconcil": [1, 6], "holiday_flag": [1, 6], "query_holidai": [1, 6], "imput": [1, 4, 8], "fillna": [1, 3, 6, 9], "seasonalitymotifimput": [1, 6], "simpleseasonalitymotifimput": [1, 6], "biased_ffil": [1, 6], "fake_date_fil": [1, 6], "fake_date_fill_old": [1, 6], "fill_forward": [1, 6], "fill_forward_alt": [1, 6], "fill_mean": [1, 6], "fill_mean_old": [1, 6], "fill_median": [1, 6], "fill_median_old": [1, 6], "fill_zero": [1, 6], "fillna_np": [1, 6], "rolling_mean": [1, 6], "lunar": [1, 8], "dco": [1, 6], "dsin": [1, 6], "fixangl": [1, 6], "kepler": [1, 6], "moon_phas": [1, 6], "moon_phase_df": [1, 6], "phase_str": [1, 6], "todeg": [1, 6], "torad": [1, 6], "percentil": [1, 8], "nan_percentil": [1, 6], 
"nan_quantil": [1, 6], "trimmed_mean": [1, 6], "probabilist": [1, 3, 4, 7, 8, 9], "point_to_prob": [1, 6], "variable_point_to_prob": [1, 6], "historic_quantil": [1, 6], "inferred_norm": [1, 6], "percentileofscore_appli": [1, 6], "profil": [1, 8], "data_profil": [1, 6], "regressor": [1, 3, 4, 7, 8], "create_lagged_regressor": [1, 6, 8], "create_regressor": [1, 6, 8], "season": [1, 3, 4, 8, 9], "create_datepart_compon": [1, 6], "create_seasonality_featur": [1, 6], "date_part": [1, 6], "fourier_df": [1, 6], "random_datepart": [1, 6], "seasonal_independent_match": [1, 6], "seasonal_int": [1, 6], "seasonal_window_match": [1, 6], "shape": [1, 2, 3, 4, 7, 8, 9], "numerictransform": [1, 6], "fit_transform": [1, 6, 8, 9], "inverse_transform": [1, 6, 7, 8, 9], "clean_weight": [1, 6], "df_cleanup": [1, 6], "freq_to_timedelta": [1, 6], "infer_frequ": [1, 6, 8], "long_to_wid": [1, 6, 8, 9], "simple_train_test_split": [1, 6], "split_digits_and_non_digit": [1, 6], "subset_seri": [1, 6], "wide_to_3d": [1, 6], "threshold": [1, 3, 4, 8, 9], "nonparametricthreshold": [1, 6], "compare_to_epsilon": [1, 6], "find_epsilon": [1, 6], "prune_anom": [1, 6], "score_anomali": [1, 6], "consecutive_group": [1, 6], "nonparametr": [1, 3, 6], "alignlastdiff": [1, 6], "alignlastvalu": [1, 6], "find_centerpoint": [1, 6], "anomalyremov": [1, 6], "bkbandpassfilt": [1, 6], "btcd": [1, 6], "centerlastvalu": [1, 6], "centersplit": [1, 6], "clipoutli": [1, 6], "cumsumtransform": [1, 6], "datepartregressiontransform": [1, 6], "detrend": [1, 4, 6, 9], "diffsmooth": [1, 6], "differencedtransform": [1, 3, 6, 9], "discret": [1, 6], "ewmafilt": [1, 6], "emptytransform": [1, 6], "fftdecomposit": [1, 6], "fftfilter": [1, 6], "fastica": [1, 6], "generaltransform": [1, 6, 8, 9], "fill_na": [1, 6, 8], "retrieve_transform": [1, 6, 8], "hpfilter": [1, 6], "historicvalu": [1, 6], "holidaytransform": [1, 6], "intermittentoccurr": [1, 6], "kalmansmooth": [1, 6], "levelshiftmag": [1, 6], "levelshifttransform": [1, 6], "locallineartrend": [1, 6], "meandiffer": [1, 6], "pca": [1, 4, 6], "pctchangetransform": [1, 6], "positiveshift": [1, 6], "randomtransform": [1, 6, 8], "regressionfilt": [1, 6], "replaceconst": [1, 6], "rollingmeantransform": [1, 3, 6], "round": [1, 3, 6, 7], "stlfilter": [1, 6], "scipyfilt": [1, 6, 9], "seasonaldiffer": [1, 6], "sintrend": [1, 6], "fit_sin": [1, 6], "slice": [1, 3, 6, 9], "statsmodelsfilt": [1, 6], "bkfilter": [1, 6, 9], "cffilter": [1, 6], "convolution_filt": [1, 6], "bkfilter_st": [1, 6], "clip_outli": [1, 6], "exponential_decai": [1, 6], "get_transformer_param": [1, 6], "random_clean": [1, 6], "remove_outli": [1, 6], "simple_context_slic": [1, 6], "transformer_list_to_dict": [1, 6], "window_funct": [1, 8], "chunk_reshap": [1, 6], "last_window": [1, 6], "np_2d_arang": [1, 6], "retrieve_closest_indic": [1, 6], "rolling_window_view": [1, 6], "sliding_window_view": [1, 6], "window_id_mak": [1, 6], "window_lin_reg": [1, 6], "window_lin_reg_mean": [1, 6], "window_lin_reg_mean_no_nan": [1, 6], "window_mak": [1, 6], "window_maker_2": [1, 6], "window_maker_3": [1, 6], "window_sum_mean": [1, 6], "window_sum_mean_nan_tail": [1, 6], "window_sum_nan_mean": [1, 6], "select": [1, 4, 6, 7, 9], "http": [1, 2, 3, 4, 6, 9], "github": [1, 4, 6, 7, 9], "com": [1, 2, 4, 6, 9], "winedarksea": 1, "class": [1, 3, 4, 6, 7, 9], "output": [1, 2, 3, 4, 6, 7, 9], "multivari": [1, 3, 4, 6, 7, 9], "method": [1, 3, 4, 6, 7, 9], "zscore": [1, 3, 6], "transform_dict": [1, 3, 6], "transformation_param": [1, 3, 4, 6, 9], "0": [1, 2, 3, 4, 5, 6, 7, 
9], "datepart_method": [1, 3, 4, 6], "simple_3": [1, 3, 6], "regression_model": [1, 3, 4, 6], "elasticnet": [1, 3, 6], "model_param": [1, 3, 4, 6, 9], "forecast_param": [1, 3, 6, 9], "none": [1, 2, 3, 4, 6, 7, 9], "method_param": [1, 3, 6], "eval_period": [1, 3, 6, 9], "isolated_onli": [1, 3, 6], "fals": [1, 2, 3, 4, 5, 6, 7, 9], "n_job": [1, 3, 4, 6, 7, 9], "1": [1, 2, 3, 4, 5, 6, 7, 9], "object": [1, 2, 3, 4, 6, 7, 9], "df": [1, 2, 3, 4, 6, 7, 9], "all": [1, 2, 3, 4, 6, 7], "return": [1, 2, 3, 4, 6], "paramet": [1, 2, 3, 4, 6, 7], "pd": [1, 3, 4, 5, 6, 9], "datafram": [1, 2, 3, 4, 6, 7, 9], "wide": [1, 2, 3, 4, 6, 7], "style": [1, 2, 3, 4, 6, 7, 9], "classif": [1, 3, 6], "outlier": [1, 3, 6, 9], "": [1, 3, 4, 6, 7, 9], "static": [1, 3, 4, 6], "random": [1, 2, 3, 4, 6, 9], "new": [1, 3, 4, 6, 9], "combin": [1, 3, 4, 6, 7, 9], "str": [1, 2, 3, 4, 6, 9], "fast": [1, 3, 4, 5, 6, 7, 9], "deep": [1, 3, 7, 9], "default": [1, 2, 3, 4, 6, 7, 9], "ani": [1, 3, 4, 6, 7, 9], "name": [1, 2, 3, 4, 6, 7], "ie": [1, 2, 3, 4, 6, 7, 9], "iqr": [1, 3], "specifi": [1, 3, 4, 6, 9], "onli": [1, 3, 4, 6, 7, 9], "series_nam": [1, 3], "titl": [1, 3, 4], "plot_kwarg": [1, 3], "A": [1, 3, 4, 6, 7], "decisiontre": [1, 3, 4, 6], "ar": [1, 2, 3, 4, 6, 7, 9], "nonstandard": [1, 3, 6], "forecast_length": [1, 3, 4, 6, 7, 9], "int": [1, 2, 3, 4, 6], "14": [1, 3, 4, 9], "frequenc": [1, 2, 3, 4, 6, 7], "infer": [1, 3, 4, 6, 7, 9], "prediction_interv": [1, 3, 4, 6, 7, 9], "float": [1, 2, 3, 4, 6, 9], "9": [1, 3, 4, 6, 7, 9], "max_gener": [1, 3, 7, 9], "20": [1, 2, 3, 4, 6, 9], "no_neg": [1, 3, 9], "bool": [1, 2, 3, 4, 6], "constraint": [1, 3, 4, 9], "initial_templ": [1, 3, 9], "random_se": [1, 2, 3, 4, 6, 9], "2022": [1, 3, 4, 6], "holiday_countri": [1, 3, 4, 6], "u": [1, 2, 3, 4, 6, 9], "subset": [1, 3, 4, 7, 9], "aggfunc": [1, 3, 6, 7, 9], "first": [1, 2, 3, 4, 6, 7, 9], "na_toler": [1, 3, 6], "metric_weight": [1, 3, 7, 9], "dict": [1, 2, 3, 4, 6, 7], "containment_weight": [1, 3, 9], "contour_weight": [1, 3, 9], "01": [1, 2, 3, 4, 6, 7, 9], "imle_weight": [1, 3, 9], "made_weight": [1, 3, 9], "05": [1, 2, 3, 4, 6, 9], "mae_weight": [1, 3, 9], "2": [1, 2, 3, 4, 6, 7, 9], "mage_weight": [1, 3, 9], "mle_weight": [1, 3, 9], "oda_weight": [1, 3], "001": [1, 3, 4, 6], "rmse_weight": [1, 3, 9], "runtime_weight": [1, 3, 7, 9], "smape_weight": [1, 3, 9], "5": [1, 2, 3, 4, 5, 6, 9], "spl_weight": [1, 3, 9], "wasserstein_weight": [1, 3], "drop_most_rec": [1, 3, 6, 7, 9], "drop_data_older_than_period": [1, 3, 6, 9], "transformer_list": [1, 3, 5, 6, 7, 9], "auto": [1, 3, 4, 6, 7, 9], "transformer_max_depth": [1, 3, 5, 6, 7], "models_mod": [1, 3, 9], "num_valid": [1, 3, 4, 5, 7, 9], "models_to_valid": [1, 3, 7, 9], "15": [1, 3, 4, 6, 9], "max_per_model_class": [1, 3, 5, 9], "validation_method": [1, 3, 4, 7, 9], "backward": [1, 3, 4, 6, 7, 9], "min_allowed_train_perc": [1, 3, 4, 6], "prefill_na": [1, 3, 6, 9], "introduce_na": [1, 3], "preclean": [1, 3], "model_interrupt": [1, 3, 7], "true": [1, 2, 3, 4, 5, 6, 7, 9], "generation_timeout": [1, 3], "current_model_fil": [1, 3], "force_gc": [1, 3], "verbos": [1, 3, 4, 6, 9], "genet": [1, 3, 7, 9], "algorithm": [1, 3, 4, 6, 7, 9], "number": [1, 2, 3, 4, 6, 7, 9], "period": [1, 2, 3, 4, 6, 9], "over": [1, 3, 4, 6, 7, 9], "which": [1, 2, 3, 4, 6, 7, 9], "can": [1, 2, 3, 4, 6, 7], "overriden": [1, 3], "later": [1, 3, 6], "when": [1, 3, 4, 6, 7, 9], "you": [1, 3, 4, 6, 7], "don": [1, 3, 4, 6, 9], "t": [1, 2, 3, 4, 6], "have": [1, 2, 3, 4, 6, 7, 9], "much": [1, 2, 3, 6, 9], "histor": [1, 3, 4, 6, 
9], "small": [1, 3, 4, 6, 9], "length": [1, 2, 3, 4, 6, 9], "full": [1, 3, 6, 9], "desir": [1, 3, 4, 6, 9], "lenght": [1, 3], "usual": [1, 2, 3, 4, 6, 7, 9], "best": [1, 3, 4, 6, 7, 9], "possibl": [1, 3, 4, 6, 7, 9], "approach": [1, 3, 4, 6, 9], "given": [1, 3, 4, 6, 7, 9], "limit": [1, 3, 4, 6, 7, 9], "specif": [1, 2, 3, 4, 6, 7, 9], "datetim": [1, 2, 3, 4, 6, 7, 9], "offset": [1, 3, 6, 9], "forc": [1, 3, 4, 9], "rollup": [1, 3, 9], "daili": [1, 2, 3, 4, 6, 7, 9], "input": [1, 3, 4, 6, 7, 9], "m": [1, 2, 3, 4, 6, 9], "monthli": [1, 2, 3, 6, 7, 9], "uncertainti": [1, 3, 4, 6], "rang": [1, 3, 4, 6, 9], "upper": [1, 3, 4, 6, 7, 9], "lower": [1, 3, 4, 6, 7, 9], "adjust": [1, 3, 4, 6, 7, 9], "rare": [1, 3, 4, 9], "match": [1, 2, 3, 4, 6, 9], "actual": [1, 3, 4, 6, 9], "more": [1, 2, 3, 4, 6, 7], "longer": [1, 3, 9], "runtim": [1, 3, 4, 7, 9], "better": [1, 2, 3, 4, 9], "accuraci": [1, 3, 4, 7, 9], "It": [1, 3, 4, 6, 7, 9], "call": [1, 2, 3, 4, 6, 9], "max": [1, 2, 3, 4, 6, 7, 9], "becaus": [1, 3, 4, 6, 7, 9], "somedai": [1, 3], "earli": [1, 3], "stop": [1, 3, 6, 7], "option": [1, 3, 4, 6, 7], "now": [1, 3, 4, 6, 9], "thi": [1, 2, 3, 4, 6, 7, 9], "just": [1, 2, 3, 4, 6], "exact": [1, 3, 6], "neg": [1, 3, 4], "up": [1, 2, 3, 6, 9], "valu": [1, 2, 3, 4, 6, 7, 9], "st": [1, 2, 3, 4, 6, 9], "dev": [1, 3, 4, 6, 9], "abov": [1, 3, 4, 6, 9], "below": [1, 2, 3, 6, 9], "min": [1, 3, 4, 9], "constrain": [1, 3, 6, 9], "also": [1, 3, 4, 6, 7], "instead": [1, 2, 3, 4, 6], "accept": [1, 3, 6, 9], "dictionari": [1, 3, 4, 6, 9], "follow": [1, 3, 4, 6, 9], "kei": [1, 2, 3, 4, 9], "constraint_method": [1, 3, 4], "one": [1, 3, 4, 6, 9], "stdev_min": [1, 3, 4], "stdev": [1, 3, 4], "mean": [1, 3, 4, 6, 9], "absolut": [1, 3, 4, 9], "arrai": [1, 3, 4, 6, 9], "final": [1, 3, 4, 6, 9], "each": [1, 2, 3, 4, 6, 7, 9], "quantil": [1, 3, 4, 6, 9], "constraint_regular": [1, 3, 4], "where": [1, 3, 4, 6, 7, 9], "hard": [1, 3, 4, 9], "cutoff": [1, 3, 4, 6], "between": [1, 2, 3, 4, 6, 7, 9], "penalti": [1, 3, 4], "term": [1, 3, 4], "upper_constraint": [1, 3, 4], "unus": [1, 3, 4, 6], "lower_constraint": [1, 3, 4], "bound": [1, 3, 4, 6, 7, 9], "appli": [1, 3, 4, 6, 7, 9], "otherwis": [1, 2, 3, 4, 6], "list": [1, 2, 3, 4, 6, 7], "comma": [1, 3, 9], "separ": [1, 3, 4, 6, 9], "string": [1, 3, 4, 6, 9], "simpl": [1, 3, 4, 6, 7], "distanc": [1, 3, 4, 6, 7, 9], "horizont": [1, 3, 4, 7, 9], "mosaic": [1, 3, 4, 7, 9], "subsampl": [1, 3], "randomli": [1, 3, 6], "start": [1, 2, 3, 4, 5, 6, 7, 9], "includ": [1, 3, 4, 6, 7, 9], "both": [1, 3, 6, 9], "previou": [1, 3, 6], "self": [1, 3, 4], "seed": [1, 2, 3, 6], "allow": [1, 3, 4, 6, 7, 9], "slightli": [1, 3, 6], "consist": [1, 3, 6, 9], "pass": [1, 2, 3, 4, 6, 7], "through": [1, 3, 4, 6, 7, 9], "some": [1, 2, 3, 4, 6, 7, 9], "maximum": [1, 3, 6, 9], "onc": [1, 3, 4], "mani": [1, 3, 4, 6, 7, 9], "take": [1, 3, 4, 6, 7, 9], "column": [1, 2, 3, 4, 5, 6, 7], "unless": [1, 3, 4, 9], "case": [1, 2, 3, 4, 6, 9], "same": [1, 2, 3, 4, 6, 9], "roll": [1, 3, 4, 6, 9], "higher": [1, 3, 4, 6, 7, 9], "duplic": [1, 3, 6], "timestamp": [1, 3, 4, 6], "remov": [1, 3, 4, 6, 9], "try": [1, 2, 3, 6, 9], "np": [1, 3, 4, 6, 9], "sum": [1, 3, 6, 9], "bewar": [1, 3, 6, 9], "numer": [1, 3, 4, 6, 9], "aggreg": [1, 3, 6, 7, 9], "like": [1, 2, 3, 4, 6, 9], "work": [1, 2, 3, 4, 6, 9], "non": [1, 3, 4, 6, 9], "chang": [1, 3, 6, 9], "nan": [1, 3, 4, 6, 7, 9], "drop": [1, 3, 5, 6, 9], "thei": [1, 3, 4, 6, 7, 9], "than": [1, 3, 4, 6, 9], "percent": [1, 2, 3, 6, 9], "95": [1, 3, 6, 9], "here": [1, 3, 4, 6, 9], "would": [1, 
3, 4, 9], "weight": [1, 3, 4, 6, 7, 9], "assign": [1, 3], "effect": [1, 3, 4, 6, 9], "rank": [1, 3, 4, 6], "n": [1, 3, 4, 6, 9], "most": [1, 2, 3, 4, 6, 7, 9], "recent": [1, 2, 3, 4, 6, 9], "point": [1, 3, 4, 6, 7, 9], "sai": [1, 3, 7, 9], "sale": [1, 3, 6, 9], "current": [1, 2, 3, 4, 6, 7, 9], "unfinish": [1, 3], "month": [1, 3, 6, 7, 9], "occur": [1, 3, 6, 9], "after": [1, 3, 4, 6, 7, 9], "aggregr": [1, 3], "so": [1, 2, 3, 4, 6, 7, 9], "whatev": [1, 3, 4], "alia": [1, 3, 4, 6], "prob": [1, 3], "affect": [1, 3, 4, 6], "algorithim": [1, 3], "from": [1, 2, 3, 4, 5, 6, 7, 9], "probabl": [1, 2, 3, 4, 6, 7, 9], "note": [1, 2, 3, 4, 6], "doe": [1, 3, 4, 6, 9], "initi": [1, 3, 4, 6, 9], "alias": [1, 3, 4, 6], "superfast": [1, 3, 7, 9], "scalabl": [1, 3, 7], "should": [1, 3, 4, 6, 9], "fewer": [1, 2, 3, 9], "memori": [1, 3, 4, 6, 9], "issu": [1, 3, 4, 7, 9], "scale": [1, 3, 4, 6, 7, 9], "sequenti": [1, 3], "faster": [1, 2, 3, 4, 6, 7], "newli": [1, 3], "sporad": [1, 3], "util": [1, 3, 4, 6, 7, 9], "slower": [1, 3, 7, 9], "user": [1, 3, 4, 6, 7, 9], "mode": [1, 3, 4, 7], "capabl": [1, 3, 9], "gradient_boost": [1, 3], "neuralnet": [1, 3, 4], "regress": [1, 3, 4, 6], "cross": [1, 3, 4, 7], "perform": [1, 3, 6, 7, 9], "train": [1, 3, 4, 6, 7], "test": [1, 2, 3, 4, 6, 9], "split": [1, 3, 4, 6, 9], "confus": [1, 3, 4, 6, 7, 9], "eval": [1, 3], "segment": [1, 3, 6, 9], "total": [1, 3, 4, 6], "avail": [1, 3, 4, 6, 7], "out": [1, 3, 4, 7, 9], "50": [1, 3, 4], "top": [1, 3, 6, 7, 9], "Or": [1, 3], "tri": [1, 3, 7, 9], "99": [1, 3, 4], "100": [1, 3, 4, 6, 7, 9], "If": [1, 3, 4, 6, 7, 9], "addit": [1, 3, 4, 6, 9], "per_seri": [1, 3, 4], "ad": [1, 3, 4, 6, 7], "what": [1, 2, 3, 4], "famili": [1, 3, 4], "even": [1, 3, 4, 7, 9], "integ": [1, 3, 6], "recenc": [1, 3], "shorter": [1, 3, 6], "set": [1, 2, 3, 4, 6, 7, 9], "equal": [1, 3, 4, 6, 9], "size": [1, 3, 4, 6, 9], "poetic": [1, 3], "less": [1, 3, 4, 6, 9], "strategi": [1, 3], "other": [1, 2, 3, 4, 6, 7], "similar": [1, 3, 4, 6, 7, 9], "364": [1, 3, 6, 9], "year": [1, 3, 6], "immedi": [1, 3, 4, 6, 9], "automat": [1, 3, 6, 7, 9], "find": [1, 3, 4, 6, 7, 9], "section": [1, 3, 7, 9], "custom": [1, 3, 4, 6], "need": [1, 2, 3, 4, 6, 7], "validation_index": [1, 3, 9], "datetimeindex": [1, 3, 4, 6, 7, 9], "tail": [1, 3, 6, 9], "els": [1, 2, 3, 4, 6, 7, 9], "rais": [1, 3, 6], "error": [1, 3, 4, 6, 7, 9], "10": [1, 3, 4, 6, 9], "mandat": [1, 3], "unrecommend": [1, 3], "replac": [1, 3, 6], "lead": [1, 3, 7, 9], "zero": [1, 2, 3, 4, 6, 9], "collect": [1, 3, 4, 6, 7], "hasn": [1, 3], "yet": [1, 3, 4, 6, 9], "fill": [1, 3, 4, 6, 7], "leav": [1, 3, 9], "interpol": [1, 3, 4, 6], "recommend": [1, 3, 6, 7, 9], "median": [1, 3, 4, 6], "mai": [1, 2, 3, 4, 6, 7, 9], "assum": [1, 3, 6, 9], "whether": [1, 2, 3, 4, 6], "last": [1, 3, 4, 6, 9], "help": [1, 3, 4, 6, 7, 9], "make": [1, 2, 3, 4, 6, 7, 9], "robust": [1, 3, 4, 6], "introduc": [1, 3], "row": [1, 2, 3, 5, 6], "Will": [1, 3, 4, 6], "keyboardinterrupt": [1, 3, 7], "quit": [1, 3, 6, 9], "entir": [1, 3, 6, 7, 9], "program": [1, 3], "attempt": [1, 3, 6, 9], "conjunct": [1, 3], "result_fil": [1, 3, 7], "accident": [1, 3], "complet": [1, 3, 4, 6], "termin": [1, 3], "end_gener": [1, 3], "end": [1, 2, 3, 6], "skip": [1, 2, 3, 4, 6], "again": [1, 3, 9], "minut": [1, 3], "proceed": [1, 3], "check": [1, 3, 6, 7, 9], "offer": [1, 3, 9], "approxim": [1, 3, 6], "timeout": [1, 2, 3], "overal": [1, 3, 6, 9], "cap": [1, 3, 6], "per": [1, 3, 4, 6, 9], "file": [1, 3, 9], "path": [1, 3], "write": [1, 3, 4, 5], "disk": [1, 3], "debug": [1, 3], 
"crash": [1, 3, 4], "json": [1, 3, 4, 5, 9], "append": [1, 3], "gc": [1, 3], "won": [1, 2, 3, 4, 6, 7, 9], "differ": [1, 3, 4, 6, 7, 9], "reduc": [1, 2, 3, 4, 7, 9], "give": [1, 3, 6, 7], "core": [1, 3, 4, 6, 7], "parallel": [1, 3, 4, 7, 9], "process": [1, 3, 4, 6], "joblib": [1, 3, 4, 9], "context": [1, 3, 4], "manag": [1, 3, 4, 6, 9], "type": [1, 2, 3, 4, 6, 7, 9], "id": [1, 2, 3, 4, 6, 7], "future_regressor": [1, 3, 4, 6, 9], "n_split": [1, 3, 9], "creat": [1, 2, 3, 4, 6, 9], "backcast": [1, 3, 6], "back": [1, 3, 4, 6, 9], "OF": [1, 3], "sampl": [1, 2, 3, 4, 6, 7, 9], "often": [1, 3, 6, 7, 9], "As": [1, 3, 6, 9], "repres": [1, 3, 4, 6, 9], "real": [1, 3, 4, 9], "world": [1, 3, 4, 9], "There": [1, 3, 7, 9], "jump": [1, 3, 9], "chunk": [1, 3, 9], "arg": [1, 3, 4, 6], "except": [1, 3, 4], "piec": [1, 3, 9], "fastest": [1, 3], "observ": [1, 3, 4, 6], "level": [1, 3, 4, 6, 7, 9], "function": [1, 3, 4, 6, 7, 9], "standard": [1, 3, 4, 6], "access": [1, 3, 9], "isn": [1, 3, 4, 6, 9], "classic": [1, 3], "percentag": [1, 3, 9], "intend": [1, 3, 9], "quick": [1, 3, 9], "visual": [1, 3, 9], "statist": [1, 3, 4, 6, 7], "see": [1, 3, 4, 6, 7, 9], "target": [1, 3, 4, 6, 9], "waterfall_plot": [1, 3], "explain": [1, 3, 4], "caus": [1, 3, 4, 9], "measur": [1, 2, 3, 6, 9], "outcom": [1, 3, 4, 9], "shap": [1, 3], "coeffici": [1, 3], "correl": [1, 3], "show": [1, 3, 4, 9], "waterfal": [1, 3], "enabl": [1, 3], "expand": [1, 3, 4, 6], "rerun": [1, 3, 9], "filenam": [1, 3], "kwarg": [1, 2, 3, 4, 6], "ever": [1, 3, 6], "40": [1, 3, 6], "include_result": [1, 3], "unpack_ensembl": [1, 3], "min_metr": [1, 3], "max_metr": [1, 3], "reusabl": [1, 3], "csv": [1, 3, 5, 9], "slowest": [1, 3, 6, 9], "diagnost": [1, 3, 4], "compon": [1, 3, 4, 6], "larger": [1, 3, 4, 6, 9], "count": [1, 3, 4, 6], "lowest": [1, 3, 4, 6], "wai": [1, 3, 4, 6], "major": [1, 3, 9], "part": [1, 3, 4, 6, 9], "addon": [1, 3], "result_set": [1, 3], "fraction": [1, 3, 9], "date_col": [1, 3, 6, 7, 9], "value_col": [1, 3, 6, 7, 9], "id_col": [1, 3, 6, 7, 9], "grouping_id": [1, 3, 6], "suppli": [1, 3, 4, 6, 9], "three": [1, 3, 7, 9], "identifi": [1, 3, 4, 6, 9], "singl": [1, 3, 4, 6, 7, 9], "extern": [1, 3, 9], "colname1": [1, 3], "colname2": [1, 3], "increas": [1, 2, 3, 4, 7, 9], "left": [1, 3, 6, 9], "blank": [1, 3], "its": [1, 3, 4, 9], "tabl": [1, 3, 4], "pickl": [1, 3], "inform": [1, 3, 4, 6], "series_id": [1, 3, 4, 6, 7, 9], "group_id": [1, 3, 6], "map": [1, 3, 4], "x": [1, 3, 4, 5, 6, 9], "retain": [1, 3], "potenti": [1, 3, 6, 9], "futur": [1, 3, 4, 6, 9], "setup": [1, 3], "involv": [1, 3], "percent_best": [1, 3], "among": [1, 3, 9], "across": [1, 3, 4, 7, 9], "helper": [1, 3], "import_target": [1, 3], "enforce_model_list": [1, 3], "include_ensembl": [1, 3], "overrid": [1, 3], "exist": [1, 3, 4, 6, 9], "add": [1, 3, 4, 6, 9], "anoth": [1, 3, 6], "add_on": [1, 3], "include_horizont": [1, 3], "force_valid": [1, 3], "previous": [1, 3, 6], "must": [1, 2, 3, 4, 6, 9], "done": [1, 3, 7, 9], "befor": [1, 3, 4, 6, 7, 9], "locat": [1, 3], "alreadi": [1, 3, 4, 6, 7, 9], "keep": [1, 3, 4, 6], "init": [1, 3, 4], "anywai": [1, 3], "unpack": [1, 3], "kept": [1, 3], "overridden": [1, 3], "keep_ensembl": [1, 3, 5], "get": [1, 2, 3, 4, 6, 7, 9], "sent": [1, 3], "regardless": [1, 3, 4], "weird": [1, 3], "behavior": [1, 3, 6], "wtih": [1, 3], "In": [1, 3, 4, 6, 7, 9], "validate_import": [1, 3], "eras": [1, 3], "fail": [1, 3, 4, 9], "had": [1, 3, 4], "least": [1, 3, 6, 9], "success": [1, 3, 6], "funciton": [1, 3], "readabl": [1, 3, 9], "start_dat": [1, 2, 3, 4, 7, 
9], "alpha": [1, 3, 4, 6], "25": [1, 3, 4, 6], "facecolor": [1, 3, 4], "black": [1, 3, 4], "loc": [1, 3, 4], "accur": [1, 3, 7, 9], "gain": [1, 3, 6, 9], "improv": [1, 3, 6, 7, 9], "doesn": [1, 3, 6, 9], "account": [1, 3, 6], "benefit": [1, 3, 9], "seen": [1, 3, 9], "max_seri": [1, 3], "chosen": [1, 3, 7, 9], "common": [1, 3, 6, 9], "model_id": [1, 3, 4], "color_list": [1, 3], "top_n": [1, 3], "frequent": [1, 3], "factor": [1, 3, 4], "nest": [1, 3, 9], "well": [1, 3, 4, 6, 7, 9], "do": [1, 3, 4, 6, 9], "slow": [1, 2, 3, 4, 6, 9], "captur": [1, 3, 4, 9], "hex": [1, 3], "color": [1, 3, 4], "bar": [1, 3, 6], "col": [1, 3, 4, 6], "The": [1, 3, 4, 6, 7, 9], "highli": [1, 3, 4, 9], "those": [1, 3, 4, 6, 9], "mostli": [1, 3, 4, 6, 9], "unscal": [1, 3, 9], "ones": [1, 3, 9], "max_name_char": [1, 3], "ff9912": [1, 3], "figsiz": [1, 3, 4], "12": [1, 3, 4, 6, 7, 9], "4": [1, 3, 4, 5, 6, 7, 9], "kind": [1, 3, 6, 9], "upper_clip": [1, 3], "1000": [1, 3, 4, 6, 9], "avg": [1, 3, 4, 6], "sort": [1, 3, 6], "chop": [1, 3], "tupl": [1, 2, 3, 4, 6], "axi": [1, 3, 4, 6, 9], "pie": [1, 3, 9], "prevent": [1, 3, 4, 9], "unnecessari": [1, 3], "distort": [1, 3], "To": [1, 3, 9], "compat": [1, 3], "necessarili": [1, 3, 9], "maintain": [1, 3, 6, 7, 9], "prefer": [1, 3], "failur": [1, 2, 3], "rate": [1, 3, 4], "ignor": [1, 2, 3, 4, 6], "due": [1, 2, 3, 6, 9], "df_wide": [1, 3, 4, 6, 9], "end_dat": [1, 3], "compare_horizont": [1, 3], "include_bound": [1, 3, 4], "35": [1, 3, 9], "start_color": [1, 3], "darkr": [1, 3], "end_color": [1, 3], "a2ad9c": [1, 3], "reforecast": [1, 3], "validation_forecast": [1, 3], "cach": [1, 3], "store": [1, 3, 4, 6, 9], "refer": [1, 3, 9], "best_model_id": [1, 3], "overlap": [1, 3, 9], "graph": [1, 3], "reader": [1, 3], "compar": [1, 3, 4, 6, 9], "place": [1, 3, 6, 9], "begin": [1, 3, 4, 6, 9], "either": [1, 3, 4, 6, 7, 9], "worst": [1, 3], "versu": [1, 3], "vline": [1, 3, 4], "val": [1, 3, 4], "marker": [1, 3], "just_point_forecast": [1, 3, 4], "fail_on_forecast_nan": [1, 3], "date": [1, 2, 3, 4, 6, 7, 9], "update_fit": [1, 3], "underli": [1, 3, 4, 9], "retrain": [1, 3], "interv": [1, 3, 4, 6], "design": [1, 3, 6, 7, 9], "high": [1, 3, 6, 7, 9], "suffici": [1, 3, 9], "without": [1, 3, 6, 7, 9], "ahead": [1, 3, 4, 6, 9], "__init__": [1, 3, 4], "prediction_object": [1, 3], "Not": [1, 2, 3, 4, 6], "implement": [1, 3, 4, 6, 9], "present": [1, 2, 3, 4, 6, 9], "strongli": [1, 3], "ha": [1, 3, 4, 6, 7, 9], "metadata": [1, 3, 4], "conveni": [1, 3, 6, 9], "id_nam": [1, 3, 4], "seriesid": [1, 2, 3, 4], "value_nam": [1, 3, 4], "interval_nam": [1, 3, 4], "predictioninterv": [1, 3, 4], "preprocessing_transform": [1, 4], "basescal": [1, 4], "past_impacts_intervent": [1, 4], "common_fouri": [1, 4, 6], "ar_lag": [1, 4], "ar_interaction_season": [1, 4], "anomaly_detector_param": [1, 3, 4, 6], "anomaly_intervent": [1, 4], "holiday_detector_param": [1, 4, 6], "holiday_countries_us": [1, 4, 6], "multivariate_featur": [1, 4], "multivariate_transform": [1, 4], "regressor_transform": [1, 4], "regressors_us": [1, 4], "linear_model": [1, 4], "randomwalk_n": [1, 4], "trend_window": [1, 4], "30": [1, 3, 4, 6, 7], "trend_standin": [1, 4], "trend_anomaly_detector_param": [1, 4], "trend_transform": [1, 4], "trend_model": [1, 4], "modelparamet": [1, 3, 4, 5, 9], "trend_phi": [1, 4], "max_colinear": [1, 4], "998": [1, 4], "max_multicolinear": [1, 4], "decomposit": [1, 4, 6], "advanc": [1, 3, 4], "trend": [1, 4, 6], "preprocess": [1, 4, 6, 7, 9], "tunc": [1, 4], "etiam": [1, 4], "fati": [1, 4], "aperit": [1, 4], "futuri": 
[1, 4], "ora": [1, 4], "dei": [1, 4], "iussu": [1, 4], "umquam": [1, 4], "credita": [1, 4], "teucri": [1, 4], "Nos": [1, 4], "delubra": [1, 4], "deum": [1, 4], "miseri": [1, 4], "quibu": [1, 4], "ultimu": [1, 4], "esset": [1, 4], "ill": [1, 4], "di": [1, 4], "festa": [1, 4], "velamu": [1, 4], "frond": [1, 4], "urbem": [1, 4], "aeneid": [1, 4], "246": [1, 4], "249": [1, 4], "impact": [1, 3, 4, 6, 9], "uniqu": [1, 3, 4, 6], "past": [1, 4, 6, 9], "outsid": [1, 4, 9], "unforecast": [1, 4, 6], "accordingli": [1, 4, 9], "origin": [1, 3, 4, 6, 9], "product": [1, 4, 6, 7, 9], "goal": [1, 4], "temporari": [1, 4], "whose": [1, 4, 6], "rel": [1, 3, 4, 6, 7, 9], "known": [1, 3, 4, 7, 9], "essenti": [1, 3, 4, 9], "estim": [1, 4, 6, 9], "raw": [1, 4, 6], "presenc": [1, 4], "warn": [1, 3, 4, 6], "about": [1, 3, 4, 6], "remove_excess_anomali": [1, 4, 6], "detector": [1, 3, 4, 6], "reli": [1, 4, 9], "alwai": [1, 3, 4, 6, 9], "element": [1, 2, 4, 6], "histori": [1, 2, 3, 4, 6], "intern": [1, 3, 4, 6, 7, 9], "attribut": [1, 3, 4, 9], "figur": [1, 3, 4], "expect": [1, 3, 4, 6, 7, 9], "latest": [1, 4], "code": [1, 3, 4, 5, 6, 7], "dai": [1, 2, 3, 4, 6, 9], "7": [1, 3, 4, 6, 9], "weekli": [1, 2, 4], "For": [1, 2, 3, 4, 7, 9], "slope": [1, 4], "analysi": [1, 4, 6], "posit": [1, 3, 4, 6, 9], "sign": [1, 4], "exactli": [1, 4, 6], "regression_typ": [1, 4, 6, 9], "pattern": [1, 3, 4, 6, 9], "inaccur": [1, 4], "flag": [1, 3, 4, 6, 9], "keep_col": [1, 4], "keep_cols_idx": [1, 4], "dtindex": [1, 4, 6], "regressor_per_seri": [1, 4], "flag_regressor": [1, 4], "categorical_group": [1, 4], "past_impact": [1, 4], "future_impact": [1, 4], "regressor_forecast_model": [1, 4], "regressor_forecast_model_param": [1, 4], "regressor_forecast_transform": [1, 4], "include_histori": [1, 4], "tune": [1, 4], "16": [1, 3, 4], "anomaly_color": [1, 4], "darkslateblu": [1, 4], "holiday_color": [1, 4], "darkgreen": [1, 4], "trend_anomaly_color": [1, 4], "slategrai": [1, 4], "point_siz": [1, 4], "know": [1, 4, 9], "d4f74f": [1, 4], "82ab5a": [1, 4], "ff6c05": [1, 4], "c12600": [1, 4], "new_df": [1, 4], "include_organ": [1, 4], "step": [1, 3, 4, 6, 9], "equival": [1, 4, 6, 9], "include_impact": [1, 4], "multipl": [1, 3, 4, 6, 7, 9], "trend_residu": [1, 4], "trans_method": [1, 4, 6, 9], "featur": [1, 4, 6, 7, 9], "space": [1, 2, 4, 6, 9], "intervention_d": [1, 4], "df_train": [1, 3, 4, 6, 9], "lower_limit": [1, 3, 6, 9], "upper_limit": [1, 3, 6, 9], "univariatemotif": [1, 3], "model_param_dict": [1, 3, 9], "distance_metr": [1, 3, 4, 6], "euclidean": [1, 3], "k": [1, 3, 4, 6], "pointed_method": [1, 3], "return_result_window": [1, 3, 4], "window": [1, 3, 4, 6, 9], "model_transform_dict": [1, 3, 9], "pchip": [1, 3], "fix": [1, 3, 6, 9], "maxabsscal": [1, 3, 6], "model_forecast_kwarg": [1, 3], "321": [1, 3, 9], "future_regressor_train": [1, 3, 4, 9], "future_regressor_forecast": [1, 3, 4, 9], "close": [1, 3, 4, 6, 7, 9], "exceed": [1, 3, 6, 9], "four": [1, 3, 9], "calcul": [1, 3, 4, 6, 9], "direct": [1, 3, 4, 6, 9], "edg": [1, 2, 3, 6, 9], "y": [1, 2, 3, 4, 6, 9], "z": [1, 3, 4, 9], "primarili": [1, 3, 9], "num_seri": [1, 3, 4, 6, 9], "middl": [1, 3, 6], "too": [1, 2, 3, 6, 9], "flip": [1, 3], "ab": [1, 3, 4, 6], "l": [1, 3], "timestep": [1, 3, 6, 9], "two": [1, 3, 6, 9], "neighbor": [1, 3, 4], "resolut": [1, 3], "greater": [1, 3, 6, 9], "class_method": [1, 3], "standalon": [1, 3], "item": [1, 3, 6], "generaet_result_window": [1, 3], "fit_forecast": [1, 3], "result_window": [1, 3, 4], "forecast_df": [1, 3], "up_forecast_df": [1, 3], 
"low_forecast_df": [1, 3], "lower_limit_2d": [1, 3, 9], "upper_limit_2d": [1, 3, 9], "upper_risk_arrai": [1, 3, 9], "lower_risk_arrai": [1, 3, 9], "event_risk": [1, 3], "multivariatemotif": [1, 3, 9], "autots_kwarg": [1, 3], "shortcut": [1, 3], "suggest": [1, 3, 9], "normal": [1, 3, 4, 6], "model_method": [1, 3], "wa": [1, 3, 4, 6, 9], "num_sampl": [1, 3], "column_idx": [1, 3], "grai": [1, 3], "838996": [1, 3], "c0c0c0": [1, 3], "dcdcdc": [1, 3], "a9a9a9": [1, 3], "808080": [1, 3], "989898": [1, 3], "757575": [1, 3], "696969": [1, 3], "c9c0bb": [1, 3], "c8c8c8": [1, 3], "323232": [1, 3], "e5e4e2": [1, 3], "778899": [1, 3], "4f666a": [1, 3], "848482": [1, 3], "414a4c": [1, 3], "8a7f80": [1, 3], "c4c3d0": [1, 3], "bebeb": [1, 3], "dbd7d2": [1, 3], "up_low_color": [1, 3], "ff4500": [1, 3], "ff5349": [1, 3], "bar_color": [1, 3], "6495ed": [1, 3], "bar_ylim": [1, 3], "8": [1, 3, 4, 6, 9], "ylim": [1, 3], "barplot": [1, 3], "df_test": [1, 3, 9], "actuals_color": [1, 3], "00bfff": [1, 3], "v": [1, 3], "dt": [1, 2, 3, 6], "line": [1, 3, 4, 9], "manual": [1, 3, 9], "appropri": [1, 3, 4, 6, 7, 9], "assess": [1, 3, 9], "target_shap": [1, 3], "handl": [1, 3, 4, 9], "overview": [1, 3], "defin": [1, 3, 4, 6, 7, 9], "group": [1, 3, 4, 6], "reconcili": [1, 6, 9], "2020": [1, 3, 4, 6, 9], "mathemat": [1, 6], "chronolog": [1, 6], "fulli": [1, 4, 6], "under": [1, 6, 9], "condit": [1, 6], "primari": [1, 6], "intent": [1, 6], "invers": [1, 4, 6, 9], "na": [1, 4, 6], "filter": [1, 3, 4, 6, 9], "cannot": [1, 6, 9], "rollingmean": [1, 6], "pctchang": [1, 6], "cumsum": [1, 6], "ffill": [1, 6], "forward": [1, 3, 6, 9], "until": [1, 6, 9], "reach": [1, 6], "miss": [1, 6, 9], "averag": [1, 3, 4, 6, 9], "rolling_mean_24": [1, 6], "24": [1, 4, 6, 9], "ffill_mean_bias": [1, 6], "fake_d": [1, 6], "shift": [1, 4, 6], "thu": [1, 3, 6, 9], "incorrect": [1, 6], "iterativeimput": [1, 6, 9], "iter": [1, 6], "minmaxscal": [1, 6], "powertransform": [1, 6], "quantiletransform": [1, 6], "standardscal": [1, 6], "robustscal": [1, 6], "worth": [1, 6], "n_compon": [1, 4, 6], "receiv": [1, 6, 7], "second_transform": [1, 6], "fixedrollingmean": [1, 6], "disabl": [1, 6], "rollingmean10": [1, 6], "rollingmean100thn": [1, 6], "len": [1, 3, 4, 6], "minimum": [1, 4, 6, 9], "convert": [1, 4, 6, 9], "pct_chang": [1, 6], "lot": [1, 4, 6, 9], "sin": [1, 6], "log": [1, 3, 6, 9], "necessari": [1, 4, 6, 7, 9], "lag": [1, 4, 6], "seasonaldifferencemean": [1, 6], "seasonaldifference7": [1, 6], "28": [1, 3, 4, 6], "parameter": [1, 6], "center": [1, 6], "around": [1, 4, 6], "record": [1, 2, 3, 5, 6, 7], "bin": [1, 3, 6], "move": [1, 3, 4, 6], "lose": [1, 6], "smoother": [1, 6], "scipi": [1, 4, 6, 9], "hp_filter": [1, 6], "decompos": [1, 6], "exponenti": [1, 4, 6, 9], "joint": [1, 6], "differenc": [1, 4, 6], "vector": [1, 3, 4, 6], "box": [1, 6], "tiao": [1, 6], "align": [1, 6], "tailor": [1, 6], "wish": [1, 6], "good": [1, 6, 9], "cheer": [1, 6], "local": [1, 4, 6], "state": [1, 4, 6], "clip": [1, 6], "std": [1, 6], "awai": [1, 6], "compens": [1, 6], "croston": [1, 6], "inspir": [1, 6, 9], "magnitud": [1, 2, 4, 6, 9], "occurr": [1, 6, 9], "intermitt": [1, 6], "fourier": [1, 6], "harmon": [1, 6], "reintroduc": [1, 6], "within": [1, 6], "diff": [1, 3, 6], "overwrit": [1, 6, 9], "baxter": [1, 6], "king": [1, 4, 6], "bandpass": [1, 6], "poisson": [1, 6], "applic": [1, 6], "techniqu": [1, 6], "directli": [1, 6, 7, 9], "fillzero": [1, 6], "undo": [1, 6], "mad": [1, 6], "classmethod": [1, 6], "retriev": [1, 2, 6], "legaci": [1, 6], "min_occurr": [1, 3, 6], 
"splash_threshold": [1, 3, 6], "65": [1, 3, 6], "use_dayofmonth_holidai": [1, 3, 6], "use_wkdom_holidai": [1, 3, 6], "use_wkdeom_holidai": [1, 3, 6], "use_lunar_holidai": [1, 3, 6], "use_lunar_weekdai": [1, 3, 6], "use_islamic_holidai": [1, 3, 6], "use_hebrew_holidai": [1, 3, 6], "holiday_impact": [1, 3, 6], "popul": [1, 3, 6], "day_holidai": [1, 3, 6], "long": [1, 2, 3, 4, 6, 7, 9], "join": [1, 3, 6], "rather": [1, 3, 6, 9], "format": [1, 2, 3, 4, 6, 7, 9], "series_flag": [1, 3, 6], "contan": [1, 3, 6], "holiday_nam": [1, 3, 6], "anomaly_scor": [1, 3, 6], "include_anomali": [1, 3], "03": [1, 4, 6], "02": [1, 6], "005": [1, 6], "002": [1, 6], "06": [1, 4, 6], "04": [1, 6], "na_prob_dict": [1, 6], "datepartregressionimput": [1, 6], "025": [1, 6], "iterativeimputerextratre": [1, 6], "0001": [1, 4, 6], "knnimput": [1, 6], "seasonalitymotifimputer1k": [1, 6], "seasonalitymotifimputerlinmix": [1, 6], "fast_param": [1, 6], "superfast_param": [1, 6], "traditional_ord": [1, 6], "transformer_min_depth": [1, 6], "allow_non": [1, 6], "no_nan_fil": [1, 6], "choosen": [1, 6, 9], "signal": [1, 6, 9], "transformt": [1, 8], "summar": [1, 4, 6, 9], "backfil": [1, 6], "bfill": [1, 6], "head": [1, 3, 5, 6, 9], "regressor_train": [1, 6], "iloc": [1, 6, 9], "thing": [1, 4, 6, 9], "feature_agglomer": [1, 6], "gaussian_random_project": [1, 6], "deal": [1, 6, 9], "prefil": [1, 6], "elsewher": [1, 6], "regressor_forecast": [1, 6], "simple_binar": [1, 6], "encode_holiday_typ": [1, 6], "distribut": [1, 2, 3, 6, 7], "gamma": [1, 2, 4, 6], "univari": [1, 4, 6, 9], "holiday_regr_styl": [1, 6], "preprocessing_param": [1, 6], "datepart": [1, 4, 6], "been": [1, 3, 6, 9], "peopl": [1, 6], "NOT": [1, 3, 4, 6, 9], "machin": [1, 6, 7], "elabor": [1, 6], "build": [1, 6, 9], "And": [1, 4, 6, 7], "post": [1, 6, 7, 9], "hoc": [1, 6], "want": [1, 6, 9], "easili": [1, 6, 9], "categor": [1, 2, 6], "discard": [1, 6], "annoi": [1, 6], "countri": [1, 6], "pull": [1, 2, 4, 6], "req": [1, 3, 6], "pkg": [1, 6], "subdiv": [1, 6], "subdivis": [1, 6], "func": [1, 6], "resampl": [1, 6], "creation": [1, 4, 6], "swappabl": [1, 6], "infer_freq": [1, 6], "date_start": [1, 2], "date_end": [1, 2], "artif": [1, 2, 9], "wiki": [1, 2, 3], "germani": [1, 2], "thanksgiv": [1, 2, 9], "microsoft": [1, 2], "procter_": [1, 2], "26_gambl": [1, 2], "youtub": [1, 2], "united_st": [1, 2], "elizabeth_ii": [1, 2], "william_shakespear": [1, 2], "cleopatra": [1, 2], "george_washington": [1, 2], "chinese_new_year": [1, 2], "standard_devi": [1, 2, 9], "christma": [1, 2, 9], "list_of_highest": [1, 2], "grossing_film": [1, 2], "list_of_countries_that_have_gained_independence_from_the_united_kingdom": [1, 2], "periodic_t": [1, 2], "sourc": [1, 2, 6, 9], "wikimedia": [1, 2], "foundat": [1, 2], "traffic": [1, 2, 9], "mn": [1, 2], "dot": [1, 2], "via": [1, 2], "uci": [1, 2], "repositori": [1, 2], "2021": [1, 2, 3, 4, 9], "introduce_nan": [1, 2], "introduce_random": [1, 2], "123": [1, 2, 3, 6], "null": [1, 2], "observation_start": [1, 2], "observation_end": [1, 2], "fred_kei": [1, 2], "fred_seri": [1, 2, 9], "dgs10": [1, 2], "t5yie": [1, 2], "sp500": [1, 2], "dcoilwtico": [1, 2], "dexuseu": [1, 2], "wpu0911": [1, 2], "ticker": [1, 2, 9], "msft": [1, 2], "trends_list": [1, 2, 9], "cycl": [1, 2, 4], "trends_geo": [1, 2], "weather_data_typ": [1, 2], "awnd": [1, 2], "wsf2": [1, 2], "tavg": [1, 2], "weather_st": [1, 2, 9], "usw00094846": [1, 2], "usw00014925": [1, 2], "weather_year": [1, 2], "london_air_st": [1, 2, 9], "ct3": [1, 2], "sk8": [1, 2], "london_air_speci": [1, 2], 
"pm25": [1, 2], "london_air_dai": [1, 2], "180": [1, 2], "earthquake_dai": [1, 2], "earthquake_min_magnitud": [1, 2, 9], "gsa_kei": [1, 2], "gov_domain_list": [1, 2, 9], "nasa": [1, 2], "gov": [1, 2], "gov_domain_limit": [1, 2], "600": [1, 2], "wikipedia_pag": [1, 2, 9], "microsoft_offic": [1, 2], "wiki_languag": [1, 2], "en": [1, 2, 3, 6, 9], "weather_event_typ": [1, 2, 9], "28z": [1, 2], "29": [1, 2], "winter": [1, 2, 9], "weather": [1, 2, 9], "storm": [1, 2], "caiso_queri": [1, 2], "ene_slr": [1, 2], "300": [1, 2, 4], "sleep_second": [1, 2, 9], "activ": [1, 2, 4, 9], "internet": [1, 2, 9], "connect": [1, 2, 9], "respect": [1, 2, 6, 9], "free": [1, 2, 7], "heavili": [1, 2, 4, 6, 9], "exclud": [1, 2, 6], "d": [1, 2, 3, 4, 6, 9], "earliest": [1, 2], "get_seri": [1, 2], "yfinanc": [1, 2, 9], "api": [1, 2, 7, 9], "restrict": [1, 2, 4], "stlouisf": [1, 2], "org": [1, 2, 3, 4, 6, 9], "doc": [1, 2, 4, 6, 7, 9], "api_kei": [1, 2], "html": [1, 2, 4, 6, 9], "fredapi": [1, 2, 9], "stock": [1, 2, 7, 9], "pypi": [1, 2], "keyword": [1, 2], "pytrend": [1, 2, 9], "ncei": [1, 2], "noaa": [1, 2], "ghcn": [1, 2], "prcp": [1, 2], "snow": [1, 2], "tmax": [1, 2], "tmin": [1, 2], "wsf1": [1, 2], "wsf5": [1, 2], "wsfg": [1, 2], "station": [1, 2], "londonair": [1, 2], "uk": [1, 2], "london_speci": [1, 2], "london": [1, 2], "air": [1, 2], "smallest": [1, 2, 3], "earthquak": [1, 2], "usg": [1, 2], "open": [1, 2, 5, 9], "gsa": [1, 2], "dap": [1, 2], "dist": [1, 2, 4, 9], "govern": [1, 2], "domain": [1, 2], "veri": [1, 2, 4, 6, 9], "usp": [1, 2], "ncbi": [1, 2], "nlm": [1, 2], "nih": [1, 2], "cdc": [1, 2], "ir": [1, 2], "usajob": [1, 2], "studentaid": [1, 2], "usembassi": [1, 2], "tsunami": [1, 2], "smaller": [1, 2, 3, 4, 6, 9], "10000": [1, 2], "wikipedia": [1, 2, 3], "encod": [1, 2, 3, 9], "underscor": [1, 2], "sever": [1, 2, 7, 9], "www1": [1, 2], "ncdc": [1, 2], "pub": [1, 2, 6], "swdi": [1, 2], "stormev": [1, 2], "csvfile": [1, 2], "pdf": [1, 2, 6], "hardcod": [1, 2], "queri": [1, 2, 6], "server": [1, 2], "download": [1, 2, 9], "feder": [1, 2], "reserv": [1, 2], "loui": [1, 2], "econom": [1, 2], "indic": [1, 2, 3, 6], "week": [1, 2], "petroleum": [1, 2], "industri": [1, 2], "eia": [1, 2], "annual": [1, 2], "cleaner": [1, 6], "pivot_t": [1, 6], "determin": [1, 4, 6], "provid": [1, 3, 4, 6, 9], "starttimestamp": [1, 3], "template_col": [1, 3], "transformationparamet": [1, 3, 4, 5], "horizontal_subset": [1, 3], "return_model": [1, 3], "model_count": [1, 3], "albeit": [1, 3, 9], "she": [1, 3], "turn": [1, 3], "me": [1, 3], "newt": [1, 3], "got": [1, 3, 4], "width": [1, 3, 6], "ask": [1, 3], "few": [1, 3], "cpu": [1, 3, 4, 6, 7, 9], "meant": [1, 3], "tranform": [1, 3], "instal": [2, 4, 6], "fredkei": 2, "seriesnamedict": 2, "simplest": [2, 9], "sure": [2, 6, 7, 9], "request": [2, 6, 7, 9], "pair": 2, "seriesnam": 2, "anyth": [2, 6], "second": [2, 4, 6, 9], "sleep": 2, "chanc": 2, "mon": [3, 6], "jul": [3, 6], "18": [3, 4], "19": [3, 4], "55": 3, "author": [3, 4, 6], "colin": [3, 4, 6, 9], "mid": [3, 6], "transformation_dict": [3, 4], "model_str": 3, "parameter_dict": 3, "feed": 3, "pipelin": 3, "submitted_paramet": 3, "sort_column": 3, "sort_ascend": 3, "max_result": 3, "recursive_count": 3, "old": [3, 9], "No": [3, 4, 6, 7], "mate": 3, "sanderson": 3, "submitted_paramt": 3, "hyperparamet": 3, "per_timestamp_smap": 3, "per_series_metr": [3, 4], "per_series_ma": 3, "per_series_rms": 3, "per_series_mad": 3, "per_series_contour": 3, "per_series_spl": 3, "per_series_ml": 3, "per_series_iml": 3, "per_series_max": 3, 
"per_series_oda": 3, "per_series_mqa": 3, "per_series_dwa": 3, "per_series_ewma": 3, "per_series_uwms": 3, "per_series_smooth": 3, "per_series_m": 3, "per_series_mats": 3, "per_series_wasserstein": 3, "per_series_dwd": 3, "correspond": [3, 4, 6], "order": [3, 4, 6, 9], "another_ev": 3, "merg": 3, "onto": 3, "validation_round": 3, "current_gener": 3, "traceback": 3, "mosaic_us": 3, "additional_msg": 3, "who": [3, 4], "tim": 3, "hyperparamt": 3, "prepar": 3, "info": [3, 6], "print": [3, 5, 6, 7, 9], "statement": 3, "keyboard": 3, "interrupt": [3, 7], "caught": [3, 4], "break": 3, "tracebook": 3, "represent": 3, "everi": [3, 4, 6, 9], "existing_templ": 3, "new_poss": 3, "selection_col": 3, "new_possibl": 3, "namess": 3, "judg": [3, 9], "hash": 3, "b": [3, 6], "recombin": 3, "ident": [3, 4], "made": [3, 4, 6, 9], "mle": [3, 9], "mage": [3, 9], "bigger": 3, "results_object": 3, "total_valid": 3, "models_to_us": [3, 4], "model_prob": 3, "counter": [3, 6], "n_model": 3, "keyword_format": 3, "preceed": [3, 9], "dict_arrai": 3, "recurs": [3, 5, 9], "unnest": 3, "validation_result": [3, 5, 7], "groupby_col": 3, "all_result": 3, "corr": 3, "onehot": 3, "poli": 3, "100000": [3, 6], "dimens": [3, 4, 6, 9], "fake": [3, 6], "purpos": [3, 6, 9], "fri": [3, 6], "nov": 3, "13": [3, 4, 9], "45": [3, 4], "base_models_onli": 3, "tensorflow": [3, 4, 9], "jan": [3, 4], "27": [3, 6], "36": [3, 4], "lag_1": [3, 4, 6], "lag_2": [3, 4], "nearest": [3, 4, 6], "ndim": 3, "f": [3, 9], "ae": 3, "precalcul": 3, "arr": [3, 6], "loss": [3, 4, 9], "chi": 3, "squar": [3, 6, 9], "histogram": 3, "unchang": 3, "flat": [3, 9], "concern": [3, 9], "bluff": 3, "river": 3, "elev": 3, "equiavel": 3, "last_of_arrai": [3, 4], "direciton": 3, "growth": [3, 4], "declin": 3, "scaler": [3, 4], "cumsum_a": [3, 4], "diff_a": [3, 4], "extra": [3, 9], "precomput": [3, 4], "effici": [3, 4, 6, 9], "loop": [3, 4], "worri": 3, "them": [3, 9], "detail": [3, 4, 6, 7, 9], "bandwidth": 3, "kl": 3, "diverg": 3, "p": [3, 4, 6, 9], "q": [3, 4, 6, 9], "epsilon": [3, 4, 6], "1e": [3, 6], "perecentag": 3, "progress": [3, 7, 9], "along": [3, 9], "differenti": [3, 9], "sole": 3, "optim": [3, 4, 7, 9], "unanchor": 3, "1d": [3, 6], "nan_flag": [3, 6], "baselin": 3, "naiv": [3, 4, 7, 9], "poorli": [3, 6, 9], "85": 3, "largest": [3, 9], "full_error": 3, "le": 3, "y_pred": [3, 4], "y_true": [3, 4], "penal": [3, 9], "underestim": [3, 9], "overestim": [3, 9], "avoid": [3, 6, 9], "divid": 3, "aren": [3, 4], "down": [3, 6, 9], "bad": [3, 9], "er": 3, "push": 3, "exclus": 3, "sqe": 3, "catlin": [3, 6, 7], "syllepsi": 3, "live": [3, 7], "22": [3, 4, 6], "categori": 3, "OR": 3, "being": [3, 4, 6, 7, 9], "pinbal": [3, 9], "gradient": 3, "volatil": [3, 9], "precomputed_spl": 3, "unmatch": 3, "poor": [3, 9], "penalty_threshold": 3, "view": [3, 6, 9], "2d": [3, 6], "strength": [3, 6], "earth": 3, "perhap": [3, 6], "relev": [3, 6], "unsort": 3, "extract": [3, 4], "py": [3, 7, 9], "amfm": 3, "possibli": [3, 4, 6], "modif": 3, "structur": [3, 4, 6], "11": [3, 9], "2023": [3, 4, 6, 7], "validation_param": 3, "etc": [3, 6, 9], "clean": [3, 6, 9], "beyond": [3, 4, 6], "constant": [4, 6], "vol": 4, "garch": 4, "o": [4, 6], "power": [4, 9], "rescal": 4, "maxit": 4, "200": [4, 6], "linux": [4, 6, 9], "distro": 4, "confid": [4, 6], "multiprocess": [4, 6, 9], "uniniti": 4, "fit_runtim": 4, "timedelta": 4, "hold": 4, "timeseri": [4, 6, 9], "last_dat": 4, "forecast_index": 4, "forecast_column": 4, "predict_runtim": 4, "transformation_runtim": 4, "per_timestamp": 4, "avg_metr": 4, 
"avg_metrics_weight": 4, "form": [4, 6, 9], "twice": [4, 6], "series_weight": 4, "per_timestamp_error": 4, "evalut": 4, "against": 4, "suboptim": 4, "update_datetime_nam": 4, "datetime_column": 4, "tell": [4, 9], "remove_zero": [4, 9], "right": [4, 6, 7], "title_substr": 4, "ax": [4, 6], "matplotlib": [4, 9], "dash": 4, "vertic": 4, "intens": 4, "shade": 4, "region": [4, 6], "xlim_right": 4, "grid": [4, 7], "group_col": 4, "y_col": 4, "totalruntimesecond": 4, "train_last_d": 4, "cmap_nam": 4, "gist_rainbow": 4, "runtimes_data": 4, "xlim": 4, "title_suffix": 4, "point_method": 4, "canberra": [4, 6], "sample_fract": [4, 6], "adapt": 4, "struggl": 4, "short": 4, "max_window": [4, 6], "weighted_mean": 4, "midhing": [4, 6], "cdist": [4, 9], "closest": [4, 6, 9], "consid": [4, 9], "n_harmon": [4, 6], "state_transit": [4, 6], "process_nois": [4, 6], "observation_model": [4, 6], "observation_nois": [4, 6], "em_it": [4, 6], "undefin": 4, "solv": [4, 6, 9], "kalman": [4, 6, 9], "comparison_transform": 4, "combination_transform": 4, "comparison": [4, 6], "mse": [4, 9], "minkowski": 4, "5000": [4, 6], "tradeoff": [4, 6], "own": [4, 9], "gather": 4, "phrase_len": 4, "magnitude_pct_change_sign": 4, "share": 4, "l2": 4, "max_motif": 4, "recency_weight": 4, "cutoff_threshold": 4, "cutoff_minimum": 4, "dark": [4, 6], "magic": [4, 6], "evil": 4, "mastermind": 4, "project": [4, 7], "knn": 4, "interest": [4, 9], "togeth": [4, 6, 9], "pairwise_dist": 4, "amount": [4, 6, 9], "choos": [4, 9], "sign_biased_mean": 4, "ridge_param": 4, "5e": 4, "warmup_pt": [4, 6], "seed_pt": 4, "seed_weight": 4, "batch_siz": 4, "batch_method": 4, "input_ord": 4, "nonlinear": 4, "variabl": [4, 6, 9], "autoregress": 4, "next": [4, 6, 9], "reservoir": 4, "quantinfo": 4, "ng": 4, "rc": 4, "paper": [4, 7], "gauthier": 4, "j": [4, 6], "bollt": 4, "e": [4, 6], "griffith": 4, "al": 4, "nat": 4, "commun": [4, 9], "5564": 4, "doi": 4, "1038": 4, "s41467": 4, "021": 4, "25801": 4, "pointless": 4, "lambda": [4, 6], "ridg": 4, "realiti": 4, "warmup": 4, "fine": [4, 9], "linearli": 4, "batch": [4, 7], "lastvalu": [4, 6], "concerto": 4, "g": [4, 6], "minor": 4, "op": 4, "rv": 4, "315": 4, "produc": [4, 9], "nan_euclidean": [4, 6, 9], "include_differenc": [4, 6], "stride_s": [4, 6], "covari": [4, 6], "ratio": 4, "num_regressor_seri": 4, "ob": [4, 6], "xa": 4, "xb": 4, "r_arr": 4, "inner": 4, "hungri": 4, "big": 4, "linpack": [4, 9], "seem": [4, 9], "sensit": [4, 6, 9], "address": 4, "tue": 4, "sep": 4, "57": 4, "assist": 4, "crgillespie22": 4, "gaussian_prior_mean": 4, "wishart_prior_scal": 4, "wishart_dof_excess": 4, "bayesian": [4, 6], "conjug": 4, "prior": [4, 6], "encourag": [4, 9], "coef": 4, "regular": [4, 9], "peak": 4, "matrix": [4, 6], "varianc": 4, "nois": [4, 6], "while": [4, 7, 9], "return_std": 4, "n_sampl": 4, "in_d": 4, "prefix": 4, "regr_": 4, "15000": 4, "l1": 4, "cost": 4, "lin": 4, "reg": 4, "lamb": [4, 6], "identity_matrix": 4, "neural": 4, "net": 4, "rnn_type": 4, "lstm": 4, "kernel_initi": 4, "lecun_uniform": 4, "hidden_layer_s": 4, "32": [4, 6], "adam": 4, "huber": 4, "epoch": [4, 6], "wrapper": [4, 6], "kera": 4, "rnn": 4, "cell": 4, "gru": 4, "layer": 4, "compil": [4, 9], "tf": 4, "set_se": 4, "head_siz": 4, "256": 4, "num_head": 4, "ff_dim": 4, "num_transformer_block": 4, "mlp_unit": 4, "128": 4, "mlp_dropout": 4, "dropout": 4, "io": [4, 6], "timeseries_transformer_classif": 4, "input_shap": 4, "output_shap": [4, 6], "ensemble_param": 4, "forecasts_runtim": 4, "model_weight": 4, "incompat": [4, 9], "bestn": [4, 9], 
"forecast_id": 4, "forecast_runtim": 4, "forecasts_list": 4, "ensemble_str": 4, "prematched_seri": 4, "use_valid": 4, "subset_flag": 4, "per_series2": 4, "only_specifi": 4, "outer": [4, 6], "known_match": 4, "available_model": 4, "full_model": 4, "error_matrix": 4, "error_list": 4, "col_nam": 4, "smoothing_window": 4, "metric_nam": 4, "classifier_param": 4, "classifi": 4, "unknown": 4, "construct": [4, 5, 6, 9], "x_predict": 4, "ensemble_list": 4, "models_sourc": 4, "all_seri": 4, "forecast_period": [4, 9], "datestamp": 4, "retur": 4, "safety_model": 4, "local_result": 4, "total_v": 4, "describ": [4, 9], "releas": 4, "amazon": 4, "realli": [4, 6], "mxnet": [4, 9], "gui": 4, "sorta": 4, "mayb": 4, "deprec": [4, 6, 9], "sad": 4, "excel": [4, 9], "routin": 4, "stabil": 4, "strong": 4, "suit": 4, "gluon_model": 4, "deepar": 4, "learning_r": 4, "context_length": 4, "npt": 4, "deepstat": 4, "wavenet": 4, "deepfactor": 4, "sff": 4, "mqcnn": 4, "deepvar": 4, "gpvar": 4, "nbeat": 4, "network": 4, "2forecastlength": [4, 6], "nforecastlength": 4, "unlik": [4, 6, 9], "df_index": 4, "freq": [4, 6, 9], "model_templ": 4, "silverkit": 4, "unitedst": 4, "inner_n_job": 4, "relat": [4, 9], "borrow": 4, "xinyu": 4, "chen": 4, "xinychen": 4, "transdim": 4, "medium": [4, 9], "articl": 4, "thrown": 4, "nan_to_num": 4, "pinv": 4, "On": [4, 9], "entri": 4, "dlascl": 4, "illeg": 4, "time_horizon": 4, "time_lag": 4, "lambda0": 4, "33333333": 4, "low": [4, 6, 9], "tensor": 4, "arxiv": [4, 6], "2104": 4, "14936": 4, "blob": 4, "master": 4, "mat": 4, "predictor": 4, "ipynb": 4, "rho": 4, "inner_maxit": 4, "tempor": 4, "sparse_mat": 4, "ind": 4, "w": [4, 5, 6], "psi": 4, "r": [4, 5, 6], "dynam": [4, 6, 9], "pred_step": 4, "sparse_tensor": 4, "rho0": 4, "recogn": [4, 7], "pred_time_step": 4, "time_interv": 4, "kernel": 4, "dim": [4, 6], "tau": 4, "aq": 4, "rold": 4, "delta": 4, "sun": 4, "expanded_binar": [4, 6], "ml": [4, 9], "aspect": 4, "n_seri": [4, 6], "variou": [4, 6], "nixtla": 4, "Be": [4, 7], "commerci": 4, "mqloss": 4, "input_s": 4, "max_step": [4, 6], "early_stop_patience_step": 4, "relu": 4, "scaler_typ": 4, "model_arg": 4, "point_quantil": 4, "document": [4, 7, 9], "temp": 4, "za": 4, "static_regressor": 4, "facebook": 4, "sinc": [4, 9], "finicki": [4, 9], "yearly_season": 4, "weekly_season": 4, "daily_season": 4, "n_changepoint": 4, "changepoint_prior_scal": 4, "seasonality_mod": 4, "changepoint_rang": 4, "seasonality_prior_scal": 4, "holidays_prior_scal": 4, "thou": 4, "shall": 4, "neither": 4, "prece": 4, "off": [4, 6, 9], "changepoints_rang": 4, "trend_reg": 4, "trend_reg_threshold": 4, "ar_spars": 4, "seasonality_reg": 4, "n_lag": 4, "num_hidden_lay": 4, "d_hidden": 4, "loss_func": 4, "train_spe": 4, "90": [4, 6], "max_epoch": 4, "max_encoder_length": 4, "hidden_s": 4, "n_layer": 4, "add_target_scal": 4, "target_norm": 4, "encodernorm": 4, "temporalfusiontransform": 4, "64": [4, 6], "78": 4, "model_kwarg": 4, "trainer_kwarg": 4, "callback": 4, "obsess": 4, "go": [4, 9], "pt": 4, "lightn": [4, 9], "trainer": 4, "quantileloss": 4, "lesser": 4, "decis": [4, 7, 9], "tree": 4, "elast": 4, "forest": 4, "mlpregressor": 4, "adaboost": 4, "principl": 4, "nthn": 4, "max_depth": [4, 6], "min_samples_split": [4, 6], "polynomial_degre": [4, 6], "randomforest": 4, "mean_rolling_period": 4, "macd_period": 4, "std_rolling_period": 4, "max_rolling_period": 4, "min_rolling_period": 4, "ewm_var_alpha": 4, "quantile90_rolling_period": 4, "quantile10_rolling_period": 4, "ewm_alpha": 4, "additional_lag_period": 4, 
"abs_energi": 4, "rolling_autocorr_period": 4, "nonzero_last_n": 4, "scale_full_x": 4, "quantile_param": 4, "min_samples_leaf": 4, "n_estim": 4, "250": 4, "cointegration_lag": 4, "series_hash": 4, "frame": [4, 6], "multiari": 4, "window_s": [4, 6], "max_histori": 4, "one_step": 4, "processed_i": 4, "normalize_window": [4, 6], "basi": 4, "extratre": 4, "add_date_part": 4, "x_transform": 4, "wise": [4, 9], "scienc": 4, "am": 4, "arthur": 4, "briton": 4, "ve": 4, "think": 4, "your": [4, 7, 9], "selv": 4, "re": 4, "individu": [4, 9], "ye": [4, 9], "we": [4, 9], "rbf": 4, "noise_var": 4, "lambda_prim": 4, "polynomi": [4, 6], "locally_period": 4, "littl": [4, 9], "flexibl": [4, 6, 9], "toler": [4, 9], "\u03b3": 4, "lambda_": 4, "reason": [4, 6, 9], "might": [4, 9], "365": [4, 6], "input_dim": [4, 6], "output_dim": [4, 6], "shuffl": [4, 6], "model_dict": 4, "bootstrap": 4, "verbose_bool": 4, "multioutput": 4, "framework": [4, 6, 7], "mean_rol": 4, "bit": 4, "exog": 4, "exog_oo": 4, "exog_fc": 4, "sometim": 4, "c": [4, 6, 7, 9], "causal": 4, "ct": 4, "stationar": 4, "hour": [4, 6, 9], "k_factor": 4, "factor_ord": 4, "mamodel": 4, "mapr": 4, "factor_multipl": 4, "idiosyncratic_ar1": 4, "damped_trend": 4, "seasonal_period": 4, "formerli": 4, "damp": 4, "deseason": 4, "use_test": 4, "use_ml": 4, "damped_cycl": 4, "irregular": 4, "stochastic_cycl": 4, "stochastic_trend": 4, "stochastic_level": 4, "cov_typ": 4, "opg": 4, "lbfg": 4, "maxlag": [4, 6], "ic": 4, "fpe": 4, "determinist": 4, "k_ar_diff": [4, 6], "coint_rank": 4, "current_seri": 4, "xf": 4, "negloglik": 4, "conf_int": 4, "ar_ord": 4, "fit_method": 4, "hmc": 4, "num_step": 4, "tensorflowprob": 4, "42": 4, "0009999": 4, "layer_norm": 4, "dropout_r": 4, "512": 4, "num_lay": 4, "hist_len": 4, "720": 4, "decoder_output_dim": 4, "final_decoder_hidden": 4, "num_split": 4, "min_num_epoch": 4, "train_epoch": 4, "patienc": 4, "epoch_len": 4, "permut": 4, "gpu_index": 4, "googl": 4, "research": 4, "mlp": 4, "num_cov_col": 4, "cat_cov_col": 4, "ts_col": 4, "train_rang": 4, "val_rang": 4, "test_rang": 4, "pred_len": 4, "loader": 4, "68": 5, "69": 5, "70": 5, "71": 5, "72": 5, "sort_valu": 5, "ascend": [5, 9], "groupbi": [5, 6], "reset_index": 5, "export2": 5, "export_fin": 5, "to_json": 5, "orient": [5, 6], "pprint": 5, "read_csv": 5, "autots_forecast_template_gen": 5, "jsn": 5, "json_temp": 5, "read": 5, "txt": 5, "dump": 5, "indent": 5, "sort_kei": 5, "41": 6, "21": [6, 7], "contextu": 6, "fall": [6, 7, 9], "densiti": 6, "sequenc": [6, 9], "anomal": 6, "itself": 6, "regard": 6, "1802": 6, "04431": 6, "anomaly_df": 6, "df_col": 6, "wkdom_holidai": 6, "wkdeom_holidai": 6, "lunar_holidai": 6, "lunar_weekdai": 6, "islamic_holidai": 6, "hebrew_holidai": 6, "max_featur": 6, "predict_interv": 6, "job": 6, "threshold_method": 6, "norm": 6, "rolling_period": 6, "surviv": 6, "outlieri": 6, "dataframm": 6, "rolling_zscor": 6, "sf": 6, "rolliing_zscor": 6, "convers": [6, 7], "chines": 6, "arab": 6, "datetime_index": 6, "christian": 6, "aspir": 6, "hebrew": 6, "pyluach": 6, "simlist": 6, "epoch_adjust": 6, "islam": 6, "convertd": 6, "fitnr": 6, "timezon": 6, "new_moon": 6, "continu": 6, "pre": 6, "full_moon": 6, "julian": 6, "johansen": 6, "barba": 6, "towardsdatasci": 6, "canon": 6, "forgotten": 6, "4d1213396da1": 6, "p_mat": 6, "ndarrai": 6, "max_lag": 6, "return_eigenvalu": 6, "endog": 6, "det_ord": 6, "abbrevi": 6, "series_ord": 6, "trim": 6, "ex": 6, "modifi": 6, "multiproces": 6, "conserv": 6, "intel": 6, "hyperthread": 6, "logic": 6, "psutil": [6, 9], 
"fallsback": 6, "mkl": [6, 9], "simd": 6, "2017": 6, "otto": 6, "seiskari": 6, "mit": 6, "licens": 6, "resourc": [6, 9], "found": [6, 9], "kevinkotz": 6, "www": [6, 9], "notebook": 6, "statespace_dfm_coincid": 6, "introduct": 6, "commandeur": 6, "koopman": 6, "chp": 6, "andrew": 6, "harvei": 6, "notat": 6, "transit": 6, "x_k": 6, "x_": 6, "q_": 6, "qquad": 6, "sim": 6, "y_k": 6, "h": 6, "r_k": 6, "hidden": 6, "system": [6, 9], "matric": 6, "suitabl": 6, "definit": 6, "simo": 6, "sarkk\u00e4": 6, "2013": 6, "cambridg": 6, "univers": 6, "press": [6, 7], "aalto": 6, "fi": 6, "ssarkka": 6, "cup_book_online_20131111": 6, "simdkalman": 6, "kf": 6, "diag": 6, "denot": 6, "uniform": 6, "initial_valu": 6, "initial_covari": 6, "ey": 6, "third": [6, 9], "cov": 6, "29311384": 6, "06948961": 6, "19959416": 6, "00777587": 6, "02528967": 6, "pred_mean": 6, "pred_stdev": 6, "sqrt": 6, "71543": 6, "65322": 6, "multi": 6, "dimension": 6, "howev": [6, 9], "flexibli": 6, "vari": [6, 9], "broadcast": 6, "rule": 6, "oper": 6, "n_state": 6, "n_var": 6, "n_measur": 6, "main": 6, "interfac": 6, "accord": 6, "natur": [6, 9], "scalar": 6, "3d": 6, "lock": 6, "n_test": 6, "likelihood": 6, "log_likelihood": 6, "explan": 6, "With": [6, 9], "boolean": 6, "pairwis": [6, 9], "member": 6, "subresult": 6, "field": 6, "pairwise_covari": 6, "n_iter": 6, "interpret": 6, "mathbb": 6, "x_0": 6, "rm": 6, "prior_mean": 6, "prior_cov": 6, "x_j": 6, "simgl": 6, "y_1": 6, "ldot": 6, "y_j": 6, "y_t": 6, "smooth_mean": 6, "smooth_covari": 6, "smoothing_gain": 6, "y_": 6, "posterior_mean": 6, "posterior_covari": 6, "posterior": 6, "argument": 6, "operand": 6, "transpos": 6, "initial_mean": 6, "beta": 6, "phi": 6, "correct": 6, "allow_auto": 6, "next_smooth_mean": 6, "next_smooth_covari": 6, "prior_covari": 6, "statespac": 6, "oct": 6, "07": 6, "37": 6, "colincatlin": 6, "n_harm": 6, "freq_rang": 6, "grouping_method": 6, "tile": 6, "n_group": 6, "hier_id": 6, "bottom": 6, "holidays_subdiv": 6, "fallback": 6, "unavail": 6, "bias": 6, "simple_2": 6, "linear_mix": 6, "max_it": 6, "mean_weight": 6, "back_method": 6, "half": [6, 9], "remaind": 6, "slice_al": 6, "keepna": 6, "phase": 6, "moon": 6, "stackoverflow": 6, "2531541": 6, "9492254": 6, "keturn": 6, "earlier": 6, "john": 6, "walker": 6, "ecc": 6, "016718": 6, "equat": 6, "2444237": 6, "905": 6, "ecliptic_longitude_epoch": 6, "278": 6, "83354": 6, "ecliptic_longitude_perige": 6, "282": 6, "596403": 6, "eccentr": 6, "moon_mean_longitude_epoch": 6, "975464": 6, "moon_mean_perigee_epoch": 6, "349": 6, "383063": 6, "illumin": 6, "zone": 6, "2444238": 6, "asia": 6, "matter": 6, "central": 6, "precis": 6, "75": 6, "nextnew": 6, "krstn": 6, "eu": 6, "nanpercentil": 6, "in_arr": 6, "rollov": 6, "support": [6, 7, 9], "driven": 6, "placehold": 6, "mixtur": 6, "gum": 6, "diseas": 6, "credibl": 6, "spell": 6, "cast": 6, "variable_pct_chang": 6, "upon": 6, "upper_error": 6, "lower_error": 6, "errorrang": 6, "cum": 6, "qtp": 6, "xn": 6, "broaden": 6, "although": [6, 7, 9], "corrupt": 6, "bay": 6, "theorem": 6, "hot": 6, "history_dai": 6, "set_index": 6, "recur": 6, "weekdai": 6, "commonli": [6, 9], "repeat": [6, 9], "ag": 6, "degre": 6, "dtindex_futur": 6, "full_sort": 6, "nan_arrai": 6, "include_on": 6, "very_smal": 6, "typic": [6, 9], "reshap": [6, 9], "na_str": 6, "categorical_fillna": 6, "handle_unknown": [6, 9], "use_encoded_valu": 6, "downcast": 6, "unalt": 6, "missing_valu": 6, "ordinalencod": [6, 9], "to_numer": 6, "messag": [6, 9], "convert_dtyp": 6, "polish": 6, "999": 6, "dateoffset": [6, 
9], "somewher": 6, "pydata": [6, 9], "stabl": [6, 9], "user_guid": [6, 9], "still": [6, 7, 9], "cut": 6, "older": [6, 9], "eventu": 6, "incomplet": [6, 9], "appear": [6, 9], "upsampl": [6, 7], "silenc": 6, "rest": 6, "configur": 6, "random_st": 6, "wide_arr": 6, "gst": 6, "sgt": 6, "46": 6, "error_buff": 6, "z_init": 6, "z_limit": 6, "z_step": 6, "max_contamin": 6, "sd_weight": 6, "anomaly_count_weight": 6, "consecut": 6, "errors_al": 6, "obj": 6, "maxim": 6, "reduct": 6, "invert": 6, "meet": [6, 9], "yield": 6, "itertool": 6, "more_itertool": 6, "descript": [6, 9], "circa": 6, "decay_span": 6, "displacement_row": 6, "span": 6, "decai": 6, "soften": 6, "first_value_onli": 6, "lanczos_factor": 6, "return_diff": 6, "implent": 6, "somewhat": 6, "statmodelsfilt": 6, "linearregress": 6, "suffix": 6, "_mdfcrst": 6, "vagu": 6, "gap": 6, "std_threshold": 6, "purg": 6, "THE": 6, "cumul": 6, "imprecis": 6, "missing": 6, "scatter": 6, "dure": 6, "reverse_align": 6, "n_bin": 6, "kmean": 6, "kbin": 6, "irrevers": 6, "exponeti": 6, "extrapol": 6, "n_harmnon": 6, "quadrat": 6, "revers": [6, 9], "highest": [6, 7, 9], "But": 6, "1600": 6, "upstream": 6, "regression_param": 6, "grouping_forward_limit": 6, "max_level_shift": 6, "serious": 6, "alter": 6, "rolling_window": 6, "n_futur": 6, "macro_micro": 6, "_lltmicro": 6, "horizon": [6, 9], "simpli": [6, 9], "residu": 6, "plai": 6, "center_on": 6, "assur": [6, 9], "sigma": 6, "run_ord": 6, "season_first": 6, "holiday_param": [6, 9], "dv": 6, "reintroduction_model": 6, "reintroducion": 6, "built": 6, "decim": 6, "on_transform": 6, "on_invers": 6, "force_int": 6, "ceil": 6, "floor": 6, "decomp_typ": 6, "stl": 6, "seaonal": 6, "seaonsal": 6, "hilbert": 6, "method_arg": 6, "wiener": 6, "savgol_filt": 6, "butter": 6, "cheby1": 6, "cheby2": 6, "ellip": 6, "bessel": 6, "oh": 6, "nice": 6, "ash": 6, "my": 6, "tomato": 6, "pippin": 6, "lm": 6, "tt": 6, "yy": 6, "amp": 6, "omega": 6, "fitfunc": 6, "unsym": 6, "question": 6, "16716302": 6, "sine": 6, "curv": 6, "pylab": 6, "deviat": [6, 9], "halflif": 6, "23199796": 6, "condens": 6, "context_slic": 6, "halfmax": 6, "forecastlength": 6, "chunk_siz": 6, "7734": 6, "dtype": 6, "float32": 6, "n_record": 6, "num_column": 6, "num_indic": 6, "braycurti": 6, "start_index": 6, "include_last": 6, "indici": 6, "include_differ": 6, "window_shap": 6, "writeabl": 6, "neighbourhood": 6, "gist": 6, "seberg": 6, "3866040": 6, "newer": 6, "toggl": 6, "__version__": 6, "skip_siz": 6, "downsampl": 6, "num": 6, "window_length": 6, "70296498": 6, "numba": 6, "70304475": 6, "1234": 6, "1step": 6, "num_ob": 6, "stride": 6, "trick": 6, "lib": [6, 9], "stride_trick": 6, "rapidli": 7, "deploi": 7, "m6": 7, "competit": 7, "deliv": 7, "invest": 7, "market": 7, "dozen": 7, "usabl": [7, 9], "These": [7, 9], "addition": [7, 9], "proprietari": 7, "readili": 7, "ten": 7, "hundr": 7, "thousand": [7, 9], "exogen": 7, "integr": 7, "automl": 7, "flagship": 7, "abil": [7, 9], "additon": 7, "advis": 7, "come": [7, 9], "distinct": [7, 9], "ideal": [7, 9], "_hourli": [7, 9], "_monthli": 7, "_weekli": [7, 9], "_yearli": [7, 9], "_live_daili": 7, "fast_parallel": 7, "2019": [7, 9], "forecasts_df": [7, 9], "forecasts_up": 7, "forecasts_low": 7, "particular": [7, 9], "extended_tutori": 7, "md": 7, "guid": 7, "look": [7, 9], "production_exampl": [7, 9], "especi": [7, 9], "predefin": 7, "complex": 7, "pretti": [7, 9], "environ": [7, 9], "toward": [7, 9], "prioriti": 7, "ram": 7, "instanc": 7, "pretrain": 7, "crtl": 7, "recov": 7, "udf": 7, "obvious": [7, 9], "2x": 7, 
"3x": 7, "5x": 7, "no_shared_fast": 7, "decreas": 7, "poorer": 7, "satisfactori": [7, 9], "expens": 7, "feedback": 7, "report": 7, "feel": 7, "favorit": 7, "cours": 7, "codebas": 7, "cat": 7, "henc": 7, "logo": 7, "subpackag": 8, "modul": 8, "_daili": 9, "autot": 9, "df_long": 9, "transact": 9, "altern": 9, "coerc": 9, "minim": 9, "handi": 9, "unit": 9, "side": 9, "oldest": 9, "advantag": 9, "interg": 9, "troubl": 9, "sudden": 9, "overs": 9, "misrepres": 9, "promot": 9, "critic": 9, "tricki": 9, "necess": 9, "leakag": 9, "firstli": 9, "resembl": 9, "enough": 9, "taken": 9, "variat": 9, "valdat": 9, "june": 9, "choic": 9, "messi": 9, "act": 9, "treat": 9, "suspect": 9, "fairli": 9, "whole": 9, "idea": 9, "suffer": 9, "interst": 9, "94": 9, "minneapoli": 9, "paul": 9, "minnesota": 9, "great": 9, "demonstr": 9, "road": 9, "influenc": 9, "alongsid": 9, "volum": 9, "carri": 9, "care": 9, "weights_hourli": 9, "traffic_volum": 9, "49": 9, "168": 9, "lieu": 9, "upper_forecasts_df": 9, "lower_forecasts_df": 9, "By": 9, "impract": 9, "engin": 9, "simplic": 9, "fault": 9, "switch": 9, "evolv": 9, "develop": 9, "example_filenam": 9, "example_export": 9, "deeper": 9, "subsidiari": 9, "df_forecast": 9, "future_regressor_train2d": 9, "future_regressor_forecast2d": 9, "consider": 9, "overfit": 9, "secondli": 9, "composit": 9, "balanc": 9, "qualiti": 9, "iml": 9, "favor": 9, "translat": 9, "insid": 9, "symmetr": 9, "versatil": 9, "human": 9, "coverage_fract": 9, "logarithm": 9, "hiearchial": 9, "went": 9, "wavi": 9, "seriou": 9, "holdout": 9, "pyplot": 9, "plt": 9, "2018": 9, "09": 9, "26": 9, "mosaic_df": 9, "situat": 9, "demand": 9, "tradition": 9, "problem": 9, "exagger": 9, "unfortun": 9, "inher": 9, "sub": 9, "unstabl": 9, "reassign": 9, "wrong": 9, "drive": 9, "label": 9, "recogniz": 9, "usal": 9, "splice": 9, "latter": 9, "depth": 9, "happen": 9, "no_shar": 9, "possbl": 9, "horizontal_gener": 9, "enembl": 9, "extens": 9, "theoret": 9, "studio": 9, "apt": 9, "yum": 9, "sudo": 9, "openbla": 9, "show_config": 9, "doubl": 9, "haven": 9, "broken": 9, "slide": 9, "23": 9, "poissonreg": 9, "squared_error": 9, "histgradientboostingregressor": 9, "uecm": 9, "uniform_filter1d": 9, "stat": 9, "spatial": 9, "Of": 9, "tend": 9, "cu91": 9, "cu101mkl": 9, "lightgbm": 9, "xgboost": 9, "bring": 9, "venv": 9, "anaconda": 9, "miniforg": 9, "numexpr": 9, "bottleneck": 9, "action": 9, "pystan": 9, "forg": 9, "dep": 9, "ext": 9, "pmdarima": 9, "dill": 9, "upgrad": 9, "pointlessli": 9, "mamba": 9, "tqdm": 9, "intelex": 9, "spyder": 9, "torchvis": 9, "torchaudio": 9, "cpuonli": 9, "gpu": 9, "cuda": 9, "mix": 9, "session": 9, "nvidia": 9, "smi": 9, "cudatoolkit": 9, "cudnn": 9, "nccl": 9, "ld_library_path": 9, "conda_prefix": 9, "perman": 9, "bashrc": 9, "env": 9, "mine": 9, "home": 9, "mambaforg": 9, "torch": 9, "url": 9, "whl": 9, "cu113": 9, "cu112": 9, "command": 9, "interchang": 9, "env_nam": 9, "softwar": 9, "oneapi": 9, "ai": 9, "analyt": 9, "toolkit": 9, "aikit37": 9, "aikit": 9, "modin": 9, "dpctl": 9, "config": 9, "omp_num_thread": 9, "use_daal4py_sklearn": 9, "bench": 9, "hang": 9, "clear": 9, "overload": 9, "consumpt": 9, "acceler": 9, "persist": 9, "discuss": 9, "reboot": 9, "heavi": 9, "odd": 9, "shouldn": 9, "greatli": 9, "proper": 9, "future_": 9, "certaini": 9, "Such": 9, "plan": 9, "organ": 9, "inorgan": 9, "busi": 9, "control": 9, "anticp": 9, "hand": 9, "confusingli": 9, "why": 9, "harm": 9, "experi": 9, "scenario": 9, "examin": 9, "enforc": 9, "could": 9, "future_regressor_forecast_2": 9, 
"prediction_2": 9, "forecasts_df_2": 9, "respons": 9, "multilabel_confusion_matrix": 9, "classification_report": 9, "df_full": 9, "historic_lower_limit": 9, "risk_df_upp": 9, "risk_df_low": 9, "historic_upper_risk_df": 9, "historic_lower_risk_df": 9, "eval_low": 9, "eval_upp": 9, "pred_low": 9, "pred_upp": 9, "zero_divis": 9, "target_nam": 9, "effectiv": 9, "far": 9, "tighter": 9, "extrem": 9, "portion": 9, "analyz": 9, "pick": 9, "anti": 9, "signific": 9, "wiki_pag": 9, "mod": 9, "ll": 9, "full_dat": 9, "date_rang": 9, "2014": 9, "2024": 9, "prophet_holidai": 9, "familiar": 9, "manuali": 9, "clarifi": 9, "text": 9, "editor": 9, "guarante": 9, "incorpor": 9, "crude": 9, "meaning": 9, "properli": 9, "coercibl": 9, "unconnect": 9, "transformer_dict": 9, "tran": 9, "df_tran": 9, "df_inv_return": 9, "tradit": 9, "draw": 9, "pool": 9, "massiv": 9, "global": 9, "pars": 9, "gradientboostingregressor": 9, "experiment": 9, "bla": 9, "lapack": 9, "nyi": 9, "_": 9}, "objects": {"": [[1, 0, 0, "-", "autots"]], "autots": [[1, 1, 1, "", "AnomalyDetector"], [1, 1, 1, "", "AutoTS"], [1, 1, 1, "", "Cassandra"], [1, 1, 1, "", "EventRiskForecast"], [1, 1, 1, "", "GeneralTransformer"], [1, 1, 1, "", "HolidayDetector"], [1, 4, 1, "", "RandomTransform"], [1, 3, 1, "", "TransformTS"], [1, 4, 1, "", "create_lagged_regressor"], [1, 4, 1, "", "create_regressor"], [2, 0, 0, "-", "datasets"], [3, 0, 0, "-", "evaluator"], [1, 4, 1, "", "infer_frequency"], [1, 4, 1, "", "load_artificial"], [1, 4, 1, "", "load_daily"], [1, 4, 1, "", "load_hourly"], [1, 4, 1, "", "load_linear"], [1, 4, 1, "", "load_live_daily"], [1, 4, 1, "", "load_monthly"], [1, 4, 1, "", "load_sine"], [1, 4, 1, "", "load_weekdays"], [1, 4, 1, "", "load_weekly"], [1, 4, 1, "", "load_yearly"], [1, 4, 1, "", "long_to_wide"], [1, 4, 1, "", "model_forecast"], [4, 0, 0, "-", "models"], [5, 0, 0, "-", "templates"], [6, 0, 0, "-", "tools"]], "autots.AnomalyDetector": [[1, 2, 1, "", "detect"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "fit_anomaly_classifier"], [1, 2, 1, "", "get_new_params"], [1, 2, 1, "", "plot"], [1, 2, 1, "", "score_to_anomaly"]], "autots.AutoTS": [[1, 2, 1, "", "back_forecast"], [1, 3, 1, "", "best_model"], [1, 3, 1, "", "best_model_ensemble"], [1, 3, 1, "", "best_model_name"], [1, 3, 1, "", "best_model_params"], [1, 2, 1, "", "best_model_per_series_mape"], [1, 2, 1, "", "best_model_per_series_score"], [1, 3, 1, "", "best_model_transformation_params"], [1, 3, 1, "", "df_wide_numeric"], [1, 2, 1, "", "diagnose_params"], [1, 2, 1, "", "expand_horizontal"], [1, 2, 1, "", "export_best_model"], [1, 2, 1, "", "export_template"], [1, 2, 1, "", "failure_rate"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "fit_data"], [1, 2, 1, "", "get_metric_corr"], [1, 2, 1, "", "get_new_params"], [1, 2, 1, "", "horizontal_per_generation"], [1, 2, 1, "", "horizontal_to_df"], [1, 2, 1, "", "import_best_model"], [1, 2, 1, "", "import_results"], [1, 2, 1, "", "import_template"], [1, 2, 1, "", "list_failed_model_types"], [1, 2, 1, "", "load_template"], [1, 2, 1, "", "mosaic_to_df"], [1, 2, 1, "", "parse_best_model"], [1, 2, 1, "", "plot_back_forecast"], [1, 2, 1, "", "plot_backforecast"], [1, 2, 1, "", "plot_generation_loss"], [1, 2, 1, "", "plot_horizontal"], [1, 2, 1, "", "plot_horizontal_model_count"], [1, 2, 1, "", "plot_horizontal_per_generation"], [1, 2, 1, "", "plot_horizontal_transformers"], [1, 2, 1, "", "plot_metric_corr"], [1, 2, 1, "", "plot_per_series_error"], [1, 2, 1, "", "plot_per_series_mape"], [1, 2, 1, "", "plot_per_series_smape"], [1, 2, 1, "", 
"plot_transformer_failure_rate"], [1, 2, 1, "", "plot_validations"], [1, 2, 1, "", "predict"], [1, 3, 1, "", "regression_check"], [1, 2, 1, "", "results"], [1, 2, 1, "", "retrieve_validation_forecasts"], [1, 2, 1, "", "save_template"], [1, 3, 1, "", "score_per_series"], [1, 2, 1, "", "validation_agg"]], "autots.AutoTS.initial_results": [[1, 3, 1, "", "model_results"]], "autots.Cassandra..anomaly_detector": [[1, 3, 1, "", "anomalies"], [1, 3, 1, "", "scores"]], "autots.Cassandra.": [[1, 3, 1, "", "holiday_count"], [1, 3, 1, "", "holidays"], [1, 3, 1, "", "params"], [1, 3, 1, "", "predict_x_array"], [1, 3, 1, "", "predicted_trend"], [1, 3, 1, "", "trend_train"], [1, 3, 1, "", "x_array"]], "autots.Cassandra": [[1, 2, 1, "", "analyze_trend"], [1, 2, 1, "", "auto_fit"], [1, 2, 1, "", "base_scaler"], [1, 2, 1, "", "compare_actual_components"], [1, 2, 1, "", "create_forecast_index"], [1, 2, 1, "", "create_t"], [1, 2, 1, "", "cross_validate"], [1, 2, 1, "", "feature_importance"], [1, 2, 1, "id0", "fit"], [1, 2, 1, "", "fit_data"], [1, 2, 1, "id1", "get_new_params"], [1, 2, 1, "", "get_params"], [1, 2, 1, "", "next_fit"], [1, 2, 1, "id2", "plot_components"], [1, 2, 1, "id3", "plot_forecast"], [1, 2, 1, "", "plot_things"], [1, 2, 1, "id4", "plot_trend"], [1, 2, 1, "id5", "predict"], [1, 2, 1, "", "predict_new_product"], [1, 2, 1, "", "process_components"], [1, 2, 1, "id6", "return_components"], [1, 2, 1, "", "rolling_trend"], [1, 2, 1, "", "scale_data"], [1, 2, 1, "", "to_origin_space"], [1, 2, 1, "", "treatment_causal_impact"]], "autots.Cassandra.holiday_detector": [[1, 2, 1, "", "dates_to_holidays"]], "autots.EventRiskForecast": [[1, 2, 1, "id9", "fit"], [1, 2, 1, "id10", "generate_historic_risk_array"], [1, 2, 1, "id11", "generate_result_windows"], [1, 2, 1, "id12", "generate_risk_array"], [1, 2, 1, "id13", "plot"], [1, 2, 1, "", "plot_eval"], [1, 2, 1, "id14", "predict"], [1, 2, 1, "id15", "predict_historic"], [1, 2, 1, "id16", "set_limit"]], "autots.GeneralTransformer": [[1, 2, 1, "", "fill_na"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "fit_transform"], [1, 2, 1, "", "get_new_params"], [1, 2, 1, "", "inverse_transform"], [1, 2, 1, "", "retrieve_transformer"], [1, 2, 1, "", "transform"]], "autots.HolidayDetector": [[1, 2, 1, "", "dates_to_holidays"], [1, 2, 1, "", "detect"], [1, 2, 1, "", "fit"], [1, 2, 1, "", "get_new_params"], [1, 2, 1, "", "plot"], [1, 2, 1, "", "plot_anomaly"]], "autots.datasets": [[2, 0, 0, "-", "fred"], [2, 4, 1, "", "load_artificial"], [2, 4, 1, "", "load_daily"], [2, 4, 1, "", "load_hourly"], [2, 4, 1, "", "load_linear"], [2, 4, 1, "", "load_live_daily"], [2, 4, 1, "", "load_monthly"], [2, 4, 1, "", "load_sine"], [2, 4, 1, "", "load_weekdays"], [2, 4, 1, "", "load_weekly"], [2, 4, 1, "", "load_yearly"], [2, 4, 1, "", "load_zeroes"]], "autots.datasets.fred": [[2, 4, 1, "", "get_fred_data"]], "autots.evaluator": [[3, 0, 0, "-", "anomaly_detector"], [3, 0, 0, "-", "auto_model"], [3, 0, 0, "-", "auto_ts"], [3, 0, 0, "-", "benchmark"], [3, 0, 0, "-", "event_forecasting"], [3, 0, 0, "-", "metrics"], [3, 0, 0, "-", "validation"]], "autots.evaluator.anomaly_detector": [[3, 1, 1, "", "AnomalyDetector"], [3, 1, 1, "", "HolidayDetector"]], "autots.evaluator.anomaly_detector.AnomalyDetector": [[3, 2, 1, "", "detect"], [3, 2, 1, "", "fit"], [3, 2, 1, "", "fit_anomaly_classifier"], [3, 2, 1, "", "get_new_params"], [3, 2, 1, "", "plot"], [3, 2, 1, "", "score_to_anomaly"]], "autots.evaluator.anomaly_detector.HolidayDetector": [[3, 2, 1, "", "dates_to_holidays"], [3, 2, 1, "", "detect"], [3, 
2, 1, "", "fit"], [3, 2, 1, "", "get_new_params"], [3, 2, 1, "", "plot"], [3, 2, 1, "", "plot_anomaly"]], "autots.evaluator.auto_model": [[3, 4, 1, "", "ModelMonster"], [3, 1, 1, "", "ModelPrediction"], [3, 4, 1, "", "NewGeneticTemplate"], [3, 4, 1, "", "RandomTemplate"], [3, 1, 1, "", "TemplateEvalObject"], [3, 4, 1, "", "TemplateWizard"], [3, 4, 1, "", "UniqueTemplates"], [3, 4, 1, "", "back_forecast"], [3, 4, 1, "", "create_model_id"], [3, 4, 1, "", "dict_recombination"], [3, 4, 1, "", "generate_score"], [3, 4, 1, "", "generate_score_per_series"], [3, 4, 1, "", "horizontal_template_to_model_list"], [3, 4, 1, "", "model_forecast"], [3, 4, 1, "", "random_model"], [3, 4, 1, "", "remove_leading_zeros"], [3, 4, 1, "", "trans_dict_recomb"], [3, 4, 1, "", "unpack_ensemble_models"], [3, 4, 1, "", "validation_aggregation"]], "autots.evaluator.auto_model.ModelPrediction": [[3, 2, 1, "", "fit"], [3, 2, 1, "", "fit_data"], [3, 2, 1, "", "predict"]], "autots.evaluator.auto_model.TemplateEvalObject": [[3, 2, 1, "", "concat"], [3, 3, 1, "", "full_mae_errors"], [3, 3, 1, "", "full_mae_ids"], [3, 2, 1, "", "load"], [3, 2, 1, "", "save"]], "autots.evaluator.auto_ts": [[3, 1, 1, "", "AutoTS"], [3, 4, 1, "", "error_correlations"], [3, 4, 1, "", "fake_regressor"]], "autots.evaluator.auto_ts.AutoTS": [[3, 2, 1, "", "back_forecast"], [3, 3, 1, "", "best_model"], [3, 3, 1, "", "best_model_ensemble"], [3, 3, 1, "", "best_model_name"], [3, 3, 1, "", "best_model_params"], [3, 2, 1, "", "best_model_per_series_mape"], [3, 2, 1, "", "best_model_per_series_score"], [3, 3, 1, "", "best_model_transformation_params"], [3, 3, 1, "", "df_wide_numeric"], [3, 2, 1, "", "diagnose_params"], [3, 2, 1, "", "expand_horizontal"], [3, 2, 1, "", "export_best_model"], [3, 2, 1, "", "export_template"], [3, 2, 1, "", "failure_rate"], [3, 2, 1, "", "fit"], [3, 2, 1, "", "fit_data"], [3, 2, 1, "", "get_metric_corr"], [3, 2, 1, "", "get_new_params"], [3, 2, 1, "", "horizontal_per_generation"], [3, 2, 1, "", "horizontal_to_df"], [3, 2, 1, "", "import_best_model"], [3, 2, 1, "", "import_results"], [3, 2, 1, "", "import_template"], [3, 2, 1, "", "list_failed_model_types"], [3, 2, 1, "", "load_template"], [3, 2, 1, "", "mosaic_to_df"], [3, 2, 1, "", "parse_best_model"], [3, 2, 1, "", "plot_back_forecast"], [3, 2, 1, "", "plot_backforecast"], [3, 2, 1, "", "plot_generation_loss"], [3, 2, 1, "", "plot_horizontal"], [3, 2, 1, "", "plot_horizontal_model_count"], [3, 2, 1, "", "plot_horizontal_per_generation"], [3, 2, 1, "", "plot_horizontal_transformers"], [3, 2, 1, "", "plot_metric_corr"], [3, 2, 1, "", "plot_per_series_error"], [3, 2, 1, "", "plot_per_series_mape"], [3, 2, 1, "", "plot_per_series_smape"], [3, 2, 1, "", "plot_transformer_failure_rate"], [3, 2, 1, "", "plot_validations"], [3, 2, 1, "", "predict"], [3, 3, 1, "", "regression_check"], [3, 2, 1, "", "results"], [3, 2, 1, "", "retrieve_validation_forecasts"], [3, 2, 1, "", "save_template"], [3, 3, 1, "", "score_per_series"], [3, 2, 1, "", "validation_agg"]], "autots.evaluator.auto_ts.AutoTS.initial_results": [[3, 3, 1, "", "model_results"]], "autots.evaluator.benchmark": [[3, 1, 1, "", "Benchmark"]], "autots.evaluator.benchmark.Benchmark": [[3, 2, 1, "", "run"]], "autots.evaluator.event_forecasting": [[3, 1, 1, "", "EventRiskForecast"], [3, 4, 1, "", "extract_result_windows"], [3, 4, 1, "", "extract_window_index"], [3, 4, 1, "", "set_limit_forecast"], [3, 4, 1, "", "set_limit_forecast_historic"]], "autots.evaluator.event_forecasting.EventRiskForecast": [[3, 2, 1, "id0", "fit"], [3, 
2, 1, "id7", "generate_historic_risk_array"], [3, 2, 1, "id8", "generate_result_windows"], [3, 2, 1, "id9", "generate_risk_array"], [3, 2, 1, "id10", "plot"], [3, 2, 1, "", "plot_eval"], [3, 2, 1, "id11", "predict"], [3, 2, 1, "id12", "predict_historic"], [3, 2, 1, "id13", "set_limit"]], "autots.evaluator.metrics": [[3, 4, 1, "", "array_last_val"], [3, 4, 1, "", "chi_squared_hist_distribution_loss"], [3, 4, 1, "", "containment"], [3, 4, 1, "", "contour"], [3, 4, 1, "", "default_scaler"], [3, 4, 1, "", "dwae"], [3, 4, 1, "", "full_metric_evaluation"], [3, 4, 1, "", "kde"], [3, 4, 1, "", "kde_kl_distance"], [3, 4, 1, "", "kl_divergence"], [3, 4, 1, "", "linearity"], [3, 4, 1, "", "mae"], [3, 4, 1, "", "mda"], [3, 4, 1, "", "mean_absolute_differential_error"], [3, 4, 1, "", "mean_absolute_error"], [3, 4, 1, "", "medae"], [3, 4, 1, "", "median_absolute_error"], [3, 4, 1, "", "mlvb"], [3, 4, 1, "", "mqae"], [3, 4, 1, "", "msle"], [3, 4, 1, "", "numpy_ffill"], [3, 4, 1, "", "oda"], [3, 4, 1, "", "pinball_loss"], [3, 4, 1, "", "precomp_wasserstein"], [3, 4, 1, "", "qae"], [3, 4, 1, "", "rmse"], [3, 4, 1, "", "root_mean_square_error"], [3, 4, 1, "", "rps"], [3, 4, 1, "", "scaled_pinball_loss"], [3, 4, 1, "", "smape"], [3, 4, 1, "", "smoothness"], [3, 4, 1, "", "spl"], [3, 4, 1, "", "symmetric_mean_absolute_percentage_error"], [3, 4, 1, "", "threshold_loss"], [3, 4, 1, "", "unsorted_wasserstein"], [3, 4, 1, "", "wasserstein"]], "autots.evaluator.validation": [[3, 4, 1, "", "extract_seasonal_val_periods"], [3, 4, 1, "", "generate_validation_indices"], [3, 4, 1, "", "validate_num_validations"]], "autots.models": [[4, 0, 0, "-", "arch"], [4, 0, 0, "-", "base"], [4, 0, 0, "-", "basics"], [4, 0, 0, "-", "cassandra"], [4, 0, 0, "-", "dnn"], [4, 0, 0, "-", "ensemble"], [4, 0, 0, "-", "gluonts"], [4, 0, 0, "-", "greykite"], [4, 0, 0, "-", "matrix_var"], [4, 0, 0, "-", "mlensemble"], [4, 0, 0, "-", "model_list"], [4, 0, 0, "-", "neural_forecast"], [4, 0, 0, "-", "prophet"], [4, 0, 0, "-", "pytorch"], [4, 0, 0, "-", "sklearn"], [4, 0, 0, "-", "statsmodels"], [4, 0, 0, "-", "tfp"], [4, 0, 0, "-", "tide"]], "autots.models.arch": [[4, 1, 1, "", "ARCH"]], "autots.models.arch.ARCH": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.base": [[4, 1, 1, "", "ModelObject"], [4, 1, 1, "", "PredictionObject"], [4, 4, 1, "", "apply_constraints"], [4, 4, 1, "", "calculate_peak_density"], [4, 4, 1, "", "create_forecast_index"], [4, 4, 1, "", "create_seaborn_palette_from_cmap"], [4, 4, 1, "", "extract_single_series_from_horz"], [4, 4, 1, "", "extract_single_transformer"], [4, 4, 1, "", "plot_distributions"]], "autots.models.base.ModelObject": [[4, 2, 1, "", "basic_profile"], [4, 2, 1, "", "create_forecast_index"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "time"]], "autots.models.base.PredictionObject": [[4, 2, 1, "id0", "apply_constraints"], [4, 2, 1, "id1", "evaluate"], [4, 2, 1, "", "extract_ensemble_runtimes"], [4, 3, 1, "", "forecast"], [4, 2, 1, "id2", "long_form_results"], [4, 3, 1, "", "lower_forecast"], [4, 3, 1, "", "model_name"], [4, 3, 1, "", "model_parameters"], [4, 2, 1, "id3", "plot"], [4, 2, 1, "", "plot_df"], [4, 2, 1, "", "plot_ensemble_runtimes"], [4, 2, 1, "", "plot_grid"], [4, 2, 1, "id4", "total_runtime"], [4, 3, 1, "", "transformation_parameters"], [4, 3, 1, "", "upper_forecast"]], "autots.models.basics": [[4, 1, 1, "", "AverageValueNaive"], [4, 1, 1, "", 
"BallTreeMultivariateMotif"], [4, 1, 1, "", "ConstantNaive"], [4, 1, 1, "", "FFT"], [4, 1, 1, "", "KalmanStateSpace"], [4, 1, 1, "", "LastValueNaive"], [4, 1, 1, "", "MetricMotif"], [4, 1, 1, "", "Motif"], [4, 1, 1, "", "MotifSimulation"], [4, 1, 1, "", "NVAR"], [4, 1, 1, "", "SeasonalNaive"], [4, 1, 1, "", "SeasonalityMotif"], [4, 1, 1, "", "SectionalMotif"], [4, 3, 1, "", "ZeroesNaive"], [4, 4, 1, "", "looped_motif"], [4, 4, 1, "", "predict_reservoir"]], "autots.models.basics.AverageValueNaive": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.BallTreeMultivariateMotif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.ConstantNaive": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.FFT": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.KalmanStateSpace": [[4, 2, 1, "", "cost_function"], [4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"], [4, 2, 1, "", "tune_observational_noise"]], "autots.models.basics.LastValueNaive": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.MetricMotif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.Motif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.MotifSimulation": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.NVAR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.SeasonalNaive": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.SeasonalityMotif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.basics.SectionalMotif": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.cassandra": [[4, 1, 1, "", "BayesianMultiOutputRegression"], [4, 1, 1, "", "Cassandra"], [4, 4, 1, "", "clean_regressor"], [4, 4, 1, "", "cost_function_dwae"], [4, 4, 1, "", "cost_function_l1"], [4, 4, 1, "", "cost_function_l1_positive"], [4, 4, 1, "", "cost_function_l2"], [4, 4, 1, "", "cost_function_quantile"], [4, 4, 1, "", "create_t"], [4, 4, 1, "", "fit_linear_model"], [4, 4, 1, "", "lstsq_minimize"], [4, 4, 1, "", "lstsq_solve"]], "autots.models.cassandra.BayesianMultiOutputRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"], [4, 2, 1, "", "sample_posterior"]], "autots.models.cassandra.Cassandra..anomaly_detector": [[4, 3, 1, "", "anomalies"], [4, 3, 1, "", "scores"]], "autots.models.cassandra.Cassandra.": [[4, 3, 1, "", "holiday_count"], [4, 3, 1, "", "holidays"], [4, 3, 1, "", "params"], [4, 3, 1, "", "predict_x_array"], [4, 3, 1, "", "predicted_trend"], [4, 3, 1, "", "trend_train"], [4, 3, 1, "", "x_array"]], "autots.models.cassandra.Cassandra": [[4, 2, 1, "", "analyze_trend"], [4, 2, 1, "", 
"auto_fit"], [4, 2, 1, "", "base_scaler"], [4, 2, 1, "", "compare_actual_components"], [4, 2, 1, "", "create_forecast_index"], [4, 2, 1, "", "create_t"], [4, 2, 1, "", "cross_validate"], [4, 2, 1, "", "feature_importance"], [4, 2, 1, "id5", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "id6", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "next_fit"], [4, 2, 1, "id7", "plot_components"], [4, 2, 1, "id8", "plot_forecast"], [4, 2, 1, "", "plot_things"], [4, 2, 1, "id9", "plot_trend"], [4, 2, 1, "id10", "predict"], [4, 2, 1, "", "predict_new_product"], [4, 2, 1, "", "process_components"], [4, 2, 1, "id11", "return_components"], [4, 2, 1, "", "rolling_trend"], [4, 2, 1, "", "scale_data"], [4, 2, 1, "", "to_origin_space"], [4, 2, 1, "", "treatment_causal_impact"]], "autots.models.cassandra.Cassandra.holiday_detector": [[4, 2, 1, "", "dates_to_holidays"]], "autots.models.dnn": [[4, 1, 1, "", "KerasRNN"], [4, 1, 1, "", "Transformer"], [4, 4, 1, "", "transformer_build_model"], [4, 4, 1, "", "transformer_encoder"]], "autots.models.dnn.KerasRNN": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"]], "autots.models.dnn.Transformer": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"]], "autots.models.ensemble": [[4, 4, 1, "", "BestNEnsemble"], [4, 4, 1, "", "DistEnsemble"], [4, 4, 1, "", "EnsembleForecast"], [4, 4, 1, "", "EnsembleTemplateGenerator"], [4, 4, 1, "", "HDistEnsemble"], [4, 4, 1, "", "HorizontalEnsemble"], [4, 4, 1, "", "HorizontalTemplateGenerator"], [4, 4, 1, "", "MosaicEnsemble"], [4, 4, 1, "", "find_pattern"], [4, 4, 1, "", "generalize_horizontal"], [4, 4, 1, "", "generate_crosshair_score"], [4, 4, 1, "", "generate_crosshair_score_list"], [4, 4, 1, "", "generate_mosaic_template"], [4, 4, 1, "", "horizontal_classifier"], [4, 4, 1, "", "horizontal_xy"], [4, 4, 1, "", "is_horizontal"], [4, 4, 1, "", "is_mosaic"], [4, 4, 1, "", "mlens_helper"], [4, 4, 1, "", "mosaic_classifier"], [4, 4, 1, "", "mosaic_or_horizontal"], [4, 4, 1, "", "mosaic_to_horizontal"], [4, 4, 1, "", "mosaic_xy"], [4, 4, 1, "", "n_limited_horz"], [4, 4, 1, "", "parse_forecast_length"], [4, 4, 1, "", "parse_horizontal"], [4, 4, 1, "", "parse_mosaic"], [4, 4, 1, "", "process_mosaic_arrays"], [4, 4, 1, "", "summarize_series"]], "autots.models.gluonts": [[4, 1, 1, "", "GluonTS"]], "autots.models.gluonts.GluonTS": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.greykite": [[4, 1, 1, "", "Greykite"], [4, 4, 1, "", "seek_the_oracle"]], "autots.models.greykite.Greykite": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.matrix_var": [[4, 1, 1, "", "LATC"], [4, 1, 1, "", "MAR"], [4, 1, 1, "", "RRVAR"], [4, 1, 1, "", "TMF"], [4, 4, 1, "", "conj_grad_w"], [4, 4, 1, "", "conj_grad_x"], [4, 4, 1, "", "dmd"], [4, 4, 1, "", "dmd4cast"], [4, 4, 1, "", "ell_w"], [4, 4, 1, "", "ell_x"], [4, 4, 1, "", "generate_Psi"], [4, 4, 1, "", "latc_imputer"], [4, 4, 1, "", "latc_predictor"], [4, 4, 1, "", "mar"], [4, 4, 1, "", "mat2ten"], [4, 4, 1, "", "rrvar"], [4, 4, 1, "", "svt_tnn"], [4, 4, 1, "", "ten2mat"], [4, 4, 1, "", "tmf"], [4, 4, 1, "", "update_cg"], [4, 4, 1, "", "var"], [4, 4, 1, "", "var4cast"]], "autots.models.matrix_var.LATC": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.matrix_var.MAR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", 
"get_params"], [4, 2, 1, "", "predict"]], "autots.models.matrix_var.RRVAR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.matrix_var.TMF": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.mlensemble": [[4, 1, 1, "", "MLEnsemble"], [4, 4, 1, "", "create_feature"]], "autots.models.mlensemble.MLEnsemble": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.model_list": [[4, 4, 1, "", "auto_model_list"], [4, 4, 1, "", "model_list_to_dict"]], "autots.models.neural_forecast": [[4, 1, 1, "", "NeuralForecast"]], "autots.models.neural_forecast.NeuralForecast": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.prophet": [[4, 1, 1, "", "FBProphet"], [4, 1, 1, "", "NeuralProphet"]], "autots.models.prophet.FBProphet": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.prophet.NeuralProphet": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.pytorch": [[4, 1, 1, "", "PytorchForecasting"]], "autots.models.pytorch.PytorchForecasting": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn": [[4, 1, 1, "", "ComponentAnalysis"], [4, 1, 1, "", "DatepartRegression"], [4, 1, 1, "", "MultivariateRegression"], [4, 1, 1, "", "PreprocessingRegression"], [4, 1, 1, "", "RollingRegression"], [4, 1, 1, "", "UnivariateRegression"], [4, 1, 1, "", "VectorizedMultiOutputGPR"], [4, 1, 1, "", "WindowRegression"], [4, 4, 1, "", "generate_classifier_params"], [4, 4, 1, "", "generate_regressor_params"], [4, 4, 1, "", "retrieve_classifier"], [4, 4, 1, "", "retrieve_regressor"], [4, 4, 1, "", "rolling_x_regressor"], [4, 4, 1, "", "rolling_x_regressor_regressor"]], "autots.models.sklearn.ComponentAnalysis": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.DatepartRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.MultivariateRegression": [[4, 2, 1, "", "base_scaler"], [4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"], [4, 2, 1, "", "scale_data"], [4, 2, 1, "", "to_origin_space"]], "autots.models.sklearn.PreprocessingRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.RollingRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.UnivariateRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.sklearn.VectorizedMultiOutputGPR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"], [4, 2, 1, "", "predict_proba"]], "autots.models.sklearn.WindowRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "fit_data"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], 
"autots.models.statsmodels": [[4, 1, 1, "", "ARDL"], [4, 1, 1, "", "ARIMA"], [4, 1, 1, "", "DynamicFactor"], [4, 1, 1, "", "DynamicFactorMQ"], [4, 1, 1, "", "ETS"], [4, 1, 1, "", "GLM"], [4, 1, 1, "", "GLS"], [4, 1, 1, "", "Theta"], [4, 1, 1, "", "UnobservedComponents"], [4, 1, 1, "", "VAR"], [4, 1, 1, "", "VARMAX"], [4, 1, 1, "", "VECM"], [4, 4, 1, "", "arima_seek_the_oracle"], [4, 4, 1, "", "glm_forecast_by_column"]], "autots.models.statsmodels.ARDL": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.ARIMA": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.DynamicFactor": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.DynamicFactorMQ": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.ETS": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.GLM": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.GLS": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.Theta": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.UnobservedComponents": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.VAR": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.VARMAX": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.statsmodels.VECM": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.tfp": [[4, 1, 1, "", "TFPRegression"], [4, 1, 1, "", "TFPRegressor"], [4, 1, 1, "", "TensorflowSTS"]], "autots.models.tfp.TFPRegression": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.tfp.TFPRegressor": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "predict"]], "autots.models.tfp.TensorflowSTS": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.tide": [[4, 1, 1, "", "TiDE"], [4, 1, 1, "", "TimeCovariates"], [4, 1, 1, "", "TimeSeriesdata"], [4, 4, 1, "", "get_HOLIDAYS"], [4, 4, 1, "", "mae_loss"], [4, 4, 1, "", "mape"], [4, 4, 1, "", "nrmse"], [4, 4, 1, "", "rmse"], [4, 4, 1, "", "smape"], [4, 4, 1, "", "wape"]], "autots.models.tide.TiDE": [[4, 2, 1, "", "fit"], [4, 2, 1, "", "get_new_params"], [4, 2, 1, "", "get_params"], [4, 2, 1, "", "predict"]], "autots.models.tide.TimeCovariates": [[4, 2, 1, "", "get_covariates"]], "autots.models.tide.TimeSeriesdata": [[4, 2, 1, "", "test_val_gen"], [4, 2, 1, "", "tf_dataset"], [4, 2, 1, "", "train_gen"]], "autots.templates": [[5, 0, 0, "-", "general"]], "autots.templates.general": [[5, 5, 1, "", "general_template"]], "autots.tools": [[6, 0, 0, "-", "anomaly_utils"], [6, 0, 0, "-", "calendar"], [6, 0, 0, "-", 
"cointegration"], [6, 0, 0, "-", "cpu_count"], [6, 0, 0, "-", "fast_kalman"], [6, 0, 0, "-", "fft"], [6, 0, 0, "-", "hierarchial"], [6, 0, 0, "-", "holiday"], [6, 0, 0, "-", "impute"], [6, 0, 0, "-", "lunar"], [6, 0, 0, "-", "percentile"], [6, 0, 0, "-", "probabilistic"], [6, 0, 0, "-", "profile"], [6, 0, 0, "-", "regressor"], [6, 0, 0, "-", "seasonal"], [6, 0, 0, "-", "shaping"], [6, 0, 0, "-", "thresholding"], [6, 0, 0, "-", "transform"], [6, 0, 0, "-", "window_functions"]], "autots.tools.anomaly_utils": [[6, 4, 1, "", "anomaly_df_to_holidays"], [6, 4, 1, "", "anomaly_new_params"], [6, 4, 1, "", "create_dates_df"], [6, 4, 1, "", "dates_to_holidays"], [6, 4, 1, "", "detect_anomalies"], [6, 4, 1, "", "holiday_new_params"], [6, 4, 1, "", "limits_to_anomalies"], [6, 4, 1, "", "loop_sk_outliers"], [6, 4, 1, "", "nonparametric_multivariate"], [6, 4, 1, "", "sk_outliers"], [6, 4, 1, "", "values_to_anomalies"], [6, 4, 1, "", "zscore_survival_function"]], "autots.tools.calendar": [[6, 4, 1, "", "gregorian_to_chinese"], [6, 4, 1, "", "gregorian_to_christian_lunar"], [6, 4, 1, "", "gregorian_to_hebrew"], [6, 4, 1, "", "gregorian_to_islamic"], [6, 4, 1, "", "heb_is_leap"], [6, 4, 1, "", "lunar_from_lunar"], [6, 4, 1, "", "lunar_from_lunar_full"], [6, 4, 1, "", "to_jd"]], "autots.tools.cointegration": [[6, 4, 1, "", "btcd_decompose"], [6, 4, 1, "", "coint_johansen"], [6, 4, 1, "", "fourier_series"], [6, 4, 1, "", "lagmat"]], "autots.tools.cpu_count": [[6, 4, 1, "", "cpu_count"], [6, 4, 1, "", "set_n_jobs"]], "autots.tools.fast_kalman": [[6, 1, 1, "", "Gaussian"], [6, 1, 1, "", "KalmanFilter"], [6, 4, 1, "", "autoshape"], [6, 4, 1, "", "ddot"], [6, 4, 1, "", "ddot_t_right"], [6, 4, 1, "", "ddot_t_right_old"], [6, 4, 1, "", "dinv"], [6, 4, 1, "", "douter"], [6, 4, 1, "", "em_initial_state"], [6, 4, 1, "", "ensure_matrix"], [6, 4, 1, "", "holt_winters_damped_matrices"], [6, 4, 1, "", "new_kalman_params"], [6, 4, 1, "", "predict"], [6, 4, 1, "", "predict_observation"], [6, 4, 1, "", "priv_smooth"], [6, 4, 1, "", "priv_update_with_nan_check"], [6, 4, 1, "", "random_state_space"], [6, 4, 1, "", "smooth"], [6, 4, 1, "", "update"], [6, 4, 1, "", "update_with_nan_check"]], "autots.tools.fast_kalman.Gaussian": [[6, 2, 1, "", "empty"], [6, 2, 1, "", "unvectorize_state"], [6, 2, 1, "", "unvectorize_vars"]], "autots.tools.fast_kalman.KalmanFilter": [[6, 1, 1, "", "Result"], [6, 2, 1, "", "compute"], [6, 2, 1, "", "em"], [6, 2, 1, "", "em_observation_noise"], [6, 2, 1, "", "em_process_noise"], [6, 2, 1, "", "predict"], [6, 2, 1, "", "predict_next"], [6, 2, 1, "", "predict_observation"], [6, 2, 1, "", "smooth"], [6, 2, 1, "", "smooth_current"], [6, 2, 1, "", "update"]], "autots.tools.fft": [[6, 1, 1, "", "FFT"], [6, 4, 1, "", "fourier_extrapolation"]], "autots.tools.fft.FFT": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "predict"]], "autots.tools.hierarchial": [[6, 1, 1, "", "hierarchial"]], "autots.tools.hierarchial.hierarchial": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "reconcile"], [6, 2, 1, "", "transform"]], "autots.tools.holiday": [[6, 4, 1, "", "holiday_flag"], [6, 4, 1, "", "query_holidays"]], "autots.tools.impute": [[6, 4, 1, "", "FillNA"], [6, 1, 1, "", "SeasonalityMotifImputer"], [6, 1, 1, "", "SimpleSeasonalityMotifImputer"], [6, 4, 1, "", "biased_ffill"], [6, 4, 1, "", "fake_date_fill"], [6, 4, 1, "", "fake_date_fill_old"], [6, 4, 1, "", "fill_forward"], [6, 4, 1, "", "fill_forward_alt"], [6, 4, 1, "", "fill_mean"], [6, 4, 1, "", "fill_mean_old"], [6, 4, 1, "", "fill_median"], [6, 4, 1, "", "fill_median_old"], [6, 
4, 1, "", "fill_zero"], [6, 4, 1, "", "fillna_np"], [6, 4, 1, "", "rolling_mean"]], "autots.tools.impute.SeasonalityMotifImputer": [[6, 2, 1, "", "impute"]], "autots.tools.impute.SimpleSeasonalityMotifImputer": [[6, 2, 1, "", "impute"]], "autots.tools.lunar": [[6, 4, 1, "", "dcos"], [6, 4, 1, "", "dsin"], [6, 4, 1, "", "fixangle"], [6, 4, 1, "", "kepler"], [6, 4, 1, "", "moon_phase"], [6, 4, 1, "", "moon_phase_df"], [6, 4, 1, "", "phase_string"], [6, 4, 1, "", "todeg"], [6, 4, 1, "", "torad"]], "autots.tools.percentile": [[6, 4, 1, "", "nan_percentile"], [6, 4, 1, "", "nan_quantile"], [6, 4, 1, "", "trimmed_mean"]], "autots.tools.probabilistic": [[6, 4, 1, "", "Point_to_Probability"], [6, 4, 1, "", "Variable_Point_to_Probability"], [6, 4, 1, "", "historic_quantile"], [6, 4, 1, "", "inferred_normal"], [6, 4, 1, "", "percentileofscore_appliable"]], "autots.tools.profile": [[6, 4, 1, "", "data_profile"]], "autots.tools.regressor": [[6, 4, 1, "", "create_lagged_regressor"], [6, 4, 1, "", "create_regressor"]], "autots.tools.seasonal": [[6, 4, 1, "", "create_datepart_components"], [6, 4, 1, "", "create_seasonality_feature"], [6, 4, 1, "", "date_part"], [6, 4, 1, "", "fourier_df"], [6, 4, 1, "", "fourier_series"], [6, 4, 1, "", "random_datepart"], [6, 4, 1, "", "seasonal_independent_match"], [6, 4, 1, "", "seasonal_int"], [6, 4, 1, "", "seasonal_window_match"]], "autots.tools.shaping": [[6, 1, 1, "", "NumericTransformer"], [6, 4, 1, "", "clean_weights"], [6, 4, 1, "", "df_cleanup"], [6, 4, 1, "", "freq_to_timedelta"], [6, 4, 1, "", "infer_frequency"], [6, 4, 1, "", "long_to_wide"], [6, 4, 1, "", "simple_train_test_split"], [6, 4, 1, "", "split_digits_and_non_digits"], [6, 4, 1, "", "subset_series"], [6, 4, 1, "", "wide_to_3d"]], "autots.tools.shaping.NumericTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.thresholding": [[6, 1, 1, "", "NonparametricThreshold"], [6, 4, 1, "", "consecutive_groups"], [6, 4, 1, "", "nonparametric"]], "autots.tools.thresholding.NonparametricThreshold": [[6, 2, 1, "", "compare_to_epsilon"], [6, 2, 1, "", "find_epsilon"], [6, 2, 1, "", "prune_anoms"], [6, 2, 1, "", "score_anomalies"]], "autots.tools.transform": [[6, 1, 1, "", "AlignLastDiff"], [6, 1, 1, "", "AlignLastValue"], [6, 1, 1, "", "AnomalyRemoval"], [6, 1, 1, "", "BKBandpassFilter"], [6, 1, 1, "", "BTCD"], [6, 1, 1, "", "CenterLastValue"], [6, 1, 1, "", "CenterSplit"], [6, 1, 1, "", "ClipOutliers"], [6, 1, 1, "", "Cointegration"], [6, 1, 1, "", "CumSumTransformer"], [6, 3, 1, "", "DatepartRegression"], [6, 1, 1, "", "DatepartRegressionTransformer"], [6, 1, 1, "", "Detrend"], [6, 1, 1, "", "DiffSmoother"], [6, 1, 1, "", "DifferencedTransformer"], [6, 1, 1, "", "Discretize"], [6, 1, 1, "", "EWMAFilter"], [6, 1, 1, "", "EmptyTransformer"], [6, 1, 1, "", "FFTDecomposition"], [6, 1, 1, "", "FFTFilter"], [6, 1, 1, "", "FastICA"], [6, 1, 1, "", "GeneralTransformer"], [6, 1, 1, "", "HPFilter"], [6, 1, 1, "", "HistoricValues"], [6, 1, 1, "", "HolidayTransformer"], [6, 1, 1, "", "IntermittentOccurrence"], [6, 1, 1, "", "KalmanSmoothing"], [6, 1, 1, "", "LevelShiftMagic"], [6, 3, 1, "", "LevelShiftTransformer"], [6, 1, 1, "", "LocalLinearTrend"], [6, 1, 1, "", "MeanDifference"], [6, 1, 1, "", "PCA"], [6, 1, 1, "", "PctChangeTransformer"], [6, 1, 1, "", "PositiveShift"], [6, 4, 1, "", "RandomTransform"], [6, 1, 1, "", "RegressionFilter"], [6, 1, 1, "", "ReplaceConstant"], [6, 1, 1, "", "RollingMeanTransformer"], [6, 1, 1, "", 
"Round"], [6, 1, 1, "", "STLFilter"], [6, 1, 1, "", "ScipyFilter"], [6, 1, 1, "", "SeasonalDifference"], [6, 1, 1, "", "SinTrend"], [6, 1, 1, "", "Slice"], [6, 1, 1, "", "StatsmodelsFilter"], [6, 4, 1, "", "bkfilter_st"], [6, 4, 1, "", "clip_outliers"], [6, 4, 1, "", "exponential_decay"], [6, 4, 1, "", "get_transformer_params"], [6, 4, 1, "", "random_cleaners"], [6, 4, 1, "", "remove_outliers"], [6, 4, 1, "", "simple_context_slicer"], [6, 4, 1, "", "transformer_list_to_dict"]], "autots.tools.transform.AlignLastDiff": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.AlignLastValue": [[6, 2, 1, "", "find_centerpoint"], [6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.AnomalyRemoval": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_anomaly_classifier"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "score_to_anomaly"], [6, 2, 1, "", "transform"]], "autots.tools.transform.BKBandpassFilter": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.BTCD": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.CenterLastValue": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.CenterSplit": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.ClipOutliers": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Cointegration": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.CumSumTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.DatepartRegressionTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "impute"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Detrend": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.DiffSmoother": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "transform"]], "autots.tools.transform.DifferencedTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Discretize": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.EWMAFilter": [[6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "transform"]], 
"autots.tools.transform.EmptyTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.FFTDecomposition": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.FFTFilter": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.FastICA": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.GeneralTransformer": [[6, 2, 1, "", "fill_na"], [6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "retrieve_transformer"], [6, 2, 1, "", "transform"]], "autots.tools.transform.HPFilter": [[6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "transform"]], "autots.tools.transform.HistoricValues": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.HolidayTransformer": [[6, 2, 1, "", "dates_to_holidays"], [6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.IntermittentOccurrence": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.KalmanSmoothing": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.LevelShiftMagic": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.LocalLinearTrend": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.MeanDifference": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.PCA": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.PctChangeTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.PositiveShift": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.RegressionFilter": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.ReplaceConstant": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.RollingMeanTransformer": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, 
"", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Round": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.STLFilter": [[6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "transform"]], "autots.tools.transform.ScipyFilter": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.SeasonalDifference": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.SinTrend": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_sin"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.Slice": [[6, 2, 1, "", "fit"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "get_new_params"], [6, 2, 1, "", "inverse_transform"], [6, 2, 1, "", "transform"]], "autots.tools.transform.StatsmodelsFilter": [[6, 2, 1, "", "bkfilter"], [6, 2, 1, "", "cffilter"], [6, 2, 1, "", "convolution_filter"], [6, 2, 1, "", "fit_transform"], [6, 2, 1, "", "transform"]], "autots.tools.window_functions": [[6, 4, 1, "", "chunk_reshape"], [6, 4, 1, "", "last_window"], [6, 4, 1, "", "np_2d_arange"], [6, 4, 1, "", "retrieve_closest_indices"], [6, 4, 1, "", "rolling_window_view"], [6, 4, 1, "", "sliding_window_view"], [6, 4, 1, "", "window_id_maker"], [6, 4, 1, "", "window_lin_reg"], [6, 4, 1, "", "window_lin_reg_mean"], [6, 4, 1, "", "window_lin_reg_mean_no_nan"], [6, 4, 1, "", "window_maker"], [6, 4, 1, "", "window_maker_2"], [6, 4, 1, "", "window_maker_3"], [6, 4, 1, "", "window_sum_mean"], [6, 4, 1, "", "window_sum_mean_nan_tail"], [6, 4, 1, "", "window_sum_nan_mean"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:attribute", "4": "py:function", "5": "py:data"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "attribute", "Python attribute"], "4": ["py", "function", "Python function"], "5": ["py", "data", "Python data"]}, "titleterms": {"autot": [0, 1, 2, 3, 4, 5, 6, 7, 8], "instal": [0, 7, 9], "get": 0, "start": 0, "modul": [0, 1, 2, 3, 4, 5, 6], "api": 0, "indic": 0, "tabl": [0, 7, 9], "packag": [1, 2, 3, 4, 5, 6, 9], "subpackag": 1, "content": [1, 2, 3, 4, 5, 6, 7, 9], "dataset": 2, "submodul": [2, 3, 4, 5, 6], "fred": 2, "evalu": 3, "anomaly_detector": 3, "auto_model": 3, "auto_t": 3, "benchmark": [3, 9], "event_forecast": 3, "metric": [3, 9], "valid": [3, 9], "model": [4, 9], "arch": 4, "base": 4, "basic": [4, 7], "cassandra": 4, "dnn": 4, "ensembl": [4, 9], "gluont": 4, "greykit": 4, "matrix_var": 4, "mlensembl": 4, "model_list": 4, "neural_forecast": 4, "prophet": 4, "pytorch": 4, "sklearn": 4, "statsmodel": 4, "tfp": 4, "tide": 4, "templat": [5, 9], "gener": 5, "tool": 6, "anomaly_util": 6, "calendar": 6, "cointegr": 6, "cpu_count": 6, "fast_kalman": 6, "usag": 6, "exampl": [6, 9], "fft": 6, "hierarchi": [6, 9], "holidai": 6, "imput": 6, "lunar": 6, "percentil": 6, "probabilist": 6, "profil": 6, "regressor": [6, 9], "season": 6, "shape": 6, "threshold": 6, "transform": [6, 9], "window_funct": 6, "intro": 7, "us": [7, 9], "tip": 7, "speed": [7, 9], "larg": 7, 
"data": [7, 9], "how": 7, "contribut": 7, "tutori": 9, "extend": 9, "A": 9, "simpl": 9, "import": 9, "you": 9, "can": 9, "tailor": 9, "process": 9, "few": 9, "wai": 9, "what": 9, "worri": 9, "about": 9, "cross": 9, "anoth": 9, "list": 9, "deploy": 9, "export": 9, "run": 9, "just": 9, "One": 9, "group": 9, "forecast": 9, "depend": 9, "version": 9, "requir": 9, "option": 9, "safest": 9, "bet": 9, "intel": 9, "conda": 9, "channel": 9, "sometim": 9, "faster": 9, "also": 9, "more": 9, "prone": 9, "bug": 9, "caveat": 9, "advic": 9, "mysteri": 9, "crash": 9, "seri": 9, "id": 9, "realli": 9, "need": 9, "uniqu": 9, "column": 9, "name": 9, "all": 9, "wide": 9, "short": 9, "train": 9, "histori": 9, "ad": 9, "other": 9, "inform": 9, "simul": 9, "event": 9, "risk": 9, "anomali": 9, "detect": 9, "hack": 9, "pass": 9, "paramet": 9, "aren": 9, "t": 9, "otherwis": 9, "avail": 9, "categor": 9, "custom": 9, "unusu": 9, "frequenc": 9, "independ": 9, "note": 9, "regress": 9}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 60}, "alltitles": {"AutoTS": [[0, "autots"], [7, "autots"]], "Installation": [[0, "installation"], [7, "id1"]], "Getting Started": [[0, "getting-started"]], "Modules API": [[0, "modules-api"]], "Indices and tables": [[0, "indices-and-tables"]], "autots package": [[1, "autots-package"]], "Subpackages": [[1, "subpackages"]], "Module contents": [[1, "module-autots"], [2, "module-autots.datasets"], [3, "module-autots.evaluator"], [4, "module-autots.models"], [5, "module-autots.templates"], [6, "module-autots.tools"]], "autots.datasets package": [[2, "autots-datasets-package"]], "Submodules": [[2, "submodules"], [3, "submodules"], [4, "submodules"], [5, "submodules"], [6, "submodules"]], "autots.datasets.fred module": [[2, "module-autots.datasets.fred"]], "autots.evaluator package": [[3, "autots-evaluator-package"]], "autots.evaluator.anomaly_detector module": [[3, "module-autots.evaluator.anomaly_detector"]], "autots.evaluator.auto_model module": [[3, "module-autots.evaluator.auto_model"]], "autots.evaluator.auto_ts module": [[3, "module-autots.evaluator.auto_ts"]], "autots.evaluator.benchmark module": [[3, "module-autots.evaluator.benchmark"]], "autots.evaluator.event_forecasting module": [[3, "module-autots.evaluator.event_forecasting"]], "autots.evaluator.metrics module": [[3, "module-autots.evaluator.metrics"]], "autots.evaluator.validation module": [[3, "module-autots.evaluator.validation"]], "autots.models package": [[4, "autots-models-package"]], "autots.models.arch module": [[4, "module-autots.models.arch"]], "autots.models.base module": [[4, "module-autots.models.base"]], "autots.models.basics module": [[4, "module-autots.models.basics"]], "autots.models.cassandra module": [[4, "module-autots.models.cassandra"]], "autots.models.dnn module": [[4, "module-autots.models.dnn"]], "autots.models.ensemble module": [[4, "module-autots.models.ensemble"]], "autots.models.gluonts module": [[4, "module-autots.models.gluonts"]], "autots.models.greykite module": [[4, "module-autots.models.greykite"]], "autots.models.matrix_var module": [[4, "module-autots.models.matrix_var"]], "autots.models.mlensemble module": [[4, "module-autots.models.mlensemble"]], "autots.models.model_list module": [[4, "module-autots.models.model_list"]], 
"autots.models.neural_forecast module": [[4, "module-autots.models.neural_forecast"]], "autots.models.prophet module": [[4, "module-autots.models.prophet"]], "autots.models.pytorch module": [[4, "module-autots.models.pytorch"]], "autots.models.sklearn module": [[4, "module-autots.models.sklearn"]], "autots.models.statsmodels module": [[4, "module-autots.models.statsmodels"]], "autots.models.tfp module": [[4, "module-autots.models.tfp"]], "autots.models.tide module": [[4, "module-autots.models.tide"]], "autots.templates package": [[5, "autots-templates-package"]], "autots.templates.general module": [[5, "module-autots.templates.general"]], "autots.tools package": [[6, "autots-tools-package"]], "autots.tools.anomaly_utils module": [[6, "module-autots.tools.anomaly_utils"]], "autots.tools.calendar module": [[6, "module-autots.tools.calendar"]], "autots.tools.cointegration module": [[6, "module-autots.tools.cointegration"]], "autots.tools.cpu_count module": [[6, "module-autots.tools.cpu_count"]], "autots.tools.fast_kalman module": [[6, "module-autots.tools.fast_kalman"]], "Usage example": [[6, "usage-example"]], "autots.tools.fft module": [[6, "module-autots.tools.fft"]], "autots.tools.hierarchial module": [[6, "module-autots.tools.hierarchial"]], "autots.tools.holiday module": [[6, "module-autots.tools.holiday"]], "autots.tools.impute module": [[6, "module-autots.tools.impute"]], "autots.tools.lunar module": [[6, "module-autots.tools.lunar"]], "autots.tools.percentile module": [[6, "module-autots.tools.percentile"]], "autots.tools.probabilistic module": [[6, "module-autots.tools.probabilistic"]], "autots.tools.profile module": [[6, "module-autots.tools.profile"]], "autots.tools.regressor module": [[6, "module-autots.tools.regressor"]], "autots.tools.seasonal module": [[6, "module-autots.tools.seasonal"]], "autots.tools.shaping module": [[6, "module-autots.tools.shaping"]], "autots.tools.thresholding module": [[6, "module-autots.tools.thresholding"]], "autots.tools.transform module": [[6, "module-autots.tools.transform"]], "autots.tools.window_functions module": [[6, "module-autots.tools.window_functions"]], "Intro": [[7, "intro"]], "Table of Contents": [[7, "table-of-contents"], [9, "table-of-contents"]], "Basic Use": [[7, "id2"]], "Tips for Speed and Large Data:": [[7, "id3"]], "How to Contribute:": [[7, "how-to-contribute"]], "autots": [[8, "autots"]], "Tutorial": [[9, "tutorial"]], "Extended Tutorial": [[9, "extended-tutorial"]], "A simple example": [[9, "id1"]], "Import of data": [[9, "import-of-data"]], "You can tailor the process in a few ways\u2026": [[9, "you-can-tailor-the-process-in-a-few-ways"]], "What to Worry About": [[9, "what-to-worry-about"]], "Validation and Cross Validation": [[9, "id2"]], "Another Example:": [[9, "id3"]], "Model Lists": [[9, "id4"]], "Deployment and Template Import/Export": [[9, "deployment-and-template-import-export"]], "Running Just One Model": [[9, "id5"]], "Metrics": [[9, "id6"]], "Hierarchial and Grouped Forecasts": [[9, "hierarchial-and-grouped-forecasts"]], "Ensembles": [[9, "id7"]], "Installation and Dependency Versioning": [[9, "installation-and-dependency-versioning"]], "Requirements:": [[9, "requirements"]], "Optional Packages": [[9, "optional-packages"]], "Safest bet for installation:": [[9, "safest-bet-for-installation"]], "Intel conda channel installation (sometime faster, also, more prone to bugs)": [[9, "intel-conda-channel-installation-sometime-faster-also-more-prone-to-bugs"]], "Speed Benchmark": [[9, "speed-benchmark"]], "Caveats and 
Advice": [[9, "caveats-and-advice"]], "Mysterious crashes": [[9, "mysterious-crashes"]], "Series IDs really need to be unique (or column names need to be all unique in wide data)": [[9, "series-ids-really-need-to-be-unique-or-column-names-need-to-be-all-unique-in-wide-data"]], "Short Training History": [[9, "short-training-history"]], "Adding regressors and other information": [[9, "adding-regressors-and-other-information"]], "Simulation Forecasting": [[9, "id8"]], "Event Risk Forecasting and Anomaly Detection": [[9, "event-risk-forecasting-and-anomaly-detection"]], "A Hack for Passing in Parameters (that aren\u2019t otherwise available)": [[9, "a-hack-for-passing-in-parameters-that-aren-t-otherwise-available"]], "Categorical Data": [[9, "categorical-data"]], "Custom and Unusual Frequencies": [[9, "custom-and-unusual-frequencies"]], "Using the Transformers independently": [[9, "using-the-transformers-independently"]], "Note on ~Regression Models": [[9, "note-on-regression-models"]], "Models": [[9, "id9"]]}, "indexentries": {"anomalydetector (class in autots)": [[1, "autots.AnomalyDetector"]], "autots (class in autots)": [[1, "autots.AutoTS"]], "cassandra (class in autots)": [[1, "autots.Cassandra"]], "eventriskforecast (class in autots)": [[1, "autots.EventRiskForecast"]], "generaltransformer (class in autots)": [[1, "autots.GeneralTransformer"]], "holidaydetector (class in autots)": [[1, "autots.HolidayDetector"]], "randomtransform() (in module autots)": [[1, "autots.RandomTransform"]], "transformts (in module autots)": [[1, "autots.TransformTS"]], "analyze_trend() (autots.cassandra method)": [[1, "autots.Cassandra.analyze_trend"]], "anomalies (autots.cassandra..anomaly_detector attribute)": [[1, "autots.Cassandra..anomaly_detector.anomalies"]], "auto_fit() (autots.cassandra method)": [[1, "autots.Cassandra.auto_fit"]], "autots": [[1, "module-autots"]], "back_forecast() (autots.autots method)": [[1, "autots.AutoTS.back_forecast"]], "base_scaler() (autots.cassandra method)": [[1, "autots.Cassandra.base_scaler"]], "best_model (autots.autots attribute)": [[1, "autots.AutoTS.best_model"]], "best_model_ensemble (autots.autots attribute)": [[1, "autots.AutoTS.best_model_ensemble"]], "best_model_name (autots.autots attribute)": [[1, "autots.AutoTS.best_model_name"]], "best_model_params (autots.autots attribute)": [[1, "autots.AutoTS.best_model_params"]], "best_model_per_series_mape() (autots.autots method)": [[1, "autots.AutoTS.best_model_per_series_mape"]], "best_model_per_series_score() (autots.autots method)": [[1, "autots.AutoTS.best_model_per_series_score"]], "best_model_transformation_params (autots.autots attribute)": [[1, "autots.AutoTS.best_model_transformation_params"]], "compare_actual_components() (autots.cassandra method)": [[1, "autots.Cassandra.compare_actual_components"]], "create_forecast_index() (autots.cassandra method)": [[1, "autots.Cassandra.create_forecast_index"]], "create_lagged_regressor() (in module autots)": [[1, "autots.create_lagged_regressor"]], "create_regressor() (in module autots)": [[1, "autots.create_regressor"]], "create_t() (autots.cassandra method)": [[1, "autots.Cassandra.create_t"]], "cross_validate() (autots.cassandra method)": [[1, "autots.Cassandra.cross_validate"]], "dates_to_holidays() (autots.cassandra.holiday_detector method)": [[1, "autots.Cassandra.holiday_detector.dates_to_holidays"]], "dates_to_holidays() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.dates_to_holidays"]], "detect() (autots.anomalydetector method)": [[1, 
"autots.AnomalyDetector.detect"]], "detect() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.detect"]], "df_wide_numeric (autots.autots attribute)": [[1, "autots.AutoTS.df_wide_numeric"]], "diagnose_params() (autots.autots method)": [[1, "autots.AutoTS.diagnose_params"]], "expand_horizontal() (autots.autots method)": [[1, "autots.AutoTS.expand_horizontal"]], "export_best_model() (autots.autots method)": [[1, "autots.AutoTS.export_best_model"]], "export_template() (autots.autots method)": [[1, "autots.AutoTS.export_template"]], "failure_rate() (autots.autots method)": [[1, "autots.AutoTS.failure_rate"]], "feature_importance() (autots.cassandra method)": [[1, "autots.Cassandra.feature_importance"]], "fill_na() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.fill_na"]], "fit() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.fit"]], "fit() (autots.autots method)": [[1, "autots.AutoTS.fit"]], "fit() (autots.cassandra method)": [[1, "autots.Cassandra.fit"], [1, "id0"]], "fit() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.fit"], [1, "id9"]], "fit() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.fit"]], "fit() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.fit"]], "fit_anomaly_classifier() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.fit_anomaly_classifier"]], "fit_data() (autots.autots method)": [[1, "autots.AutoTS.fit_data"]], "fit_data() (autots.cassandra method)": [[1, "autots.Cassandra.fit_data"]], "fit_transform() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.fit_transform"]], "generate_historic_risk_array() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.generate_historic_risk_array"]], "generate_historic_risk_array() (autots.eventriskforecast static method)": [[1, "id10"]], "generate_result_windows() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.generate_result_windows"], [1, "id11"]], "generate_risk_array() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.generate_risk_array"]], "generate_risk_array() (autots.eventriskforecast static method)": [[1, "id12"]], "get_metric_corr() (autots.autots method)": [[1, "autots.AutoTS.get_metric_corr"]], "get_new_params() (autots.anomalydetector static method)": [[1, "autots.AnomalyDetector.get_new_params"]], "get_new_params() (autots.autots static method)": [[1, "autots.AutoTS.get_new_params"]], "get_new_params() (autots.cassandra method)": [[1, "autots.Cassandra.get_new_params"], [1, "id1"]], "get_new_params() (autots.generaltransformer static method)": [[1, "autots.GeneralTransformer.get_new_params"]], "get_new_params() (autots.holidaydetector static method)": [[1, "autots.HolidayDetector.get_new_params"]], "get_params() (autots.cassandra method)": [[1, "autots.Cassandra.get_params"]], "holiday_count (autots.cassandra. attribute)": [[1, "autots.Cassandra..holiday_count"]], "holidays (autots.cassandra. 
attribute)": [[1, "autots.Cassandra..holidays"]], "horizontal_per_generation() (autots.autots method)": [[1, "autots.AutoTS.horizontal_per_generation"]], "horizontal_to_df() (autots.autots method)": [[1, "autots.AutoTS.horizontal_to_df"]], "import_best_model() (autots.autots method)": [[1, "autots.AutoTS.import_best_model"]], "import_results() (autots.autots method)": [[1, "autots.AutoTS.import_results"]], "import_template() (autots.autots method)": [[1, "autots.AutoTS.import_template"]], "infer_frequency() (in module autots)": [[1, "autots.infer_frequency"]], "inverse_transform() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.inverse_transform"]], "list_failed_model_types() (autots.autots method)": [[1, "autots.AutoTS.list_failed_model_types"]], "load_artificial() (in module autots)": [[1, "autots.load_artificial"]], "load_daily() (in module autots)": [[1, "autots.load_daily"]], "load_hourly() (in module autots)": [[1, "autots.load_hourly"]], "load_linear() (in module autots)": [[1, "autots.load_linear"]], "load_live_daily() (in module autots)": [[1, "autots.load_live_daily"]], "load_monthly() (in module autots)": [[1, "autots.load_monthly"]], "load_sine() (in module autots)": [[1, "autots.load_sine"]], "load_template() (autots.autots method)": [[1, "autots.AutoTS.load_template"]], "load_weekdays() (in module autots)": [[1, "autots.load_weekdays"]], "load_weekly() (in module autots)": [[1, "autots.load_weekly"]], "load_yearly() (in module autots)": [[1, "autots.load_yearly"]], "long_to_wide() (in module autots)": [[1, "autots.long_to_wide"]], "model_forecast() (in module autots)": [[1, "autots.model_forecast"]], "model_results (autots.autots.initial_results attribute)": [[1, "autots.AutoTS.initial_results.model_results"]], "module": [[1, "module-autots"], [2, "module-autots.datasets"], [2, "module-autots.datasets.fred"], [3, "module-autots.evaluator"], [3, "module-autots.evaluator.anomaly_detector"], [3, "module-autots.evaluator.auto_model"], [3, "module-autots.evaluator.auto_ts"], [3, "module-autots.evaluator.benchmark"], [3, "module-autots.evaluator.event_forecasting"], [3, "module-autots.evaluator.metrics"], [3, "module-autots.evaluator.validation"], [4, "module-autots.models"], [4, "module-autots.models.arch"], [4, "module-autots.models.base"], [4, "module-autots.models.basics"], [4, "module-autots.models.cassandra"], [4, "module-autots.models.dnn"], [4, "module-autots.models.ensemble"], [4, "module-autots.models.gluonts"], [4, "module-autots.models.greykite"], [4, "module-autots.models.matrix_var"], [4, "module-autots.models.mlensemble"], [4, "module-autots.models.model_list"], [4, "module-autots.models.neural_forecast"], [4, "module-autots.models.prophet"], [4, "module-autots.models.pytorch"], [4, "module-autots.models.sklearn"], [4, "module-autots.models.statsmodels"], [4, "module-autots.models.tfp"], [4, "module-autots.models.tide"], [5, "module-autots.templates"], [5, "module-autots.templates.general"], [6, "module-autots.tools"], [6, "module-autots.tools.anomaly_utils"], [6, "module-autots.tools.calendar"], [6, "module-autots.tools.cointegration"], [6, "module-autots.tools.cpu_count"], [6, "module-autots.tools.fast_kalman"], [6, "module-autots.tools.fft"], [6, "module-autots.tools.hierarchial"], [6, "module-autots.tools.holiday"], [6, "module-autots.tools.impute"], [6, "module-autots.tools.lunar"], [6, "module-autots.tools.percentile"], [6, "module-autots.tools.probabilistic"], [6, "module-autots.tools.profile"], [6, "module-autots.tools.regressor"], [6, 
"module-autots.tools.seasonal"], [6, "module-autots.tools.shaping"], [6, "module-autots.tools.thresholding"], [6, "module-autots.tools.transform"], [6, "module-autots.tools.window_functions"]], "mosaic_to_df() (autots.autots method)": [[1, "autots.AutoTS.mosaic_to_df"]], "next_fit() (autots.cassandra method)": [[1, "autots.Cassandra.next_fit"]], "params (autots.cassandra. attribute)": [[1, "autots.Cassandra..params"]], "parse_best_model() (autots.autots method)": [[1, "autots.AutoTS.parse_best_model"]], "plot() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.plot"]], "plot() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.plot"], [1, "id13"]], "plot() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.plot"]], "plot_anomaly() (autots.holidaydetector method)": [[1, "autots.HolidayDetector.plot_anomaly"]], "plot_back_forecast() (autots.autots method)": [[1, "autots.AutoTS.plot_back_forecast"]], "plot_backforecast() (autots.autots method)": [[1, "autots.AutoTS.plot_backforecast"]], "plot_components() (autots.cassandra method)": [[1, "autots.Cassandra.plot_components"], [1, "id2"]], "plot_eval() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.plot_eval"]], "plot_forecast() (autots.cassandra method)": [[1, "autots.Cassandra.plot_forecast"], [1, "id3"]], "plot_generation_loss() (autots.autots method)": [[1, "autots.AutoTS.plot_generation_loss"]], "plot_horizontal() (autots.autots method)": [[1, "autots.AutoTS.plot_horizontal"]], "plot_horizontal_model_count() (autots.autots method)": [[1, "autots.AutoTS.plot_horizontal_model_count"]], "plot_horizontal_per_generation() (autots.autots method)": [[1, "autots.AutoTS.plot_horizontal_per_generation"]], "plot_horizontal_transformers() (autots.autots method)": [[1, "autots.AutoTS.plot_horizontal_transformers"]], "plot_metric_corr() (autots.autots method)": [[1, "autots.AutoTS.plot_metric_corr"]], "plot_per_series_error() (autots.autots method)": [[1, "autots.AutoTS.plot_per_series_error"]], "plot_per_series_mape() (autots.autots method)": [[1, "autots.AutoTS.plot_per_series_mape"]], "plot_per_series_smape() (autots.autots method)": [[1, "autots.AutoTS.plot_per_series_smape"]], "plot_things() (autots.cassandra method)": [[1, "autots.Cassandra.plot_things"]], "plot_transformer_failure_rate() (autots.autots method)": [[1, "autots.AutoTS.plot_transformer_failure_rate"]], "plot_trend() (autots.cassandra method)": [[1, "autots.Cassandra.plot_trend"], [1, "id4"]], "plot_validations() (autots.autots method)": [[1, "autots.AutoTS.plot_validations"]], "predict() (autots.autots method)": [[1, "autots.AutoTS.predict"]], "predict() (autots.cassandra method)": [[1, "autots.Cassandra.predict"], [1, "id5"]], "predict() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.predict"], [1, "id14"]], "predict_historic() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.predict_historic"], [1, "id15"]], "predict_new_product() (autots.cassandra method)": [[1, "autots.Cassandra.predict_new_product"]], "predict_x_array (autots.cassandra. attribute)": [[1, "autots.Cassandra..predict_x_array"]], "predicted_trend (autots.cassandra. 
attribute)": [[1, "autots.Cassandra..predicted_trend"]], "process_components() (autots.cassandra method)": [[1, "autots.Cassandra.process_components"]], "regression_check (autots.autots attribute)": [[1, "autots.AutoTS.regression_check"]], "results() (autots.autots method)": [[1, "autots.AutoTS.results"]], "retrieve_transformer() (autots.generaltransformer class method)": [[1, "autots.GeneralTransformer.retrieve_transformer"]], "retrieve_validation_forecasts() (autots.autots method)": [[1, "autots.AutoTS.retrieve_validation_forecasts"]], "return_components() (autots.cassandra method)": [[1, "autots.Cassandra.return_components"], [1, "id6"]], "rolling_trend() (autots.cassandra method)": [[1, "autots.Cassandra.rolling_trend"]], "save_template() (autots.autots method)": [[1, "autots.AutoTS.save_template"]], "scale_data() (autots.cassandra method)": [[1, "autots.Cassandra.scale_data"]], "score_per_series (autots.autots attribute)": [[1, "autots.AutoTS.score_per_series"]], "score_to_anomaly() (autots.anomalydetector method)": [[1, "autots.AnomalyDetector.score_to_anomaly"]], "scores (autots.cassandra..anomaly_detector attribute)": [[1, "autots.Cassandra..anomaly_detector.scores"]], "set_limit() (autots.eventriskforecast method)": [[1, "autots.EventRiskForecast.set_limit"]], "set_limit() (autots.eventriskforecast static method)": [[1, "id16"]], "to_origin_space() (autots.cassandra method)": [[1, "autots.Cassandra.to_origin_space"]], "transform() (autots.generaltransformer method)": [[1, "autots.GeneralTransformer.transform"]], "treatment_causal_impact() (autots.cassandra method)": [[1, "autots.Cassandra.treatment_causal_impact"]], "trend_train (autots.cassandra. attribute)": [[1, "autots.Cassandra..trend_train"]], "validation_agg() (autots.autots method)": [[1, "autots.AutoTS.validation_agg"]], "x_array (autots.cassandra. 
attribute)": [[1, "autots.Cassandra..x_array"]], "autots.datasets": [[2, "module-autots.datasets"]], "autots.datasets.fred": [[2, "module-autots.datasets.fred"]], "get_fred_data() (in module autots.datasets.fred)": [[2, "autots.datasets.fred.get_fred_data"]], "load_artificial() (in module autots.datasets)": [[2, "autots.datasets.load_artificial"]], "load_daily() (in module autots.datasets)": [[2, "autots.datasets.load_daily"]], "load_hourly() (in module autots.datasets)": [[2, "autots.datasets.load_hourly"]], "load_linear() (in module autots.datasets)": [[2, "autots.datasets.load_linear"]], "load_live_daily() (in module autots.datasets)": [[2, "autots.datasets.load_live_daily"]], "load_monthly() (in module autots.datasets)": [[2, "autots.datasets.load_monthly"]], "load_sine() (in module autots.datasets)": [[2, "autots.datasets.load_sine"]], "load_weekdays() (in module autots.datasets)": [[2, "autots.datasets.load_weekdays"]], "load_weekly() (in module autots.datasets)": [[2, "autots.datasets.load_weekly"]], "load_yearly() (in module autots.datasets)": [[2, "autots.datasets.load_yearly"]], "load_zeroes() (in module autots.datasets)": [[2, "autots.datasets.load_zeroes"]], "anomalydetector (class in autots.evaluator.anomaly_detector)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector"]], "autots (class in autots.evaluator.auto_ts)": [[3, "autots.evaluator.auto_ts.AutoTS"]], "benchmark (class in autots.evaluator.benchmark)": [[3, "autots.evaluator.benchmark.Benchmark"]], "eventriskforecast (class in autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast"]], "holidaydetector (class in autots.evaluator.anomaly_detector)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector"]], "modelmonster() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.ModelMonster"]], "modelprediction (class in autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.ModelPrediction"]], "newgenetictemplate() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.NewGeneticTemplate"]], "randomtemplate() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.RandomTemplate"]], "templateevalobject (class in autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.TemplateEvalObject"]], "templatewizard() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.TemplateWizard"]], "uniquetemplates() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.UniqueTemplates"]], "array_last_val() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.array_last_val"]], "autots.evaluator": [[3, "module-autots.evaluator"]], "autots.evaluator.anomaly_detector": [[3, "module-autots.evaluator.anomaly_detector"]], "autots.evaluator.auto_model": [[3, "module-autots.evaluator.auto_model"]], "autots.evaluator.auto_ts": [[3, "module-autots.evaluator.auto_ts"]], "autots.evaluator.benchmark": [[3, "module-autots.evaluator.benchmark"]], "autots.evaluator.event_forecasting": [[3, "module-autots.evaluator.event_forecasting"]], "autots.evaluator.metrics": [[3, "module-autots.evaluator.metrics"]], "autots.evaluator.validation": [[3, "module-autots.evaluator.validation"]], "back_forecast() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.back_forecast"]], "back_forecast() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.back_forecast"]], "best_model (autots.evaluator.auto_ts.autots attribute)": [[3, 
"autots.evaluator.auto_ts.AutoTS.best_model"]], "best_model_ensemble (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_ensemble"]], "best_model_name (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_name"]], "best_model_params (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_params"]], "best_model_per_series_mape() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_per_series_mape"]], "best_model_per_series_score() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_per_series_score"]], "best_model_transformation_params (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.best_model_transformation_params"]], "chi_squared_hist_distribution_loss() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.chi_squared_hist_distribution_loss"]], "concat() (autots.evaluator.auto_model.templateevalobject method)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.concat"]], "containment() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.containment"]], "contour() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.contour"]], "create_model_id() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.create_model_id"]], "dates_to_holidays() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.dates_to_holidays"]], "default_scaler() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.default_scaler"]], "detect() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.detect"]], "detect() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.detect"]], "df_wide_numeric (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.df_wide_numeric"]], "diagnose_params() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.diagnose_params"]], "dict_recombination() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.dict_recombination"]], "dwae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.dwae"]], "error_correlations() (in module autots.evaluator.auto_ts)": [[3, "autots.evaluator.auto_ts.error_correlations"]], "expand_horizontal() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.expand_horizontal"]], "export_best_model() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.export_best_model"]], "export_template() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.export_template"]], "extract_result_windows() (in module autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.extract_result_windows"]], "extract_seasonal_val_periods() (in module autots.evaluator.validation)": [[3, "autots.evaluator.validation.extract_seasonal_val_periods"]], "extract_window_index() (in module autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.extract_window_index"]], "failure_rate() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.failure_rate"]], "fake_regressor() (in module autots.evaluator.auto_ts)": [[3, 
"autots.evaluator.auto_ts.fake_regressor"]], "fit() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.fit"]], "fit() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.fit"]], "fit() (autots.evaluator.auto_model.modelprediction method)": [[3, "autots.evaluator.auto_model.ModelPrediction.fit"]], "fit() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.fit"]], "fit() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.fit"], [3, "id0"]], "fit_anomaly_classifier() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.fit_anomaly_classifier"]], "fit_data() (autots.evaluator.auto_model.modelprediction method)": [[3, "autots.evaluator.auto_model.ModelPrediction.fit_data"]], "fit_data() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.fit_data"]], "full_mae_errors (autots.evaluator.auto_model.templateevalobject attribute)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.full_mae_errors"]], "full_mae_ids (autots.evaluator.auto_model.templateevalobject attribute)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.full_mae_ids"]], "full_metric_evaluation() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.full_metric_evaluation"]], "generate_historic_risk_array() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.generate_historic_risk_array"]], "generate_historic_risk_array() (autots.evaluator.event_forecasting.eventriskforecast static method)": [[3, "id7"]], "generate_result_windows() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.generate_result_windows"], [3, "id8"]], "generate_risk_array() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.generate_risk_array"]], "generate_risk_array() (autots.evaluator.event_forecasting.eventriskforecast static method)": [[3, "id9"]], "generate_score() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.generate_score"]], "generate_score_per_series() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.generate_score_per_series"]], "generate_validation_indices() (in module autots.evaluator.validation)": [[3, "autots.evaluator.validation.generate_validation_indices"]], "get_metric_corr() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.get_metric_corr"]], "get_new_params() (autots.evaluator.anomaly_detector.anomalydetector static method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.get_new_params"]], "get_new_params() (autots.evaluator.anomaly_detector.holidaydetector static method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.get_new_params"]], "get_new_params() (autots.evaluator.auto_ts.autots static method)": [[3, "autots.evaluator.auto_ts.AutoTS.get_new_params"]], "horizontal_per_generation() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.horizontal_per_generation"]], "horizontal_template_to_model_list() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.horizontal_template_to_model_list"]], "horizontal_to_df() 
(autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.horizontal_to_df"]], "import_best_model() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.import_best_model"]], "import_results() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.import_results"]], "import_template() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.import_template"]], "kde() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.kde"]], "kde_kl_distance() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.kde_kl_distance"]], "kl_divergence() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.kl_divergence"]], "linearity() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.linearity"]], "list_failed_model_types() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.list_failed_model_types"]], "load() (autots.evaluator.auto_model.templateevalobject method)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.load"]], "load_template() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.load_template"]], "mae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mae"]], "mda() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mda"]], "mean_absolute_differential_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mean_absolute_differential_error"]], "mean_absolute_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mean_absolute_error"]], "medae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.medae"]], "median_absolute_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.median_absolute_error"]], "mlvb() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mlvb"]], "model_forecast() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.model_forecast"]], "model_results (autots.evaluator.auto_ts.autots.initial_results attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.initial_results.model_results"]], "mosaic_to_df() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.mosaic_to_df"]], "mqae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.mqae"]], "msle() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.msle"]], "numpy_ffill() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.numpy_ffill"]], "oda() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.oda"]], "parse_best_model() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.parse_best_model"]], "pinball_loss() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.pinball_loss"]], "plot() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.plot"]], "plot() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.plot"]], "plot() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.plot"], [3, "id10"]], "plot_anomaly() (autots.evaluator.anomaly_detector.holidaydetector method)": [[3, "autots.evaluator.anomaly_detector.HolidayDetector.plot_anomaly"]], "plot_back_forecast() (autots.evaluator.auto_ts.autots 
method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_back_forecast"]], "plot_backforecast() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_backforecast"]], "plot_eval() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.plot_eval"]], "plot_generation_loss() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_generation_loss"]], "plot_horizontal() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_horizontal"]], "plot_horizontal_model_count() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_horizontal_model_count"]], "plot_horizontal_per_generation() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_horizontal_per_generation"]], "plot_horizontal_transformers() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_horizontal_transformers"]], "plot_metric_corr() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_metric_corr"]], "plot_per_series_error() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_per_series_error"]], "plot_per_series_mape() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_per_series_mape"]], "plot_per_series_smape() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_per_series_smape"]], "plot_transformer_failure_rate() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_transformer_failure_rate"]], "plot_validations() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.plot_validations"]], "precomp_wasserstein() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.precomp_wasserstein"]], "predict() (autots.evaluator.auto_model.modelprediction method)": [[3, "autots.evaluator.auto_model.ModelPrediction.predict"]], "predict() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.predict"]], "predict() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.predict"], [3, "id11"]], "predict_historic() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.predict_historic"], [3, "id12"]], "qae() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.qae"]], "random_model() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.random_model"]], "regression_check (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.regression_check"]], "remove_leading_zeros() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.remove_leading_zeros"]], "results() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.results"]], "retrieve_validation_forecasts() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.retrieve_validation_forecasts"]], "rmse() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.rmse"]], "root_mean_square_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.root_mean_square_error"]], "rps() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.rps"]], "run() (autots.evaluator.benchmark.benchmark method)": [[3, 
"autots.evaluator.benchmark.Benchmark.run"]], "save() (autots.evaluator.auto_model.templateevalobject method)": [[3, "autots.evaluator.auto_model.TemplateEvalObject.save"]], "save_template() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.save_template"]], "scaled_pinball_loss() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.scaled_pinball_loss"]], "score_per_series (autots.evaluator.auto_ts.autots attribute)": [[3, "autots.evaluator.auto_ts.AutoTS.score_per_series"]], "score_to_anomaly() (autots.evaluator.anomaly_detector.anomalydetector method)": [[3, "autots.evaluator.anomaly_detector.AnomalyDetector.score_to_anomaly"]], "set_limit() (autots.evaluator.event_forecasting.eventriskforecast method)": [[3, "autots.evaluator.event_forecasting.EventRiskForecast.set_limit"]], "set_limit() (autots.evaluator.event_forecasting.eventriskforecast static method)": [[3, "id13"]], "set_limit_forecast() (in module autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.set_limit_forecast"]], "set_limit_forecast_historic() (in module autots.evaluator.event_forecasting)": [[3, "autots.evaluator.event_forecasting.set_limit_forecast_historic"]], "smape() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.smape"]], "smoothness() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.smoothness"]], "spl() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.spl"]], "symmetric_mean_absolute_percentage_error() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.symmetric_mean_absolute_percentage_error"]], "threshold_loss() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.threshold_loss"]], "trans_dict_recomb() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.trans_dict_recomb"]], "unpack_ensemble_models() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.unpack_ensemble_models"]], "unsorted_wasserstein() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.unsorted_wasserstein"]], "validate_num_validations() (in module autots.evaluator.validation)": [[3, "autots.evaluator.validation.validate_num_validations"]], "validation_agg() (autots.evaluator.auto_ts.autots method)": [[3, "autots.evaluator.auto_ts.AutoTS.validation_agg"]], "validation_aggregation() (in module autots.evaluator.auto_model)": [[3, "autots.evaluator.auto_model.validation_aggregation"]], "wasserstein() (in module autots.evaluator.metrics)": [[3, "autots.evaluator.metrics.wasserstein"]], "arch (class in autots.models.arch)": [[4, "autots.models.arch.ARCH"]], "ardl (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.ARDL"]], "arima (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.ARIMA"]], "averagevaluenaive (class in autots.models.basics)": [[4, "autots.models.basics.AverageValueNaive"]], "balltreemultivariatemotif (class in autots.models.basics)": [[4, "autots.models.basics.BallTreeMultivariateMotif"]], "bayesianmultioutputregression (class in autots.models.cassandra)": [[4, "autots.models.cassandra.BayesianMultiOutputRegression"]], "bestnensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.BestNEnsemble"]], "cassandra (class in autots.models.cassandra)": [[4, "autots.models.cassandra.Cassandra"]], "componentanalysis (class in autots.models.sklearn)": [[4, "autots.models.sklearn.ComponentAnalysis"]], "constantnaive (class in 
autots.models.basics)": [[4, "autots.models.basics.ConstantNaive"]], "datepartregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.DatepartRegression"]], "distensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.DistEnsemble"]], "dynamicfactor (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.DynamicFactor"]], "dynamicfactormq (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.DynamicFactorMQ"]], "ets (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.ETS"]], "ensembleforecast() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.EnsembleForecast"]], "ensembletemplategenerator() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.EnsembleTemplateGenerator"]], "fbprophet (class in autots.models.prophet)": [[4, "autots.models.prophet.FBProphet"]], "fft (class in autots.models.basics)": [[4, "autots.models.basics.FFT"]], "glm (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.GLM"]], "gls (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.GLS"]], "gluonts (class in autots.models.gluonts)": [[4, "autots.models.gluonts.GluonTS"]], "greykite (class in autots.models.greykite)": [[4, "autots.models.greykite.Greykite"]], "hdistensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.HDistEnsemble"]], "horizontalensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.HorizontalEnsemble"]], "horizontaltemplategenerator() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.HorizontalTemplateGenerator"]], "kalmanstatespace (class in autots.models.basics)": [[4, "autots.models.basics.KalmanStateSpace"]], "kerasrnn (class in autots.models.dnn)": [[4, "autots.models.dnn.KerasRNN"]], "latc (class in autots.models.matrix_var)": [[4, "autots.models.matrix_var.LATC"]], "lastvaluenaive (class in autots.models.basics)": [[4, "autots.models.basics.LastValueNaive"]], "mar (class in autots.models.matrix_var)": [[4, "autots.models.matrix_var.MAR"]], "mlensemble (class in autots.models.mlensemble)": [[4, "autots.models.mlensemble.MLEnsemble"]], "metricmotif (class in autots.models.basics)": [[4, "autots.models.basics.MetricMotif"]], "modelobject (class in autots.models.base)": [[4, "autots.models.base.ModelObject"]], "mosaicensemble() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.MosaicEnsemble"]], "motif (class in autots.models.basics)": [[4, "autots.models.basics.Motif"]], "motifsimulation (class in autots.models.basics)": [[4, "autots.models.basics.MotifSimulation"]], "multivariateregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.MultivariateRegression"]], "nvar (class in autots.models.basics)": [[4, "autots.models.basics.NVAR"]], "neuralforecast (class in autots.models.neural_forecast)": [[4, "autots.models.neural_forecast.NeuralForecast"]], "neuralprophet (class in autots.models.prophet)": [[4, "autots.models.prophet.NeuralProphet"]], "predictionobject (class in autots.models.base)": [[4, "autots.models.base.PredictionObject"]], "preprocessingregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.PreprocessingRegression"]], "pytorchforecasting (class in autots.models.pytorch)": [[4, "autots.models.pytorch.PytorchForecasting"]], "rrvar (class in autots.models.matrix_var)": [[4, "autots.models.matrix_var.RRVAR"]], "rollingregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.RollingRegression"]], 
"seasonalnaive (class in autots.models.basics)": [[4, "autots.models.basics.SeasonalNaive"]], "seasonalitymotif (class in autots.models.basics)": [[4, "autots.models.basics.SeasonalityMotif"]], "sectionalmotif (class in autots.models.basics)": [[4, "autots.models.basics.SectionalMotif"]], "tfpregression (class in autots.models.tfp)": [[4, "autots.models.tfp.TFPRegression"]], "tfpregressor (class in autots.models.tfp)": [[4, "autots.models.tfp.TFPRegressor"]], "tmf (class in autots.models.matrix_var)": [[4, "autots.models.matrix_var.TMF"]], "tensorflowsts (class in autots.models.tfp)": [[4, "autots.models.tfp.TensorflowSTS"]], "theta (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.Theta"]], "tide (class in autots.models.tide)": [[4, "autots.models.tide.TiDE"]], "timecovariates (class in autots.models.tide)": [[4, "autots.models.tide.TimeCovariates"]], "timeseriesdata (class in autots.models.tide)": [[4, "autots.models.tide.TimeSeriesdata"]], "transformer (class in autots.models.dnn)": [[4, "autots.models.dnn.Transformer"]], "univariateregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.UnivariateRegression"]], "unobservedcomponents (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.UnobservedComponents"]], "var (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.VAR"]], "varmax (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.VARMAX"]], "vecm (class in autots.models.statsmodels)": [[4, "autots.models.statsmodels.VECM"]], "vectorizedmultioutputgpr (class in autots.models.sklearn)": [[4, "autots.models.sklearn.VectorizedMultiOutputGPR"]], "windowregression (class in autots.models.sklearn)": [[4, "autots.models.sklearn.WindowRegression"]], "zeroesnaive (in module autots.models.basics)": [[4, "autots.models.basics.ZeroesNaive"]], "analyze_trend() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.analyze_trend"]], "anomalies (autots.models.cassandra.cassandra..anomaly_detector attribute)": [[4, "autots.models.cassandra.Cassandra..anomaly_detector.anomalies"]], "apply_constraints() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.apply_constraints"], [4, "id0"]], "apply_constraints() (in module autots.models.base)": [[4, "autots.models.base.apply_constraints"]], "arima_seek_the_oracle() (in module autots.models.statsmodels)": [[4, "autots.models.statsmodels.arima_seek_the_oracle"]], "auto_fit() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.auto_fit"]], "auto_model_list() (in module autots.models.model_list)": [[4, "autots.models.model_list.auto_model_list"]], "autots.models": [[4, "module-autots.models"]], "autots.models.arch": [[4, "module-autots.models.arch"]], "autots.models.base": [[4, "module-autots.models.base"]], "autots.models.basics": [[4, "module-autots.models.basics"]], "autots.models.cassandra": [[4, "module-autots.models.cassandra"]], "autots.models.dnn": [[4, "module-autots.models.dnn"]], "autots.models.ensemble": [[4, "module-autots.models.ensemble"]], "autots.models.gluonts": [[4, "module-autots.models.gluonts"]], "autots.models.greykite": [[4, "module-autots.models.greykite"]], "autots.models.matrix_var": [[4, "module-autots.models.matrix_var"]], "autots.models.mlensemble": [[4, "module-autots.models.mlensemble"]], "autots.models.model_list": [[4, "module-autots.models.model_list"]], "autots.models.neural_forecast": [[4, "module-autots.models.neural_forecast"]], 
"autots.models.prophet": [[4, "module-autots.models.prophet"]], "autots.models.pytorch": [[4, "module-autots.models.pytorch"]], "autots.models.sklearn": [[4, "module-autots.models.sklearn"]], "autots.models.statsmodels": [[4, "module-autots.models.statsmodels"]], "autots.models.tfp": [[4, "module-autots.models.tfp"]], "autots.models.tide": [[4, "module-autots.models.tide"]], "base_scaler() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.base_scaler"]], "base_scaler() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.base_scaler"]], "basic_profile() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.basic_profile"]], "calculate_peak_density() (in module autots.models.base)": [[4, "autots.models.base.calculate_peak_density"]], "clean_regressor() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.clean_regressor"]], "compare_actual_components() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.compare_actual_components"]], "conj_grad_w() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.conj_grad_w"]], "conj_grad_x() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.conj_grad_x"]], "cost_function() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.cost_function"]], "cost_function_dwae() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_dwae"]], "cost_function_l1() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_l1"]], "cost_function_l1_positive() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_l1_positive"]], "cost_function_l2() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_l2"]], "cost_function_quantile() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.cost_function_quantile"]], "create_feature() (in module autots.models.mlensemble)": [[4, "autots.models.mlensemble.create_feature"]], "create_forecast_index() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.create_forecast_index"]], "create_forecast_index() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.create_forecast_index"]], "create_forecast_index() (in module autots.models.base)": [[4, "autots.models.base.create_forecast_index"]], "create_seaborn_palette_from_cmap() (in module autots.models.base)": [[4, "autots.models.base.create_seaborn_palette_from_cmap"]], "create_t() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.create_t"]], "create_t() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.create_t"]], "cross_validate() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.cross_validate"]], "dates_to_holidays() (autots.models.cassandra.cassandra.holiday_detector method)": [[4, "autots.models.cassandra.Cassandra.holiday_detector.dates_to_holidays"]], "dmd() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.dmd"]], "dmd4cast() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.dmd4cast"]], "ell_w() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.ell_w"]], "ell_x() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.ell_x"]], "evaluate() (autots.models.base.predictionobject method)": [[4, 
"autots.models.base.PredictionObject.evaluate"], [4, "id1"]], "extract_ensemble_runtimes() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.extract_ensemble_runtimes"]], "extract_single_series_from_horz() (in module autots.models.base)": [[4, "autots.models.base.extract_single_series_from_horz"]], "extract_single_transformer() (in module autots.models.base)": [[4, "autots.models.base.extract_single_transformer"]], "feature_importance() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.feature_importance"]], "find_pattern() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.find_pattern"]], "fit() (autots.models.arch.arch method)": [[4, "autots.models.arch.ARCH.fit"]], "fit() (autots.models.basics.averagevaluenaive method)": [[4, "autots.models.basics.AverageValueNaive.fit"]], "fit() (autots.models.basics.balltreemultivariatemotif method)": [[4, "autots.models.basics.BallTreeMultivariateMotif.fit"]], "fit() (autots.models.basics.constantnaive method)": [[4, "autots.models.basics.ConstantNaive.fit"]], "fit() (autots.models.basics.fft method)": [[4, "autots.models.basics.FFT.fit"]], "fit() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.fit"]], "fit() (autots.models.basics.lastvaluenaive method)": [[4, "autots.models.basics.LastValueNaive.fit"]], "fit() (autots.models.basics.metricmotif method)": [[4, "autots.models.basics.MetricMotif.fit"]], "fit() (autots.models.basics.motif method)": [[4, "autots.models.basics.Motif.fit"]], "fit() (autots.models.basics.motifsimulation method)": [[4, "autots.models.basics.MotifSimulation.fit"]], "fit() (autots.models.basics.nvar method)": [[4, "autots.models.basics.NVAR.fit"]], "fit() (autots.models.basics.seasonalnaive method)": [[4, "autots.models.basics.SeasonalNaive.fit"]], "fit() (autots.models.basics.seasonalitymotif method)": [[4, "autots.models.basics.SeasonalityMotif.fit"]], "fit() (autots.models.basics.sectionalmotif method)": [[4, "autots.models.basics.SectionalMotif.fit"]], "fit() (autots.models.cassandra.bayesianmultioutputregression method)": [[4, "autots.models.cassandra.BayesianMultiOutputRegression.fit"]], "fit() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.fit"], [4, "id5"]], "fit() (autots.models.dnn.kerasrnn method)": [[4, "autots.models.dnn.KerasRNN.fit"]], "fit() (autots.models.dnn.transformer method)": [[4, "autots.models.dnn.Transformer.fit"]], "fit() (autots.models.gluonts.gluonts method)": [[4, "autots.models.gluonts.GluonTS.fit"]], "fit() (autots.models.greykite.greykite method)": [[4, "autots.models.greykite.Greykite.fit"]], "fit() (autots.models.matrix_var.latc method)": [[4, "autots.models.matrix_var.LATC.fit"]], "fit() (autots.models.matrix_var.mar method)": [[4, "autots.models.matrix_var.MAR.fit"]], "fit() (autots.models.matrix_var.rrvar method)": [[4, "autots.models.matrix_var.RRVAR.fit"]], "fit() (autots.models.matrix_var.tmf method)": [[4, "autots.models.matrix_var.TMF.fit"]], "fit() (autots.models.mlensemble.mlensemble method)": [[4, "autots.models.mlensemble.MLEnsemble.fit"]], "fit() (autots.models.neural_forecast.neuralforecast method)": [[4, "autots.models.neural_forecast.NeuralForecast.fit"]], "fit() (autots.models.prophet.fbprophet method)": [[4, "autots.models.prophet.FBProphet.fit"]], "fit() (autots.models.prophet.neuralprophet method)": [[4, "autots.models.prophet.NeuralProphet.fit"]], "fit() (autots.models.pytorch.pytorchforecasting 
method)": [[4, "autots.models.pytorch.PytorchForecasting.fit"]], "fit() (autots.models.sklearn.componentanalysis method)": [[4, "autots.models.sklearn.ComponentAnalysis.fit"]], "fit() (autots.models.sklearn.datepartregression method)": [[4, "autots.models.sklearn.DatepartRegression.fit"]], "fit() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.fit"]], "fit() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.fit"]], "fit() (autots.models.sklearn.rollingregression method)": [[4, "autots.models.sklearn.RollingRegression.fit"]], "fit() (autots.models.sklearn.univariateregression method)": [[4, "autots.models.sklearn.UnivariateRegression.fit"]], "fit() (autots.models.sklearn.vectorizedmultioutputgpr method)": [[4, "autots.models.sklearn.VectorizedMultiOutputGPR.fit"]], "fit() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.fit"]], "fit() (autots.models.statsmodels.ardl method)": [[4, "autots.models.statsmodels.ARDL.fit"]], "fit() (autots.models.statsmodels.arima method)": [[4, "autots.models.statsmodels.ARIMA.fit"]], "fit() (autots.models.statsmodels.dynamicfactor method)": [[4, "autots.models.statsmodels.DynamicFactor.fit"]], "fit() (autots.models.statsmodels.dynamicfactormq method)": [[4, "autots.models.statsmodels.DynamicFactorMQ.fit"]], "fit() (autots.models.statsmodels.ets method)": [[4, "autots.models.statsmodels.ETS.fit"]], "fit() (autots.models.statsmodels.glm method)": [[4, "autots.models.statsmodels.GLM.fit"]], "fit() (autots.models.statsmodels.gls method)": [[4, "autots.models.statsmodels.GLS.fit"]], "fit() (autots.models.statsmodels.theta method)": [[4, "autots.models.statsmodels.Theta.fit"]], "fit() (autots.models.statsmodels.unobservedcomponents method)": [[4, "autots.models.statsmodels.UnobservedComponents.fit"]], "fit() (autots.models.statsmodels.var method)": [[4, "autots.models.statsmodels.VAR.fit"]], "fit() (autots.models.statsmodels.varmax method)": [[4, "autots.models.statsmodels.VARMAX.fit"]], "fit() (autots.models.statsmodels.vecm method)": [[4, "autots.models.statsmodels.VECM.fit"]], "fit() (autots.models.tfp.tfpregression method)": [[4, "autots.models.tfp.TFPRegression.fit"]], "fit() (autots.models.tfp.tfpregressor method)": [[4, "autots.models.tfp.TFPRegressor.fit"]], "fit() (autots.models.tfp.tensorflowsts method)": [[4, "autots.models.tfp.TensorflowSTS.fit"]], "fit() (autots.models.tide.tide method)": [[4, "autots.models.tide.TiDE.fit"]], "fit_data() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.fit_data"]], "fit_data() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.fit_data"]], "fit_data() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.fit_data"]], "fit_data() (autots.models.gluonts.gluonts method)": [[4, "autots.models.gluonts.GluonTS.fit_data"]], "fit_data() (autots.models.sklearn.datepartregression method)": [[4, "autots.models.sklearn.DatepartRegression.fit_data"]], "fit_data() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.fit_data"]], "fit_data() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.fit_data"]], "fit_data() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.fit_data"]], "fit_linear_model() (in module 
autots.models.cassandra)": [[4, "autots.models.cassandra.fit_linear_model"]], "forecast (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.forecast"]], "generalize_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.generalize_horizontal"]], "generate_psi() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.generate_Psi"]], "generate_classifier_params() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.generate_classifier_params"]], "generate_crosshair_score() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.generate_crosshair_score"]], "generate_crosshair_score_list() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.generate_crosshair_score_list"]], "generate_mosaic_template() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.generate_mosaic_template"]], "generate_regressor_params() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.generate_regressor_params"]], "get_holidays() (in module autots.models.tide)": [[4, "autots.models.tide.get_HOLIDAYS"]], "get_covariates() (autots.models.tide.timecovariates method)": [[4, "autots.models.tide.TimeCovariates.get_covariates"]], "get_new_params() (autots.models.arch.arch method)": [[4, "autots.models.arch.ARCH.get_new_params"]], "get_new_params() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.get_new_params"]], "get_new_params() (autots.models.basics.averagevaluenaive method)": [[4, "autots.models.basics.AverageValueNaive.get_new_params"]], "get_new_params() (autots.models.basics.balltreemultivariatemotif method)": [[4, "autots.models.basics.BallTreeMultivariateMotif.get_new_params"]], "get_new_params() (autots.models.basics.constantnaive method)": [[4, "autots.models.basics.ConstantNaive.get_new_params"]], "get_new_params() (autots.models.basics.fft method)": [[4, "autots.models.basics.FFT.get_new_params"]], "get_new_params() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.get_new_params"]], "get_new_params() (autots.models.basics.lastvaluenaive method)": [[4, "autots.models.basics.LastValueNaive.get_new_params"]], "get_new_params() (autots.models.basics.metricmotif method)": [[4, "autots.models.basics.MetricMotif.get_new_params"]], "get_new_params() (autots.models.basics.motif method)": [[4, "autots.models.basics.Motif.get_new_params"]], "get_new_params() (autots.models.basics.motifsimulation method)": [[4, "autots.models.basics.MotifSimulation.get_new_params"]], "get_new_params() (autots.models.basics.nvar method)": [[4, "autots.models.basics.NVAR.get_new_params"]], "get_new_params() (autots.models.basics.seasonalnaive method)": [[4, "autots.models.basics.SeasonalNaive.get_new_params"]], "get_new_params() (autots.models.basics.seasonalitymotif method)": [[4, "autots.models.basics.SeasonalityMotif.get_new_params"]], "get_new_params() (autots.models.basics.sectionalmotif method)": [[4, "autots.models.basics.SectionalMotif.get_new_params"]], "get_new_params() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.get_new_params"], [4, "id6"]], "get_new_params() (autots.models.gluonts.gluonts method)": [[4, "autots.models.gluonts.GluonTS.get_new_params"]], "get_new_params() (autots.models.greykite.greykite method)": [[4, "autots.models.greykite.Greykite.get_new_params"]], "get_new_params() (autots.models.matrix_var.latc method)": [[4, 
"autots.models.matrix_var.LATC.get_new_params"]], "get_new_params() (autots.models.matrix_var.mar method)": [[4, "autots.models.matrix_var.MAR.get_new_params"]], "get_new_params() (autots.models.matrix_var.rrvar method)": [[4, "autots.models.matrix_var.RRVAR.get_new_params"]], "get_new_params() (autots.models.matrix_var.tmf method)": [[4, "autots.models.matrix_var.TMF.get_new_params"]], "get_new_params() (autots.models.mlensemble.mlensemble method)": [[4, "autots.models.mlensemble.MLEnsemble.get_new_params"]], "get_new_params() (autots.models.neural_forecast.neuralforecast method)": [[4, "autots.models.neural_forecast.NeuralForecast.get_new_params"]], "get_new_params() (autots.models.prophet.fbprophet method)": [[4, "autots.models.prophet.FBProphet.get_new_params"]], "get_new_params() (autots.models.prophet.neuralprophet method)": [[4, "autots.models.prophet.NeuralProphet.get_new_params"]], "get_new_params() (autots.models.pytorch.pytorchforecasting method)": [[4, "autots.models.pytorch.PytorchForecasting.get_new_params"]], "get_new_params() (autots.models.sklearn.componentanalysis method)": [[4, "autots.models.sklearn.ComponentAnalysis.get_new_params"]], "get_new_params() (autots.models.sklearn.datepartregression method)": [[4, "autots.models.sklearn.DatepartRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.rollingregression method)": [[4, "autots.models.sklearn.RollingRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.univariateregression method)": [[4, "autots.models.sklearn.UnivariateRegression.get_new_params"]], "get_new_params() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.get_new_params"]], "get_new_params() (autots.models.statsmodels.ardl method)": [[4, "autots.models.statsmodels.ARDL.get_new_params"]], "get_new_params() (autots.models.statsmodels.arima method)": [[4, "autots.models.statsmodels.ARIMA.get_new_params"]], "get_new_params() (autots.models.statsmodels.dynamicfactor method)": [[4, "autots.models.statsmodels.DynamicFactor.get_new_params"]], "get_new_params() (autots.models.statsmodels.dynamicfactormq method)": [[4, "autots.models.statsmodels.DynamicFactorMQ.get_new_params"]], "get_new_params() (autots.models.statsmodels.ets method)": [[4, "autots.models.statsmodels.ETS.get_new_params"]], "get_new_params() (autots.models.statsmodels.glm method)": [[4, "autots.models.statsmodels.GLM.get_new_params"]], "get_new_params() (autots.models.statsmodels.gls method)": [[4, "autots.models.statsmodels.GLS.get_new_params"]], "get_new_params() (autots.models.statsmodels.theta method)": [[4, "autots.models.statsmodels.Theta.get_new_params"]], "get_new_params() (autots.models.statsmodels.unobservedcomponents method)": [[4, "autots.models.statsmodels.UnobservedComponents.get_new_params"]], "get_new_params() (autots.models.statsmodels.var method)": [[4, "autots.models.statsmodels.VAR.get_new_params"]], "get_new_params() (autots.models.statsmodels.varmax method)": [[4, "autots.models.statsmodels.VARMAX.get_new_params"]], "get_new_params() (autots.models.statsmodels.vecm method)": [[4, "autots.models.statsmodels.VECM.get_new_params"]], "get_new_params() (autots.models.tfp.tfpregression method)": [[4, 
"autots.models.tfp.TFPRegression.get_new_params"]], "get_new_params() (autots.models.tfp.tensorflowsts method)": [[4, "autots.models.tfp.TensorflowSTS.get_new_params"]], "get_new_params() (autots.models.tide.tide method)": [[4, "autots.models.tide.TiDE.get_new_params"]], "get_params() (autots.models.arch.arch method)": [[4, "autots.models.arch.ARCH.get_params"]], "get_params() (autots.models.base.modelobject method)": [[4, "autots.models.base.ModelObject.get_params"]], "get_params() (autots.models.basics.averagevaluenaive method)": [[4, "autots.models.basics.AverageValueNaive.get_params"]], "get_params() (autots.models.basics.balltreemultivariatemotif method)": [[4, "autots.models.basics.BallTreeMultivariateMotif.get_params"]], "get_params() (autots.models.basics.constantnaive method)": [[4, "autots.models.basics.ConstantNaive.get_params"]], "get_params() (autots.models.basics.fft method)": [[4, "autots.models.basics.FFT.get_params"]], "get_params() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.get_params"]], "get_params() (autots.models.basics.lastvaluenaive method)": [[4, "autots.models.basics.LastValueNaive.get_params"]], "get_params() (autots.models.basics.metricmotif method)": [[4, "autots.models.basics.MetricMotif.get_params"]], "get_params() (autots.models.basics.motif method)": [[4, "autots.models.basics.Motif.get_params"]], "get_params() (autots.models.basics.motifsimulation method)": [[4, "autots.models.basics.MotifSimulation.get_params"]], "get_params() (autots.models.basics.nvar method)": [[4, "autots.models.basics.NVAR.get_params"]], "get_params() (autots.models.basics.seasonalnaive method)": [[4, "autots.models.basics.SeasonalNaive.get_params"]], "get_params() (autots.models.basics.seasonalitymotif method)": [[4, "autots.models.basics.SeasonalityMotif.get_params"]], "get_params() (autots.models.basics.sectionalmotif method)": [[4, "autots.models.basics.SectionalMotif.get_params"]], "get_params() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.get_params"]], "get_params() (autots.models.gluonts.gluonts method)": [[4, "autots.models.gluonts.GluonTS.get_params"]], "get_params() (autots.models.greykite.greykite method)": [[4, "autots.models.greykite.Greykite.get_params"]], "get_params() (autots.models.matrix_var.latc method)": [[4, "autots.models.matrix_var.LATC.get_params"]], "get_params() (autots.models.matrix_var.mar method)": [[4, "autots.models.matrix_var.MAR.get_params"]], "get_params() (autots.models.matrix_var.rrvar method)": [[4, "autots.models.matrix_var.RRVAR.get_params"]], "get_params() (autots.models.matrix_var.tmf method)": [[4, "autots.models.matrix_var.TMF.get_params"]], "get_params() (autots.models.mlensemble.mlensemble method)": [[4, "autots.models.mlensemble.MLEnsemble.get_params"]], "get_params() (autots.models.neural_forecast.neuralforecast method)": [[4, "autots.models.neural_forecast.NeuralForecast.get_params"]], "get_params() (autots.models.prophet.fbprophet method)": [[4, "autots.models.prophet.FBProphet.get_params"]], "get_params() (autots.models.prophet.neuralprophet method)": [[4, "autots.models.prophet.NeuralProphet.get_params"]], "get_params() (autots.models.pytorch.pytorchforecasting method)": [[4, "autots.models.pytorch.PytorchForecasting.get_params"]], "get_params() (autots.models.sklearn.componentanalysis method)": [[4, "autots.models.sklearn.ComponentAnalysis.get_params"]], "get_params() (autots.models.sklearn.datepartregression method)": [[4, 
"autots.models.sklearn.DatepartRegression.get_params"]], "get_params() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.get_params"]], "get_params() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.get_params"]], "get_params() (autots.models.sklearn.rollingregression method)": [[4, "autots.models.sklearn.RollingRegression.get_params"]], "get_params() (autots.models.sklearn.univariateregression method)": [[4, "autots.models.sklearn.UnivariateRegression.get_params"]], "get_params() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.get_params"]], "get_params() (autots.models.statsmodels.ardl method)": [[4, "autots.models.statsmodels.ARDL.get_params"]], "get_params() (autots.models.statsmodels.arima method)": [[4, "autots.models.statsmodels.ARIMA.get_params"]], "get_params() (autots.models.statsmodels.dynamicfactor method)": [[4, "autots.models.statsmodels.DynamicFactor.get_params"]], "get_params() (autots.models.statsmodels.dynamicfactormq method)": [[4, "autots.models.statsmodels.DynamicFactorMQ.get_params"]], "get_params() (autots.models.statsmodels.ets method)": [[4, "autots.models.statsmodels.ETS.get_params"]], "get_params() (autots.models.statsmodels.glm method)": [[4, "autots.models.statsmodels.GLM.get_params"]], "get_params() (autots.models.statsmodels.gls method)": [[4, "autots.models.statsmodels.GLS.get_params"]], "get_params() (autots.models.statsmodels.theta method)": [[4, "autots.models.statsmodels.Theta.get_params"]], "get_params() (autots.models.statsmodels.unobservedcomponents method)": [[4, "autots.models.statsmodels.UnobservedComponents.get_params"]], "get_params() (autots.models.statsmodels.var method)": [[4, "autots.models.statsmodels.VAR.get_params"]], "get_params() (autots.models.statsmodels.varmax method)": [[4, "autots.models.statsmodels.VARMAX.get_params"]], "get_params() (autots.models.statsmodels.vecm method)": [[4, "autots.models.statsmodels.VECM.get_params"]], "get_params() (autots.models.tfp.tfpregression method)": [[4, "autots.models.tfp.TFPRegression.get_params"]], "get_params() (autots.models.tfp.tensorflowsts method)": [[4, "autots.models.tfp.TensorflowSTS.get_params"]], "get_params() (autots.models.tide.tide method)": [[4, "autots.models.tide.TiDE.get_params"]], "glm_forecast_by_column() (in module autots.models.statsmodels)": [[4, "autots.models.statsmodels.glm_forecast_by_column"]], "holiday_count (autots.models.cassandra.cassandra. attribute)": [[4, "autots.models.cassandra.Cassandra..holiday_count"]], "holidays (autots.models.cassandra.cassandra. 
attribute)": [[4, "autots.models.cassandra.Cassandra..holidays"]], "horizontal_classifier() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.horizontal_classifier"]], "horizontal_xy() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.horizontal_xy"]], "is_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.is_horizontal"]], "is_mosaic() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.is_mosaic"]], "latc_imputer() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.latc_imputer"]], "latc_predictor() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.latc_predictor"]], "long_form_results() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.long_form_results"], [4, "id2"]], "looped_motif() (in module autots.models.basics)": [[4, "autots.models.basics.looped_motif"]], "lower_forecast (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.lower_forecast"]], "lstsq_minimize() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.lstsq_minimize"]], "lstsq_solve() (in module autots.models.cassandra)": [[4, "autots.models.cassandra.lstsq_solve"]], "mae_loss() (in module autots.models.tide)": [[4, "autots.models.tide.mae_loss"]], "mape() (in module autots.models.tide)": [[4, "autots.models.tide.mape"]], "mar() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.mar"]], "mat2ten() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.mat2ten"]], "mlens_helper() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mlens_helper"]], "model_list_to_dict() (in module autots.models.model_list)": [[4, "autots.models.model_list.model_list_to_dict"]], "model_name (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.model_name"]], "model_parameters (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.model_parameters"]], "mosaic_classifier() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mosaic_classifier"]], "mosaic_or_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mosaic_or_horizontal"]], "mosaic_to_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mosaic_to_horizontal"]], "mosaic_xy() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.mosaic_xy"]], "n_limited_horz() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.n_limited_horz"]], "next_fit() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.next_fit"]], "nrmse() (in module autots.models.tide)": [[4, "autots.models.tide.nrmse"]], "params (autots.models.cassandra.cassandra. 
attribute)": [[4, "autots.models.cassandra.Cassandra..params"]], "parse_forecast_length() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.parse_forecast_length"]], "parse_horizontal() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.parse_horizontal"]], "parse_mosaic() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.parse_mosaic"]], "plot() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.plot"], [4, "id3"]], "plot_components() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.plot_components"], [4, "id7"]], "plot_df() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.plot_df"]], "plot_distributions() (in module autots.models.base)": [[4, "autots.models.base.plot_distributions"]], "plot_ensemble_runtimes() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.plot_ensemble_runtimes"]], "plot_forecast() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.plot_forecast"], [4, "id8"]], "plot_grid() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.plot_grid"]], "plot_things() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.plot_things"]], "plot_trend() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.plot_trend"], [4, "id9"]], "predict() (autots.models.arch.arch method)": [[4, "autots.models.arch.ARCH.predict"]], "predict() (autots.models.basics.averagevaluenaive method)": [[4, "autots.models.basics.AverageValueNaive.predict"]], "predict() (autots.models.basics.balltreemultivariatemotif method)": [[4, "autots.models.basics.BallTreeMultivariateMotif.predict"]], "predict() (autots.models.basics.constantnaive method)": [[4, "autots.models.basics.ConstantNaive.predict"]], "predict() (autots.models.basics.fft method)": [[4, "autots.models.basics.FFT.predict"]], "predict() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.predict"]], "predict() (autots.models.basics.lastvaluenaive method)": [[4, "autots.models.basics.LastValueNaive.predict"]], "predict() (autots.models.basics.metricmotif method)": [[4, "autots.models.basics.MetricMotif.predict"]], "predict() (autots.models.basics.motif method)": [[4, "autots.models.basics.Motif.predict"]], "predict() (autots.models.basics.motifsimulation method)": [[4, "autots.models.basics.MotifSimulation.predict"]], "predict() (autots.models.basics.nvar method)": [[4, "autots.models.basics.NVAR.predict"]], "predict() (autots.models.basics.seasonalnaive method)": [[4, "autots.models.basics.SeasonalNaive.predict"]], "predict() (autots.models.basics.seasonalitymotif method)": [[4, "autots.models.basics.SeasonalityMotif.predict"]], "predict() (autots.models.basics.sectionalmotif method)": [[4, "autots.models.basics.SectionalMotif.predict"]], "predict() (autots.models.cassandra.bayesianmultioutputregression method)": [[4, "autots.models.cassandra.BayesianMultiOutputRegression.predict"]], "predict() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.predict"], [4, "id10"]], "predict() (autots.models.dnn.kerasrnn method)": [[4, "autots.models.dnn.KerasRNN.predict"]], "predict() (autots.models.dnn.transformer method)": [[4, "autots.models.dnn.Transformer.predict"]], "predict() (autots.models.gluonts.gluonts method)": [[4, 
"autots.models.gluonts.GluonTS.predict"]], "predict() (autots.models.greykite.greykite method)": [[4, "autots.models.greykite.Greykite.predict"]], "predict() (autots.models.matrix_var.latc method)": [[4, "autots.models.matrix_var.LATC.predict"]], "predict() (autots.models.matrix_var.mar method)": [[4, "autots.models.matrix_var.MAR.predict"]], "predict() (autots.models.matrix_var.rrvar method)": [[4, "autots.models.matrix_var.RRVAR.predict"]], "predict() (autots.models.matrix_var.tmf method)": [[4, "autots.models.matrix_var.TMF.predict"]], "predict() (autots.models.mlensemble.mlensemble method)": [[4, "autots.models.mlensemble.MLEnsemble.predict"]], "predict() (autots.models.neural_forecast.neuralforecast method)": [[4, "autots.models.neural_forecast.NeuralForecast.predict"]], "predict() (autots.models.prophet.fbprophet method)": [[4, "autots.models.prophet.FBProphet.predict"]], "predict() (autots.models.prophet.neuralprophet method)": [[4, "autots.models.prophet.NeuralProphet.predict"]], "predict() (autots.models.pytorch.pytorchforecasting method)": [[4, "autots.models.pytorch.PytorchForecasting.predict"]], "predict() (autots.models.sklearn.componentanalysis method)": [[4, "autots.models.sklearn.ComponentAnalysis.predict"]], "predict() (autots.models.sklearn.datepartregression method)": [[4, "autots.models.sklearn.DatepartRegression.predict"]], "predict() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.predict"]], "predict() (autots.models.sklearn.preprocessingregression method)": [[4, "autots.models.sklearn.PreprocessingRegression.predict"]], "predict() (autots.models.sklearn.rollingregression method)": [[4, "autots.models.sklearn.RollingRegression.predict"]], "predict() (autots.models.sklearn.univariateregression method)": [[4, "autots.models.sklearn.UnivariateRegression.predict"]], "predict() (autots.models.sklearn.vectorizedmultioutputgpr method)": [[4, "autots.models.sklearn.VectorizedMultiOutputGPR.predict"]], "predict() (autots.models.sklearn.windowregression method)": [[4, "autots.models.sklearn.WindowRegression.predict"]], "predict() (autots.models.statsmodels.ardl method)": [[4, "autots.models.statsmodels.ARDL.predict"]], "predict() (autots.models.statsmodels.arima method)": [[4, "autots.models.statsmodels.ARIMA.predict"]], "predict() (autots.models.statsmodels.dynamicfactor method)": [[4, "autots.models.statsmodels.DynamicFactor.predict"]], "predict() (autots.models.statsmodels.dynamicfactormq method)": [[4, "autots.models.statsmodels.DynamicFactorMQ.predict"]], "predict() (autots.models.statsmodels.ets method)": [[4, "autots.models.statsmodels.ETS.predict"]], "predict() (autots.models.statsmodels.glm method)": [[4, "autots.models.statsmodels.GLM.predict"]], "predict() (autots.models.statsmodels.gls method)": [[4, "autots.models.statsmodels.GLS.predict"]], "predict() (autots.models.statsmodels.theta method)": [[4, "autots.models.statsmodels.Theta.predict"]], "predict() (autots.models.statsmodels.unobservedcomponents method)": [[4, "autots.models.statsmodels.UnobservedComponents.predict"]], "predict() (autots.models.statsmodels.var method)": [[4, "autots.models.statsmodels.VAR.predict"]], "predict() (autots.models.statsmodels.varmax method)": [[4, "autots.models.statsmodels.VARMAX.predict"]], "predict() (autots.models.statsmodels.vecm method)": [[4, "autots.models.statsmodels.VECM.predict"]], "predict() (autots.models.tfp.tfpregression method)": [[4, "autots.models.tfp.TFPRegression.predict"]], "predict() 
(autots.models.tfp.tfpregressor method)": [[4, "autots.models.tfp.TFPRegressor.predict"]], "predict() (autots.models.tfp.tensorflowsts method)": [[4, "autots.models.tfp.TensorflowSTS.predict"]], "predict() (autots.models.tide.tide method)": [[4, "autots.models.tide.TiDE.predict"]], "predict_new_product() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.predict_new_product"]], "predict_proba() (autots.models.sklearn.vectorizedmultioutputgpr method)": [[4, "autots.models.sklearn.VectorizedMultiOutputGPR.predict_proba"]], "predict_reservoir() (in module autots.models.basics)": [[4, "autots.models.basics.predict_reservoir"]], "predict_x_array (autots.models.cassandra.cassandra. attribute)": [[4, "autots.models.cassandra.Cassandra..predict_x_array"]], "predicted_trend (autots.models.cassandra.cassandra. attribute)": [[4, "autots.models.cassandra.Cassandra..predicted_trend"]], "process_components() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.process_components"]], "process_mosaic_arrays() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.process_mosaic_arrays"]], "retrieve_classifier() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.retrieve_classifier"]], "retrieve_regressor() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.retrieve_regressor"]], "return_components() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.return_components"], [4, "id11"]], "rmse() (in module autots.models.tide)": [[4, "autots.models.tide.rmse"]], "rolling_trend() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.rolling_trend"]], "rolling_x_regressor() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.rolling_x_regressor"]], "rolling_x_regressor_regressor() (in module autots.models.sklearn)": [[4, "autots.models.sklearn.rolling_x_regressor_regressor"]], "rrvar() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.rrvar"]], "sample_posterior() (autots.models.cassandra.bayesianmultioutputregression method)": [[4, "autots.models.cassandra.BayesianMultiOutputRegression.sample_posterior"]], "scale_data() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.scale_data"]], "scale_data() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.scale_data"]], "scores (autots.models.cassandra.cassandra..anomaly_detector attribute)": [[4, "autots.models.cassandra.Cassandra..anomaly_detector.scores"]], "seek_the_oracle() (in module autots.models.greykite)": [[4, "autots.models.greykite.seek_the_oracle"]], "smape() (in module autots.models.tide)": [[4, "autots.models.tide.smape"]], "summarize_series() (in module autots.models.ensemble)": [[4, "autots.models.ensemble.summarize_series"]], "svt_tnn() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.svt_tnn"]], "ten2mat() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.ten2mat"]], "test_val_gen() (autots.models.tide.timeseriesdata method)": [[4, "autots.models.tide.TimeSeriesdata.test_val_gen"]], "tf_dataset() (autots.models.tide.timeseriesdata method)": [[4, "autots.models.tide.TimeSeriesdata.tf_dataset"]], "time() (autots.models.base.modelobject static method)": [[4, "autots.models.base.ModelObject.time"]], "tmf() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.tmf"]], "to_origin_space() 
(autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.to_origin_space"]], "to_origin_space() (autots.models.sklearn.multivariateregression method)": [[4, "autots.models.sklearn.MultivariateRegression.to_origin_space"]], "total_runtime() (autots.models.base.predictionobject method)": [[4, "autots.models.base.PredictionObject.total_runtime"], [4, "id4"]], "train_gen() (autots.models.tide.timeseriesdata method)": [[4, "autots.models.tide.TimeSeriesdata.train_gen"]], "transformation_parameters (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.transformation_parameters"]], "transformer_build_model() (in module autots.models.dnn)": [[4, "autots.models.dnn.transformer_build_model"]], "transformer_encoder() (in module autots.models.dnn)": [[4, "autots.models.dnn.transformer_encoder"]], "treatment_causal_impact() (autots.models.cassandra.cassandra method)": [[4, "autots.models.cassandra.Cassandra.treatment_causal_impact"]], "trend_train (autots.models.cassandra.cassandra. attribute)": [[4, "autots.models.cassandra.Cassandra..trend_train"]], "tune_observational_noise() (autots.models.basics.kalmanstatespace method)": [[4, "autots.models.basics.KalmanStateSpace.tune_observational_noise"]], "update_cg() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.update_cg"]], "upper_forecast (autots.models.base.predictionobject attribute)": [[4, "autots.models.base.PredictionObject.upper_forecast"]], "var() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.var"]], "var4cast() (in module autots.models.matrix_var)": [[4, "autots.models.matrix_var.var4cast"]], "wape() (in module autots.models.tide)": [[4, "autots.models.tide.wape"]], "x_array (autots.models.cassandra.cassandra. 
attribute)": [[4, "autots.models.cassandra.Cassandra..x_array"]], "autots.templates": [[5, "module-autots.templates"]], "autots.templates.general": [[5, "module-autots.templates.general"]], "general_template (in module autots.templates.general)": [[5, "autots.templates.general.general_template"]], "alignlastdiff (class in autots.tools.transform)": [[6, "autots.tools.transform.AlignLastDiff"]], "alignlastvalue (class in autots.tools.transform)": [[6, "autots.tools.transform.AlignLastValue"]], "anomalyremoval (class in autots.tools.transform)": [[6, "autots.tools.transform.AnomalyRemoval"]], "bkbandpassfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.BKBandpassFilter"]], "btcd (class in autots.tools.transform)": [[6, "autots.tools.transform.BTCD"]], "centerlastvalue (class in autots.tools.transform)": [[6, "autots.tools.transform.CenterLastValue"]], "centersplit (class in autots.tools.transform)": [[6, "autots.tools.transform.CenterSplit"]], "clipoutliers (class in autots.tools.transform)": [[6, "autots.tools.transform.ClipOutliers"]], "cointegration (class in autots.tools.transform)": [[6, "autots.tools.transform.Cointegration"]], "cumsumtransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.CumSumTransformer"]], "datepartregression (in module autots.tools.transform)": [[6, "autots.tools.transform.DatepartRegression"]], "datepartregressiontransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.DatepartRegressionTransformer"]], "detrend (class in autots.tools.transform)": [[6, "autots.tools.transform.Detrend"]], "diffsmoother (class in autots.tools.transform)": [[6, "autots.tools.transform.DiffSmoother"]], "differencedtransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.DifferencedTransformer"]], "discretize (class in autots.tools.transform)": [[6, "autots.tools.transform.Discretize"]], "ewmafilter (class in autots.tools.transform)": [[6, "autots.tools.transform.EWMAFilter"]], "emptytransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.EmptyTransformer"]], "fft (class in autots.tools.fft)": [[6, "autots.tools.fft.FFT"]], "fftdecomposition (class in autots.tools.transform)": [[6, "autots.tools.transform.FFTDecomposition"]], "fftfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.FFTFilter"]], "fastica (class in autots.tools.transform)": [[6, "autots.tools.transform.FastICA"]], "fillna() (in module autots.tools.impute)": [[6, "autots.tools.impute.FillNA"]], "gaussian (class in autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.Gaussian"]], "generaltransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.GeneralTransformer"]], "hpfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.HPFilter"]], "historicvalues (class in autots.tools.transform)": [[6, "autots.tools.transform.HistoricValues"]], "holidaytransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.HolidayTransformer"]], "intermittentoccurrence (class in autots.tools.transform)": [[6, "autots.tools.transform.IntermittentOccurrence"]], "kalmanfilter (class in autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.KalmanFilter"]], "kalmanfilter.result (class in autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.KalmanFilter.Result"]], "kalmansmoothing (class in autots.tools.transform)": [[6, "autots.tools.transform.KalmanSmoothing"]], "levelshiftmagic (class in autots.tools.transform)": [[6, 
"autots.tools.transform.LevelShiftMagic"]], "levelshifttransformer (in module autots.tools.transform)": [[6, "autots.tools.transform.LevelShiftTransformer"]], "locallineartrend (class in autots.tools.transform)": [[6, "autots.tools.transform.LocalLinearTrend"]], "meandifference (class in autots.tools.transform)": [[6, "autots.tools.transform.MeanDifference"]], "nonparametricthreshold (class in autots.tools.thresholding)": [[6, "autots.tools.thresholding.NonparametricThreshold"]], "numerictransformer (class in autots.tools.shaping)": [[6, "autots.tools.shaping.NumericTransformer"]], "pca (class in autots.tools.transform)": [[6, "autots.tools.transform.PCA"]], "pctchangetransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.PctChangeTransformer"]], "point_to_probability() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.Point_to_Probability"]], "positiveshift (class in autots.tools.transform)": [[6, "autots.tools.transform.PositiveShift"]], "randomtransform() (in module autots.tools.transform)": [[6, "autots.tools.transform.RandomTransform"]], "regressionfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.RegressionFilter"]], "replaceconstant (class in autots.tools.transform)": [[6, "autots.tools.transform.ReplaceConstant"]], "rollingmeantransformer (class in autots.tools.transform)": [[6, "autots.tools.transform.RollingMeanTransformer"]], "round (class in autots.tools.transform)": [[6, "autots.tools.transform.Round"]], "stlfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.STLFilter"]], "scipyfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.ScipyFilter"]], "seasonaldifference (class in autots.tools.transform)": [[6, "autots.tools.transform.SeasonalDifference"]], "seasonalitymotifimputer (class in autots.tools.impute)": [[6, "autots.tools.impute.SeasonalityMotifImputer"]], "simpleseasonalitymotifimputer (class in autots.tools.impute)": [[6, "autots.tools.impute.SimpleSeasonalityMotifImputer"]], "sintrend (class in autots.tools.transform)": [[6, "autots.tools.transform.SinTrend"]], "slice (class in autots.tools.transform)": [[6, "autots.tools.transform.Slice"]], "statsmodelsfilter (class in autots.tools.transform)": [[6, "autots.tools.transform.StatsmodelsFilter"]], "variable_point_to_probability() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.Variable_Point_to_Probability"]], "anomaly_df_to_holidays() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.anomaly_df_to_holidays"]], "anomaly_new_params() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.anomaly_new_params"]], "autoshape() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.autoshape"]], "autots.tools": [[6, "module-autots.tools"]], "autots.tools.anomaly_utils": [[6, "module-autots.tools.anomaly_utils"]], "autots.tools.calendar": [[6, "module-autots.tools.calendar"]], "autots.tools.cointegration": [[6, "module-autots.tools.cointegration"]], "autots.tools.cpu_count": [[6, "module-autots.tools.cpu_count"]], "autots.tools.fast_kalman": [[6, "module-autots.tools.fast_kalman"]], "autots.tools.fft": [[6, "module-autots.tools.fft"]], "autots.tools.hierarchial": [[6, "module-autots.tools.hierarchial"]], "autots.tools.holiday": [[6, "module-autots.tools.holiday"]], "autots.tools.impute": [[6, "module-autots.tools.impute"]], "autots.tools.lunar": [[6, "module-autots.tools.lunar"]], "autots.tools.percentile": [[6, 
"module-autots.tools.percentile"]], "autots.tools.probabilistic": [[6, "module-autots.tools.probabilistic"]], "autots.tools.profile": [[6, "module-autots.tools.profile"]], "autots.tools.regressor": [[6, "module-autots.tools.regressor"]], "autots.tools.seasonal": [[6, "module-autots.tools.seasonal"]], "autots.tools.shaping": [[6, "module-autots.tools.shaping"]], "autots.tools.thresholding": [[6, "module-autots.tools.thresholding"]], "autots.tools.transform": [[6, "module-autots.tools.transform"]], "autots.tools.window_functions": [[6, "module-autots.tools.window_functions"]], "biased_ffill() (in module autots.tools.impute)": [[6, "autots.tools.impute.biased_ffill"]], "bkfilter() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.bkfilter"]], "bkfilter_st() (in module autots.tools.transform)": [[6, "autots.tools.transform.bkfilter_st"]], "btcd_decompose() (in module autots.tools.cointegration)": [[6, "autots.tools.cointegration.btcd_decompose"]], "cffilter() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.cffilter"]], "chunk_reshape() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.chunk_reshape"]], "clean_weights() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.clean_weights"]], "clip_outliers() (in module autots.tools.transform)": [[6, "autots.tools.transform.clip_outliers"]], "coint_johansen() (in module autots.tools.cointegration)": [[6, "autots.tools.cointegration.coint_johansen"]], "compare_to_epsilon() (autots.tools.thresholding.nonparametricthreshold method)": [[6, "autots.tools.thresholding.NonparametricThreshold.compare_to_epsilon"]], "compute() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.compute"]], "consecutive_groups() (in module autots.tools.thresholding)": [[6, "autots.tools.thresholding.consecutive_groups"]], "convolution_filter() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.convolution_filter"]], "cpu_count() (in module autots.tools.cpu_count)": [[6, "autots.tools.cpu_count.cpu_count"]], "create_datepart_components() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.create_datepart_components"]], "create_dates_df() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.create_dates_df"]], "create_lagged_regressor() (in module autots.tools.regressor)": [[6, "autots.tools.regressor.create_lagged_regressor"]], "create_regressor() (in module autots.tools.regressor)": [[6, "autots.tools.regressor.create_regressor"]], "create_seasonality_feature() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.create_seasonality_feature"]], "data_profile() (in module autots.tools.profile)": [[6, "autots.tools.profile.data_profile"]], "date_part() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.date_part"]], "dates_to_holidays() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.dates_to_holidays"]], "dates_to_holidays() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.dates_to_holidays"]], "dcos() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.dcos"]], "ddot() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.ddot"]], "ddot_t_right() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.ddot_t_right"]], "ddot_t_right_old() (in module autots.tools.fast_kalman)": [[6, 
"autots.tools.fast_kalman.ddot_t_right_old"]], "detect_anomalies() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.detect_anomalies"]], "df_cleanup() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.df_cleanup"]], "dinv() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.dinv"]], "douter() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.douter"]], "dsin() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.dsin"]], "em() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.em"]], "em_initial_state() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.em_initial_state"]], "em_observation_noise() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.em_observation_noise"]], "em_process_noise() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.em_process_noise"]], "empty() (autots.tools.fast_kalman.gaussian static method)": [[6, "autots.tools.fast_kalman.Gaussian.empty"]], "ensure_matrix() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.ensure_matrix"]], "exponential_decay() (in module autots.tools.transform)": [[6, "autots.tools.transform.exponential_decay"]], "fake_date_fill() (in module autots.tools.impute)": [[6, "autots.tools.impute.fake_date_fill"]], "fake_date_fill_old() (in module autots.tools.impute)": [[6, "autots.tools.impute.fake_date_fill_old"]], "fill_forward() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_forward"]], "fill_forward_alt() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_forward_alt"]], "fill_mean() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_mean"]], "fill_mean_old() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_mean_old"]], "fill_median() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_median"]], "fill_median_old() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_median_old"]], "fill_na() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.fill_na"]], "fill_zero() (in module autots.tools.impute)": [[6, "autots.tools.impute.fill_zero"]], "fillna_np() (in module autots.tools.impute)": [[6, "autots.tools.impute.fillna_np"]], "find_centerpoint() (autots.tools.transform.alignlastvalue static method)": [[6, "autots.tools.transform.AlignLastValue.find_centerpoint"]], "find_epsilon() (autots.tools.thresholding.nonparametricthreshold method)": [[6, "autots.tools.thresholding.NonparametricThreshold.find_epsilon"]], "fit() (autots.tools.fft.fft method)": [[6, "autots.tools.fft.FFT.fit"]], "fit() (autots.tools.hierarchial.hierarchial method)": [[6, "autots.tools.hierarchial.hierarchial.fit"]], "fit() (autots.tools.shaping.numerictransformer method)": [[6, "autots.tools.shaping.NumericTransformer.fit"]], "fit() (autots.tools.transform.alignlastdiff method)": [[6, "autots.tools.transform.AlignLastDiff.fit"]], "fit() (autots.tools.transform.alignlastvalue method)": [[6, "autots.tools.transform.AlignLastValue.fit"]], "fit() (autots.tools.transform.anomalyremoval method)": [[6, "autots.tools.transform.AnomalyRemoval.fit"]], "fit() (autots.tools.transform.bkbandpassfilter method)": [[6, "autots.tools.transform.BKBandpassFilter.fit"]], "fit() (autots.tools.transform.btcd method)": [[6, "autots.tools.transform.BTCD.fit"]], "fit() (autots.tools.transform.centerlastvalue method)": [[6, 
"autots.tools.transform.CenterLastValue.fit"]], "fit() (autots.tools.transform.centersplit method)": [[6, "autots.tools.transform.CenterSplit.fit"]], "fit() (autots.tools.transform.clipoutliers method)": [[6, "autots.tools.transform.ClipOutliers.fit"]], "fit() (autots.tools.transform.cointegration method)": [[6, "autots.tools.transform.Cointegration.fit"]], "fit() (autots.tools.transform.cumsumtransformer method)": [[6, "autots.tools.transform.CumSumTransformer.fit"]], "fit() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.fit"]], "fit() (autots.tools.transform.detrend method)": [[6, "autots.tools.transform.Detrend.fit"]], "fit() (autots.tools.transform.diffsmoother method)": [[6, "autots.tools.transform.DiffSmoother.fit"]], "fit() (autots.tools.transform.differencedtransformer method)": [[6, "autots.tools.transform.DifferencedTransformer.fit"]], "fit() (autots.tools.transform.discretize method)": [[6, "autots.tools.transform.Discretize.fit"]], "fit() (autots.tools.transform.emptytransformer method)": [[6, "autots.tools.transform.EmptyTransformer.fit"]], "fit() (autots.tools.transform.fftdecomposition method)": [[6, "autots.tools.transform.FFTDecomposition.fit"]], "fit() (autots.tools.transform.fftfilter method)": [[6, "autots.tools.transform.FFTFilter.fit"]], "fit() (autots.tools.transform.fastica method)": [[6, "autots.tools.transform.FastICA.fit"]], "fit() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.fit"]], "fit() (autots.tools.transform.historicvalues method)": [[6, "autots.tools.transform.HistoricValues.fit"]], "fit() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.fit"]], "fit() (autots.tools.transform.intermittentoccurrence method)": [[6, "autots.tools.transform.IntermittentOccurrence.fit"]], "fit() (autots.tools.transform.kalmansmoothing method)": [[6, "autots.tools.transform.KalmanSmoothing.fit"]], "fit() (autots.tools.transform.levelshiftmagic method)": [[6, "autots.tools.transform.LevelShiftMagic.fit"]], "fit() (autots.tools.transform.locallineartrend method)": [[6, "autots.tools.transform.LocalLinearTrend.fit"]], "fit() (autots.tools.transform.meandifference method)": [[6, "autots.tools.transform.MeanDifference.fit"]], "fit() (autots.tools.transform.pca method)": [[6, "autots.tools.transform.PCA.fit"]], "fit() (autots.tools.transform.pctchangetransformer method)": [[6, "autots.tools.transform.PctChangeTransformer.fit"]], "fit() (autots.tools.transform.positiveshift method)": [[6, "autots.tools.transform.PositiveShift.fit"]], "fit() (autots.tools.transform.regressionfilter method)": [[6, "autots.tools.transform.RegressionFilter.fit"]], "fit() (autots.tools.transform.replaceconstant method)": [[6, "autots.tools.transform.ReplaceConstant.fit"]], "fit() (autots.tools.transform.rollingmeantransformer method)": [[6, "autots.tools.transform.RollingMeanTransformer.fit"]], "fit() (autots.tools.transform.round method)": [[6, "autots.tools.transform.Round.fit"]], "fit() (autots.tools.transform.scipyfilter method)": [[6, "autots.tools.transform.ScipyFilter.fit"]], "fit() (autots.tools.transform.seasonaldifference method)": [[6, "autots.tools.transform.SeasonalDifference.fit"]], "fit() (autots.tools.transform.sintrend method)": [[6, "autots.tools.transform.SinTrend.fit"]], "fit() (autots.tools.transform.slice method)": [[6, "autots.tools.transform.Slice.fit"]], "fit_anomaly_classifier() 
(autots.tools.transform.anomalyremoval method)": [[6, "autots.tools.transform.AnomalyRemoval.fit_anomaly_classifier"]], "fit_sin() (autots.tools.transform.sintrend static method)": [[6, "autots.tools.transform.SinTrend.fit_sin"]], "fit_transform() (autots.tools.shaping.numerictransformer method)": [[6, "autots.tools.shaping.NumericTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.alignlastdiff method)": [[6, "autots.tools.transform.AlignLastDiff.fit_transform"]], "fit_transform() (autots.tools.transform.alignlastvalue method)": [[6, "autots.tools.transform.AlignLastValue.fit_transform"]], "fit_transform() (autots.tools.transform.anomalyremoval method)": [[6, "autots.tools.transform.AnomalyRemoval.fit_transform"]], "fit_transform() (autots.tools.transform.bkbandpassfilter method)": [[6, "autots.tools.transform.BKBandpassFilter.fit_transform"]], "fit_transform() (autots.tools.transform.btcd method)": [[6, "autots.tools.transform.BTCD.fit_transform"]], "fit_transform() (autots.tools.transform.centerlastvalue method)": [[6, "autots.tools.transform.CenterLastValue.fit_transform"]], "fit_transform() (autots.tools.transform.centersplit method)": [[6, "autots.tools.transform.CenterSplit.fit_transform"]], "fit_transform() (autots.tools.transform.clipoutliers method)": [[6, "autots.tools.transform.ClipOutliers.fit_transform"]], "fit_transform() (autots.tools.transform.cointegration method)": [[6, "autots.tools.transform.Cointegration.fit_transform"]], "fit_transform() (autots.tools.transform.cumsumtransformer method)": [[6, "autots.tools.transform.CumSumTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.detrend method)": [[6, "autots.tools.transform.Detrend.fit_transform"]], "fit_transform() (autots.tools.transform.diffsmoother method)": [[6, "autots.tools.transform.DiffSmoother.fit_transform"]], "fit_transform() (autots.tools.transform.differencedtransformer method)": [[6, "autots.tools.transform.DifferencedTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.discretize method)": [[6, "autots.tools.transform.Discretize.fit_transform"]], "fit_transform() (autots.tools.transform.ewmafilter method)": [[6, "autots.tools.transform.EWMAFilter.fit_transform"]], "fit_transform() (autots.tools.transform.emptytransformer method)": [[6, "autots.tools.transform.EmptyTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.fftdecomposition method)": [[6, "autots.tools.transform.FFTDecomposition.fit_transform"]], "fit_transform() (autots.tools.transform.fftfilter method)": [[6, "autots.tools.transform.FFTFilter.fit_transform"]], "fit_transform() (autots.tools.transform.fastica method)": [[6, "autots.tools.transform.FastICA.fit_transform"]], "fit_transform() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.hpfilter method)": [[6, "autots.tools.transform.HPFilter.fit_transform"]], "fit_transform() (autots.tools.transform.historicvalues method)": [[6, "autots.tools.transform.HistoricValues.fit_transform"]], "fit_transform() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.intermittentoccurrence method)": [[6, 
"autots.tools.transform.IntermittentOccurrence.fit_transform"]], "fit_transform() (autots.tools.transform.kalmansmoothing method)": [[6, "autots.tools.transform.KalmanSmoothing.fit_transform"]], "fit_transform() (autots.tools.transform.levelshiftmagic method)": [[6, "autots.tools.transform.LevelShiftMagic.fit_transform"]], "fit_transform() (autots.tools.transform.locallineartrend method)": [[6, "autots.tools.transform.LocalLinearTrend.fit_transform"]], "fit_transform() (autots.tools.transform.meandifference method)": [[6, "autots.tools.transform.MeanDifference.fit_transform"]], "fit_transform() (autots.tools.transform.pca method)": [[6, "autots.tools.transform.PCA.fit_transform"]], "fit_transform() (autots.tools.transform.pctchangetransformer method)": [[6, "autots.tools.transform.PctChangeTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.positiveshift method)": [[6, "autots.tools.transform.PositiveShift.fit_transform"]], "fit_transform() (autots.tools.transform.regressionfilter method)": [[6, "autots.tools.transform.RegressionFilter.fit_transform"]], "fit_transform() (autots.tools.transform.replaceconstant method)": [[6, "autots.tools.transform.ReplaceConstant.fit_transform"]], "fit_transform() (autots.tools.transform.rollingmeantransformer method)": [[6, "autots.tools.transform.RollingMeanTransformer.fit_transform"]], "fit_transform() (autots.tools.transform.round method)": [[6, "autots.tools.transform.Round.fit_transform"]], "fit_transform() (autots.tools.transform.stlfilter method)": [[6, "autots.tools.transform.STLFilter.fit_transform"]], "fit_transform() (autots.tools.transform.scipyfilter method)": [[6, "autots.tools.transform.ScipyFilter.fit_transform"]], "fit_transform() (autots.tools.transform.seasonaldifference method)": [[6, "autots.tools.transform.SeasonalDifference.fit_transform"]], "fit_transform() (autots.tools.transform.sintrend method)": [[6, "autots.tools.transform.SinTrend.fit_transform"]], "fit_transform() (autots.tools.transform.slice method)": [[6, "autots.tools.transform.Slice.fit_transform"]], "fit_transform() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.fit_transform"]], "fixangle() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.fixangle"]], "fourier_df() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.fourier_df"]], "fourier_extrapolation() (in module autots.tools.fft)": [[6, "autots.tools.fft.fourier_extrapolation"]], "fourier_series() (in module autots.tools.cointegration)": [[6, "autots.tools.cointegration.fourier_series"]], "fourier_series() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.fourier_series"]], "freq_to_timedelta() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.freq_to_timedelta"]], "get_new_params() (autots.tools.transform.alignlastdiff static method)": [[6, "autots.tools.transform.AlignLastDiff.get_new_params"]], "get_new_params() (autots.tools.transform.alignlastvalue static method)": [[6, "autots.tools.transform.AlignLastValue.get_new_params"]], "get_new_params() (autots.tools.transform.anomalyremoval static method)": [[6, "autots.tools.transform.AnomalyRemoval.get_new_params"]], "get_new_params() (autots.tools.transform.bkbandpassfilter static method)": [[6, "autots.tools.transform.BKBandpassFilter.get_new_params"]], "get_new_params() (autots.tools.transform.btcd static method)": [[6, "autots.tools.transform.BTCD.get_new_params"]], "get_new_params() (autots.tools.transform.centerlastvalue static method)": [[6, 
"autots.tools.transform.CenterLastValue.get_new_params"]], "get_new_params() (autots.tools.transform.centersplit static method)": [[6, "autots.tools.transform.CenterSplit.get_new_params"]], "get_new_params() (autots.tools.transform.clipoutliers static method)": [[6, "autots.tools.transform.ClipOutliers.get_new_params"]], "get_new_params() (autots.tools.transform.cointegration static method)": [[6, "autots.tools.transform.Cointegration.get_new_params"]], "get_new_params() (autots.tools.transform.datepartregressiontransformer static method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.detrend static method)": [[6, "autots.tools.transform.Detrend.get_new_params"]], "get_new_params() (autots.tools.transform.diffsmoother static method)": [[6, "autots.tools.transform.DiffSmoother.get_new_params"]], "get_new_params() (autots.tools.transform.discretize static method)": [[6, "autots.tools.transform.Discretize.get_new_params"]], "get_new_params() (autots.tools.transform.ewmafilter static method)": [[6, "autots.tools.transform.EWMAFilter.get_new_params"]], "get_new_params() (autots.tools.transform.emptytransformer static method)": [[6, "autots.tools.transform.EmptyTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.fftdecomposition static method)": [[6, "autots.tools.transform.FFTDecomposition.get_new_params"]], "get_new_params() (autots.tools.transform.fftfilter static method)": [[6, "autots.tools.transform.FFTFilter.get_new_params"]], "get_new_params() (autots.tools.transform.fastica static method)": [[6, "autots.tools.transform.FastICA.get_new_params"]], "get_new_params() (autots.tools.transform.generaltransformer static method)": [[6, "autots.tools.transform.GeneralTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.hpfilter static method)": [[6, "autots.tools.transform.HPFilter.get_new_params"]], "get_new_params() (autots.tools.transform.historicvalues static method)": [[6, "autots.tools.transform.HistoricValues.get_new_params"]], "get_new_params() (autots.tools.transform.holidaytransformer static method)": [[6, "autots.tools.transform.HolidayTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.intermittentoccurrence static method)": [[6, "autots.tools.transform.IntermittentOccurrence.get_new_params"]], "get_new_params() (autots.tools.transform.kalmansmoothing static method)": [[6, "autots.tools.transform.KalmanSmoothing.get_new_params"]], "get_new_params() (autots.tools.transform.levelshiftmagic static method)": [[6, "autots.tools.transform.LevelShiftMagic.get_new_params"]], "get_new_params() (autots.tools.transform.locallineartrend static method)": [[6, "autots.tools.transform.LocalLinearTrend.get_new_params"]], "get_new_params() (autots.tools.transform.pca static method)": [[6, "autots.tools.transform.PCA.get_new_params"]], "get_new_params() (autots.tools.transform.regressionfilter static method)": [[6, "autots.tools.transform.RegressionFilter.get_new_params"]], "get_new_params() (autots.tools.transform.replaceconstant static method)": [[6, "autots.tools.transform.ReplaceConstant.get_new_params"]], "get_new_params() (autots.tools.transform.rollingmeantransformer static method)": [[6, "autots.tools.transform.RollingMeanTransformer.get_new_params"]], "get_new_params() (autots.tools.transform.round static method)": [[6, "autots.tools.transform.Round.get_new_params"]], "get_new_params() (autots.tools.transform.stlfilter static method)": [[6, 
"autots.tools.transform.STLFilter.get_new_params"]], "get_new_params() (autots.tools.transform.scipyfilter static method)": [[6, "autots.tools.transform.ScipyFilter.get_new_params"]], "get_new_params() (autots.tools.transform.seasonaldifference static method)": [[6, "autots.tools.transform.SeasonalDifference.get_new_params"]], "get_new_params() (autots.tools.transform.sintrend static method)": [[6, "autots.tools.transform.SinTrend.get_new_params"]], "get_new_params() (autots.tools.transform.slice static method)": [[6, "autots.tools.transform.Slice.get_new_params"]], "get_transformer_params() (in module autots.tools.transform)": [[6, "autots.tools.transform.get_transformer_params"]], "gregorian_to_chinese() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.gregorian_to_chinese"]], "gregorian_to_christian_lunar() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.gregorian_to_christian_lunar"]], "gregorian_to_hebrew() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.gregorian_to_hebrew"]], "gregorian_to_islamic() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.gregorian_to_islamic"]], "heb_is_leap() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.heb_is_leap"]], "hierarchial (class in autots.tools.hierarchial)": [[6, "autots.tools.hierarchial.hierarchial"]], "historic_quantile() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.historic_quantile"]], "holiday_flag() (in module autots.tools.holiday)": [[6, "autots.tools.holiday.holiday_flag"]], "holiday_new_params() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.holiday_new_params"]], "holt_winters_damped_matrices() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.holt_winters_damped_matrices"]], "impute() (autots.tools.impute.seasonalitymotifimputer method)": [[6, "autots.tools.impute.SeasonalityMotifImputer.impute"]], "impute() (autots.tools.impute.simpleseasonalitymotifimputer method)": [[6, "autots.tools.impute.SimpleSeasonalityMotifImputer.impute"]], "impute() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.impute"]], "infer_frequency() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.infer_frequency"]], "inferred_normal() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.inferred_normal"]], "inverse_transform() (autots.tools.shaping.numerictransformer method)": [[6, "autots.tools.shaping.NumericTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.alignlastdiff method)": [[6, "autots.tools.transform.AlignLastDiff.inverse_transform"]], "inverse_transform() (autots.tools.transform.alignlastvalue method)": [[6, "autots.tools.transform.AlignLastValue.inverse_transform"]], "inverse_transform() (autots.tools.transform.bkbandpassfilter method)": [[6, "autots.tools.transform.BKBandpassFilter.inverse_transform"]], "inverse_transform() (autots.tools.transform.btcd method)": [[6, "autots.tools.transform.BTCD.inverse_transform"]], "inverse_transform() (autots.tools.transform.centerlastvalue method)": [[6, "autots.tools.transform.CenterLastValue.inverse_transform"]], "inverse_transform() (autots.tools.transform.centersplit method)": [[6, "autots.tools.transform.CenterSplit.inverse_transform"]], "inverse_transform() (autots.tools.transform.clipoutliers method)": [[6, "autots.tools.transform.ClipOutliers.inverse_transform"]], "inverse_transform() 
(autots.tools.transform.cointegration method)": [[6, "autots.tools.transform.Cointegration.inverse_transform"]], "inverse_transform() (autots.tools.transform.cumsumtransformer method)": [[6, "autots.tools.transform.CumSumTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.detrend method)": [[6, "autots.tools.transform.Detrend.inverse_transform"]], "inverse_transform() (autots.tools.transform.differencedtransformer method)": [[6, "autots.tools.transform.DifferencedTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.discretize method)": [[6, "autots.tools.transform.Discretize.inverse_transform"]], "inverse_transform() (autots.tools.transform.emptytransformer method)": [[6, "autots.tools.transform.EmptyTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.fftdecomposition method)": [[6, "autots.tools.transform.FFTDecomposition.inverse_transform"]], "inverse_transform() (autots.tools.transform.fftfilter method)": [[6, "autots.tools.transform.FFTFilter.inverse_transform"]], "inverse_transform() (autots.tools.transform.fastica method)": [[6, "autots.tools.transform.FastICA.inverse_transform"]], "inverse_transform() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.historicvalues method)": [[6, "autots.tools.transform.HistoricValues.inverse_transform"]], "inverse_transform() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.intermittentoccurrence method)": [[6, "autots.tools.transform.IntermittentOccurrence.inverse_transform"]], "inverse_transform() (autots.tools.transform.kalmansmoothing method)": [[6, "autots.tools.transform.KalmanSmoothing.inverse_transform"]], "inverse_transform() (autots.tools.transform.levelshiftmagic method)": [[6, "autots.tools.transform.LevelShiftMagic.inverse_transform"]], "inverse_transform() (autots.tools.transform.locallineartrend method)": [[6, "autots.tools.transform.LocalLinearTrend.inverse_transform"]], "inverse_transform() (autots.tools.transform.meandifference method)": [[6, "autots.tools.transform.MeanDifference.inverse_transform"]], "inverse_transform() (autots.tools.transform.pca method)": [[6, "autots.tools.transform.PCA.inverse_transform"]], "inverse_transform() (autots.tools.transform.pctchangetransformer method)": [[6, "autots.tools.transform.PctChangeTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.positiveshift method)": [[6, "autots.tools.transform.PositiveShift.inverse_transform"]], "inverse_transform() (autots.tools.transform.regressionfilter method)": [[6, "autots.tools.transform.RegressionFilter.inverse_transform"]], "inverse_transform() (autots.tools.transform.replaceconstant method)": [[6, "autots.tools.transform.ReplaceConstant.inverse_transform"]], "inverse_transform() (autots.tools.transform.rollingmeantransformer method)": [[6, "autots.tools.transform.RollingMeanTransformer.inverse_transform"]], "inverse_transform() (autots.tools.transform.round method)": [[6, "autots.tools.transform.Round.inverse_transform"]], "inverse_transform() (autots.tools.transform.scipyfilter method)": [[6, "autots.tools.transform.ScipyFilter.inverse_transform"]], 
"inverse_transform() (autots.tools.transform.seasonaldifference method)": [[6, "autots.tools.transform.SeasonalDifference.inverse_transform"]], "inverse_transform() (autots.tools.transform.sintrend method)": [[6, "autots.tools.transform.SinTrend.inverse_transform"]], "inverse_transform() (autots.tools.transform.slice method)": [[6, "autots.tools.transform.Slice.inverse_transform"]], "kepler() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.kepler"]], "lagmat() (in module autots.tools.cointegration)": [[6, "autots.tools.cointegration.lagmat"]], "last_window() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.last_window"]], "limits_to_anomalies() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.limits_to_anomalies"]], "long_to_wide() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.long_to_wide"]], "loop_sk_outliers() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.loop_sk_outliers"]], "lunar_from_lunar() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.lunar_from_lunar"]], "lunar_from_lunar_full() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.lunar_from_lunar_full"]], "moon_phase() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.moon_phase"]], "moon_phase_df() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.moon_phase_df"]], "nan_percentile() (in module autots.tools.percentile)": [[6, "autots.tools.percentile.nan_percentile"]], "nan_quantile() (in module autots.tools.percentile)": [[6, "autots.tools.percentile.nan_quantile"]], "new_kalman_params() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.new_kalman_params"]], "nonparametric() (in module autots.tools.thresholding)": [[6, "autots.tools.thresholding.nonparametric"]], "nonparametric_multivariate() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.nonparametric_multivariate"]], "np_2d_arange() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.np_2d_arange"]], "percentileofscore_appliable() (in module autots.tools.probabilistic)": [[6, "autots.tools.probabilistic.percentileofscore_appliable"]], "phase_string() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.phase_string"]], "predict() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.predict"]], "predict() (autots.tools.fft.fft method)": [[6, "autots.tools.fft.FFT.predict"]], "predict() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.predict"]], "predict_next() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.predict_next"]], "predict_observation() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.predict_observation"]], "predict_observation() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.predict_observation"]], "priv_smooth() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.priv_smooth"]], "priv_update_with_nan_check() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.priv_update_with_nan_check"]], "prune_anoms() (autots.tools.thresholding.nonparametricthreshold method)": [[6, "autots.tools.thresholding.NonparametricThreshold.prune_anoms"]], "query_holidays() (in module autots.tools.holiday)": [[6, "autots.tools.holiday.query_holidays"]], "random_cleaners() (in module autots.tools.transform)": [[6, 
"autots.tools.transform.random_cleaners"]], "random_datepart() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.random_datepart"]], "random_state_space() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.random_state_space"]], "reconcile() (autots.tools.hierarchial.hierarchial method)": [[6, "autots.tools.hierarchial.hierarchial.reconcile"]], "remove_outliers() (in module autots.tools.transform)": [[6, "autots.tools.transform.remove_outliers"]], "retrieve_closest_indices() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.retrieve_closest_indices"]], "retrieve_transformer() (autots.tools.transform.generaltransformer class method)": [[6, "autots.tools.transform.GeneralTransformer.retrieve_transformer"]], "rolling_mean() (in module autots.tools.impute)": [[6, "autots.tools.impute.rolling_mean"]], "rolling_window_view() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.rolling_window_view"]], "score_anomalies() (autots.tools.thresholding.nonparametricthreshold method)": [[6, "autots.tools.thresholding.NonparametricThreshold.score_anomalies"]], "score_to_anomaly() (autots.tools.transform.anomalyremoval method)": [[6, "autots.tools.transform.AnomalyRemoval.score_to_anomaly"]], "seasonal_independent_match() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.seasonal_independent_match"]], "seasonal_int() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.seasonal_int"]], "seasonal_window_match() (in module autots.tools.seasonal)": [[6, "autots.tools.seasonal.seasonal_window_match"]], "set_n_jobs() (in module autots.tools.cpu_count)": [[6, "autots.tools.cpu_count.set_n_jobs"]], "simple_context_slicer() (in module autots.tools.transform)": [[6, "autots.tools.transform.simple_context_slicer"]], "simple_train_test_split() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.simple_train_test_split"]], "sk_outliers() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.sk_outliers"]], "sliding_window_view() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.sliding_window_view"]], "smooth() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.smooth"]], "smooth() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.smooth"]], "smooth_current() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.smooth_current"]], "split_digits_and_non_digits() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.split_digits_and_non_digits"]], "subset_series() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.subset_series"]], "to_jd() (in module autots.tools.calendar)": [[6, "autots.tools.calendar.to_jd"]], "todeg() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.todeg"]], "torad() (in module autots.tools.lunar)": [[6, "autots.tools.lunar.torad"]], "transform() (autots.tools.hierarchial.hierarchial method)": [[6, "autots.tools.hierarchial.hierarchial.transform"]], "transform() (autots.tools.shaping.numerictransformer method)": [[6, "autots.tools.shaping.NumericTransformer.transform"]], "transform() (autots.tools.transform.alignlastdiff method)": [[6, "autots.tools.transform.AlignLastDiff.transform"]], "transform() (autots.tools.transform.alignlastvalue method)": [[6, "autots.tools.transform.AlignLastValue.transform"]], "transform() (autots.tools.transform.anomalyremoval method)": [[6, 
"autots.tools.transform.AnomalyRemoval.transform"]], "transform() (autots.tools.transform.bkbandpassfilter method)": [[6, "autots.tools.transform.BKBandpassFilter.transform"]], "transform() (autots.tools.transform.btcd method)": [[6, "autots.tools.transform.BTCD.transform"]], "transform() (autots.tools.transform.centerlastvalue method)": [[6, "autots.tools.transform.CenterLastValue.transform"]], "transform() (autots.tools.transform.centersplit method)": [[6, "autots.tools.transform.CenterSplit.transform"]], "transform() (autots.tools.transform.clipoutliers method)": [[6, "autots.tools.transform.ClipOutliers.transform"]], "transform() (autots.tools.transform.cointegration method)": [[6, "autots.tools.transform.Cointegration.transform"]], "transform() (autots.tools.transform.cumsumtransformer method)": [[6, "autots.tools.transform.CumSumTransformer.transform"]], "transform() (autots.tools.transform.datepartregressiontransformer method)": [[6, "autots.tools.transform.DatepartRegressionTransformer.transform"]], "transform() (autots.tools.transform.detrend method)": [[6, "autots.tools.transform.Detrend.transform"]], "transform() (autots.tools.transform.diffsmoother method)": [[6, "autots.tools.transform.DiffSmoother.transform"]], "transform() (autots.tools.transform.differencedtransformer method)": [[6, "autots.tools.transform.DifferencedTransformer.transform"]], "transform() (autots.tools.transform.discretize method)": [[6, "autots.tools.transform.Discretize.transform"]], "transform() (autots.tools.transform.ewmafilter method)": [[6, "autots.tools.transform.EWMAFilter.transform"]], "transform() (autots.tools.transform.emptytransformer method)": [[6, "autots.tools.transform.EmptyTransformer.transform"]], "transform() (autots.tools.transform.fftdecomposition method)": [[6, "autots.tools.transform.FFTDecomposition.transform"]], "transform() (autots.tools.transform.fftfilter method)": [[6, "autots.tools.transform.FFTFilter.transform"]], "transform() (autots.tools.transform.fastica method)": [[6, "autots.tools.transform.FastICA.transform"]], "transform() (autots.tools.transform.generaltransformer method)": [[6, "autots.tools.transform.GeneralTransformer.transform"]], "transform() (autots.tools.transform.hpfilter method)": [[6, "autots.tools.transform.HPFilter.transform"]], "transform() (autots.tools.transform.historicvalues method)": [[6, "autots.tools.transform.HistoricValues.transform"]], "transform() (autots.tools.transform.holidaytransformer method)": [[6, "autots.tools.transform.HolidayTransformer.transform"]], "transform() (autots.tools.transform.intermittentoccurrence method)": [[6, "autots.tools.transform.IntermittentOccurrence.transform"]], "transform() (autots.tools.transform.kalmansmoothing method)": [[6, "autots.tools.transform.KalmanSmoothing.transform"]], "transform() (autots.tools.transform.levelshiftmagic method)": [[6, "autots.tools.transform.LevelShiftMagic.transform"]], "transform() (autots.tools.transform.locallineartrend method)": [[6, "autots.tools.transform.LocalLinearTrend.transform"]], "transform() (autots.tools.transform.meandifference method)": [[6, "autots.tools.transform.MeanDifference.transform"]], "transform() (autots.tools.transform.pca method)": [[6, "autots.tools.transform.PCA.transform"]], "transform() (autots.tools.transform.pctchangetransformer method)": [[6, "autots.tools.transform.PctChangeTransformer.transform"]], "transform() (autots.tools.transform.positiveshift method)": [[6, "autots.tools.transform.PositiveShift.transform"]], "transform() 
(autots.tools.transform.regressionfilter method)": [[6, "autots.tools.transform.RegressionFilter.transform"]], "transform() (autots.tools.transform.replaceconstant method)": [[6, "autots.tools.transform.ReplaceConstant.transform"]], "transform() (autots.tools.transform.rollingmeantransformer method)": [[6, "autots.tools.transform.RollingMeanTransformer.transform"]], "transform() (autots.tools.transform.round method)": [[6, "autots.tools.transform.Round.transform"]], "transform() (autots.tools.transform.stlfilter method)": [[6, "autots.tools.transform.STLFilter.transform"]], "transform() (autots.tools.transform.scipyfilter method)": [[6, "autots.tools.transform.ScipyFilter.transform"]], "transform() (autots.tools.transform.seasonaldifference method)": [[6, "autots.tools.transform.SeasonalDifference.transform"]], "transform() (autots.tools.transform.sintrend method)": [[6, "autots.tools.transform.SinTrend.transform"]], "transform() (autots.tools.transform.slice method)": [[6, "autots.tools.transform.Slice.transform"]], "transform() (autots.tools.transform.statsmodelsfilter method)": [[6, "autots.tools.transform.StatsmodelsFilter.transform"]], "transformer_list_to_dict() (in module autots.tools.transform)": [[6, "autots.tools.transform.transformer_list_to_dict"]], "trimmed_mean() (in module autots.tools.percentile)": [[6, "autots.tools.percentile.trimmed_mean"]], "unvectorize_state() (autots.tools.fast_kalman.gaussian method)": [[6, "autots.tools.fast_kalman.Gaussian.unvectorize_state"]], "unvectorize_vars() (autots.tools.fast_kalman.gaussian method)": [[6, "autots.tools.fast_kalman.Gaussian.unvectorize_vars"]], "update() (autots.tools.fast_kalman.kalmanfilter method)": [[6, "autots.tools.fast_kalman.KalmanFilter.update"]], "update() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.update"]], "update_with_nan_check() (in module autots.tools.fast_kalman)": [[6, "autots.tools.fast_kalman.update_with_nan_check"]], "values_to_anomalies() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.values_to_anomalies"]], "wide_to_3d() (in module autots.tools.shaping)": [[6, "autots.tools.shaping.wide_to_3d"]], "window_id_maker() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_id_maker"]], "window_lin_reg() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_lin_reg"]], "window_lin_reg_mean() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_lin_reg_mean"]], "window_lin_reg_mean_no_nan() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_lin_reg_mean_no_nan"]], "window_maker() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_maker"]], "window_maker_2() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_maker_2"]], "window_maker_3() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_maker_3"]], "window_sum_mean() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_sum_mean"]], "window_sum_mean_nan_tail() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_sum_mean_nan_tail"]], "window_sum_nan_mean() (in module autots.tools.window_functions)": [[6, "autots.tools.window_functions.window_sum_nan_mean"]], "zscore_survival_function() (in module autots.tools.anomaly_utils)": [[6, "autots.tools.anomaly_utils.zscore_survival_function"]]}}) \ No newline 
at end of file diff --git a/docs/build/html/source/autots.datasets.html b/docs/build/html/source/autots.datasets.html index f19983ca..baf4f2bd 100644 --- a/docs/build/html/source/autots.datasets.html +++ b/docs/build/html/source/autots.datasets.html @@ -1,17 +1,25 @@ - - + + + + + autots.datasets package — AutoTS 0.6.10 documentation - - - - - + + + + + @@ -33,18 +41,18 @@
          -

          autots.datasets package

          +

          autots.datasets package

          -

          Submodules

          +

          Submodules

          -

          autots.datasets.fred module

          +

          autots.datasets.fred module

          FRED (Federal Reserve Economic Data) Data Import

          requires API key from FRED and pip install fredapi

          -autots.datasets.fred.get_fred_data(fredkey: str, SeriesNameDict: dict | None = None, long=True, observation_start=None, sleep_seconds: int = 1, **kwargs)
          +autots.datasets.fred.get_fred_data(fredkey: str, SeriesNameDict: dict | None = None, long=True, observation_start=None, sleep_seconds: int = 1, **kwargs)

Imports data from the Federal Reserve (FRED). For simplest results, make sure requested series are all of the same frequency.
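A minimal sketch of get_fred_data based on the signature above. The API key and the series-to-name mapping below are placeholders (assumptions), and fredapi must be installed as noted in the module docstring.

    from autots.datasets.fred import get_fred_data

    fred_df = get_fred_data(
        fredkey="YOUR_FRED_API_KEY",   # placeholder; requires a free FRED API key
        SeriesNameDict={"DGS10": "10yr_treasury", "T5YIE": "5yr_inflation_expectation"},
        long=False,                    # wide format, one column per requested series
    )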

          @@ -64,11 +72,11 @@

          Submodules -

          Module contents

          +

          Module contents

          Tools for Importing Sample Data

          -autots.datasets.load_artificial(long=False, date_start=None, date_end=None)
          +autots.datasets.load_artificial(long=False, date_start=None, date_end=None)

Load artificially generated series from random distributions.

          Parameters:
          @@ -83,7 +91,7 @@

          Submodules
          -autots.datasets.load_daily(long: bool = True)
          +autots.datasets.load_daily(long: bool = True)

          Daily sample data.

          wiki = [

          “Germany”, “Thanksgiving”, ‘all’, ‘Microsoft’, @@ -106,13 +114,13 @@

          Submodules
          -autots.datasets.load_hourly(long: bool = True)
          +autots.datasets.load_hourly(long: bool = True)

          Traffic data from the MN DOT via the UCI data repository.

          -autots.datasets.load_linear(long=False, shape=None, start_date: str = '2021-01-01', introduce_nan: float | None = None, introduce_random: float | None = None, random_seed: int = 123)
          +autots.datasets.load_linear(long=False, shape=None, start_date: str = '2021-01-01', introduce_nan: float | None = None, introduce_random: float | None = None, random_seed: int = 123)

Create a dataset of simple linear series for testing edge cases.

          Parameters:
          @@ -130,7 +138,7 @@

          Submodules
          -autots.datasets.load_live_daily(long: bool = False, observation_start: str | None = None, observation_end: str | None = None, fred_key: str | None = None, fred_series=['DGS10', 'T5YIE', 'SP500', 'DCOILWTICO', 'DEXUSEU', 'WPU0911'], tickers: list = ['MSFT'], trends_list: list = ['forecasting', 'cycling', 'microsoft'], trends_geo: str = 'US', weather_data_types: list = ['AWND', 'WSF2', 'TAVG'], weather_stations: list = ['USW00094846', 'USW00014925'], weather_years: int = 5, london_air_stations: list = ['CT3', 'SK8'], london_air_species: str = 'PM25', london_air_days: int = 180, earthquake_days: int = 180, earthquake_min_magnitude: int = 5, gsa_key: str | None = None, gov_domain_list=['nasa.gov'], gov_domain_limit: int = 600, wikipedia_pages: list = ['Microsoft_Office', 'List_of_highest-grossing_films'], wiki_language: str = 'en', weather_event_types=['%28Z%29+Winter+Weather', '%28Z%29+Winter+Storm'], caiso_query: str = 'ENE_SLRS', timeout: float = 300.05, sleep_seconds: int = 2, **kwargs)
          +autots.datasets.load_live_daily(long: bool = False, observation_start: str | None = None, observation_end: str | None = None, fred_key: str | None = None, fred_series=['DGS10', 'T5YIE', 'SP500', 'DCOILWTICO', 'DEXUSEU', 'WPU0911'], tickers: list = ['MSFT'], trends_list: list = ['forecasting', 'cycling', 'microsoft'], trends_geo: str = 'US', weather_data_types: list = ['AWND', 'WSF2', 'TAVG'], weather_stations: list = ['USW00094846', 'USW00014925'], weather_years: int = 5, london_air_stations: list = ['CT3', 'SK8'], london_air_species: str = 'PM25', london_air_days: int = 180, earthquake_days: int = 180, earthquake_min_magnitude: int = 5, gsa_key: str | None = None, gov_domain_list=['nasa.gov'], gov_domain_limit: int = 600, wikipedia_pages: list = ['Microsoft_Office', 'List_of_highest-grossing_films'], wiki_language: str = 'en', weather_event_types=['%28Z%29+Winter+Weather', '%28Z%29+Winter+Storm'], caiso_query: str = 'ENE_SLRS', timeout: float = 300.05, sleep_seconds: int = 2, **kwargs)

Generates a dataframe of data up to the present day. Requires an active internet connection. Be respectful of these free data sources by not calling them too often or too heavily. Pass None instead of a specification list to exclude a data source.
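A hedged sketch of load_live_daily following the note above: sources you do not want are excluded by passing None, and FRED series are skipped here because no fred_key is supplied. Requires an internet connection; exact availability of each source may vary.

    from autots.datasets import load_live_daily

    live_df = load_live_daily(
        long=False,
        fred_key=None,                      # skip FRED (no key supplied)
        tickers=None,                       # skip stock tickers
        trends_list=None,                   # skip Google Trends
        weather_stations=None,              # skip NOAA weather
        london_air_stations=None,           # skip London air quality
        gov_domain_list=None,               # skip gov web traffic
        wikipedia_pages=["Microsoft_Office"],
    )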

          @@ -167,19 +175,19 @@

          Submodules
          -autots.datasets.load_monthly(long: bool = True)
          +autots.datasets.load_monthly(long: bool = True)

          Federal Reserve of St. Louis monthly economic indicators.

          -autots.datasets.load_sine(long=False, shape=None, start_date: str = '2021-01-01', introduce_random: float | None = None, random_seed: int = 123)
          +autots.datasets.load_sine(long=False, shape=None, start_date: str = '2021-01-01', introduce_random: float | None = None, random_seed: int = 123)

Create a dataset of sine-wave series for testing edge cases.

          -autots.datasets.load_weekdays(long: bool = False, categorical: bool = True, periods: int = 180)
          +autots.datasets.load_weekdays(long: bool = False, categorical: bool = True, periods: int = 180)

          Test edge cases by creating a Series with values as day of week.

          Parameters:
          @@ -195,19 +203,19 @@

          Submodules
          -autots.datasets.load_weekly(long: bool = True)
          +autots.datasets.load_weekly(long: bool = True)

          Weekly petroleum industry data from the EIA.

          -autots.datasets.load_yearly(long: bool = True)
          +autots.datasets.load_yearly(long: bool = True)

          Federal Reserve of St. Louis annual economic indicators.

          -autots.datasets.load_zeroes(long=False, shape=None, start_date: str = '2021-01-01')
          +autots.datasets.load_zeroes(long=False, shape=None, start_date: str = '2021-01-01')

          Create a dataset of just zeroes for testing edge case.
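A minimal sketch of the sample loaders documented in this module. The long-format column names ('datetime', 'series_id', 'value') follow the AutoTS tutorial and are an assumption here, not taken from this page.

    from autots import load_daily, load_monthly

    df_wide = load_daily(long=False)    # one column per series, DatetimeIndex
    df_long = load_monthly(long=True)   # stacked: datetime, series_id, value
    print(df_wide.shape, df_long.columns.tolist())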

          @@ -304,21 +312,5 @@

          Quick search

          - - \ No newline at end of file diff --git a/docs/build/html/source/autots.evaluator.html b/docs/build/html/source/autots.evaluator.html index 4ff67144..9a19062a 100644 --- a/docs/build/html/source/autots.evaluator.html +++ b/docs/build/html/source/autots.evaluator.html @@ -1,17 +1,25 @@ - - + + + + + autots.evaluator package — AutoTS 0.6.10 documentation - - - - - + + + + + @@ -33,22 +41,22 @@
          -

          autots.evaluator package

          +

          autots.evaluator package

          -

          Submodules

          +

          Submodules

          -

          autots.evaluator.anomaly_detector module

          +

          autots.evaluator.anomaly_detector module

          Anomaly Detector Created on Mon Jul 18 14:19:55 2022

          @author: Colin

          -class autots.evaluator.anomaly_detector.AnomalyDetector(output='multivariate', method='zscore', transform_dict={'transformation_params': {0: {'datepart_method': 'simple_3', 'regression_model': {'model': 'ElasticNet', 'model_params': {}}}}, 'transformations': {0: 'DatepartRegression'}}, forecast_params=None, method_params={}, eval_period=None, isolated_only=False, n_jobs=1)
          +class autots.evaluator.anomaly_detector.AnomalyDetector(output='multivariate', method='zscore', transform_dict={'transformation_params': {0: {'datepart_method': 'simple_3', 'regression_model': {'model': 'ElasticNet', 'model_params': {}}}}, 'transformations': {0: 'DatepartRegression'}}, forecast_params=None, method_params={}, eval_period=None, isolated_only=False, n_jobs=1)

          Bases: object

          -detect(df)
          +detect(df)

          All will return -1 for anomalies.

          Parameters:
          @@ -62,18 +70,18 @@

          Submodules
          -fit(df)
          +fit(df)

          -fit_anomaly_classifier()
          +fit_anomaly_classifier()

          Fit a model to predict if a score is an anomaly.

          -static get_new_params(method='random')
          +static get_new_params(method='random')

          Generate random new parameter combinations.

          Parameters:
          @@ -84,12 +92,12 @@

          Submodules
          -plot(series_name=None, title=None, plot_kwargs={})
          +plot(series_name=None, title=None, plot_kwargs={})

          -score_to_anomaly(scores)
          +score_to_anomaly(scores)

          A DecisionTree model, used as models are nonstandard (and nonparametric).

          @@ -97,11 +105,11 @@

          Submodules
          -class autots.evaluator.anomaly_detector.HolidayDetector(anomaly_detector_params={}, threshold=0.8, min_occurrences=2, splash_threshold=0.65, use_dayofmonth_holidays=True, use_wkdom_holidays=True, use_wkdeom_holidays=True, use_lunar_holidays=True, use_lunar_weekday=False, use_islamic_holidays=True, use_hebrew_holidays=True, output: str = 'multivariate', n_jobs: int = 1)
          +class autots.evaluator.anomaly_detector.HolidayDetector(anomaly_detector_params={}, threshold=0.8, min_occurrences=2, splash_threshold=0.65, use_dayofmonth_holidays=True, use_wkdom_holidays=True, use_wkdeom_holidays=True, use_lunar_holidays=True, use_lunar_weekday=False, use_islamic_holidays=True, use_hebrew_holidays=True, output: str = 'multivariate', n_jobs: int = 1)

          Bases: object

          -dates_to_holidays(dates, style='flag', holiday_impacts=False)
          +dates_to_holidays(dates, style='flag', holiday_impacts=False)

          Populate date information for a given pd.DatetimeIndex.

          Parameters:
          @@ -122,39 +130,39 @@

          Submodules
          -detect(df)
          +detect(df)

          Run holiday detection. Input wide-style pandas time series.

          -fit(df)
          +fit(df)
          -static get_new_params(method='random')
          +static get_new_params(method='random')
          -plot(series_name=None, include_anomalies=True, title=None, plot_kwargs={}, series=None)
          +plot(series_name=None, include_anomalies=True, title=None, plot_kwargs={}, series=None)
          -plot_anomaly(kwargs={})
          +plot_anomaly(kwargs={})

          -

          autots.evaluator.auto_model module

          +

          autots.evaluator.auto_model module

          Mid-level helper functions for AutoTS.

          -autots.evaluator.auto_model.ModelMonster(model: str, parameters: dict = {}, frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', startTimeStamps=None, forecast_length: int = 14, random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)
          +autots.evaluator.auto_model.ModelMonster(model: str, parameters: dict = {}, frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', startTimeStamps=None, forecast_length: int = 14, random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)

          Directs strings and parameters to appropriate model objects.

          Parameters:
          @@ -168,7 +176,7 @@

          Submodules
          -class autots.evaluator.auto_model.ModelPrediction(forecast_length: int, transformation_dict: dict, model_str: str, parameter_dict: dict, frequency: str = 'infer', prediction_interval: float = 0.9, no_negatives: bool = False, constraint: float | None = None, holiday_country: str = 'US', startTimeStamps=None, grouping_ids=None, fail_on_forecast_nan: bool = True, return_model: bool = False, random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, current_model_file: str | None = None, model_count: int = 0, force_gc: bool = False)
          +class autots.evaluator.auto_model.ModelPrediction(forecast_length: int, transformation_dict: dict, model_str: str, parameter_dict: dict, frequency: str = 'infer', prediction_interval: float = 0.9, no_negatives: bool = False, constraint: float | None = None, holiday_country: str = 'US', startTimeStamps=None, grouping_ids=None, fail_on_forecast_nan: bool = True, return_model: bool = False, random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, current_model_file: str | None = None, model_count: int = 0, force_gc: bool = False)

          Bases: ModelObject

          Feed parameters into modeling pipeline. A class object, does NOT work with ensembles.

          @@ -201,24 +209,24 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          -fit_data(df, future_regressor=None)
          +fit_data(df, future_regressor=None)
          -predict(forecast_length=None, future_regressor=None)
          +predict(forecast_length=None, future_regressor=None)

          -autots.evaluator.auto_model.NewGeneticTemplate(model_results, submitted_parameters, sort_column: str = 'Score', sort_ascending: bool = True, max_results: int = 50, max_per_model_class: int = 5, top_n: int = 50, template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], transformer_list: dict = {}, transformer_max_depth: int = 8, models_mode: str = 'default', score_per_series=None, recursive_count=0, model_list=None)
          +autots.evaluator.auto_model.NewGeneticTemplate(model_results, submitted_parameters, sort_column: str = 'Score', sort_ascending: bool = True, max_results: int = 50, max_per_model_class: int = 5, top_n: int = 50, template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], transformer_list: dict = {}, transformer_max_depth: int = 8, models_mode: str = 'default', score_per_series=None, recursive_count=0, model_list=None)

          Return new template given old template with model accuracies.

          “No mating!” - Pattern, Sanderson

          @@ -233,7 +241,7 @@

          Submodules
          -autots.evaluator.auto_model.RandomTemplate(n: int = 10, model_list: list = ['ZeroesNaive', 'LastValueNaive', 'AverageValueNaive', 'GLS', 'GLM', 'ETS'], transformer_list: dict = 'fast', transformer_max_depth: int = 8, models_mode: str = 'default')
          +autots.evaluator.auto_model.RandomTemplate(n: int = 10, model_list: list = ['ZeroesNaive', 'LastValueNaive', 'AverageValueNaive', 'GLS', 'GLM', 'ETS'], transformer_list: dict = 'fast', transformer_max_depth: int = 8, models_mode: str = 'default')

          Returns a template dataframe of randomly generated transformations, models, and hyperparameters.

          Parameters:
          @@ -244,12 +252,12 @@

          Submodules
          -class autots.evaluator.auto_model.TemplateEvalObject(model_results=Empty DataFrame Columns: [] Index: [], per_timestamp_smape=Empty DataFrame Columns: [] Index: [], per_series_metrics=Empty DataFrame Columns: [] Index: [], per_series_mae=None, per_series_rmse=None, per_series_made=None, per_series_contour=None, per_series_spl=None, per_series_mle=None, per_series_imle=None, per_series_maxe=None, per_series_oda=None, per_series_mqae=None, per_series_dwae=None, per_series_ewmae=None, per_series_uwmse=None, per_series_smoothness=None, per_series_mate=None, per_series_matse=None, per_series_wasserstein=None, per_series_dwd=None, model_count: int = 0)
          +class autots.evaluator.auto_model.TemplateEvalObject(model_results=Empty DataFrame Columns: [] Index: [], per_timestamp_smape=Empty DataFrame Columns: [] Index: [], per_series_metrics=Empty DataFrame Columns: [] Index: [], per_series_mae=None, per_series_rmse=None, per_series_made=None, per_series_contour=None, per_series_spl=None, per_series_mle=None, per_series_imle=None, per_series_maxe=None, per_series_oda=None, per_series_mqae=None, per_series_dwae=None, per_series_ewmae=None, per_series_uwmse=None, per_series_smoothness=None, per_series_mate=None, per_series_matse=None, per_series_wasserstein=None, per_series_dwd=None, model_count: int = 0)

          Bases: object

          Object to contain all the failures!.

          -full_mae_ids
          +full_mae_ids

          list of model_ids corresponding to full_mae_errors

          Type:
          @@ -260,7 +268,7 @@

          Submodules
          -full_mae_errors
          +full_mae_errors

          list of numpy arrays of shape (rows, columns) appended in order of validation only provided for ‘mosaic’ ensembling

          @@ -272,18 +280,18 @@

          Submodules
          -concat(another_eval)
          +concat(another_eval)

          Merge another TemplateEvalObject onto this one.

          -load(filename)
          +load(filename)
          -save(filename='initial_results.pickle')
          +save(filename='initial_results.pickle')

          Save results to a file.

          Parameters:
          @@ -296,7 +304,7 @@

          Submodules
          -autots.evaluator.auto_model.TemplateWizard(template, df_train, df_test, weights, model_count: int = 0, ensemble: list = ['mosaic', 'distance'], forecast_length: int = 14, frequency: str = 'infer', prediction_interval: float = 0.9, no_negatives: bool = False, constraint: float | None = None, future_regressor_train=None, future_regressor_forecast=None, holiday_country: str = 'US', startTimeStamps=None, random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, validation_round: int = 0, current_generation: int = 0, max_generations: str = '0', model_interrupt: bool = False, grouping_ids=None, template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], traceback: bool = False, current_model_file: str | None = None, mosaic_used=None, force_gc: bool = False, additional_msg: str = '')
          +autots.evaluator.auto_model.TemplateWizard(template, df_train, df_test, weights, model_count: int = 0, ensemble: list = ['mosaic', 'distance'], forecast_length: int = 14, frequency: str = 'infer', prediction_interval: float = 0.9, no_negatives: bool = False, constraint: float | None = None, future_regressor_train=None, future_regressor_forecast=None, holiday_country: str = 'US', startTimeStamps=None, random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, validation_round: int = 0, current_generation: int = 0, max_generations: str = '0', model_interrupt: bool = False, grouping_ids=None, template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], traceback: bool = False, current_model_file: str | None = None, mosaic_used=None, force_gc: bool = False, additional_msg: str = '')

          Take Template, returns Results.

          There are some who call me… Tim. - Python

          @@ -336,7 +344,7 @@

          Submodules
          -autots.evaluator.auto_model.UniqueTemplates(existing_templates, new_possibilities, selection_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'])
          +autots.evaluator.auto_model.UniqueTemplates(existing_templates, new_possibilities, selection_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'])

          Returns unique dataframe rows from new_possiblities not in existing_templates.

          Parameters:
          @@ -347,7 +355,7 @@

          Submodules
          -autots.evaluator.auto_model.back_forecast(df, model_name, model_param_dict, model_transform_dict, future_regressor_train=None, n_splits: int = 'auto', forecast_length=7, frequency='infer', prediction_interval=0.9, no_negatives=False, constraint=None, holiday_country='US', random_seed=123, n_jobs='auto', verbose=0, eval_periods: int | None = None, current_model_file: str | None = None, force_gc: bool = False, **kwargs)
          +autots.evaluator.auto_model.back_forecast(df, model_name, model_param_dict, model_transform_dict, future_regressor_train=None, n_splits: int = 'auto', forecast_length=7, frequency='infer', prediction_interval=0.9, no_negatives=False, constraint=None, holiday_country='US', random_seed=123, n_jobs='auto', verbose=0, eval_periods: int | None = None, current_model_file: str | None = None, force_gc: bool = False, **kwargs)

          Create forecasts for the historical training data, ie. backcast or back forecast.

          This actually forecasts on historical data, these are not fit model values as are often returned by other packages. As such, this will be slower, but more representative of real world model performance. @@ -364,19 +372,19 @@

          Submodules
          -autots.evaluator.auto_model.create_model_id(model_str: str, parameter_dict: dict = {}, transformation_dict: dict = {})
          +autots.evaluator.auto_model.create_model_id(model_str: str, parameter_dict: dict = {}, transformation_dict: dict = {})

          Create a hash ID which should be unique to the model parameters.

          -autots.evaluator.auto_model.dict_recombination(a: dict, b: dict)
          +autots.evaluator.auto_model.dict_recombination(a: dict, b: dict)

          Recombine two dictionaries with identical keys. Return new dict.

          -autots.evaluator.auto_model.generate_score(model_results, metric_weighting: dict = {}, prediction_interval: float = 0.9)
          +autots.evaluator.auto_model.generate_score(model_results, metric_weighting: dict = {}, prediction_interval: float = 0.9)

          Generate score based on relative accuracies.

          SMAPE - smaller is better MAE - smaller is better @@ -394,19 +402,19 @@

          Submodules
          -autots.evaluator.auto_model.generate_score_per_series(results_object, metric_weighting, total_validations=1, models_to_use=None)
          +autots.evaluator.auto_model.generate_score_per_series(results_object, metric_weighting, total_validations=1, models_to_use=None)

          Score generation on per_series_metrics for ensembles.

          -autots.evaluator.auto_model.horizontal_template_to_model_list(template)
          +autots.evaluator.auto_model.horizontal_template_to_model_list(template)

          helper function to take template dataframe of ensembles to a single list of models.

          -autots.evaluator.auto_model.model_forecast(model_name, model_param_dict, model_transform_dict, df_train, forecast_length: int, frequency: str = 'infer', prediction_interval: float = 0.9, no_negatives: bool = False, constraint: float | None = None, future_regressor_train=None, future_regressor_forecast=None, holiday_country: str = 'US', startTimeStamps=None, grouping_ids=None, fail_on_forecast_nan: bool = True, random_seed: int = 2020, verbose: int = 0, n_jobs: int = 'auto', template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], horizontal_subset: list | None = None, return_model: bool = False, current_model_file: str | None = None, model_count: int = 0, force_gc: bool = False, **kwargs)
          +autots.evaluator.auto_model.model_forecast(model_name, model_param_dict, model_transform_dict, df_train, forecast_length: int, frequency: str = 'infer', prediction_interval: float = 0.9, no_negatives: bool = False, constraint: float | None = None, future_regressor_train=None, future_regressor_forecast=None, holiday_country: str = 'US', startTimeStamps=None, grouping_ids=None, fail_on_forecast_nan: bool = True, random_seed: int = 2020, verbose: int = 0, n_jobs: int = 'auto', template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], horizontal_subset: list | None = None, return_model: bool = False, current_model_file: str | None = None, model_count: int = 0, force_gc: bool = False, **kwargs)

          Takes numeric data, returns numeric forecasts.

          Only one model (albeit potentially an ensemble)! Horizontal ensembles can not be nested, other ensemble types can be.

          @@ -449,25 +457,25 @@

          Submodules
          -autots.evaluator.auto_model.random_model(model_list, model_prob, transformer_list='fast', transformer_max_depth=2, models_mode='random', counter=15, n_models=5, keyword_format=False)
          +autots.evaluator.auto_model.random_model(model_list, model_prob, transformer_list='fast', transformer_max_depth=2, models_mode='random', counter=15, n_models=5, keyword_format=False)

          Generate a random model from a given list of models and probabilities.

          -autots.evaluator.auto_model.remove_leading_zeros(df)
          +autots.evaluator.auto_model.remove_leading_zeros(df)

Accepts a wide dataframe; returns a dataframe with zeroes preceding any non-zero value replaced by NaN.

          -autots.evaluator.auto_model.trans_dict_recomb(dict_array)
          +autots.evaluator.auto_model.trans_dict_recomb(dict_array)

          Recombine two transformation param dictionaries from array of dicts.

          -autots.evaluator.auto_model.unpack_ensemble_models(template, template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], keep_ensemble: bool = True, recursive: bool = False)
          +autots.evaluator.auto_model.unpack_ensemble_models(template, template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], keep_ensemble: bool = True, recursive: bool = False)

Take ensemble models from the template and add them as new rows. Some confusion may exist, as ensembles require both an ‘Ensemble’ column > 0 and the model name ‘Ensemble’.

          @@ -483,17 +491,17 @@

          Submodules
          -autots.evaluator.auto_model.validation_aggregation(validation_results, df_train=None, groupby_cols=['ID', 'Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'])
          +autots.evaluator.auto_model.validation_aggregation(validation_results, df_train=None, groupby_cols=['ID', 'Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'])

          Aggregate a TemplateEvalObject.

          -

          autots.evaluator.auto_ts module

          +

          autots.evaluator.auto_ts module

          Higher-level functions of automated time series modeling.

          -class autots.evaluator.auto_ts.AutoTS(forecast_length: int = 14, frequency: str = 'infer', prediction_interval: float = 0.9, max_generations: int = 20, no_negatives: bool = False, constraint: float | None = None, ensemble: str | None = None, initial_template: str = 'General+Random', random_seed: int = 2022, holiday_country: str = 'US', subset: int | None = None, aggfunc: str = 'first', na_tolerance: float = 1, metric_weighting: dict = {'containment_weighting': 0, 'contour_weighting': 0.01, 'imle_weighting': 0, 'made_weighting': 0.05, 'mae_weighting': 2, 'mage_weighting': 0, 'mle_weighting': 0, 'oda_weighting': 0.001, 'rmse_weighting': 2, 'runtime_weighting': 0.01, 'smape_weighting': 5, 'spl_weighting': 3, 'wasserstein_weighting': 0.01}, drop_most_recent: int = 0, drop_data_older_than_periods: int | None = None, model_list: str = 'default', transformer_list: dict = 'auto', transformer_max_depth: int = 6, models_mode: str = 'random', num_validations: int = 'auto', models_to_validate: float = 0.15, max_per_model_class: int | None = None, validation_method: str = 'backwards', min_allowed_train_percent: float = 0.5, remove_leading_zeroes: bool = False, prefill_na: str | None = None, introduce_na: bool | None = None, preclean: dict | None = None, model_interrupt: bool = True, generation_timeout: int | None = None, current_model_file: str | None = None, force_gc: bool = False, verbose: int = 1, n_jobs: int = 0.5)
          +class autots.evaluator.auto_ts.AutoTS(forecast_length: int = 14, frequency: str = 'infer', prediction_interval: float = 0.9, max_generations: int = 20, no_negatives: bool = False, constraint: float | None = None, ensemble: str | None = None, initial_template: str = 'General+Random', random_seed: int = 2022, holiday_country: str = 'US', subset: int | None = None, aggfunc: str = 'first', na_tolerance: float = 1, metric_weighting: dict = {'containment_weighting': 0, 'contour_weighting': 0.01, 'imle_weighting': 0, 'made_weighting': 0.05, 'mae_weighting': 2, 'mage_weighting': 0, 'mle_weighting': 0, 'oda_weighting': 0.001, 'rmse_weighting': 2, 'runtime_weighting': 0.01, 'smape_weighting': 5, 'spl_weighting': 3, 'wasserstein_weighting': 0.01}, drop_most_recent: int = 0, drop_data_older_than_periods: int | None = None, model_list: str = 'default', transformer_list: dict = 'auto', transformer_max_depth: int = 6, models_mode: str = 'random', num_validations: int = 'auto', models_to_validate: float = 0.15, max_per_model_class: int | None = None, validation_method: str = 'backwards', min_allowed_train_percent: float = 0.5, remove_leading_zeroes: bool = False, prefill_na: str | None = None, introduce_na: bool | None = None, preclean: dict | None = None, model_interrupt: bool = True, generation_timeout: int | None = None, current_model_file: str | None = None, force_gc: bool = False, verbose: int = 1, n_jobs: int = 0.5)

          Bases: object

          Automate time series modeling using a genetic algorithm.

          @@ -588,7 +596,7 @@

          Submodules
          -best_model
          +best_model

          DataFrame containing template for the best ranked model

          Type:
          @@ -599,7 +607,7 @@

          Submodules
          -best_model_name
          +best_model_name

          model name

          Type:
          @@ -610,7 +618,7 @@

          Submodules
          -best_model_params
          +best_model_params

          model params

          Type:
          @@ -621,7 +629,7 @@

          Submodules
          -best_model_transformation_params
          +best_model_transformation_params

          transformation parameters

          Type:
          @@ -632,7 +640,7 @@

          Submodules
          -best_model_ensemble
          +best_model_ensemble

          Ensemble type int id

          Type:
          @@ -643,7 +651,7 @@

          Submodules
          -regression_check
          +regression_check

          If True, the best_model uses an input ‘User’ future_regressor

          Type:
          @@ -654,7 +662,7 @@

          Submodules
          -df_wide_numeric
          +df_wide_numeric

          dataframe containing shaped final data, will include preclean

          Type:
          @@ -665,7 +673,7 @@

          Submodules
          -initial_results.model_results
          +initial_results.model_results

          contains a collection of result metrics

          Type:
          @@ -676,7 +684,7 @@

          Submodules
          -score_per_series
          +score_per_series

          generated score of metrics given per input series, if horizontal ensembles

          Type:
          @@ -712,7 +720,7 @@

          Submodules
          -back_forecast(series=None, n_splits: int = 'auto', tail: int = 'auto', verbose: int = 0)
          +back_forecast(series=None, n_splits: int = 'auto', tail: int = 'auto', verbose: int = 0)

Create forecasts for the historical training data, i.e. backcast or back forecast. OUT OF SAMPLE

          This actually forecasts on historical data, these are not fit model values as are often returned by other packages. As such, this will be slower, but more representative of real world model performance. @@ -729,18 +737,18 @@
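A brief sketch of back_forecast on an AutoTS instance that is assumed to have already been fit (named `model` here); the attribute on the returned prediction object follows the usual AutoTS convention and is an assumption.

    backcast = model.back_forecast(n_splits="auto", tail=90, verbose=0)
    historic_forecast = backcast.forecast   # out-of-sample forecasts over the training history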

          Submodules
          -best_model_per_series_mape()
          +best_model_per_series_mape()

This isn’t quite classic MAPE, but is a percentage mean error intended for quick visuals, not final statistics (see model.results()).

          -best_model_per_series_score()
          +best_model_per_series_score()
          -diagnose_params(target='runtime', waterfall_plots=True)
          +diagnose_params(target='runtime', waterfall_plots=True)

          Attempt to explain params causing measured outcomes using shap and linear regression coefficients.

          Parameters:
          @@ -754,20 +762,20 @@

          Submodules
          -expand_horizontal()
          +expand_horizontal()

          Enables expanding horizontal models trained on a subset to full data. Reruns template models and generates new template.

          -export_best_model(filename, **kwargs)
          +export_best_model(filename, **kwargs)

          Basically the same as export_template but only ever the one best model.

          -export_template(filename=None, models: str = 'best', n: int = 40, max_per_model_class: int | None = None, include_results: bool = False, unpack_ensembles: bool = False, min_metrics: list = ['smape', 'spl'], max_metrics: list | None = None)
          +export_template(filename=None, models: str = 'best', n: int = 40, max_per_model_class: int | None = None, include_results: bool = False, unpack_ensembles: bool = False, min_metrics: list = ['smape', 'spl'], max_metrics: list | None = None)

          Export top results as a reusable template.

          Parameters:
          @@ -789,7 +797,7 @@

          Submodules
          -failure_rate(result_set: str = 'initial')
          +failure_rate(result_set: str = 'initial')

          Return fraction of models passing with exceptions.

          Parameters:
          @@ -803,7 +811,7 @@

          Submodules
          -fit(df, date_col: str | None = None, value_col: str | None = None, id_col: str | None = None, future_regressor=None, weights: dict = {}, result_file: str | None = None, grouping_ids=None, validation_indexes: list | None = None)
          +fit(df, date_col: str | None = None, value_col: str | None = None, id_col: str | None = None, future_regressor=None, weights: dict = {}, result_file: str | None = None, grouping_ids=None, validation_indexes: list | None = None)

          Train algorithm given data supplied.

          Parameters:
          @@ -828,13 +836,13 @@

          Submodules
          -fit_data(df, date_col=None, value_col=None, id_col=None, future_regressor=None, weights={})
          +fit_data(df, date_col=None, value_col=None, id_col=None, future_regressor=None, weights={})

          Part of the setup that involves fitting the initial data but not running any models.

          -get_metric_corr(percent_best=0.1)
          +get_metric_corr(percent_best=0.1)

          Returns a dataframe of correlation among evaluation metrics across evaluations.

          Parameters:
          @@ -845,24 +853,24 @@

          Submodules
          -static get_new_params(method='random')
          +static get_new_params(method='random')

          Randomly generate new parameters for the class.

          -horizontal_per_generation()
          +horizontal_per_generation()
          -horizontal_to_df()
          +horizontal_to_df()

          helper function for plotting.

          -import_best_model(import_target, enforce_model_list: bool = True, include_ensemble: bool = True)
          +import_best_model(import_target, enforce_model_list: bool = True, include_ensemble: bool = True)

          Load a best model, overriding any existing setting.

          Parameters:
          @@ -873,7 +881,7 @@

          Submodules
          -import_results(filename)
          +import_results(filename)

          Add results from another run on the same data.

          Input can be filename with .csv or .pickle. or can be a DataFrame of model results or a full TemplateEvalObject

          @@ -881,7 +889,7 @@

          Submodules
          -import_template(filename: str, method: str = 'add_on', enforce_model_list: bool = True, include_ensemble: bool = False, include_horizontal: bool = False, force_validation: bool = False)
          +import_template(filename: str, method: str = 'add_on', enforce_model_list: bool = True, include_ensemble: bool = False, include_horizontal: bool = False, force_validation: bool = False)

          Import a previously exported template of model parameters. Must be done before the AutoTS object is .fit().
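A sketch of the template round trip documented by export_template and import_template: export the best models from a fitted AutoTS instance (assumed fitted and named `model`), then seed a new search with them before calling .fit(). The filename is a placeholder.

    from autots import AutoTS

    model.export_template("autots_template.csv", models="best", n=20, max_per_model_class=3)

    new_model = AutoTS(forecast_length=14, frequency="infer", max_generations=5)
    new_model = new_model.import_template(
        "autots_template.csv", method="only", enforce_model_list=True
    )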

          @@ -901,36 +909,36 @@

          Submodules
          -list_failed_model_types()
          +list_failed_model_types()

          Return a list of model types (ie ETS, LastValueNaive) that failed. If all had at least one success, then return an empty list.

          -load_template(filename)
          +load_template(filename)

Helper function for just loading the file part of import_template.

          -mosaic_to_df()
          +mosaic_to_df()

          Helper function to create a readable df of models in mosaic.

          -parse_best_model()
          +parse_best_model()
          -plot_back_forecast(**kwargs)
          +plot_back_forecast(**kwargs)
          -plot_backforecast(series=None, n_splits: int = 'auto', start_date='auto', title=None, alpha=0.25, facecolor='black', loc='upper left', **kwargs)
          +plot_backforecast(series=None, n_splits: int = 'auto', start_date='auto', title=None, alpha=0.25, facecolor='black', loc='upper left', **kwargs)

          Plot the historical data and fit forecast on historic. Out of sample in chunks = forecast_length by default.

          Parameters:
          @@ -947,7 +955,7 @@

          Submodules
          -plot_generation_loss(title='Single Model Accuracy Gain Over Generations', **kwargs)
          +plot_generation_loss(title='Single Model Accuracy Gain Over Generations', **kwargs)

          Plot improvement in accuracy over generations. Note: this is only “one size fits all” accuracy and doesn’t account for the benefits seen for ensembling.

          @@ -960,7 +968,7 @@

          Submodules
          -plot_horizontal(max_series: int = 20, title='Model Types Chosen by Series', **kwargs)
          +plot_horizontal(max_series: int = 20, title='Model Types Chosen by Series', **kwargs)

          Simple plot to visualize assigned series: models.

          Note that for ‘mosaic’ ensembles, it only plots the type of the most common model_id for that series, or the first if all are mode.

          @@ -975,19 +983,19 @@

          Submodules
          -plot_horizontal_model_count(color_list=None, top_n: int = 20, title='Most Frequently Chosen Models', **kwargs)
          +plot_horizontal_model_count(color_list=None, top_n: int = 20, title='Most Frequently Chosen Models', **kwargs)

          Plots most common models. Does not factor in nested in non-horizontal Ensembles.

          -plot_horizontal_per_generation(title='Horizontal Ensemble Accuracy Gain (first eval sample only)', **kwargs)
          +plot_horizontal_per_generation(title='Horizontal Ensemble Accuracy Gain (first eval sample only)', **kwargs)

          Plot how well the horizontal ensembles would do after each new generation. Slow.

          -plot_horizontal_transformers(method='transformers', color_list=None, **kwargs)
          +plot_horizontal_transformers(method='transformers', color_list=None, **kwargs)

          Simple plot to visualize transformers used. Note this doesn’t capture transformers nested in simple ensembles.

          @@ -1003,7 +1011,7 @@

          Submodules
          -plot_metric_corr(cols=None, percent_best=0.1)
          +plot_metric_corr(cols=None, percent_best=0.1)

          Plot correlation in results among metrics. The metrics that are highly correlated are those that mostly the unscaled ones

          @@ -1018,7 +1026,7 @@

          Submodules
          -plot_per_series_error(title: str = 'Top Series Contributing Score Error', max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', upper_clip: float = 1000, **kwargs)
          +plot_per_series_error(title: str = 'Top Series Contributing Score Error', max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', upper_clip: float = 1000, **kwargs)

          Plot which series are contributing most to error (Score) of final model. Avg of validations for best_model

          Parameters:
          @@ -1038,7 +1046,7 @@

          Submodules
          -plot_per_series_mape(title: str | None = None, max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', **kwargs)
          +plot_per_series_mape(title: str | None = None, max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', **kwargs)

          Plot which series are contributing most to SMAPE of final model. Avg of validations for best_model

          Parameters:
          @@ -1057,19 +1065,19 @@

          Submodules
          -plot_per_series_smape(title: str | None = None, max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', **kwargs)
          +plot_per_series_smape(title: str | None = None, max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', **kwargs)

          To be backwards compatible, not necessarily maintained, plot_per_series_mape is to be preferred.

          -plot_transformer_failure_rate()
          +plot_transformer_failure_rate()

          Failure Rate per Transformer type (ignoring ensembles), failure may be due to other model or transformer.

          -plot_validations(df_wide=None, models=None, series=None, title=None, start_date='auto', end_date='auto', subset=None, compare_horizontal=False, colors=None, include_bounds=True, alpha=0.35, start_color='darkred', end_color='#A2AD9C', **kwargs)
          +plot_validations(df_wide=None, models=None, series=None, title=None, start_date='auto', end_date='auto', subset=None, compare_horizontal=False, colors=None, include_bounds=True, alpha=0.35, start_color='darkred', end_color='#A2AD9C', **kwargs)

          Similar to plot_backforecast but using the model’s validation segments specifically. Must reforecast. Saves results to self.validation_forecasts and caches. Set that to None to force rerun otherwise it uses stored (when models is the same). ‘chosen’ refers to best_model_id, the model chosen to run for predict @@ -1094,7 +1102,7 @@

          Submodules
          -predict(forecast_length: int = 'self', prediction_interval: float = 'self', future_regressor=None, hierarchy=None, just_point_forecast: bool = False, fail_on_forecast_nan: bool = True, verbose: int = 'self', df=None)
          +predict(forecast_length: int = 'self', prediction_interval: float = 'self', future_regressor=None, hierarchy=None, just_point_forecast: bool = False, fail_on_forecast_nan: bool = True, verbose: int = 'self', df=None)

          Generate forecast data immediately following dates of index supplied to .fit().

If using a model from the update_fit list, with no ensembling, the underlying model will not be retrained when used as below with a single prediction interval. This is designed for high-speed forecasting; full retraining is best when there is sufficient time. @@ -1137,7 +1145,7 @@
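A compact end-to-end sketch of the fit/predict workflow documented here. The small search settings are only to keep the run quick and are not a recommended configuration; 'superfast' is assumed to be one of the small preset model lists.

    from autots import AutoTS, load_daily

    df = load_daily(long=False)
    model = AutoTS(
        forecast_length=14,
        frequency="infer",
        model_list="superfast",   # small preset list for speed (assumption)
        max_generations=2,
        num_validations=1,
    )
    model = model.fit(df)
    prediction = model.predict()
    point_forecast = prediction.forecast
    upper, lower = prediction.upper_forecast, prediction.lower_forecast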

          Submodules
          -results(result_set: str = 'initial')
          +results(result_set: str = 'initial')

          Convenience function to return tested models table.

          Parameters:
          @@ -1148,25 +1156,25 @@

          Submodules
          -retrieve_validation_forecasts(models=None, compare_horizontal=False, id_name='SeriesID', value_name='Value', interval_name='PredictionInterval')
          +retrieve_validation_forecasts(models=None, compare_horizontal=False, id_name='SeriesID', value_name='Value', interval_name='PredictionInterval')

          -save_template(filename, export_template, **kwargs)
          +save_template(filename, export_template, **kwargs)

          Helper function for the save part of export_template.

          -validation_agg()
          +validation_agg()

          -autots.evaluator.auto_ts.error_correlations(all_result, result: str = 'corr')
          +autots.evaluator.auto_ts.error_correlations(all_result, result: str = 'corr')

          Onehot encode AutoTS result df and return df or correlation with errors.

          Parameters:
          @@ -1180,22 +1188,22 @@

          Submodules
          -autots.evaluator.auto_ts.fake_regressor(df, forecast_length: int = 14, date_col: str | None = None, value_col: str | None = None, id_col: str | None = None, frequency: str = 'infer', aggfunc: str = 'first', drop_most_recent: int = 0, na_tolerance: float = 0.95, drop_data_older_than_periods: int = 100000, dimensions: int = 1, verbose: int = 0)
          +autots.evaluator.auto_ts.fake_regressor(df, forecast_length: int = 14, date_col: str | None = None, value_col: str | None = None, id_col: str | None = None, frequency: str = 'infer', aggfunc: str = 'first', drop_most_recent: int = 0, na_tolerance: float = 0.95, drop_data_older_than_periods: int = 100000, dimensions: int = 1, verbose: int = 0)

          Create a fake regressor of random numbers for testing purposes.

          -

          autots.evaluator.benchmark module

          +

          autots.evaluator.benchmark module

          Created on Fri Nov 5 13:45:01 2021

          @author: Colin

          -class autots.evaluator.benchmark.Benchmark
          +class autots.evaluator.benchmark.Benchmark

          Bases: object

          -run(n_jobs: int = 'auto', times: int = 3, random_seed: int = 123, base_models_only=False, verbose: int = 0)
          +run(n_jobs: int = 'auto', times: int = 3, random_seed: int = 123, base_models_only=False, verbose: int = 0)

          Run benchmark.

          Parameters:
          @@ -1213,12 +1221,12 @@

          Submodules -

          autots.evaluator.event_forecasting module

          +

          autots.evaluator.event_forecasting module

Generate probabilities of forecasts crossing limit thresholds. Created on Thu Jan 27 13:36:18 2022

          -class autots.evaluator.event_forecasting.EventRiskForecast(df_train, forecast_length, frequency: str = 'infer', prediction_interval=0.9, lower_limit=0.05, upper_limit=0.95, model_name='UnivariateMotif', model_param_dict={'distance_metric': 'euclidean', 'k': 10, 'pointed_method': 'median', 'return_result_windows': True, 'window': 14}, model_transform_dict={'fillna': 'pchip', 'transformation_params': {'0': {'method': 0.5}, '1': {}, '2': {'fixed': False, 'window': 7}, '3': {}}, 'transformations': {'0': 'Slice', '1': 'DifferencedTransformer', '2': 'RollingMeanTransformer', '3': 'MaxAbsScaler'}}, model_forecast_kwargs={'max_generations': 30, 'n_jobs': 'auto', 'random_seed': 321, 'verbose': 1}, future_regressor_train=None, future_regressor_forecast=None)
          +class autots.evaluator.event_forecasting.EventRiskForecast(df_train, forecast_length, frequency: str = 'infer', prediction_interval=0.9, lower_limit=0.05, upper_limit=0.95, model_name='UnivariateMotif', model_param_dict={'distance_metric': 'euclidean', 'k': 10, 'pointed_method': 'median', 'return_result_windows': True, 'window': 14}, model_transform_dict={'fillna': 'pchip', 'transformation_params': {'0': {'method': 0.5}, '1': {}, '2': {'fixed': False, 'window': 7}, '3': {}}, 'transformations': {'0': 'Slice', '1': 'DifferencedTransformer', '2': 'RollingMeanTransformer', '3': 'MaxAbsScaler'}}, model_forecast_kwargs={'max_generations': 30, 'n_jobs': 'auto', 'random_seed': 321, 'verbose': 1}, future_regressor_train=None, future_regressor_forecast=None)

          Bases: object

          Generate a risk score (0 to 1, but usually close to 0) for a future event exceeding user specified upper or lower bounds.

          Upper and lower limits can be one of four types, and may each be different. @@ -1260,42 +1268,42 @@
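A rough sketch of EventRiskForecast per the class documented above, using simple quantile-style limits and keeping the default underlying motif model; the tuple return of predict() follows the predict() docstring below.

    from autots import load_daily
    from autots.evaluator.event_forecasting import EventRiskForecast

    df_wide = load_daily(long=False)
    risk_model = EventRiskForecast(
        df_train=df_wide,
        forecast_length=14,
        lower_limit=0.05,
        upper_limit=0.95,
    )
    risk_model.fit()
    upper_risk, lower_risk = risk_model.predict()   # risk probability arrays for each limit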

          Submodules
          -fit()
          +fit()

          -predict()
          +predict()
          -predict_historic()
          +predict_historic()
          -generate_result_windows()
          +generate_result_windows()
          -generate_risk_array()
          +generate_risk_array()
          -generate_historic_risk_array()
          +generate_historic_risk_array()
          -set_limit()
          +set_limit()
          -plot()
          +plot()
          @@ -1310,7 +1318,7 @@

          Submodules
          -fit(df_train=None, forecast_length=None, prediction_interval=None, models_mode='event_risk', model_list=['UnivariateMotif', 'MultivariateMotif', 'SectionalMotif', 'ARCH', 'MetricMotif', 'SeasonalityMotif'], ensemble=None, autots_kwargs=None, future_regressor_train=None)
          +fit(df_train=None, forecast_length=None, prediction_interval=None, models_mode='event_risk', model_list=['UnivariateMotif', 'MultivariateMotif', 'SectionalMotif', 'ARCH', 'MetricMotif', 'SeasonalityMotif'], ensemble=None, autots_kwargs=None, future_regressor_train=None)

          Shortcut for generating model params.

          args specified are those suggested for an otherwise normal AutoTS run

          @@ -1329,13 +1337,13 @@

          Submodules
          -static generate_historic_risk_array(df, limit, direction='upper')
          +static generate_historic_risk_array(df, limit, direction='upper')

          Given a df and a limit, returns a 0/1 array of whether limit was equaled or exceeded.

          -generate_result_windows(df_train=None, forecast_length=None, frequency=None, prediction_interval=None, model_name=None, model_param_dict=None, model_transform_dict=None, model_forecast_kwargs=None, future_regressor_train=None, future_regressor_forecast=None)
          +generate_result_windows(df_train=None, forecast_length=None, frequency=None, prediction_interval=None, model_name=None, model_param_dict=None, model_transform_dict=None, model_forecast_kwargs=None, future_regressor_train=None, future_regressor_forecast=None)

          For event risk forecasting. Params default to class init but can be overridden here.

          Returns:
          @@ -1349,13 +1357,13 @@

          Submodules
          -static generate_risk_array(result_windows, limit, direction='upper')
          +static generate_risk_array(result_windows, limit, direction='upper')

          Given a df and a limit, returns a 0/1 array of whether limit was equaled or exceeded.

          -plot(column_idx=0, grays=['#838996', '#c0c0c0', '#dcdcdc', '#a9a9a9', '#808080', '#989898', '#808080', '#757575', '#696969', '#c9c0bb', '#c8c8c8', '#323232', '#e5e4e2', '#778899', '#4f666a', '#848482', '#414a4c', '#8a7f80', '#c4c3d0', '#bebebe', '#dbd7d2'], up_low_color=['#ff4500', '#ff5349'], bar_color='#6495ED', bar_ylim=[0.0, 0.5], figsize=(14, 8), result_windows=None, lower_limit_2d=None, upper_limit_2d=None, upper_risk_array=None, lower_risk_array=None)
          +plot(column_idx=0, grays=['#838996', '#c0c0c0', '#dcdcdc', '#a9a9a9', '#808080', '#989898', '#808080', '#757575', '#696969', '#c9c0bb', '#c8c8c8', '#323232', '#e5e4e2', '#778899', '#4f666a', '#848482', '#414a4c', '#8a7f80', '#c4c3d0', '#bebebe', '#dbd7d2'], up_low_color=['#ff4500', '#ff5349'], bar_color='#6495ED', bar_ylim=[0.0, 0.5], figsize=(14, 8), result_windows=None, lower_limit_2d=None, upper_limit_2d=None, upper_risk_array=None, lower_risk_array=None)

          Plot a sample of the risk forecast outcomes.

          Parameters:
          @@ -1373,7 +1381,7 @@

          Submodules
          -plot_eval(df_test, column_idx=0, actuals_color=['#00BFFF'], up_low_color=['#ff4500', '#ff5349'], bar_color='#6495ED', bar_ylim=[0.0, 0.5], figsize=(14, 8), lower_limit_2d=None, upper_limit_2d=None, upper_risk_array=None, lower_risk_array=None)
          +plot_eval(df_test, column_idx=0, actuals_color=['#00BFFF'], up_low_color=['#ff4500', '#ff5349'], bar_color='#6495ED', bar_ylim=[0.0, 0.5], figsize=(14, 8), lower_limit_2d=None, upper_limit_2d=None, upper_risk_array=None, lower_risk_array=None)

          Plot a sample of the risk forecast with known value vs risk score.

          Parameters:
          @@ -1392,13 +1400,13 @@

          Submodules
          -predict()
          +predict()

          Returns forecast upper, lower risk probability arrays for input limits.

          -predict_historic(upper_limit=None, lower_limit=None, eval_periods=None)
          +predict_historic(upper_limit=None, lower_limit=None, eval_periods=None)

Returns upper, lower risk probability arrays for input limits for the historic data. If manual numpy array limits are used, the limits will need to be of the appropriate shape (for df_train and eval_periods, if used).

          @@ -1414,7 +1422,7 @@

          Submodules
          -static set_limit(limit, target_shape, df_train, direction='upper', period='forecast', forecast_length=None, eval_periods=None)
          +static set_limit(limit, target_shape, df_train, direction='upper', period='forecast', forecast_length=None, eval_periods=None)

          Handles all limit input styles and returns numpy array.

          Parameters:
          @@ -1435,30 +1443,30 @@

          Submodules
          -autots.evaluator.event_forecasting.extract_result_windows(forecasts, model_name=None)
          +autots.evaluator.event_forecasting.extract_result_windows(forecasts, model_name=None)

          standardize result windows from different models.

          -autots.evaluator.event_forecasting.extract_window_index(forecasts)
          +autots.evaluator.event_forecasting.extract_window_index(forecasts)
          -autots.evaluator.event_forecasting.set_limit_forecast(df_train, forecast_length, model_name='SeasonalNaive', model_param_dict={'lag_1': 28, 'lag_2': None, 'method': 'median'}, model_transform_dict={'fillna': 'nearest', 'transformation_params': {}, 'transformations': {}}, prediction_interval=0.9, frequency='infer', model_forecast_kwargs={'n_jobs': 'auto', 'random_seed': 321, 'verbose': 1}, future_regressor_train=None, future_regressor_forecast=None)
          +autots.evaluator.event_forecasting.set_limit_forecast(df_train, forecast_length, model_name='SeasonalNaive', model_param_dict={'lag_1': 28, 'lag_2': None, 'method': 'median'}, model_transform_dict={'fillna': 'nearest', 'transformation_params': {}, 'transformations': {}}, prediction_interval=0.9, frequency='infer', model_forecast_kwargs={'n_jobs': 'auto', 'random_seed': 321, 'verbose': 1}, future_regressor_train=None, future_regressor_forecast=None)

          Helper function for forecast limits set by forecast algorithms.

          -autots.evaluator.event_forecasting.set_limit_forecast_historic(df_train, forecast_length, model_name='SeasonalNaive', model_param_dict={'lag_1': 28, 'lag_2': None, 'method': 'median'}, model_transform_dict={'fillna': 'nearest', 'transformation_params': {}, 'transformations': {}}, prediction_interval=0.9, frequency='infer', model_forecast_kwargs={'n_jobs': 'auto', 'random_seed': 321, 'verbose': 2}, future_regressor_train=None, future_regressor_forecast=None, eval_periods=None)
          +autots.evaluator.event_forecasting.set_limit_forecast_historic(df_train, forecast_length, model_name='SeasonalNaive', model_param_dict={'lag_1': 28, 'lag_2': None, 'method': 'median'}, model_transform_dict={'fillna': 'nearest', 'transformation_params': {}, 'transformations': {}}, prediction_interval=0.9, frequency='infer', model_forecast_kwargs={'n_jobs': 'auto', 'random_seed': 321, 'verbose': 2}, future_regressor_train=None, future_regressor_forecast=None, eval_periods=None)

          Helper function for forecast limits set by forecast algorithms.

          -

          autots.evaluator.metrics module

          +

          autots.evaluator.metrics module

          Tools for calculating forecast errors.

          Some common args:

          A or actual (np.array): actuals ndim 2 (timesteps, series) @@ -1468,18 +1476,18 @@

          Submodules
          -autots.evaluator.metrics.array_last_val(arr)
          +autots.evaluator.metrics.array_last_val(arr)

          -autots.evaluator.metrics.chi_squared_hist_distribution_loss(F, A, bins='auto', plot=False)
          +autots.evaluator.metrics.chi_squared_hist_distribution_loss(F, A, bins='auto', plot=False)

          Distribution loss, chi-squared distance from histograms.

          -autots.evaluator.metrics.containment(lower_forecast, upper_forecast, actual)
          +autots.evaluator.metrics.containment(lower_forecast, upper_forecast, actual)

          Expects two, 2-D numpy arrays of forecast_length * n series.

          Returns a 1-D array of results in len n series

          @@ -1494,7 +1502,7 @@

          Submodules
          -autots.evaluator.metrics.contour(A, F)
          +autots.evaluator.metrics.contour(A, F)

A measure of how well the actual and forecast follow the same pattern of change. Note: if actual values are unchanging, this will match positively changing forecasts. This is faster, and if actuals are a flat line, contour probably isn’t a concern regardless.

          @@ -1515,18 +1523,18 @@

          Submodules
          -autots.evaluator.metrics.default_scaler(df_train)
          +autots.evaluator.metrics.default_scaler(df_train)

          -autots.evaluator.metrics.dwae(A, F, last_of_array)
          +autots.evaluator.metrics.dwae(A, F, last_of_array)

          Directional Weighted Absolute Error, the accuracy of growth or decline relative to the most recent data.

          -autots.evaluator.metrics.full_metric_evaluation(A, F, upper_forecast, lower_forecast, df_train, prediction_interval, columns=None, scaler=None, return_components=False, cumsum_A=None, diff_A=None, last_of_array=None, **kwargs)
          +autots.evaluator.metrics.full_metric_evaluation(A, F, upper_forecast, lower_forecast, df_train, prediction_interval, columns=None, scaler=None, return_components=False, cumsum_A=None, diff_A=None, last_of_array=None, **kwargs)

          Create a pd.DataFrame of metrics per series given actuals, forecast, and precalculated errors. There are some extra args which are precomputed metrics for efficiency in loops, don’t worry about them.

          @@ -1542,36 +1550,36 @@

          Submodules
          -autots.evaluator.metrics.kde(actuals, forecasts, bandwidth, x)
          +autots.evaluator.metrics.kde(actuals, forecasts, bandwidth, x)

          -autots.evaluator.metrics.kde_kl_distance(F, A, bandwidth=0.5, x=None)
          +autots.evaluator.metrics.kde_kl_distance(F, A, bandwidth=0.5, x=None)

          Distribution loss by means of KDE and KL Divergence.

          -autots.evaluator.metrics.kl_divergence(p, q, epsilon=1e-10)
          +autots.evaluator.metrics.kl_divergence(p, q, epsilon=1e-10)

          Compute KL Divergence between two distributions.
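          As a quick reference for the formula (a sketch only, not necessarily the library's exact implementation), assuming p and q are 1-D arrays representing distributions:

              import numpy as np

              def kl_divergence_sketch(p, q, epsilon=1e-10):
                  # KL(p || q) = sum(p * log(p / q)); epsilon guards against log(0)
                  p = np.asarray(p, dtype=float) + epsilon
                  q = np.asarray(q, dtype=float) + epsilon
                  return np.sum(p * np.log(p / q))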

          -autots.evaluator.metrics.linearity(arr)
          +autots.evaluator.metrics.linearity(arr)

          Score the percentage of an np.array with linear progression, along the index (0) axis.

          -autots.evaluator.metrics.mae(ae)
          +autots.evaluator.metrics.mae(ae)

          Accepting abs error already calculated

          -autots.evaluator.metrics.mda(A, F)
          +autots.evaluator.metrics.mda(A, F)

          A measure of how well the actual and forecast follow the same pattern of change. Expects two, 2-D numpy arrays of forecast_length * n series. Returns a 1-D array of results in len n series.
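          For intuition, the directional comparison can be sketched as follows (an illustrative sketch, not the library's code), with A and F as 2-D arrays of shape (timesteps, series):

              import numpy as np

              def mda_sketch(A, F):
                  # fraction of steps where actual and forecast change in the same direction, per series
                  actual_dir = np.sign(np.diff(A, axis=0))
                  forecast_dir = np.sign(np.diff(F, axis=0))
                  return np.mean(actual_dir == forecast_dir, axis=0)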

          @@ -1589,7 +1597,7 @@

          Submodules
          -autots.evaluator.metrics.mean_absolute_differential_error(A, F, order: int = 1, df_train=None, scaler=None)
          +autots.evaluator.metrics.mean_absolute_differential_error(A, F, order: int = 1, df_train=None, scaler=None)

          Expects two, 2-D numpy arrays of forecast_length * n series.

          Returns a 1-D array of results in len n series

          @@ -1612,7 +1620,7 @@

          Submodules
          -autots.evaluator.metrics.mean_absolute_error(A, F)
          +autots.evaluator.metrics.mean_absolute_error(A, F)

          Expects two, 2-D numpy arrays of forecast_length * n series.

          Returns a 1-D array of results in len n series

          @@ -1627,13 +1635,13 @@

          Submodules
          -autots.evaluator.metrics.medae(ae, nan_flag=True)
          +autots.evaluator.metrics.medae(ae, nan_flag=True)

          Accepting abs error already calculated

          -autots.evaluator.metrics.median_absolute_error(A, F)
          +autots.evaluator.metrics.median_absolute_error(A, F)

          Expects two, 2-D numpy arrays of forecast_length * n series.

          Returns a 1-D array of results in len n series

          @@ -1648,7 +1656,7 @@

          Submodules
          -autots.evaluator.metrics.mlvb(A, F, last_of_array)
          +autots.evaluator.metrics.mlvb(A, F, last_of_array)

          Mean last value baseline, the % difference of forecast vs last value naive forecast. Does poorly with near-zero values.

          @@ -1664,14 +1672,14 @@

          Submodules
          -autots.evaluator.metrics.mqae(ae, q=0.85, nan_flag=True)
          +autots.evaluator.metrics.mqae(ae, q=0.85, nan_flag=True)

          Return the mean of errors less than q quantile of the errors per series. np.nans count as largest values, and so are removed as part of the > q group.
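          A rough sketch of that idea (an assumed illustration, not the exact implementation), with ae as a 2-D array of absolute errors (timesteps, series):

              import numpy as np

              def mqae_sketch(ae, q=0.85):
                  # np.sort pushes NaN to the end of each column, so NaNs fall into the excluded > q group
                  ae_sorted = np.sort(ae, axis=0)
                  k = max(1, int(np.ceil(q * ae.shape[0])))  # count of smallest errors kept per series
                  return np.nanmean(ae_sorted[:k], axis=0)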

          -autots.evaluator.metrics.msle(full_errors, ae, le, nan_flag=True)
          +autots.evaluator.metrics.msle(full_errors, ae, le, nan_flag=True)

          Input is an array of y_pred - y_true, to over-penalize underestimates. Use y_true - y_pred instead to over-penalize overestimates. AE is used here for the log just to avoid divide-by-zero warnings (those values aren’t used either way)

          @@ -1679,43 +1687,43 @@

          Submodules
          -autots.evaluator.metrics.numpy_ffill(arr)
          +autots.evaluator.metrics.numpy_ffill(arr)

          Fill np.nan forward down the zero axis.

          -autots.evaluator.metrics.oda(A, F, last_of_array)
          +autots.evaluator.metrics.oda(A, F, last_of_array)

          Origin Directional Accuracy, the accuracy of growth or decline relative to most recent data.

          -autots.evaluator.metrics.pinball_loss(A, F, quantile)
          +autots.evaluator.metrics.pinball_loss(A, F, quantile)

          Bigger is bad-er.
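          The standard quantile (pinball) loss it refers to can be sketched like this, assuming A and F are 2-D arrays (timesteps, series):

              import numpy as np

              def pinball_sketch(A, F, quantile=0.9):
                  # under-forecasts are weighted by `quantile`, over-forecasts by `1 - quantile`
                  diff = A - F
                  return np.mean(np.where(diff >= 0, quantile * diff, (quantile - 1) * diff), axis=0)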

          -autots.evaluator.metrics.precomp_wasserstein(F, cumsum_A)
          +autots.evaluator.metrics.precomp_wasserstein(F, cumsum_A)
          -autots.evaluator.metrics.qae(ae, q=0.9, nan_flag=True)
          +autots.evaluator.metrics.qae(ae, q=0.9, nan_flag=True)

          Return the q quantile of the errors per series. np.nans count as smallest values and will push more values into the exclusion group.

          -autots.evaluator.metrics.rmse(sqe)
          +autots.evaluator.metrics.rmse(sqe)

          Accepting squared error already calculated

          -autots.evaluator.metrics.root_mean_square_error(actual, forecast)
          +autots.evaluator.metrics.root_mean_square_error(actual, forecast)

          Expects two, 2-D numpy arrays of forecast_length * n series.

          Returns a 1-D array of results in len n series

          @@ -1730,7 +1738,7 @@

          Submodules
          -autots.evaluator.metrics.rps(predictions, observed)
          +autots.evaluator.metrics.rps(predictions, observed)

          Vectorized version of Ranked Probability Score. A lower value is a better score. From: Colin Catlin, https://syllepsis.live/2022/01/22/ranked-probability-score-in-python/

          @@ -1746,7 +1754,7 @@

          Submodules
          -autots.evaluator.metrics.scaled_pinball_loss(A, F, df_train, quantile)
          +autots.evaluator.metrics.scaled_pinball_loss(A, F, df_train, quantile)

          Scaled pinball loss.

          Parameters:
          @@ -1762,25 +1770,25 @@

          Submodules
          -autots.evaluator.metrics.smape(actual, forecast, ae, nan_flag=True)
          +autots.evaluator.metrics.smape(actual, forecast, ae, nan_flag=True)

          Accepting abs error already calculated

          -autots.evaluator.metrics.smoothness(arr)
          +autots.evaluator.metrics.smoothness(arr)

          A gradient measure of linearity, where 0 is linear and larger values are more volatile.

          -autots.evaluator.metrics.spl(precomputed_spl, scaler)
          +autots.evaluator.metrics.spl(precomputed_spl, scaler)

          Accepting most of it already calculated

          -autots.evaluator.metrics.symmetric_mean_absolute_percentage_error(actual, forecast)
          +autots.evaluator.metrics.symmetric_mean_absolute_percentage_error(actual, forecast)

          Expects two, 2-D numpy arrays of forecast_length * n series. Allows NaN in actuals, and corresponding NaN in forecast, but not unmatched NaN in forecast. Also doesn’t like zeroes in either forecast or actual - this results in a poor error value even if the forecast is accurate
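          For reference, the symmetric MAPE formula can be sketched as below (an illustration, not the library's exact code; it also shows why matching zeroes are problematic, since 0/0 yields NaN):

              import numpy as np

              def smape_sketch(actual, forecast):
                  # symmetric MAPE per series, as a percentage
                  with np.errstate(divide='ignore', invalid='ignore'):
                      ratio = np.abs(forecast - actual) / ((np.abs(actual) + np.abs(forecast)) / 2)
                  return np.nanmean(ratio, axis=0) * 100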

          @@ -1799,7 +1807,7 @@

          Submodules
          -autots.evaluator.metrics.threshold_loss(actual, forecast, threshold, penalty_threshold=None)
          +autots.evaluator.metrics.threshold_loss(actual, forecast, threshold, penalty_threshold=None)

          Run once for overestimate then again for underestimate. Add both for combined view.

          Parameters:
          @@ -1814,31 +1822,31 @@

          Submodules
          -autots.evaluator.metrics.unsorted_wasserstein(F, A)
          +autots.evaluator.metrics.unsorted_wasserstein(F, A)

          Also known as earth mover's distance.

          -autots.evaluator.metrics.wasserstein(F, A)
          +autots.evaluator.metrics.wasserstein(F, A)

          This version has sorting, which is perhaps less relevant on average than the unsorted.

          -

          autots.evaluator.validation module

          +

          autots.evaluator.validation module

          Extracted from auto_ts.py, the functions to create validation segments.

          Warning, these are used in AMFM, possibly other places. Avoid modification of function structures, if possible.

          Created on Mon Jan 16 11:36:01 2023

          @author: Colin

          -autots.evaluator.validation.extract_seasonal_val_periods(validation_method)
          +autots.evaluator.validation.extract_seasonal_val_periods(validation_method)
          -autots.evaluator.validation.generate_validation_indices(validation_method, forecast_length, num_validations, df_wide_numeric, validation_params={}, preclean=None, verbose=0)
          +autots.evaluator.validation.generate_validation_indices(validation_method, forecast_length, num_validations, df_wide_numeric, validation_params={}, preclean=None, verbose=0)

          Generate validation indices (the count equals num_validations + 1, as it includes the initial eval).

          Parameters:
          @@ -1856,13 +1864,13 @@

          Submodules
          -autots.evaluator.validation.validate_num_validations(validation_method, num_validations, df_wide_numeric, forecast_length, min_allowed_train_percent=0.5, verbose=0)
          +autots.evaluator.validation.validate_num_validations(validation_method, num_validations, df_wide_numeric, forecast_length, min_allowed_train_percent=0.5, verbose=0)

          Check how many validations are possible given the length of the data, beyond the initial eval split which is always assumed.

          -

          Module contents

          +

          Module contents

          Model Evaluators

          @@ -1957,21 +1965,5 @@

          Quick search

          - - \ No newline at end of file diff --git a/docs/build/html/source/autots.html b/docs/build/html/source/autots.html index 3b33f7fc..03027ce0 100644 --- a/docs/build/html/source/autots.html +++ b/docs/build/html/source/autots.html @@ -1,17 +1,25 @@ (HTML head markup changed; page title: autots package — AutoTS 0.6.10 documentation) @@ -33,9 +41,9 @@
          -

          autots package

          +

          autots package

          -

          Subpackages

          +

          Subpackages

          • autots.datasets package
              @@ -1442,16 +1450,16 @@

              Subpackages -

              Module contents

              +

              Module contents

              Automated Time Series Model Selection for Python

              https://github.com/winedarksea/AutoTS

              -class autots.AnomalyDetector(output='multivariate', method='zscore', transform_dict={'transformation_params': {0: {'datepart_method': 'simple_3', 'regression_model': {'model': 'ElasticNet', 'model_params': {}}}}, 'transformations': {0: 'DatepartRegression'}}, forecast_params=None, method_params={}, eval_period=None, isolated_only=False, n_jobs=1)
              +class autots.AnomalyDetector(output='multivariate', method='zscore', transform_dict={'transformation_params': {0: {'datepart_method': 'simple_3', 'regression_model': {'model': 'ElasticNet', 'model_params': {}}}}, 'transformations': {0: 'DatepartRegression'}}, forecast_params=None, method_params={}, eval_period=None, isolated_only=False, n_jobs=1)

              Bases: object
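              A minimal usage sketch (the .anomalies and .scores attributes are assumed here, by analogy with the Cassandra attributes listed later on this page):

                  from autots import AnomalyDetector, load_daily

                  df = load_daily(long=False)  # wide sample data
                  detector = AnomalyDetector(output='multivariate', method='zscore')
                  detector.detect(df)
                  flags = detector.anomalies   # assumed attribute: -1 marks anomalies
                  scores = detector.scores     # assumed attribute: per-point anomaly scores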

              -detect(df)
              +detect(df)

              All will return -1 for anomalies.

              Parameters:
              @@ -1465,18 +1473,18 @@

              Subpackages
              -fit(df)
              +fit(df)

              -fit_anomaly_classifier()
              +fit_anomaly_classifier()

              Fit a model to predict if a score is an anomaly.

              -static get_new_params(method='random')
              +static get_new_params(method='random')

              Generate random new parameter combinations.

              Parameters:
              @@ -1487,12 +1495,12 @@

              Subpackages
              -plot(series_name=None, title=None, plot_kwargs={})
              +plot(series_name=None, title=None, plot_kwargs={})

              -score_to_anomaly(scores)
              +score_to_anomaly(scores)

              A DecisionTree model, used as models are nonstandard (and nonparametric).

              @@ -1500,7 +1508,7 @@

              Subpackages
              -class autots.AutoTS(forecast_length: int = 14, frequency: str = 'infer', prediction_interval: float = 0.9, max_generations: int = 20, no_negatives: bool = False, constraint: float | None = None, ensemble: str | None = None, initial_template: str = 'General+Random', random_seed: int = 2022, holiday_country: str = 'US', subset: int | None = None, aggfunc: str = 'first', na_tolerance: float = 1, metric_weighting: dict = {'containment_weighting': 0, 'contour_weighting': 0.01, 'imle_weighting': 0, 'made_weighting': 0.05, 'mae_weighting': 2, 'mage_weighting': 0, 'mle_weighting': 0, 'oda_weighting': 0.001, 'rmse_weighting': 2, 'runtime_weighting': 0.01, 'smape_weighting': 5, 'spl_weighting': 3, 'wasserstein_weighting': 0.01}, drop_most_recent: int = 0, drop_data_older_than_periods: int | None = None, model_list: str = 'default', transformer_list: dict = 'auto', transformer_max_depth: int = 6, models_mode: str = 'random', num_validations: int = 'auto', models_to_validate: float = 0.15, max_per_model_class: int | None = None, validation_method: str = 'backwards', min_allowed_train_percent: float = 0.5, remove_leading_zeroes: bool = False, prefill_na: str | None = None, introduce_na: bool | None = None, preclean: dict | None = None, model_interrupt: bool = True, generation_timeout: int | None = None, current_model_file: str | None = None, force_gc: bool = False, verbose: int = 1, n_jobs: int = 0.5)
              +class autots.AutoTS(forecast_length: int = 14, frequency: str = 'infer', prediction_interval: float = 0.9, max_generations: int = 20, no_negatives: bool = False, constraint: float | None = None, ensemble: str | None = None, initial_template: str = 'General+Random', random_seed: int = 2022, holiday_country: str = 'US', subset: int | None = None, aggfunc: str = 'first', na_tolerance: float = 1, metric_weighting: dict = {'containment_weighting': 0, 'contour_weighting': 0.01, 'imle_weighting': 0, 'made_weighting': 0.05, 'mae_weighting': 2, 'mage_weighting': 0, 'mle_weighting': 0, 'oda_weighting': 0.001, 'rmse_weighting': 2, 'runtime_weighting': 0.01, 'smape_weighting': 5, 'spl_weighting': 3, 'wasserstein_weighting': 0.01}, drop_most_recent: int = 0, drop_data_older_than_periods: int | None = None, model_list: str = 'default', transformer_list: dict = 'auto', transformer_max_depth: int = 6, models_mode: str = 'random', num_validations: int = 'auto', models_to_validate: float = 0.15, max_per_model_class: int | None = None, validation_method: str = 'backwards', min_allowed_train_percent: float = 0.5, remove_leading_zeroes: bool = False, prefill_na: str | None = None, introduce_na: bool | None = None, preclean: dict | None = None, model_interrupt: bool = True, generation_timeout: int | None = None, current_model_file: str | None = None, force_gc: bool = False, verbose: int = 1, n_jobs: int = 0.5)

              Bases: object

              Automate time series modeling using a genetic algorithm.
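              A minimal usage sketch (column names for the long-format sample data are assumed here; model_list and max_generations are kept small purely for speed):

                  from autots import AutoTS, load_daily

                  df = load_daily(long=True)  # long format: one row per series per date
                  model = AutoTS(forecast_length=14, frequency='infer', max_generations=5, model_list='superfast')
                  model = model.fit(df, date_col='datetime', value_col='value', id_col='series_id')
                  prediction = model.predict()
                  forecasts = prediction.forecast  # wide DataFrame of point forecasts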

              @@ -1595,7 +1603,7 @@

              Subpackages
              -best_model
              +best_model

              DataFrame containing template for the best ranked model

              Type:
              @@ -1606,7 +1614,7 @@

              Subpackages
              -best_model_name
              +best_model_name

              model name

              Type:
              @@ -1617,7 +1625,7 @@

              Subpackages
              -best_model_params
              +best_model_params

              model params

              Type:
              @@ -1628,7 +1636,7 @@

              Subpackages
              -best_model_transformation_params
              +best_model_transformation_params

              transformation parameters

              Type:
              @@ -1639,7 +1647,7 @@

              Subpackages
              -best_model_ensemble
              +best_model_ensemble

              Ensemble type int id

              Type:
              @@ -1650,7 +1658,7 @@

              Subpackages
              -regression_check
              +regression_check

              If True, the best_model uses an input ‘User’ future_regressor

              Type:
              @@ -1661,7 +1669,7 @@

              Subpackages
              -df_wide_numeric
              +df_wide_numeric

              dataframe containing shaped final data, will include preclean

              Type:
              @@ -1672,7 +1680,7 @@

              Subpackages
              -initial_results.model_results
              +initial_results.model_results

              contains a collection of result metrics

              Type:
              @@ -1683,7 +1691,7 @@

              Subpackages
              -score_per_series
              +score_per_series

              generated score of metrics given per input series, if horizontal ensembles

              Type:
              @@ -1719,7 +1727,7 @@

              Subpackages
              -back_forecast(series=None, n_splits: int = 'auto', tail: int = 'auto', verbose: int = 0)
              +back_forecast(series=None, n_splits: int = 'auto', tail: int = 'auto', verbose: int = 0)

              Create forecasts for the historical training data, i.e. backcast or back forecast. OUT OF SAMPLE

              This actually forecasts on historical data, these are not fit model values as are often returned by other packages. As such, this will be slower, but more representative of real world model performance. @@ -1736,18 +1744,18 @@

              Subpackages
              -best_model_per_series_mape()
              +best_model_per_series_mape()

              This isn’t quite classic MAPE but is a percentage mean error intended for quick visuals, not final statistics (see model.results()).

              -best_model_per_series_score()
              +best_model_per_series_score()
              -diagnose_params(target='runtime', waterfall_plots=True)
              +diagnose_params(target='runtime', waterfall_plots=True)

              Attempt to explain params causing measured outcomes using shap and linear regression coefficients.

              Parameters:
              @@ -1761,20 +1769,20 @@

              Subpackages
              -expand_horizontal()
              +expand_horizontal()

              Enables expanding horizontal models trained on a subset to full data. Reruns template models and generates new template.

              -export_best_model(filename, **kwargs)
              +export_best_model(filename, **kwargs)

              Basically the same as export_template but only ever the one best model.

              -export_template(filename=None, models: str = 'best', n: int = 40, max_per_model_class: int | None = None, include_results: bool = False, unpack_ensembles: bool = False, min_metrics: list = ['smape', 'spl'], max_metrics: list | None = None)
              +export_template(filename=None, models: str = 'best', n: int = 40, max_per_model_class: int | None = None, include_results: bool = False, unpack_ensembles: bool = False, min_metrics: list = ['smape', 'spl'], max_metrics: list | None = None)

              Export top results as a reusable template.

              Parameters:
              @@ -1796,7 +1804,7 @@

              Subpackages
              -failure_rate(result_set: str = 'initial')
              +failure_rate(result_set: str = 'initial')

              Return fraction of models passing with exceptions.

              Parameters:
              @@ -1810,7 +1818,7 @@

              Subpackages
              -fit(df, date_col: str | None = None, value_col: str | None = None, id_col: str | None = None, future_regressor=None, weights: dict = {}, result_file: str | None = None, grouping_ids=None, validation_indexes: list | None = None)
              +fit(df, date_col: str | None = None, value_col: str | None = None, id_col: str | None = None, future_regressor=None, weights: dict = {}, result_file: str | None = None, grouping_ids=None, validation_indexes: list | None = None)

              Train algorithm given data supplied.

              Parameters:
              @@ -1835,13 +1843,13 @@

              Subpackages
              -fit_data(df, date_col=None, value_col=None, id_col=None, future_regressor=None, weights={})
              +fit_data(df, date_col=None, value_col=None, id_col=None, future_regressor=None, weights={})

              Part of the setup that involves fitting the initial data but not running any models.

              -get_metric_corr(percent_best=0.1)
              +get_metric_corr(percent_best=0.1)

              Returns a dataframe of correlation among evaluation metrics across evaluations.

              Parameters:
              @@ -1852,24 +1860,24 @@

              Subpackages
              -static get_new_params(method='random')
              +static get_new_params(method='random')

              Randomly generate new parameters for the class.

              -horizontal_per_generation()
              +horizontal_per_generation()
              -horizontal_to_df()
              +horizontal_to_df()

              helper function for plotting.

              -import_best_model(import_target, enforce_model_list: bool = True, include_ensemble: bool = True)
              +import_best_model(import_target, enforce_model_list: bool = True, include_ensemble: bool = True)

              Load a best model, overriding any existing setting.

              Parameters:
              @@ -1880,7 +1888,7 @@

              Subpackages
              -import_results(filename)
              +import_results(filename)

              Add results from another run on the same data.

              Input can be a filename ending in .csv or .pickle, or a DataFrame of model results, or a full TemplateEvalObject

              @@ -1888,7 +1896,7 @@

              Subpackages
              -import_template(filename: str, method: str = 'add_on', enforce_model_list: bool = True, include_ensemble: bool = False, include_horizontal: bool = False, force_validation: bool = False)
              +import_template(filename: str, method: str = 'add_on', enforce_model_list: bool = True, include_ensemble: bool = False, include_horizontal: bool = False, force_validation: bool = False)

              Import a previously exported template of model parameters. Must be done before the AutoTS object is .fit().
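              For example, a typical round trip between runs might look like the sketch below (filename and argument values are illustrative only):

                  # at the end of a completed run
                  model.export_template('template.csv', models='best', n=20, max_per_model_class=3)

                  # before fitting a fresh AutoTS instance on similar data
                  new_model = AutoTS(forecast_length=14)
                  new_model.import_template('template.csv', method='only', enforce_model_list=True)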

              @@ -1908,36 +1916,36 @@

              Subpackages
              -list_failed_model_types()
              +list_failed_model_types()

              Return a list of model types (e.g. ETS, LastValueNaive) that failed. If all had at least one success, then return an empty list.

              -load_template(filename)
              +load_template(filename)

              Helper function for just loading the file part of import_template.

              -mosaic_to_df()
              +mosaic_to_df()

              Helper function to create a readable df of models in mosaic.

              -parse_best_model()
              +parse_best_model()
              -plot_back_forecast(**kwargs)
              +plot_back_forecast(**kwargs)
              -plot_backforecast(series=None, n_splits: int = 'auto', start_date='auto', title=None, alpha=0.25, facecolor='black', loc='upper left', **kwargs)
              +plot_backforecast(series=None, n_splits: int = 'auto', start_date='auto', title=None, alpha=0.25, facecolor='black', loc='upper left', **kwargs)

              Plot the historical data and fit forecast on historic. Out of sample in chunks = forecast_length by default.

              Parameters:
              @@ -1954,7 +1962,7 @@

              Subpackages
              -plot_generation_loss(title='Single Model Accuracy Gain Over Generations', **kwargs)
              +plot_generation_loss(title='Single Model Accuracy Gain Over Generations', **kwargs)

              Plot improvement in accuracy over generations. Note: this is only “one size fits all” accuracy and doesn’t account for the benefits seen for ensembling.

              @@ -1967,7 +1975,7 @@

              Subpackages
              -plot_horizontal(max_series: int = 20, title='Model Types Chosen by Series', **kwargs)
              +plot_horizontal(max_series: int = 20, title='Model Types Chosen by Series', **kwargs)

              Simple plot to visualize assigned series: models.

              Note that for ‘mosaic’ ensembles, it only plots the type of the most common model_id for that series, or the first if all are equally common.

              @@ -1982,19 +1990,19 @@

              Subpackages
              -plot_horizontal_model_count(color_list=None, top_n: int = 20, title='Most Frequently Chosen Models', **kwargs)
              +plot_horizontal_model_count(color_list=None, top_n: int = 20, title='Most Frequently Chosen Models', **kwargs)

              Plots the most common models. Does not factor in models nested in non-horizontal Ensembles.

              -plot_horizontal_per_generation(title='Horizontal Ensemble Accuracy Gain (first eval sample only)', **kwargs)
              +plot_horizontal_per_generation(title='Horizontal Ensemble Accuracy Gain (first eval sample only)', **kwargs)

              Plot how well the horizontal ensembles would do after each new generation. Slow.

              -plot_horizontal_transformers(method='transformers', color_list=None, **kwargs)
              +plot_horizontal_transformers(method='transformers', color_list=None, **kwargs)

              Simple plot to visualize transformers used. Note this doesn’t capture transformers nested in simple ensembles.

              @@ -2010,7 +2018,7 @@

              Subpackages
              -plot_metric_corr(cols=None, percent_best=0.1)
              +plot_metric_corr(cols=None, percent_best=0.1)

              Plot correlation in results among metrics. The metrics that are highly correlated are mostly the unscaled ones

              @@ -2025,7 +2033,7 @@

              Subpackages
              -plot_per_series_error(title: str = 'Top Series Contributing Score Error', max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', upper_clip: float = 1000, **kwargs)
              +plot_per_series_error(title: str = 'Top Series Contributing Score Error', max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', upper_clip: float = 1000, **kwargs)

              Plot which series are contributing most to error (Score) of final model. Avg of validations for best_model

              Parameters:
              @@ -2045,7 +2053,7 @@

              Subpackages
              -plot_per_series_mape(title: str | None = None, max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', **kwargs)
              +plot_per_series_mape(title: str | None = None, max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', **kwargs)

              Plot which series are contributing most to SMAPE of final model. Avg of validations for best_model

              Parameters:
              @@ -2064,19 +2072,19 @@

              Subpackages
              -plot_per_series_smape(title: str | None = None, max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', **kwargs)
              +plot_per_series_smape(title: str | None = None, max_series: int = 10, max_name_chars: int = 25, color: str = '#ff9912', figsize=(12, 4), kind: str = 'bar', **kwargs)

              Kept for backwards compatibility, not necessarily maintained; plot_per_series_mape is to be preferred.

              -plot_transformer_failure_rate()
              +plot_transformer_failure_rate()

              Failure rate per Transformer type (ignoring ensembles); failure may be due to another model or transformer.

              -plot_validations(df_wide=None, models=None, series=None, title=None, start_date='auto', end_date='auto', subset=None, compare_horizontal=False, colors=None, include_bounds=True, alpha=0.35, start_color='darkred', end_color='#A2AD9C', **kwargs)
              +plot_validations(df_wide=None, models=None, series=None, title=None, start_date='auto', end_date='auto', subset=None, compare_horizontal=False, colors=None, include_bounds=True, alpha=0.35, start_color='darkred', end_color='#A2AD9C', **kwargs)

              Similar to plot_backforecast but using the model’s validation segments specifically. Must reforecast. Saves results to self.validation_forecasts and caches them; set that to None to force a rerun, otherwise it uses the stored results (when models is the same). ‘chosen’ refers to best_model_id, the model chosen to run for predict. @@ -2101,7 +2109,7 @@

              Subpackages
              -predict(forecast_length: int = 'self', prediction_interval: float = 'self', future_regressor=None, hierarchy=None, just_point_forecast: bool = False, fail_on_forecast_nan: bool = True, verbose: int = 'self', df=None)
              +predict(forecast_length: int = 'self', prediction_interval: float = 'self', future_regressor=None, hierarchy=None, just_point_forecast: bool = False, fail_on_forecast_nan: bool = True, verbose: int = 'self', df=None)

              Generate forecast data immediately following dates of index supplied to .fit().

              If using a model from the update_fit list, with no ensembling, the underlying model will not be retrained when used as below, with a single prediction interval. This is designed for high-speed forecasting; full retraining is best when there is sufficient time. @@ -2144,7 +2152,7 @@

              Subpackages
              -results(result_set: str = 'initial')
              +results(result_set: str = 'initial')

              Convenience function to return tested models table.

              Parameters:
              @@ -2155,25 +2163,25 @@

              Subpackages
              -retrieve_validation_forecasts(models=None, compare_horizontal=False, id_name='SeriesID', value_name='Value', interval_name='PredictionInterval')
              +retrieve_validation_forecasts(models=None, compare_horizontal=False, id_name='SeriesID', value_name='Value', interval_name='PredictionInterval')

              -save_template(filename, export_template, **kwargs)
              +save_template(filename, export_template, **kwargs)

              Helper function for the save part of export_template.

              -validation_agg()
              +validation_agg()

              -class autots.Cassandra(preprocessing_transformation: dict | None = None, scaling: str = 'BaseScaler', past_impacts_intervention: str | None = None, seasonalities: dict = ['common_fourier'], ar_lags: list | None = None, ar_interaction_seasonality: dict | None = None, anomaly_detector_params: dict | None = None, anomaly_intervention: str | None = None, holiday_detector_params: dict | None = None, holiday_countries: dict | None = None, holiday_countries_used: bool = True, multivariate_feature: str | None = None, multivariate_transformation: str | None = None, regressor_transformation: dict | None = None, regressors_used: bool = True, linear_model: dict | None = None, randomwalk_n: int | None = None, trend_window: int = 30, trend_standin: str | None = None, trend_anomaly_detector_params: dict | None = None, trend_transformation: dict = {}, trend_model: dict = {'Model': 'LastValueNaive', 'ModelParameters': {}}, trend_phi: float | None = None, constraint: dict | None = None, max_colinearity: float = 0.998, max_multicolinearity: float = 0.001, frequency: str = 'infer', prediction_interval: float = 0.9, random_seed: int = 2022, verbose: int = 0, n_jobs: int = 'auto', **kwargs)
              +class autots.Cassandra(preprocessing_transformation: dict | None = None, scaling: str = 'BaseScaler', past_impacts_intervention: str | None = None, seasonalities: dict = ['common_fourier'], ar_lags: list | None = None, ar_interaction_seasonality: dict | None = None, anomaly_detector_params: dict | None = None, anomaly_intervention: str | None = None, holiday_detector_params: dict | None = None, holiday_countries: dict | None = None, holiday_countries_used: bool = True, multivariate_feature: str | None = None, multivariate_transformation: str | None = None, regressor_transformation: dict | None = None, regressors_used: bool = True, linear_model: dict | None = None, randomwalk_n: int | None = None, trend_window: int = 30, trend_standin: str | None = None, trend_anomaly_detector_params: dict | None = None, trend_transformation: dict = {}, trend_model: dict = {'Model': 'LastValueNaive', 'ModelParameters': {}}, trend_phi: float | None = None, constraint: dict | None = None, max_colinearity: float = 0.998, max_multicolinearity: float = 0.001, frequency: str = 'infer', prediction_interval: float = 0.9, random_seed: int = 2022, verbose: int = 0, n_jobs: int = 'auto', **kwargs)

              Bases: ModelObject

              Explainable decomposition-based forecasting with advanced trend modeling and preprocessing.

              Tunc etiam fatis aperit Cassandra futuris @@ -2207,68 +2215,68 @@
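              A minimal usage sketch under the defaults shown above (argument values are illustrative):

                  from autots import Cassandra, load_daily

                  df = load_daily(long=False)
                  model = Cassandra(seasonalities=['common_fourier'])
                  model.fit(df)
                  prediction = model.predict(forecast_length=14)
                  model.plot_forecast(prediction, series=df.columns[0])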

              Subpackages
              -fit()
              +fit()

              -predict()
              +predict()
              -holiday_detector.dates_to_holidays()
              +holiday_detector.dates_to_holidays()
              -create_forecast_index()
              +create_forecast_index()

              After .fit, can be used to create an index for the prediction

              -plot_forecast()
              +plot_forecast()
              -plot_components()
              +plot_components()
              -plot_trend()
              +plot_trend()
              -get_new_params()
              +get_new_params()
              -return_components()
              +return_components()
              -.anomaly_detector.anomalies
              +.anomaly_detector.anomalies
              -.anomaly_detector.scores
              +.anomaly_detector.scores
              -.holiday_count
              +.holiday_count
              -.holidays
              +.holidays
              Type:

              series flags, holiday detector only

              @@ -2278,7 +2286,7 @@

              Subpackages
              -.params
              +.params

              @@ -2288,94 +2296,94 @@

              Subpackages
              -.x_array
              +.x_array

              -.predict_x_array
              +.predict_x_array
              -.trend_train
              +.trend_train
              -.predicted_trend
              +.predicted_trend
              -analyze_trend(slope, index)
              +analyze_trend(slope, index)
              -auto_fit(df, validation_method)
              +auto_fit(df, validation_method)
              -base_scaler(df)
              +base_scaler(df)
              -compare_actual_components()
              +compare_actual_components()
              -create_t(DTindex)
              +create_t(DTindex)
              -cross_validate(df, validation_method)
              +cross_validate(df, validation_method)
              -feature_importance()
              +feature_importance()
              -fit(df, future_regressor=None, regressor_per_series=None, flag_regressors=None, categorical_groups=None, past_impacts=None)
              +fit(df, future_regressor=None, regressor_per_series=None, flag_regressors=None, categorical_groups=None, past_impacts=None)
              -fit_data(df, forecast_length=None, future_regressor=None, regressor_per_series=None, flag_regressors=None, future_impacts=None, regressor_forecast_model=None, regressor_forecast_model_params=None, regressor_forecast_transformations=None, include_history=False, past_impacts=None)
              +fit_data(df, forecast_length=None, future_regressor=None, regressor_per_series=None, flag_regressors=None, future_impacts=None, regressor_forecast_model=None, regressor_forecast_model_params=None, regressor_forecast_transformations=None, include_history=False, past_impacts=None)
              -get_new_params(method='fast')
              +get_new_params(method='fast')

              Return dict of new parameters for parameter tuning.

              -get_params()
              +get_params()

              Return dict of current parameters.

              -next_fit()
              +next_fit()
              -plot_components(prediction=None, series=None, figsize=(16, 9), to_origin_space=True, title=None, start_date=None)
              +plot_components(prediction=None, series=None, figsize=(16, 9), to_origin_space=True, title=None, start_date=None)
              -plot_forecast(prediction, actuals=None, series=None, start_date=None, anomaly_color='darkslateblue', holiday_color='darkgreen', trend_anomaly_color='slategray', point_size=12.0)
              +plot_forecast(prediction, actuals=None, series=None, start_date=None, anomaly_color='darkslateblue', holiday_color='darkgreen', trend_anomaly_color='slategray', point_size=12.0)

              Plot a forecast time series.

              Parameters:
              @@ -2395,17 +2403,17 @@

              Subpackages
              -plot_things()
              +plot_things()

              -plot_trend(series=None, vline=None, colors=['#d4f74f', '#82ab5a', '#ff6c05', '#c12600'], title=None, start_date=None, **kwargs)
              +plot_trend(series=None, vline=None, colors=['#d4f74f', '#82ab5a', '#ff6c05', '#c12600'], title=None, start_date=None, **kwargs)
              -predict(forecast_length=None, include_history=False, future_regressor=None, regressor_per_series=None, flag_regressors=None, future_impacts=None, new_df=None, regressor_forecast_model=None, regressor_forecast_model_params=None, regressor_forecast_transformations=None, include_organic=False, df=None, past_impacts=None)
              +predict(forecast_length=None, include_history=False, future_regressor=None, regressor_per_series=None, flag_regressors=None, future_impacts=None, new_df=None, regressor_forecast_model=None, regressor_forecast_model_params=None, regressor_forecast_transformations=None, include_organic=False, df=None, past_impacts=None)

              Generate a forecast.

              future_regressor and regressor_per_series should only include new future values (history is already stored); they should match on forecast_length and the index of the forecasts

              @@ -2425,18 +2433,18 @@

              Subpackages
              -predict_new_product()
              +predict_new_product()

              -process_components(to_origin_space=True)
              +process_components(to_origin_space=True)

              Scale and standardize component outputs.

              -return_components(to_origin_space=True, include_impacts=False)
              +return_components(to_origin_space=True, include_impacts=False)

              Return additive elements of forecast, linear and trend. If impacts included, it is a multiplicative term.

              Parameters:
              @@ -2450,30 +2458,30 @@

              Subpackages
              -rolling_trend(trend_residuals, t)
              +rolling_trend(trend_residuals, t)

              -scale_data(df)
              +scale_data(df)
              -to_origin_space(df, trans_method='forecast', components=False, bounds=False)
              +to_origin_space(df, trans_method='forecast', components=False, bounds=False)

              Take transformed outputs back to original feature space.

              -treatment_causal_impact(df, intervention_dates)
              +treatment_causal_impact(df, intervention_dates)
              -class autots.EventRiskForecast(df_train, forecast_length, frequency: str = 'infer', prediction_interval=0.9, lower_limit=0.05, upper_limit=0.95, model_name='UnivariateMotif', model_param_dict={'distance_metric': 'euclidean', 'k': 10, 'pointed_method': 'median', 'return_result_windows': True, 'window': 14}, model_transform_dict={'fillna': 'pchip', 'transformation_params': {'0': {'method': 0.5}, '1': {}, '2': {'fixed': False, 'window': 7}, '3': {}}, 'transformations': {'0': 'Slice', '1': 'DifferencedTransformer', '2': 'RollingMeanTransformer', '3': 'MaxAbsScaler'}}, model_forecast_kwargs={'max_generations': 30, 'n_jobs': 'auto', 'random_seed': 321, 'verbose': 1}, future_regressor_train=None, future_regressor_forecast=None)
              +class autots.EventRiskForecast(df_train, forecast_length, frequency: str = 'infer', prediction_interval=0.9, lower_limit=0.05, upper_limit=0.95, model_name='UnivariateMotif', model_param_dict={'distance_metric': 'euclidean', 'k': 10, 'pointed_method': 'median', 'return_result_windows': True, 'window': 14}, model_transform_dict={'fillna': 'pchip', 'transformation_params': {'0': {'method': 0.5}, '1': {}, '2': {'fixed': False, 'window': 7}, '3': {}}, 'transformations': {'0': 'Slice', '1': 'DifferencedTransformer', '2': 'RollingMeanTransformer', '3': 'MaxAbsScaler'}}, model_forecast_kwargs={'max_generations': 30, 'n_jobs': 'auto', 'random_seed': 321, 'verbose': 1}, future_regressor_train=None, future_regressor_forecast=None)

              Bases: object

              Generate a risk score (0 to 1, but usually close to 0) for a future event exceeding user-specified upper or lower bounds.

              Upper and lower limits can be one of four types, and may each be different. @@ -2515,42 +2523,42 @@
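              A minimal usage sketch with fractional quantile limits (values are illustrative):

                  from autots import EventRiskForecast, load_daily

                  df = load_daily(long=False)
                  risk = EventRiskForecast(df, forecast_length=14, upper_limit=0.95, lower_limit=0.05)
                  risk.fit()
                  upper_risk, lower_risk = risk.predict()  # probability arrays for exceeding each limit
                  risk.plot(column_idx=0)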

              Subpackages
              -fit()
              +fit()

              -predict()
              +predict()
              -predict_historic()
              +predict_historic()
              -generate_result_windows()
              +generate_result_windows()
              -generate_risk_array()
              +generate_risk_array()
              -generate_historic_risk_array()
              +generate_historic_risk_array()
              -set_limit()
              +set_limit()
              -plot()
              +plot()
              @@ -2565,7 +2573,7 @@

              Subpackages
              -fit(df_train=None, forecast_length=None, prediction_interval=None, models_mode='event_risk', model_list=['UnivariateMotif', 'MultivariateMotif', 'SectionalMotif', 'ARCH', 'MetricMotif', 'SeasonalityMotif'], ensemble=None, autots_kwargs=None, future_regressor_train=None)
              +fit(df_train=None, forecast_length=None, prediction_interval=None, models_mode='event_risk', model_list=['UnivariateMotif', 'MultivariateMotif', 'SectionalMotif', 'ARCH', 'MetricMotif', 'SeasonalityMotif'], ensemble=None, autots_kwargs=None, future_regressor_train=None)

              Shortcut for generating model params.

              args specified are those suggested for an otherwise normal AutoTS run

              @@ -2584,13 +2592,13 @@

              Subpackages
              -static generate_historic_risk_array(df, limit, direction='upper')
              +static generate_historic_risk_array(df, limit, direction='upper')

              Given a df and a limit, returns a 0/1 array of whether limit was equaled or exceeded.

              -generate_result_windows(df_train=None, forecast_length=None, frequency=None, prediction_interval=None, model_name=None, model_param_dict=None, model_transform_dict=None, model_forecast_kwargs=None, future_regressor_train=None, future_regressor_forecast=None)
              +generate_result_windows(df_train=None, forecast_length=None, frequency=None, prediction_interval=None, model_name=None, model_param_dict=None, model_transform_dict=None, model_forecast_kwargs=None, future_regressor_train=None, future_regressor_forecast=None)

              For event risk forecasting. Params default to class init but can be overridden here.

              Returns:
              @@ -2604,13 +2612,13 @@

              Subpackages
              -static generate_risk_array(result_windows, limit, direction='upper')
              +static generate_risk_array(result_windows, limit, direction='upper')

              Given a df and a limit, returns a 0/1 array of whether limit was equaled or exceeded.

              -plot(column_idx=0, grays=['#838996', '#c0c0c0', '#dcdcdc', '#a9a9a9', '#808080', '#989898', '#808080', '#757575', '#696969', '#c9c0bb', '#c8c8c8', '#323232', '#e5e4e2', '#778899', '#4f666a', '#848482', '#414a4c', '#8a7f80', '#c4c3d0', '#bebebe', '#dbd7d2'], up_low_color=['#ff4500', '#ff5349'], bar_color='#6495ED', bar_ylim=[0.0, 0.5], figsize=(14, 8), result_windows=None, lower_limit_2d=None, upper_limit_2d=None, upper_risk_array=None, lower_risk_array=None)
              +plot(column_idx=0, grays=['#838996', '#c0c0c0', '#dcdcdc', '#a9a9a9', '#808080', '#989898', '#808080', '#757575', '#696969', '#c9c0bb', '#c8c8c8', '#323232', '#e5e4e2', '#778899', '#4f666a', '#848482', '#414a4c', '#8a7f80', '#c4c3d0', '#bebebe', '#dbd7d2'], up_low_color=['#ff4500', '#ff5349'], bar_color='#6495ED', bar_ylim=[0.0, 0.5], figsize=(14, 8), result_windows=None, lower_limit_2d=None, upper_limit_2d=None, upper_risk_array=None, lower_risk_array=None)

              Plot a sample of the risk forecast outcomes.

              Parameters:
              @@ -2628,7 +2636,7 @@

              Subpackages
              -plot_eval(df_test, column_idx=0, actuals_color=['#00BFFF'], up_low_color=['#ff4500', '#ff5349'], bar_color='#6495ED', bar_ylim=[0.0, 0.5], figsize=(14, 8), lower_limit_2d=None, upper_limit_2d=None, upper_risk_array=None, lower_risk_array=None)
              +plot_eval(df_test, column_idx=0, actuals_color=['#00BFFF'], up_low_color=['#ff4500', '#ff5349'], bar_color='#6495ED', bar_ylim=[0.0, 0.5], figsize=(14, 8), lower_limit_2d=None, upper_limit_2d=None, upper_risk_array=None, lower_risk_array=None)

              Plot a sample of the risk forecast with known value vs risk score.

              Parameters:
              @@ -2647,13 +2655,13 @@

              Subpackages
              -predict()
              +predict()

              Returns forecast upper, lower risk probability arrays for input limits.

              -predict_historic(upper_limit=None, lower_limit=None, eval_periods=None)
              +predict_historic(upper_limit=None, lower_limit=None, eval_periods=None)

              Returns upper, lower risk probability arrays for input limits for the historic data. If manual numpy array limits are used, the limits will need to be of appropriate shape (for df_train, and eval_periods if used)

              @@ -2669,7 +2677,7 @@

              Subpackages
              -static set_limit(limit, target_shape, df_train, direction='upper', period='forecast', forecast_length=None, eval_periods=None)
              +static set_limit(limit, target_shape, df_train, direction='upper', period='forecast', forecast_length=None, eval_periods=None)

              Handles all limit input styles and returns numpy array.

              Parameters:
              @@ -2690,7 +2698,7 @@

              Subpackages
              -class autots.GeneralTransformer(fillna: str | None = None, transformations: dict = {}, transformation_params: dict = {}, grouping: str | None = None, reconciliation: str | None = None, grouping_ids=None, random_seed: int = 2020, n_jobs: int = 1, holiday_country: list | None = None, verbose: int = 0)
              +class autots.GeneralTransformer(fillna: str | None = None, transformations: dict = {}, transformation_params: dict = {}, grouping: str | None = None, reconciliation: str | None = None, grouping_ids=None, random_seed: int = 2020, n_jobs: int = 1, holiday_country: list | None = None, verbose: int = 0)

              Bases: object

              Fill NA values and then apply mathematical transformations.

              Expects a chronologically sorted pandas.DataFrame with a DatetimeIndex, only numeric data, and a ‘wide’ (one column per series) shape.
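              A small sketch of the fit/inverse round trip (the transformer choices here are illustrative, drawn from the RandomTransform list below; the 'original' trans_method for round-tripping training data is an assumption):

                  from autots import GeneralTransformer, load_daily

                  df = load_daily(long=False)  # wide, numeric, DatetimeIndex
                  transformer = GeneralTransformer(
                      fillna='ffill',
                      transformations={'0': 'MinMaxScaler', '1': 'DifferencedTransformer'},
                      transformation_params={'0': {}, '1': {}},
                  )
                  df_trans = transformer.fit_transform(df)
                  df_back = transformer.inverse_transform(df_trans, trans_method='original')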

              @@ -2795,7 +2803,7 @@

              Subpackages
              -fill_na(df, window: int = 10)
              +fill_na(df, window: int = 10)
              Parameters:
                @@ -2811,7 +2819,7 @@

                Subpackages
                -fit(df)
                +fit(df)

                Apply transformations and return transformer object.

                Parameters:
                @@ -2822,18 +2830,18 @@

                Subpackages
                -fit_transform(df)
                +fit_transform(df)

                Directly fit and apply transformations to convert df.

                -static get_new_params(method='fast')
                +static get_new_params(method='fast')
                -inverse_transform(df, trans_method: str = 'forecast', fillzero: bool = False, bounds: bool = False)
                +inverse_transform(df, trans_method: str = 'forecast', fillzero: bool = False, bounds: bool = False)

                Undo the madness.

                Parameters:
                @@ -2849,7 +2857,7 @@

                Subpackages
                -classmethod retrieve_transformer(transformation: str | None = None, param: dict = {}, df=None, random_seed: int = 2020, n_jobs: int = 1, holiday_country: list | None = None)
                +classmethod retrieve_transformer(transformation: str | None = None, param: dict = {}, df=None, random_seed: int = 2020, n_jobs: int = 1, holiday_country: list | None = None)

                Retrieves a specific transformer object from a string.

                Parameters:
                @@ -2867,7 +2875,7 @@

                Subpackages
                -transform(df)
                +transform(df)

                Apply transformations to convert df.

                @@ -2875,11 +2883,11 @@

                Subpackages
                -class autots.HolidayDetector(anomaly_detector_params={}, threshold=0.8, min_occurrences=2, splash_threshold=0.65, use_dayofmonth_holidays=True, use_wkdom_holidays=True, use_wkdeom_holidays=True, use_lunar_holidays=True, use_lunar_weekday=False, use_islamic_holidays=True, use_hebrew_holidays=True, output: str = 'multivariate', n_jobs: int = 1)
                +class autots.HolidayDetector(anomaly_detector_params={}, threshold=0.8, min_occurrences=2, splash_threshold=0.65, use_dayofmonth_holidays=True, use_wkdom_holidays=True, use_wkdeom_holidays=True, use_lunar_holidays=True, use_lunar_weekday=False, use_islamic_holidays=True, use_hebrew_holidays=True, output: str = 'multivariate', n_jobs: int = 1)

                Bases: object

                -dates_to_holidays(dates, style='flag', holiday_impacts=False)
                +dates_to_holidays(dates, style='flag', holiday_impacts=False)

                Populate date information for a given pd.DatetimeIndex.

                Parameters:
                @@ -2900,48 +2908,48 @@

                Subpackages
                -detect(df)
                +detect(df)

                Run holiday detection. Input wide-style pandas time series.

                -fit(df)
                +fit(df)
                -static get_new_params(method='random')
                +static get_new_params(method='random')
                -plot(series_name=None, include_anomalies=True, title=None, plot_kwargs={}, series=None)
                +plot(series_name=None, include_anomalies=True, title=None, plot_kwargs={}, series=None)
                -plot_anomaly(kwargs={})
                +plot_anomaly(kwargs={})
                -autots.RandomTransform(transformer_list: dict = {None: 0.0, 'MinMaxScaler': 0.03, 'PowerTransformer': 0.01, 'QuantileTransformer': 0.03, 'MaxAbsScaler': 0.03, 'StandardScaler': 0.04, 'RobustScaler': 0.03, 'PCA': 0.01, 'FastICA': 0.01, 'Detrend': 0.02, 'RollingMeanTransformer': 0.02, 'RollingMean100thN': 0.01, 'DifferencedTransformer': 0.05, 'SinTrend': 0.01, 'PctChangeTransformer': 0.01, 'CumSumTransformer': 0.02, 'PositiveShift': 0.02, 'Log': 0.01, 'IntermittentOccurrence': 0.01, 'SeasonalDifference': 0.06, 'cffilter': 0.01, 'bkfilter': 0.05, 'convolution_filter': 0.001, 'HPFilter': 0.01, 'DatepartRegression': 0.01, 'ClipOutliers': 0.03, 'Discretize': 0.01, 'CenterLastValue': 0.01, 'Round': 0.02, 'Slice': 0.02, 'ScipyFilter': 0.02, 'STLFilter': 0.01, 'EWMAFilter': 0.02, 'MeanDifference': 0.002, 'BTCD': 0.01, 'Cointegration': 0.01, 'AlignLastValue': 0.2, 'AnomalyRemoval': 0.03, 'HolidayTransformer': 0.01, 'LocalLinearTrend': 0.01, 'KalmanSmoothing': 0.02, 'RegressionFilter': 0.02, 'LevelShiftTransformer': 0.03, 'CenterSplit': 0.01, 'FFTFilter': 0.01, 'FFTDecomposition': 0.01, 'ReplaceConstant': 0.02, 'AlignLastDiff': 0.01, 'DiffSmoother': 0.005, 'HistoricValues': 0.01, 'BKBandpassFilter': 0.01}, transformer_max_depth: int = 4, na_prob_dict: dict = {'ffill': 0.4, 'fake_date': 0.1, 'rolling_mean': 0.1, 'rolling_mean_24': 0.1, 'IterativeImputer': 0.025, 'mean': 0.06, 'zero': 0.05, 'ffill_mean_biased': 0.1, 'median': 0.03, None: 0.001, 'interpolate': 0.4, 'KNNImputer': 0.05, 'IterativeImputerExtraTrees': 0.0001, 'SeasonalityMotifImputer': 0.1, 'SeasonalityMotifImputerLinMix': 0.01, 'SeasonalityMotifImputer1K': 0.01, 'DatepartRegressionImputer': 0.05}, fast_params: bool | None = None, superfast_params: bool | None = None, traditional_order: bool = False, transformer_min_depth: int = 1, allow_none: bool = True, no_nan_fill: bool = False)
                +autots.RandomTransform(transformer_list: dict = {'AlignLastDiff': 0.01, 'AlignLastValue': 0.2, 'AnomalyRemoval': 0.03, 'BKBandpassFilter': 0.01, 'BTCD': 0.01, 'CenterLastValue': 0.01, 'CenterSplit': 0.01, 'ClipOutliers': 0.03, 'Cointegration': 0.01, 'CumSumTransformer': 0.02, 'DatepartRegression': 0.01, 'Detrend': 0.02, 'DiffSmoother': 0.005, 'DifferencedTransformer': 0.05, 'Discretize': 0.01, 'EWMAFilter': 0.02, 'FFTDecomposition': 0.01, 'FFTFilter': 0.01, 'FastICA': 0.01, 'HPFilter': 0.01, 'HistoricValues': 0.01, 'HolidayTransformer': 0.01, 'IntermittentOccurrence': 0.01, 'KalmanSmoothing': 0.02, 'LevelShiftTransformer': 0.03, 'LocalLinearTrend': 0.01, 'Log': 0.01, 'MaxAbsScaler': 0.03, 'MeanDifference': 0.002, 'MinMaxScaler': 0.03, 'PCA': 0.01, 'PctChangeTransformer': 0.01, 'PositiveShift': 0.02, 'PowerTransformer': 0.01, 'QuantileTransformer': 0.03, 'RegressionFilter': 0.02, 'ReplaceConstant': 0.02, 'RobustScaler': 0.03, 'RollingMean100thN': 0.01, 'RollingMeanTransformer': 0.02, 'Round': 0.02, 'STLFilter': 0.01, 'ScipyFilter': 0.02, 'SeasonalDifference': 0.06, 'SinTrend': 0.01, 'Slice': 0.02, 'StandardScaler': 0.04, 'bkfilter': 0.05, 'cffilter': 0.01, 'convolution_filter': 0.001, None: 0.0}, transformer_max_depth: int = 4, na_prob_dict: dict = {'DatepartRegressionImputer': 0.05, 'IterativeImputer': 0.025, 'IterativeImputerExtraTrees': 0.0001, 'KNNImputer': 0.05, 'SeasonalityMotifImputer': 0.1, 'SeasonalityMotifImputer1K': 0.01, 'SeasonalityMotifImputerLinMix': 0.01, 'fake_date': 0.1, 'ffill': 0.4, 'ffill_mean_biased': 0.1, 'interpolate': 0.4, 'mean': 0.06, 'median': 0.03, 'rolling_mean': 0.1, 'rolling_mean_24': 0.1, 'zero': 0.05, None: 0.001}, fast_params: bool | None = None, superfast_params: bool | None = None, traditional_order: bool = False, transformer_min_depth: int = 1, allow_none: bool = True, no_nan_fill: bool = False)

                Return a dict of randomly chosen transformation selections.

                BTCD is used as a signal that slow parameters are allowed.

                -autots.TransformTS
                +autots.TransformTS

                alias of GeneralTransformer

                -autots.create_lagged_regressor(df, forecast_length: int, frequency: str = 'infer', scale: bool = True, summarize: str | None = None, backfill: str = 'bfill', n_jobs: str = 'auto', fill_na: str = 'ffill')
                +autots.create_lagged_regressor(df, forecast_length: int, frequency: str = 'infer', scale: bool = True, summarize: str | None = None, backfill: str = 'bfill', n_jobs: str = 'auto', fill_na: str = 'ffill')

                Create a regressor of features lagged by forecast length. Useful for some models that don’t otherwise use such information.

                It is recommended that the .head(forecast_length) of both regressor_train and the df for training be dropped. @@ -2970,7 +2978,7 @@
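                A small sketch following that recommendation (the two return values are assumed to be the train and forecast regressors):

                    from autots import create_lagged_regressor, load_daily

                    df = load_daily(long=False)
                    regr_train, regr_fcst = create_lagged_regressor(df, forecast_length=14)
                    # drop the first forecast_length rows of both, as recommended above
                    df_train = df.iloc[14:]
                    regr_train = regr_train.iloc[14:]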

                Subpackages
                -autots.create_regressor(df, forecast_length, frequency: str = 'infer', holiday_countries: list = ['US'], datepart_method: str = 'simple_binarized', drop_most_recent: int = 0, scale: bool = True, summarize: str = 'auto', backfill: str = 'bfill', n_jobs: str = 'auto', fill_na: str = 'ffill', aggfunc: str = 'first', encode_holiday_type=False, holiday_detector_params={'anomaly_detector_params': {'forecast_params': None, 'method': 'mad', 'method_params': {'alpha': 0.05, 'distribution': 'gamma'}, 'transform_dict': {'fillna': None, 'transformation_params': {'0': {}}, 'transformations': {'0': 'DifferencedTransformer'}}}, 'output': 'univariate', 'splash_threshold': None, 'threshold': 0.8, 'use_dayofmonth_holidays': True, 'use_hebrew_holidays': False, 'use_islamic_holidays': False, 'use_lunar_holidays': False, 'use_lunar_weekday': False, 'use_wkdeom_holidays': False, 'use_wkdom_holidays': True}, holiday_regr_style: str = 'flag', preprocessing_params: dict | None = None)
                +autots.create_regressor(df, forecast_length, frequency: str = 'infer', holiday_countries: list = ['US'], datepart_method: str = 'simple_binarized', drop_most_recent: int = 0, scale: bool = True, summarize: str = 'auto', backfill: str = 'bfill', n_jobs: str = 'auto', fill_na: str = 'ffill', aggfunc: str = 'first', encode_holiday_type=False, holiday_detector_params={'anomaly_detector_params': {'forecast_params': None, 'method': 'mad', 'method_params': {'alpha': 0.05, 'distribution': 'gamma'}, 'transform_dict': {'fillna': None, 'transformation_params': {'0': {}}, 'transformations': {'0': 'DifferencedTransformer'}}}, 'output': 'univariate', 'splash_threshold': None, 'threshold': 0.8, 'use_dayofmonth_holidays': True, 'use_hebrew_holidays': False, 'use_islamic_holidays': False, 'use_lunar_holidays': False, 'use_lunar_weekday': False, 'use_wkdeom_holidays': False, 'use_wkdom_holidays': True}, holiday_regr_style: str = 'flag', preprocessing_params: dict | None = None)

Create a regressor from information available in the existing dataset. Components are lagged data, datepart information, and holidays.

This function has confused some users: it is NOT necessary for the machine learning models, which in AutoTS internally create more elaborate feature sets on their own.
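A sketch of the intended workflow; the two return values and their order are an assumption here, named regressor_train and regressor_forecast for illustration:

from autots import load_daily, create_regressor

forecast_length = 14
df = load_daily(long=False)

regressor_train, regressor_forecast = create_regressor(
    df,
    forecast_length=forecast_length,
    frequency="infer",
    holiday_countries=["US"],
    datepart_method="simple_binarized",
)
# regressor_train would then be supplied as future_regressor when fitting,
# and regressor_forecast when predicting.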

                Subpackages
                -autots.infer_frequency(df_wide, warn=True, **kwargs)
                +autots.infer_frequency(df_wide, warn=True, **kwargs)

                Infer the frequency in a slightly more robust way.

                Parameters:
                @@ -3026,7 +3034,7 @@
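For example (a small sketch; the exact offset alias returned depends on the data):

from autots import load_daily, infer_frequency

df_wide = load_daily(long=False)
print(infer_frequency(df_wide))  # expected to print a pandas offset alias such as 'D'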

                Subpackages
                -autots.load_artificial(long=False, date_start=None, date_end=None)
                +autots.load_artificial(long=False, date_start=None, date_end=None)

Load artificially generated series from random distributions.

                Parameters:
                @@ -3041,7 +3049,7 @@

                Subpackages
                -autots.load_daily(long: bool = True)
                +autots.load_daily(long: bool = True)

                Daily sample data.

                wiki = [

"Germany", "Thanksgiving", "all", "Microsoft", …
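A quick sketch of loading it in both layouts (the long-format column names are assumed to match the long_to_wide defaults below):

from autots import load_daily

df_wide = load_daily(long=False)   # one column per series, DatetimeIndex
df_long = load_daily(long=True)    # assumed columns: datetime, series_id, value
print(df_wide.shape, df_long.shape)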

                Subpackages
                -autots.load_hourly(long: bool = True)
                +autots.load_hourly(long: bool = True)

                Traffic data from the MN DOT via the UCI data repository.

                -autots.load_linear(long=False, shape=None, start_date: str = '2021-01-01', introduce_nan: float | None = None, introduce_random: float | None = None, random_seed: int = 123)
                +autots.load_linear(long=False, shape=None, start_date: str = '2021-01-01', introduce_nan: float | None = None, introduce_random: float | None = None, random_seed: int = 123)

Create a dataset of synthetic linear-trend series for testing edge cases.

                Parameters:
                @@ -3088,7 +3096,7 @@

                Subpackages
                -autots.load_live_daily(long: bool = False, observation_start: str | None = None, observation_end: str | None = None, fred_key: str | None = None, fred_series=['DGS10', 'T5YIE', 'SP500', 'DCOILWTICO', 'DEXUSEU', 'WPU0911'], tickers: list = ['MSFT'], trends_list: list = ['forecasting', 'cycling', 'microsoft'], trends_geo: str = 'US', weather_data_types: list = ['AWND', 'WSF2', 'TAVG'], weather_stations: list = ['USW00094846', 'USW00014925'], weather_years: int = 5, london_air_stations: list = ['CT3', 'SK8'], london_air_species: str = 'PM25', london_air_days: int = 180, earthquake_days: int = 180, earthquake_min_magnitude: int = 5, gsa_key: str | None = None, gov_domain_list=['nasa.gov'], gov_domain_limit: int = 600, wikipedia_pages: list = ['Microsoft_Office', 'List_of_highest-grossing_films'], wiki_language: str = 'en', weather_event_types=['%28Z%29+Winter+Weather', '%28Z%29+Winter+Storm'], caiso_query: str = 'ENE_SLRS', timeout: float = 300.05, sleep_seconds: int = 2, **kwargs)
                +autots.load_live_daily(long: bool = False, observation_start: str | None = None, observation_end: str | None = None, fred_key: str | None = None, fred_series=['DGS10', 'T5YIE', 'SP500', 'DCOILWTICO', 'DEXUSEU', 'WPU0911'], tickers: list = ['MSFT'], trends_list: list = ['forecasting', 'cycling', 'microsoft'], trends_geo: str = 'US', weather_data_types: list = ['AWND', 'WSF2', 'TAVG'], weather_stations: list = ['USW00094846', 'USW00014925'], weather_years: int = 5, london_air_stations: list = ['CT3', 'SK8'], london_air_species: str = 'PM25', london_air_days: int = 180, earthquake_days: int = 180, earthquake_min_magnitude: int = 5, gsa_key: str | None = None, gov_domain_list=['nasa.gov'], gov_domain_limit: int = 600, wikipedia_pages: list = ['Microsoft_Office', 'List_of_highest-grossing_films'], wiki_language: str = 'en', weather_event_types=['%28Z%29+Winter+Weather', '%28Z%29+Winter+Storm'], caiso_query: str = 'ENE_SLRS', timeout: float = 300.05, sleep_seconds: int = 2, **kwargs)

                Generates a dataframe of data up to the present day. Requires active internet connection. Try to be respectful of these free data sources by not calling too much too heavily. Pass None instead of specification lists to exclude a data source.
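A hedged sketch that keeps only the Wikipedia source; passing None for a source's specification list is assumed to skip that source, per the note above, and only a subset of the available toggles is shown:

from autots import load_live_daily

df = load_live_daily(
    long=False,
    fred_key=None,             # FRED is assumed to be skipped without an API key
    tickers=None,
    trends_list=None,
    weather_stations=None,
    london_air_stations=None,
    gov_domain_list=None,
    weather_event_types=None,
    caiso_query=None,
    wikipedia_pages=["Microsoft_Office", "List_of_highest-grossing_films"],
    sleep_seconds=5,           # be gentle with the free data sources
)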

                @@ -3125,19 +3133,19 @@

                Subpackages
                -autots.load_monthly(long: bool = True)
                +autots.load_monthly(long: bool = True)

                Federal Reserve of St. Louis monthly economic indicators.

                -autots.load_sine(long=False, shape=None, start_date: str = '2021-01-01', introduce_random: float | None = None, random_seed: int = 123)
                +autots.load_sine(long=False, shape=None, start_date: str = '2021-01-01', introduce_random: float | None = None, random_seed: int = 123)

Create a dataset of synthetic sine-wave series for testing edge cases.

                -autots.load_weekdays(long: bool = False, categorical: bool = True, periods: int = 180)
                +autots.load_weekdays(long: bool = False, categorical: bool = True, periods: int = 180)

                Test edge cases by creating a Series with values as day of week.

                Parameters:
                @@ -3153,19 +3161,19 @@

                Subpackages
                -autots.load_weekly(long: bool = True)
                +autots.load_weekly(long: bool = True)

                Weekly petroleum industry data from the EIA.

                -autots.load_yearly(long: bool = True)
                +autots.load_yearly(long: bool = True)

                Federal Reserve of St. Louis annual economic indicators.

                -autots.long_to_wide(df, date_col: str = 'datetime', value_col: str = 'value', id_col: str = 'series_id', aggfunc: str = 'first')
                +autots.long_to_wide(df, date_col: str = 'datetime', value_col: str = 'value', id_col: str = 'series_id', aggfunc: str = 'first')

                Take long data and convert into wide, cleaner data.

                Parameters:
                @@ -3193,7 +3201,7 @@
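For example, round-tripping the bundled sample data with the documented defaults:

from autots import load_daily, long_to_wide

df_long = load_daily(long=True)
df_wide = long_to_wide(
    df_long,
    date_col="datetime",
    value_col="value",
    id_col="series_id",
    aggfunc="first",
)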

                Subpackages
                -autots.model_forecast(model_name, model_param_dict, model_transform_dict, df_train, forecast_length: int, frequency: str = 'infer', prediction_interval: float = 0.9, no_negatives: bool = False, constraint: float | None = None, future_regressor_train=None, future_regressor_forecast=None, holiday_country: str = 'US', startTimeStamps=None, grouping_ids=None, fail_on_forecast_nan: bool = True, random_seed: int = 2020, verbose: int = 0, n_jobs: int = 'auto', template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], horizontal_subset: list | None = None, return_model: bool = False, current_model_file: str | None = None, model_count: int = 0, force_gc: bool = False, **kwargs)
                +autots.model_forecast(model_name, model_param_dict, model_transform_dict, df_train, forecast_length: int, frequency: str = 'infer', prediction_interval: float = 0.9, no_negatives: bool = False, constraint: float | None = None, future_regressor_train=None, future_regressor_forecast=None, holiday_country: str = 'US', startTimeStamps=None, grouping_ids=None, fail_on_forecast_nan: bool = True, random_seed: int = 2020, verbose: int = 0, n_jobs: int = 'auto', template_cols: list = ['Model', 'ModelParameters', 'TransformationParameters', 'Ensemble'], horizontal_subset: list | None = None, return_model: bool = False, current_model_file: str | None = None, model_count: int = 0, force_gc: bool = False, **kwargs)

                Takes numeric data, returns numeric forecasts.

Only one model (albeit potentially an ensemble)! Horizontal ensembles cannot be nested; other ensemble types can be.
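A minimal sketch of running one named model this way; the model and transformer choices below are illustrative, with the parameter dicts following the usual template format:

from autots import load_daily, model_forecast

df = load_daily(long=False)
prediction = model_forecast(
    model_name="AverageValueNaive",
    model_param_dict={"method": "Mean"},
    model_transform_dict={
        "fillna": "mean",
        "transformations": {"0": "DifferencedTransformer"},
        "transformation_params": {"0": {}},
    },
    df_train=df,
    forecast_length=12,
)
print(prediction.forecast.head())  # a PredictionObject is returned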

                @@ -3325,21 +3333,5 @@


\ No newline at end of file
diff --git a/docs/build/html/source/autots.models.html b/docs/build/html/source/autots.models.html
index 5ef2e021..7c2f283a 100644
--- a/docs/build/html/source/autots.models.html
+++ b/docs/build/html/source/autots.models.html
@@ -1,17 +1,25 @@
autots.models package — AutoTS 0.6.10 documentation
@@ -33,16 +41,16 @@
-autots.models package
+autots.models package

-Submodules
+Submodules

-autots.models.arch module
+autots.models.arch module

                Arch Models from arch package.

                -class autots.models.arch.ARCH(name: str = 'ARCH', frequency: str = 'infer', prediction_interval: float = 0.9, mean: str = 'Constant', lags: int = 2, vol: str = 'GARCH', p: int = 1, o: int = 0, q: int = 1, power: float = 2.0, dist: str = 'normal', rescale: bool = False, maxiter: int = 200, simulations: int = 1000, regression_type: str | None = None, return_result_windows: bool = False, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, n_jobs: int | None = None, **kwargs)
                +class autots.models.arch.ARCH(name: str = 'ARCH', frequency: str = 'infer', prediction_interval: float = 0.9, mean: str = 'Constant', lags: int = 2, vol: str = 'GARCH', p: int = 1, o: int = 0, q: int = 1, power: float = 2.0, dist: str = 'normal', rescale: bool = False, maxiter: int = 200, simulations: int = 1000, regression_type: str | None = None, return_result_windows: bool = False, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, n_jobs: int | None = None, **kwargs)

                Bases: ModelObject

                ARCH model family from arch package. See arch package for arg details. Not to be confused with a linux distro.

                @@ -59,7 +67,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

Train algorithm given data supplied.

                Parameters:
                @@ -70,19 +78,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Return dict of new parameters for parameter tuning.

                -get_params()
                +get_params()

                Return dict of current parameters.

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generate forecast data immediately following dates of index supplied to .fit().

                Parameters:
                @@ -103,12 +111,12 @@

Submodules

-autots.models.base module
+autots.models.base module

                Base model information

                @author: Colin

                -class autots.models.base.ModelObject(name: str = 'Uninitiated Model Name', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, fit_runtime=datetime.timedelta(0), holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int = -1)
                +class autots.models.base.ModelObject(name: str = 'Uninitiated Model Name', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, fit_runtime=datetime.timedelta(0), holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int = -1)

                Bases: object

                Generic class for holding forecasting models.

                @@ -129,13 +137,13 @@

                Submodules
                -basic_profile(df)
                +basic_profile(df)

                Capture basic training details.

                -create_forecast_index(forecast_length: int, last_date=None)
                +create_forecast_index(forecast_length: int, last_date=None)

                Generate a pd.DatetimeIndex appropriate for a new forecast.

                Warning

                @@ -145,93 +153,93 @@

                Submodules
                -fit_data(df, future_regressor=None)
                +fit_data(df, future_regressor=None)

                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Return dict of new parameters for parameter tuning.

                -get_params()
                +get_params()

                Return dict of current parameters.

                -static time()
                +static time()
                -class autots.models.base.PredictionObject(model_name: str = 'Uninitiated', forecast_length: int = 0, forecast_index=nan, forecast_columns=nan, lower_forecast=nan, forecast=nan, upper_forecast=nan, prediction_interval: float = 0.9, predict_runtime=datetime.timedelta(0), fit_runtime=datetime.timedelta(0), model_parameters={}, transformation_parameters={}, transformation_runtime=datetime.timedelta(0), per_series_metrics=nan, per_timestamp=nan, avg_metrics=nan, avg_metrics_weighted=nan, full_mae_error=None, model=None, transformer=None)
                +class autots.models.base.PredictionObject(model_name: str = 'Uninitiated', forecast_length: int = 0, forecast_index=nan, forecast_columns=nan, lower_forecast=nan, forecast=nan, upper_forecast=nan, prediction_interval: float = 0.9, predict_runtime=datetime.timedelta(0), fit_runtime=datetime.timedelta(0), model_parameters={}, transformation_parameters={}, transformation_runtime=datetime.timedelta(0), per_series_metrics=nan, per_timestamp=nan, avg_metrics=nan, avg_metrics_weighted=nan, full_mae_error=None, model=None, transformer=None)

                Bases: object

                Generic class for holding forecast information.

                -model_name
                +model_name
                -model_parameters
                +model_parameters
                -transformation_parameters
                +transformation_parameters
                -forecast
                +forecast
                -upper_forecast
                +upper_forecast
                -lower_forecast
                +lower_forecast
                -long_form_results()
                +long_form_results()

                return complete results in long form

                -total_runtime()
                +total_runtime()

                return runtime for all model components in seconds

                -plot()
                +plot()
                -evaluate()
                +evaluate()
                -apply_constraints()
                +apply_constraints()
                -apply_constraints(constraint_method='quantile', constraint_regularization=0.5, upper_constraint=1.0, lower_constraint=0.0, bounds=True, df_train=None)
                +apply_constraints(constraint_method='quantile', constraint_regularization=0.5, upper_constraint=1.0, lower_constraint=0.0, bounds=True, df_train=None)

Use constraint thresholds to adjust outputs by limit. Note that only one method of constraint can be used per call; if different methods are desired, this can be run twice, passing None for whichever of the upper or lower constraints is not being used.
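Continuing the model_forecast sketch above, a hedged example of applying an upper and then a lower quantile constraint in two passes (this assumes the method returns the adjusted PredictionObject; it may equally modify it in place):

prediction = prediction.apply_constraints(
    constraint_method="quantile",
    constraint_regularization=1.0,
    upper_constraint=0.99,
    lower_constraint=None,   # upper bound only on this pass
    bounds=True,
    df_train=df,
)
prediction = prediction.apply_constraints(
    constraint_method="quantile",
    constraint_regularization=1.0,
    upper_constraint=None,
    lower_constraint=0.01,   # lower bound on the second pass
    bounds=True,
    df_train=df,
)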

                @@ -259,7 +267,7 @@

                Submodules
                -evaluate(actual, series_weights: dict | None = None, df_train=None, per_timestamp_errors: bool = False, full_mae_error: bool = True, scaler=None, cumsum_A=None, diff_A=None, last_of_array=None)
                +evaluate(actual, series_weights: dict | None = None, df_train=None, per_timestamp_errors: bool = False, full_mae_error: bool = True, scaler=None, cumsum_A=None, diff_A=None, last_of_array=None)

Evaluate prediction against the test actuals. Fills out attributes of the base object.

                This fails with pd.NA values supplied.

                @@ -290,13 +298,13 @@

                Submodules
                -extract_ensemble_runtimes()
                +extract_ensemble_runtimes()

                Return a dataframe of final runtimes per model for standard ensembles.

                -long_form_results(id_name='SeriesID', value_name='Value', interval_name='PredictionInterval', update_datetime_name=None, datetime_column=None)
                +long_form_results(id_name='SeriesID', value_name='Value', interval_name='PredictionInterval', update_datetime_name=None, datetime_column=None)

                Export forecasts (including upper and lower) as single ‘long’ format output

                Parameters:
                @@ -316,7 +324,7 @@

                Submodules
                -plot(df_wide=None, series: str | None = None, remove_zeroes: bool = False, interpolate: str | None = None, start_date: str = 'auto', alpha=0.3, facecolor='black', loc='upper right', title=None, title_substring=None, vline=None, colors=None, include_bounds=True, **kwargs)
                +plot(df_wide=None, series: str | None = None, remove_zeroes: bool = False, interpolate: str | None = None, start_date: str = 'auto', alpha=0.3, facecolor='black', loc='upper right', title=None, title_substring=None, vline=None, colors=None, include_bounds=True, **kwargs)

                Generate an example plot of one series. Does not handle non-numeric forecasts.

                Parameters:
                @@ -341,24 +349,24 @@
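A small sketch, continuing the model_forecast example above (the start date is hypothetical):

prediction.plot(
    df,                       # wide history for context
    series=df.columns[0],
    start_date="2022-01-01",  # hypothetical window start
    include_bounds=True,
)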

                Submodules
                -plot_df(df_wide=None, series: str | None = None, remove_zeroes: bool = False, interpolate: str | None = None, start_date: str | None = None)
                +plot_df(df_wide=None, series: str | None = None, remove_zeroes: bool = False, interpolate: str | None = None, start_date: str | None = None)

                -plot_ensemble_runtimes(xlim_right=None)
                +plot_ensemble_runtimes(xlim_right=None)

                Plot ensemble runtimes by model type.

                -plot_grid(df_wide=None, start_date='auto', interpolate=None, remove_zeroes=False, figsize=(24, 18), title='AutoTS Forecasts', cols=None, colors=None, include_bounds=True)
                +plot_grid(df_wide=None, start_date='auto', interpolate=None, remove_zeroes=False, figsize=(24, 18), title='AutoTS Forecasts', cols=None, colors=None, include_bounds=True)

                Plots multiple series in a grid, if present. Mostly identical args to the single plot function.

                -total_runtime()
                +total_runtime()

                Combine runtimes.

                @@ -366,7 +374,7 @@

                Submodules
                -autots.models.base.apply_constraints(forecast, lower_forecast, upper_forecast, constraint_method, constraint_regularization, upper_constraint, lower_constraint, bounds, df_train=None)
                +autots.models.base.apply_constraints(forecast, lower_forecast, upper_forecast, constraint_method, constraint_regularization, upper_constraint, lower_constraint, bounds, df_train=None)

Use constraint thresholds to adjust outputs by limit. Note that only one method of constraint can be used per call; if different methods are desired, this can be run twice, passing None for whichever of the upper or lower constraints is not being used.

                @@ -398,41 +406,41 @@

                Submodules
                -autots.models.base.calculate_peak_density(model, data, group_col='Model', y_col='TotalRuntimeSeconds')
                +autots.models.base.calculate_peak_density(model, data, group_col='Model', y_col='TotalRuntimeSeconds')

                -autots.models.base.create_forecast_index(frequency, forecast_length, train_last_date, last_date=None)
                +autots.models.base.create_forecast_index(frequency, forecast_length, train_last_date, last_date=None)
                -autots.models.base.create_seaborn_palette_from_cmap(cmap_name='gist_rainbow', n=10)
                +autots.models.base.create_seaborn_palette_from_cmap(cmap_name='gist_rainbow', n=10)
                -autots.models.base.extract_single_series_from_horz(series, model_name, model_parameters)
                +autots.models.base.extract_single_series_from_horz(series, model_name, model_parameters)
                -autots.models.base.extract_single_transformer(series, model_name, model_parameters, transformation_params)
                +autots.models.base.extract_single_transformer(series, model_name, model_parameters, transformation_params)
                -autots.models.base.plot_distributions(runtimes_data, group_col='Model', y_col='TotalRuntimeSeconds', xlim=None, xlim_right=None, title_suffix='')
                +autots.models.base.plot_distributions(runtimes_data, group_col='Model', y_col='TotalRuntimeSeconds', xlim=None, xlim_right=None, title_suffix='')

                -

                autots.models.basics module

                +

                autots.models.basics module

                Naives and Others Requiring No Additional Packages Beyond Numpy and Pandas

                -class autots.models.basics.AverageValueNaive(name: str = 'AverageValueNaive', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, method: str = 'median', window: int | None = None, **kwargs)
                +class autots.models.basics.AverageValueNaive(name: str = 'AverageValueNaive', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, method: str = 'median', window: int | None = None, **kwargs)

                Bases: ModelObject

                Naive forecasting predicting a dataframe of the series’ median values

                @@ -446,7 +454,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -457,19 +465,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters.

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -490,7 +498,7 @@

                Submodules
                -class autots.models.basics.BallTreeMultivariateMotif(frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int = 1, window: int = 5, point_method: str = 'mean', distance_metric: str = 'canberra', k: int = 10, sample_fraction=None, **kwargs)
                +class autots.models.basics.BallTreeMultivariateMotif(frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int = 1, window: int = 5, point_method: str = 'mean', distance_metric: str = 'canberra', k: int = 10, sample_fraction=None, **kwargs)

                Bases: ModelObject

                Forecasts using a nearest neighbors type model adapted for probabilistic time series. Many of these motifs will struggle when the forecast_length is large and history is short.
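A rough sketch of driving one of these low-level models directly (most workflows go through AutoTS() or model_forecast() instead; fit() is assumed to return self):

from autots import load_daily
from autots.models.basics import BallTreeMultivariateMotif

df = load_daily(long=False)
model = BallTreeMultivariateMotif(
    window=10, point_method="mean", distance_metric="canberra", k=10
)
model = model.fit(df)
prediction = model.predict(forecast_length=14)
print(prediction.forecast.head())         # point forecasts
print(prediction.upper_forecast.head())   # upper prediction interval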

                @@ -511,7 +519,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -522,19 +530,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -555,7 +563,7 @@

                Submodules
                -class autots.models.basics.ConstantNaive(name: str = 'ConstantNaive', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, constant: float = 0, **kwargs)
                +class autots.models.basics.ConstantNaive(name: str = 'ConstantNaive', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, constant: float = 0, **kwargs)

                Bases: ModelObject

                Naive forecasting predicting a dataframe of zeroes (0’s)

                @@ -570,7 +578,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied

                Parameters:
                @@ -581,19 +589,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -614,11 +622,11 @@

                Submodules
                -class autots.models.basics.FFT(name: str = 'FFT', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2023, verbose: int = 0, n_harmonics: int = 10, detrend: str = 'linear', **kwargs)
                +class autots.models.basics.FFT(name: str = 'FFT', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2023, verbose: int = 0, n_harmonics: int = 10, detrend: str = 'linear', **kwargs)

                Bases: ModelObject

                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -632,19 +640,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -665,7 +673,7 @@

                Submodules
                -class autots.models.basics.KalmanStateSpace(name: str = 'KalmanStateSpace', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, state_transition=[[1, 1], [0, 1]], process_noise=[[0.1, 0.0], [0.0, 0.01]], observation_model=[[1, 0]], observation_noise: float = 1.0, em_iter: int = 10, model_name: str = 'undefined', forecast_length: int | None = None, **kwargs)
                +class autots.models.basics.KalmanStateSpace(name: str = 'KalmanStateSpace', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, state_transition=[[1, 1], [0, 1]], process_noise=[[0.1, 0.0], [0.0, 0.01]], observation_model=[[1, 0]], observation_noise: float = 1.0, em_iter: int = 10, model_name: str = 'undefined', forecast_length: int | None = None, **kwargs)

                Bases: ModelObject

                Forecast using a state space model solved by a Kalman Filter.

                @@ -679,12 +687,12 @@

                Submodules
                -cost_function(param, df)
                +cost_function(param, df)

                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -695,24 +703,24 @@

                Submodules
                -fit_data(df, future_regressor=None)
                +fit_data(df, future_regressor=None)

                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Return dict of new parameters for parameter tuning.

                -get_params()
                +get_params()

                Return dict of current parameters.

                -predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -731,14 +739,14 @@

                Submodules
                -tune_observational_noise(df)
                +tune_observational_noise(df)

                -class autots.models.basics.LastValueNaive(name: str = 'LastValueNaive', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, **kwargs)
                +class autots.models.basics.LastValueNaive(name: str = 'LastValueNaive', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, **kwargs)

                Bases: ModelObject

                Naive forecasting predicting a dataframe of the last series value

                @@ -752,7 +760,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied

                Parameters:
                @@ -763,19 +771,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -796,7 +804,7 @@

                Submodules
                -class autots.models.basics.MetricMotif(name: str = 'MetricMotif', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, regression_type: str | None = None, comparison_transformation: dict | None = None, combination_transformation: dict | None = None, window: int = 5, point_method: str = 'mean', distance_metric: str = 'mae', k: int = 10, **kwargs)
                +class autots.models.basics.MetricMotif(name: str = 'MetricMotif', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, regression_type: str | None = None, comparison_transformation: dict | None = None, combination_transformation: dict | None = None, window: int = 5, point_method: str = 'mean', distance_metric: str = 'mae', k: int = 10, **kwargs)

                Bases: ModelObject

                Forecasts using a nearest neighbors type model adapted for probabilistic time series. This version is fully vectorized, using basic metrics for distance comparison.

                @@ -816,7 +824,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -830,19 +838,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -863,7 +871,7 @@

                Submodules
                -class autots.models.basics.Motif(name: str = 'Motif', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int = 1, window: int = 5, point_method: str = 'weighted_mean', distance_metric: str = 'minkowski', k: int = 10, max_windows: int = 5000, multivariate: bool = False, return_result_windows: bool = False, **kwargs)
                +class autots.models.basics.Motif(name: str = 'Motif', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int = 1, window: int = 5, point_method: str = 'weighted_mean', distance_metric: str = 'minkowski', k: int = 10, max_windows: int = 5000, multivariate: bool = False, return_result_windows: bool = False, **kwargs)

                Bases: ModelObject

                Forecasts using a nearest neighbors type model adapted for probabilistic time series.

                @@ -887,7 +895,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -898,19 +906,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -931,7 +939,7 @@

                Submodules
                -class autots.models.basics.MotifSimulation(name: str = 'MotifSimulation', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, phrase_len: str = '5', comparison: str = 'magnitude_pct_change_sign', shared: bool = False, distance_metric: str = 'l2', max_motifs: float = 50, recency_weighting: float = 0.1, cutoff_threshold: float = 0.9, cutoff_minimum: int = 20, point_method: str = 'median', n_jobs: int = -1, verbose: int = 1, **kwargs)
                +class autots.models.basics.MotifSimulation(name: str = 'MotifSimulation', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, phrase_len: str = '5', comparison: str = 'magnitude_pct_change_sign', shared: bool = False, distance_metric: str = 'l2', max_motifs: float = 50, recency_weighting: float = 0.1, cutoff_threshold: float = 0.9, cutoff_minimum: int = 20, point_method: str = 'median', n_jobs: int = -1, verbose: int = 1, **kwargs)

                Bases: ModelObject

                More dark magic created by the evil mastermind of this project. Basically a highly-customized KNN

                @@ -956,7 +964,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -967,19 +975,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Return dict of new parameters for parameter tuning.

                -get_params()
                +get_params()

                Return dict of current parameters.

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -1000,7 +1008,7 @@

                Submodules
                -class autots.models.basics.NVAR(name: str = 'NVAR', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, k: int = 1, ridge_param: float = 2.5e-06, warmup_pts: int = 1, seed_pts: int = 1, seed_weighted: str | None = None, batch_size: int = 5, batch_method: str = 'input_order', **kwargs)
                +class autots.models.basics.NVAR(name: str = 'NVAR', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, k: int = 1, ridge_param: float = 2.5e-06, warmup_pts: int = 1, seed_pts: int = 1, seed_weighted: str | None = None, batch_size: int = 5, batch_method: str = 'input_order', **kwargs)

                Bases: ModelObject

                Nonlinear Variable Autoregression or ‘Next-Generation Reservoir Computing’

based on https://github.com/quantinfo/ng-rc-paper-code/

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -1034,19 +1042,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters.

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -1067,7 +1075,7 @@

                Submodules
                -class autots.models.basics.SeasonalNaive(name: str = 'SeasonalNaive', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, lag_1: int = 7, lag_2: int | None = None, method: str = 'lastvalue', **kwargs)
                +class autots.models.basics.SeasonalNaive(name: str = 'SeasonalNaive', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, lag_1: int = 7, lag_2: int | None = None, method: str = 'lastvalue', **kwargs)

                Bases: ModelObject

                Naive forecasting predicting a dataframe with seasonal (lag) forecasts.

                Concerto No. 2 in G minor, Op. 8, RV 315

                @@ -1085,7 +1093,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -1096,19 +1104,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Return dict of new parameters for parameter tuning.

                -get_params()
                +get_params()

                Return dict of current parameters.

                -predict(forecast_length: int, future_regressor=None, just_point_forecast: bool = False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast: bool = False)

                Generate forecast data immediately following dates of .fit().

                Parameters:
                @@ -1129,7 +1137,7 @@

                Submodules
                -class autots.models.basics.SeasonalityMotif(name: str = 'SeasonalityMotif', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, regression_type: str | None = None, window: int = 5, point_method: str = 'mean', distance_metric: str = 'mae', k: int = 10, datepart_method: str = 'common_fourier', independent: bool = False, **kwargs)
                +class autots.models.basics.SeasonalityMotif(name: str = 'SeasonalityMotif', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, regression_type: str | None = None, window: int = 5, point_method: str = 'mean', distance_metric: str = 'mae', k: int = 10, datepart_method: str = 'common_fourier', independent: bool = False, **kwargs)

                Bases: ModelObject

                Forecasts using a nearest neighbors type model adapted for probabilistic time series. This version is fully vectorized, using basic metrics for distance comparison.

                @@ -1150,7 +1158,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -1164,19 +1172,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -1197,7 +1205,7 @@

                Submodules
                -class autots.models.basics.SectionalMotif(name: str = 'SectionalMotif', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, regression_type: str | None = None, window: int = 5, point_method: str = 'weighted_mean', distance_metric: str = 'nan_euclidean', include_differenced: bool = False, k: int = 10, stride_size: int = 1, **kwargs)
                +class autots.models.basics.SectionalMotif(name: str = 'SectionalMotif', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, regression_type: str | None = None, window: int = 5, point_method: str = 'weighted_mean', distance_metric: str = 'nan_euclidean', include_differenced: bool = False, k: int = 10, stride_size: int = 1, **kwargs)

                Bases: ModelObject

                Forecasts using a nearest neighbors type model adapted for probabilistic time series. This version takes the distance metric average for all series at once.

                @@ -1220,7 +1228,7 @@

                Submodules
                -fit(df, future_regressor=None)
                +fit(df, future_regressor=None)

                Train algorithm given data supplied.

                Parameters:
                @@ -1234,19 +1242,19 @@

                Submodules
                -get_new_params(method: str = 'random')
                +get_new_params(method: str = 'random')

                Returns dict of new parameters for parameter tuning

                -get_params()
                +get_params()

                Return dict of current parameters

                -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
                +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

                Generates forecast data immediately following dates of index supplied to .fit()

                Parameters:
                @@ -1267,19 +1275,19 @@

                Submodules
                -autots.models.basics.ZeroesNaive
                +autots.models.basics.ZeroesNaive

                alias of ConstantNaive

                -autots.models.basics.looped_motif(Xa, Xb, name, r_arr=None, window=10, distance_metric='minkowski', k=10, point_method='mean', prediction_interval=0.9, return_result_windows=False)
                +autots.models.basics.looped_motif(Xa, Xb, name, r_arr=None, window=10, distance_metric='minkowski', k=10, point_method='mean', prediction_interval=0.9, return_result_windows=False)

                inner function for Motif model.

                -autots.models.basics.predict_reservoir(df, forecast_length, prediction_interval=None, warmup_pts=1, k=2, ridge_param=2.5e-06, seed_pts: int = 1, seed_weighted: str | None = None)
                +autots.models.basics.predict_reservoir(df, forecast_length, prediction_interval=None, warmup_pts=1, k=2, ridge_param=2.5e-06, seed_pts: int = 1, seed_weighted: str | None = None)

                Nonlinear Variable Autoregression or ‘Next-Generation Reservoir Computing’

based on https://github.com/quantinfo/ng-rc-paper-code/ Gauthier, D.J., Bollt, E., Griffith, A. et al. Next generation reservoir computing. Nat Commun 12, 5564 (2021).

Submodules

-autots.models.cassandra module
+autots.models.cassandra module

                Cassandra Model. Created on Tue Sep 13 19:45:57 2022

                @author: Colin with assistance from @crgillespie22

                -class autots.models.cassandra.BayesianMultiOutputRegression(gaussian_prior_mean=0, alpha=1.0, wishart_prior_scale=1.0, wishart_dof_excess=0)
                +class autots.models.cassandra.BayesianMultiOutputRegression(gaussian_prior_mean=0, alpha=1.0, wishart_prior_scale=1.0, wishart_dof_excess=0)

                Bases: object

                Bayesian Linear Regression, conjugate prior update.

                @@ -1330,24 +1338,24 @@

                Submodules
                -fit(X, Y)
                +fit(X, Y)

                -predict(X, return_std=False)
                +predict(X, return_std=False)
                -sample_posterior(n_samples=1)
                +sample_posterior(n_samples=1)
                -class autots.models.cassandra.Cassandra(preprocessing_transformation: dict | None = None, scaling: str = 'BaseScaler', past_impacts_intervention: str | None = None, seasonalities: dict = ['common_fourier'], ar_lags: list | None = None, ar_interaction_seasonality: dict | None = None, anomaly_detector_params: dict | None = None, anomaly_intervention: str | None = None, holiday_detector_params: dict | None = None, holiday_countries: dict | None = None, holiday_countries_used: bool = True, multivariate_feature: str | None = None, multivariate_transformation: str | None = None, regressor_transformation: dict | None = None, regressors_used: bool = True, linear_model: dict | None = None, randomwalk_n: int | None = None, trend_window: int = 30, trend_standin: str | None = None, trend_anomaly_detector_params: dict | None = None, trend_transformation: dict = {}, trend_model: dict = {'Model': 'LastValueNaive', 'ModelParameters': {}}, trend_phi: float | None = None, constraint: dict | None = None, max_colinearity: float = 0.998, max_multicolinearity: float = 0.001, frequency: str = 'infer', prediction_interval: float = 0.9, random_seed: int = 2022, verbose: int = 0, n_jobs: int = 'auto', **kwargs)
                +class autots.models.cassandra.Cassandra(preprocessing_transformation: dict | None = None, scaling: str = 'BaseScaler', past_impacts_intervention: str | None = None, seasonalities: dict = ['common_fourier'], ar_lags: list | None = None, ar_interaction_seasonality: dict | None = None, anomaly_detector_params: dict | None = None, anomaly_intervention: str | None = None, holiday_detector_params: dict | None = None, holiday_countries: dict | None = None, holiday_countries_used: bool = True, multivariate_feature: str | None = None, multivariate_transformation: str | None = None, regressor_transformation: dict | None = None, regressors_used: bool = True, linear_model: dict | None = None, randomwalk_n: int | None = None, trend_window: int = 30, trend_standin: str | None = None, trend_anomaly_detector_params: dict | None = None, trend_transformation: dict = {}, trend_model: dict = {'Model': 'LastValueNaive', 'ModelParameters': {}}, trend_phi: float | None = None, constraint: dict | None = None, max_colinearity: float = 0.998, max_multicolinearity: float = 0.001, frequency: str = 'infer', prediction_interval: float = 0.9, random_seed: int = 2022, verbose: int = 0, n_jobs: int = 'auto', **kwargs)

                Bases: ModelObject

                Explainable decomposition-based forecasting with advanced trend modeling and preprocessing.

Tunc etiam fatis aperit Cassandra futuris ("then Cassandra, too, opens her lips to the fates to come").
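A rough usage sketch of driving Cassandra directly (most users reach it through AutoTS; fit() is assumed to return self):

from autots import load_daily
from autots.models.cassandra import Cassandra

df = load_daily(long=False)
mod = Cassandra(seasonalities=["common_fourier"], trend_window=30, n_jobs="auto")
mod = mod.fit(df)
prediction = mod.predict(forecast_length=28)
mod.plot_forecast(prediction, series=df.columns[0])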

                Submodules
                -fit()
                +fit()

                -predict()
                +predict()
                -holiday_detector.dates_to_holidays()
                +holiday_detector.dates_to_holidays()
                -create_forecast_index()
                +create_forecast_index()

After .fit(), can be used to create the index of a prediction.

                -plot_forecast()
                +plot_forecast()
                -plot_components()
                +plot_components()
                -plot_trend()
                +plot_trend()
                -get_new_params()
                +get_new_params()
                -return_components()
                +return_components()
                -.anomaly_detector.anomalies
                +.anomaly_detector.anomalies
                -.anomaly_detector.scores
                +.anomaly_detector.scores
                -.holiday_count
                +.holiday_count
                -.holidays
                +.holidays
                Type:

                series flags, holiday detector only

                @@ -1452,7 +1460,7 @@

                Submodules
                -.params
                +.params

.x_array
.predict_x_array
.trend_train
.predicted_trend
analyze_trend(slope, index)
auto_fit(df, validation_method)
base_scaler(df)
compare_actual_components()
create_t(DTindex)
cross_validate(df, validation_method)
feature_importance()
fit(df, future_regressor=None, regressor_per_series=None, flag_regressors=None, categorical_groups=None, past_impacts=None)
fit_data(df, forecast_length=None, future_regressor=None, regressor_per_series=None, flag_regressors=None, future_impacts=None, regressor_forecast_model=None, regressor_forecast_model_params=None, regressor_forecast_transformations=None, include_history=False, past_impacts=None)
get_new_params(method='fast')
    Return dict of new parameters for parameter tuning.
get_params()
    Return dict of current parameters.
next_fit()
plot_components(prediction=None, series=None, figsize=(16, 9), to_origin_space=True, title=None, start_date=None)
plot_forecast(prediction, actuals=None, series=None, start_date=None, anomaly_color='darkslateblue', holiday_color='darkgreen', trend_anomaly_color='slategray', point_size=12.0)
    Plot a forecast time series.
plot_things()
plot_trend(series=None, vline=None, colors=['#d4f74f', '#82ab5a', '#ff6c05', '#c12600'], title=None, start_date=None, **kwargs)
predict(forecast_length=None, include_history=False, future_regressor=None, regressor_per_series=None, flag_regressors=None, future_impacts=None, new_df=None, regressor_forecast_model=None, regressor_forecast_model_params=None, regressor_forecast_transformations=None, include_organic=False, df=None, past_impacts=None)
    Generate a forecast. future_regressor and regressor_per_series should only include new future values (history is already stored); they should match the forecast_length and the index of the forecasts.
predict_new_product()
process_components(to_origin_space=True)
    Scale and standardize component outputs.
return_components(to_origin_space=True, include_impacts=False)
    Return additive elements of the forecast: linear and trend. If impacts are included, they are a multiplicative term.
rolling_trend(trend_residuals, t)
scale_data(df)
to_origin_space(df, trans_method='forecast', components=False, bounds=False)
    Take transformed outputs back to original feature space.
treatment_causal_impact(df, intervention_dates)
autots.models.cassandra.clean_regressor(in_d, prefix='regr_')
autots.models.cassandra.cost_function_dwae(params, X, y)
autots.models.cassandra.cost_function_l1(params, X, y)
autots.models.cassandra.cost_function_l1_positive(params, X, y)
autots.models.cassandra.cost_function_l2(params, X, y)
autots.models.cassandra.cost_function_quantile(params, X, y, q=0.9)
autots.models.cassandra.create_t(ds)
autots.models.cassandra.fit_linear_model(x, y, params=None)
autots.models.cassandra.lstsq_minimize(X, y, maxiter=15000, cost_function='l1', method=None)
    Any cost-function version of linear regression.
autots.models.cassandra.lstsq_solve(X, y, lamb=1, identity_matrix=None)
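As a usage note for the cost-function solvers listed above, a minimal sketch follows. It is not from the source; it assumes autots is installed and that lstsq_minimize returns the fitted coefficient array for X @ beta ≈ y (the return type is not shown in this listing).

    import numpy as np
    from autots.models.cassandra import lstsq_minimize

    # Toy regression problem: y is roughly X @ [2, -1] plus noise.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 2))
    y = X @ np.array([[2.0], [-1.0]]) + rng.normal(scale=0.1, size=(200, 1))

    # cost_function='l1' selects the L1 objective from the listing above;
    # the exact shape of the returned coefficients is an assumption.
    beta = lstsq_minimize(X, y, maxiter=15000, cost_function='l1')
    print(np.round(np.asarray(beta).ravel(), 2))  # expect values near 2.0 and -1.0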

autots.models.dnn module

                Neural Nets.

class autots.models.dnn.KerasRNN(rnn_type: str = 'LSTM', kernel_initializer: str = 'lecun_uniform', hidden_layer_sizes: tuple = (32, 32, 32), optimizer: str = 'adam', loss: str = 'huber', epochs: int = 50, batch_size: int = 32, shape=1, verbose: int = 1, random_seed: int = 2020)
    Bases: object
    Wrapper for TensorFlow Keras based RNN.
    fit(X, Y)
        Train the model on dataframes of X and Y.
    predict(X)
        Predict on dataframe of X.
class autots.models.dnn.Transformer(head_size=256, num_heads=4, ff_dim=4, num_transformer_blocks=4, mlp_units=[128], mlp_dropout=0.4, dropout=0.25, optimizer: str = 'adam', loss: str = 'huber', epochs: int = 50, batch_size: int = 32, verbose: int = 1, random_seed: int = 2020)
    Bases: object
    Wrapper for TensorFlow Keras based Transformer.
    Based on: https://keras.io/examples/timeseries/timeseries_transformer_classification/
    fit(X, Y)
        Train the model on dataframes of X and Y.
    predict(X)
        Predict on dataframe of X.
autots.models.dnn.transformer_build_model(input_shape, output_shape, head_size, num_heads, ff_dim, num_transformer_blocks, mlp_units, dropout=0, mlp_dropout=0)
autots.models.dnn.transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0)
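A brief sketch of exercising the KerasRNN wrapper directly, assuming TensorFlow is available; the lag-window construction and the 2-D layout of X and Y here are illustrative assumptions, not taken from the diff.

    import numpy as np
    import pandas as pd
    from autots.models.dnn import KerasRNN

    # Toy supervised set: predict the next value from the previous 10.
    series = np.sin(np.arange(300) / 7.0)
    X = pd.DataFrame([series[i:i + 10] for i in range(280)])
    Y = pd.DataFrame(series[10:290])

    model = KerasRNN(rnn_type='LSTM', hidden_layer_sizes=(32, 32), epochs=10, verbose=0)
    model.fit(X, Y)                     # "Train the model on dataframes of X and Y."
    preds = model.predict(X.tail(5))    # "Predict on dataframe of X."
    print(np.asarray(preds).shape)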

autots.models.ensemble module

          Tools for generating and forecasting with ensembles of models.

autots.models.ensemble.BestNEnsemble(ensemble_params, forecasts, lower_forecasts, upper_forecasts, forecasts_runtime: dict, prediction_interval: float = 0.9)
    Generate mean forecast for ensemble of models. model_weights and point_methods other than ‘mean’ are incompatible.
autots.models.ensemble.DistEnsemble(ensemble_params, forecasts_list, forecasts, lower_forecasts, upper_forecasts, forecasts_runtime, prediction_interval)
    Generate forecast for distance ensemble.
autots.models.ensemble.EnsembleForecast(ensemble_str, ensemble_params, forecasts_list, forecasts, lower_forecasts, upper_forecasts, forecasts_runtime, prediction_interval, df_train=None, prematched_series: dict | None = None)
    Return PredictionObject for given ensemble method.
autots.models.ensemble.EnsembleTemplateGenerator(initial_results, forecast_length: int = 14, ensemble: str = 'simple', score_per_series=None, use_validation=False)
    Generate class 1 (non-horizontal) ensemble templates given a table of results.
autots.models.ensemble.HDistEnsemble(ensemble_params, forecasts_list, forecasts, lower_forecasts, upper_forecasts, forecasts_runtime, prediction_interval)
    Generate forecast for per_series per distance ensembling.
autots.models.ensemble.HorizontalEnsemble(ensemble_params, forecasts_list, forecasts, lower_forecasts, upper_forecasts, forecasts_runtime, prediction_interval, df_train=None, prematched_series: dict | None = None)
    Generate forecast for per_series ensembling.
autots.models.ensemble.HorizontalTemplateGenerator(per_series, model_results, forecast_length: int = 14, ensemble: str = 'horizontal', subset_flag: bool = True, per_series2=None, only_specified: bool = False)
    Generate horizontal ensemble templates given a table of results.
autots.models.ensemble.MosaicEnsemble(ensemble_params, forecasts_list, forecasts, lower_forecasts, upper_forecasts, forecasts_runtime, prediction_interval, df_train=None, prematched_series: dict | None = None)
    Generate forecast for mosaic ensembling.
autots.models.ensemble.find_pattern(strings, x, sep='-')
autots.models.ensemble.generalize_horizontal(df_train, known_matches: dict, available_models: list, full_models: list | None = None)
    Generalize a horizontal model trained on a subset of all series.
autots.models.ensemble.generate_crosshair_score(error_matrix, method=None)
autots.models.ensemble.generate_crosshair_score_list(error_list)
autots.models.ensemble.generate_mosaic_template(initial_results, full_mae_ids, num_validations, col_names, full_mae_errors, smoothing_window=None, metric_name='MAE', models_to_use=None, **kwargs)
    Generate an ensemble template from results.
autots.models.ensemble.horizontal_classifier(df_train, known: dict, method: str = 'whatever', classifier_params=None)
    Classify unknown series with the appropriate model for horizontal ensembling.
autots.models.ensemble.horizontal_xy(df_train, known)
    Construct X, Y, X_predict features for generalization.
autots.models.ensemble.is_horizontal(ensemble_list)
autots.models.ensemble.is_mosaic(ensemble_list)
autots.models.ensemble.mlens_helper(models, models_source='bestn')
autots.models.ensemble.mosaic_classifier(df_train, known, classifier_params=None)
    Classify unknown series with the appropriate model for mosaic ensembles.
autots.models.ensemble.mosaic_or_horizontal(all_series: dict)
    Take a mosaic or horizontal model and return series or models.
autots.models.ensemble.mosaic_to_horizontal(ModelParameters, forecast_period: int = 0)
    Take a mosaic template and pull a single forecast step as a horizontal model.
autots.models.ensemble.mosaic_xy(df_train, known)
autots.models.ensemble.n_limited_horz(per_series, K, safety_model=False)
autots.models.ensemble.parse_forecast_length(forecast_length)
autots.models.ensemble.parse_horizontal(all_series: dict, model_id: str | None = None, series_id: str | None = None)
    Take a mosaic or horizontal model and return series or models.
autots.models.ensemble.parse_mosaic(ensemble)
autots.models.ensemble.process_mosaic_arrays(local_results, full_mae_ids, full_mae_errors, total_vals, models_to_use=None, smoothing_window=None)
autots.models.ensemble.summarize_series(df)
    Summarize time series data. For now just df.describe().
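To illustrate the horizontal-ensemble helpers, a hedged sketch of horizontal_classifier: df_train holds wide-format series, known maps a subset of series names to chosen model IDs (the IDs below are made up), and the function is assumed to return that mapping extended to the remaining series; the return format is inferred from the docstring, not shown in this diff.

    import numpy as np
    import pandas as pd
    from autots.models.ensemble import horizontal_classifier, summarize_series

    idx = pd.date_range('2023-01-01', periods=120, freq='D')
    df_train = pd.DataFrame(
        np.random.default_rng(1).normal(size=(120, 4)),
        index=idx,
        columns=['a', 'b', 'c', 'd'],
    )

    # Two series already have models assigned; classify the other two.
    known = {'a': 'model_id_1', 'b': 'model_id_2'}   # hypothetical model IDs
    full_assignment = horizontal_classifier(df_train, known)
    print(full_assignment)

    # summarize_series is just df.describe() for now, per the docstring.
    print(summarize_series(df_train))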

autots.models.gluonts module

          GluonTS

Best neuralnet models currently available, released by Amazon, scale well. Except it is really the only thing I use that runs mxnet, and it takes a while to train these guys…

class autots.models.gluonts.GluonTS(name: str = 'GluonTS', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, gluon_model: str = 'DeepAR', epochs: int = 20, learning_rate: float = 0.001, context_length=10, forecast_length: int = 14, **kwargs)
    Bases: ModelObject
    GluonTS based on mxnet.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    fit_data(df, future_regressor=None)
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int | None = None, future_regressor=[], just_point_forecast=False)
        Generate forecast data immediately following dates of index supplied to .fit().
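The ModelObject wrappers documented here all follow the same fit/predict pattern shown in these signatures. A hedged sketch using GluonTS as the representative: it assumes mxnet and gluonts are installed, and that predict() returns AutoTS's usual PredictionObject with a .forecast dataframe (that attribute is an assumption, it is not shown in this listing).

    import numpy as np
    import pandas as pd
    from autots.models.gluonts import GluonTS

    idx = pd.date_range('2022-01-01', periods=200, freq='D')
    df = pd.DataFrame(
        {'sales': np.sin(np.arange(200) / 7.0) + 10,
         'visits': np.cos(np.arange(200) / 7.0) + 50},
        index=idx,
    )

    model = GluonTS(gluon_model='DeepAR', epochs=20, forecast_length=14)
    model.fit(df)                                    # train on the wide dataframe
    prediction = model.predict(forecast_length=14)   # horizon immediately after df's index
    print(prediction.forecast.head())                # .forecast assumed; see note above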

autots.models.greykite module

          Greykite.

class autots.models.greykite.Greykite(name: str = 'Greykite', frequency: str = 'infer', prediction_interval: float = 0.9, holiday: bool = False, growth: str | None = None, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None)
    Bases: ModelObject
    fit(df, future_regressor=[])
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int, future_regressor=[], just_point_forecast: bool = False)
        Generate forecast data immediately following dates of index supplied to .fit().
autots.models.greykite.seek_the_oracle(df_index, series, col, forecast_length, freq, prediction_interval=0.9, model_template='silverkite', growth=None, holiday=True, holiday_country='UnitedStates', regressors=None, verbose=0, inner_n_jobs=1, **kwargs)
    Internal. For-loop or parallel version of Greykite.

autots.models.matrix_var module

          VAR models based on matrix factorization and related methods.

Heavily borrowing on the work of Xinyu Chen. See https://github.com/xinychen/transdim and the corresponding Medium articles.

class autots.models.matrix_var.LATC(name: str = 'LATC', frequency: str = 'infer', prediction_interval: float = 0.9, time_horizon: float = 1, seasonality: int = 7, time_lags: list = [1], lambda0: float = 1, learning_rate: float = 1, theta: float = 1, window: int = 30, epsilon: float = 0.0001, alpha: list = [0.33333333, 0.33333333, 0.33333333], maxiter: int = 100, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, n_jobs: int | None = None, **kwargs)
    Bases: ModelObject
    Low Rank Autoregressive Tensor Completion. Based on https://arxiv.org/abs/2104.14936
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
        Generate forecast data immediately following dates of index supplied to .fit().
class autots.models.matrix_var.MAR(name: str = 'MAR', frequency: str = 'infer', prediction_interval: float = 0.9, seasonality: float = 7, family: str = 'gaussian', maxiter: int = 200, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, n_jobs: int | None = None, **kwargs)
    Bases: ModelObject
    Matrix Autoregressive model based on the code of Xinyu Chen.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
        Generate forecast data immediately following dates of index supplied to .fit().
class autots.models.matrix_var.RRVAR(name: str = 'RRVAR', frequency: str = 'infer', prediction_interval: float = 0.9, method: str = 'als', rank: float = 0.1, maxiter: int = 200, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, n_jobs: int | None = None, **kwargs)
    Bases: ModelObject
    Reduced Rank VAR models based on the code of Xinyu Chen.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
        Generate forecast data immediately following dates of index supplied to .fit().
class autots.models.matrix_var.TMF(name: str = 'TMF', frequency: str = 'infer', prediction_interval: float = 0.9, d: int = 1, lambda0: float = 1, rho: float = 1, rank: float = 0.4, maxiter: int = 100, inner_maxiter: int = 10, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, n_jobs: int | None = None, **kwargs)
    Bases: ModelObject
    Temporal Matrix Factorization VAR model based on the code of Xinyu Chen.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
        Generate forecast data immediately following dates of index supplied to .fit().

autots.models.matrix_var.conj_grad_w(sparse_mat, ind, W, X, rho, maxiter=5)
autots.models.matrix_var.conj_grad_x(sparse_mat, ind, W, X, A, Psi, d, lambda0, rho, maxiter=5)
autots.models.matrix_var.dmd(data, r)
    Dynamic Mode Decomposition (DMD) algorithm.
autots.models.matrix_var.dmd4cast(data, r, pred_step)
autots.models.matrix_var.ell_w(ind, W, X, rho)
autots.models.matrix_var.ell_x(ind, W, X, A, Psi, d, lambda0, rho)
autots.models.matrix_var.generate_Psi(T, d)
autots.models.matrix_var.latc_imputer(sparse_tensor, time_lags, alpha, rho0, lambda0, theta, epsilon, maxiter)
    Low-Rank Autoregressive Tensor Completion, LATC-imputer. Recognizes 0 as NaN.
autots.models.matrix_var.latc_predictor(sparse_mat, pred_time_steps, time_horizon, time_intervals, time_lags, alpha, rho, lambda0, theta, window, epsilon, maxiter)
    LATC-predictor kernel.
autots.models.matrix_var.mar(X, pred_step, family='gaussian', maxiter=100)
autots.models.matrix_var.mat2ten(mat, dim, mode)
autots.models.matrix_var.rrvar(data, R, pred_step, maxiter=100)
    Reduced-rank VAR algorithm using ALS.
autots.models.matrix_var.svt_tnn(mat, tau, theta)
autots.models.matrix_var.ten2mat(tensor, mode)
autots.models.matrix_var.tmf(sparse_mat, rank, d, lambda0, rho, maxiter=50, inner_maxiter=10)
autots.models.matrix_var.update_cg(var, r, q, Aq, rold)
autots.models.matrix_var.var(X, pred_step)
    Simple VAR.
autots.models.matrix_var.var4cast(X, A, d, delta)
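The module-level functions above operate on plain numpy arrays rather than dataframes. A hedged sketch of rrvar, assuming the series-by-time orientation used in the transdim code this module borrows from, and that the return value contains the pred_step forecast columns (both points are assumptions, not confirmed by this listing).

    import numpy as np
    from autots.models.matrix_var import rrvar

    # Two correlated weekly-seasonal toy series, shaped (n_series, n_timesteps).
    t = np.arange(120)
    rng = np.random.default_rng(2)
    data = np.vstack([
        np.sin(2 * np.pi * t / 7) + 0.05 * rng.normal(size=120),
        np.cos(2 * np.pi * t / 7) + 0.05 * rng.normal(size=120),
    ])

    # Reduced-rank VAR via ALS: R is the rank, pred_step the forecast horizon.
    out = rrvar(data, R=2, pred_step=14, maxiter=100)
    print(np.asarray(out).shape)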

autots.models.mlensemble module

          Created on Sun Jan 15 19:28:57 2023

          @author: Colin

class autots.models.mlensemble.MLEnsemble(name: str = 'MLEnsemble', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, forecast_length: int = 10, regression_type: str | None = None, regression_model=None, models=[{'Model': 'Cassandra', 'ModelParameters': {}, 'TransformationParameters': {}}, {'Model': 'MetricMotif', 'ModelParameters': {}, 'TransformationParameters': {}}, {'Model': 'SeasonalityMotif', 'ModelParameters': {}, 'TransformationParameters': {}}], num_validations=2, validation_method='backwards', min_allowed_train_percent=0.5, datepart_method='expanded_binarized', models_source: str = 'random', **kwargs)
    Bases: ModelObject
    Combine models using an ML model across validations.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast=False)
        Generate forecast data immediately following dates of index supplied to .fit().
autots.models.mlensemble.create_feature(df_train, models, forecast_length, future_regressor_train=None, future_regressor_forecast=None, datepart_method=None)
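The models argument in the MLEnsemble signature is a list of AutoTS template rows (Model / ModelParameters / TransformationParameters). A hedged sketch of configuring and running it on a small wide dataframe; empty parameter dicts are used exactly as in the signature's own default value.

    import numpy as np
    import pandas as pd
    from autots.models.mlensemble import MLEnsemble

    idx = pd.date_range('2022-01-01', periods=180, freq='D')
    df = pd.DataFrame(
        np.random.default_rng(3).normal(size=(180, 3)).cumsum(axis=0),
        index=idx, columns=['s1', 's2', 's3'],
    )

    ens = MLEnsemble(
        forecast_length=10,
        num_validations=2,
        validation_method='backwards',
        models=[
            {'Model': 'MetricMotif', 'ModelParameters': {}, 'TransformationParameters': {}},
            {'Model': 'SeasonalityMotif', 'ModelParameters': {}, 'TransformationParameters': {}},
        ],
    )
    ens.fit(df)                                  # fits member models across validations
    prediction = ens.predict(forecast_length=10)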

autots.models.model_list module

          Lists of models grouped by aspects.

autots.models.model_list.auto_model_list(n_jobs, n_series, frequency)
autots.models.model_list.model_list_to_dict(model_list)
    Convert various possibilities to dict.

autots.models.neural_forecast module

          Nixtla’s NeuralForecast. Be warned, as of writing, their package has commercial restrictions.

class autots.models.neural_forecast.NeuralForecast(name: str = 'NeuralForecast', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2023, verbose: int = 0, forecast_length: int = 28, regression_type: str | None = None, n_jobs: int = 1, model='LSTM', loss='MQLoss', input_size='2ForecastLength', max_steps=100, learning_rate=0.001, early_stop_patience_steps=-1, activation='ReLU', scaler_type='robust', model_args={}, point_quantile=None, **kwargs)
    Bases: ModelObject
    See NeuralForecast documentation for details.
    temp['ModelParameters'].str.extract('model": "([a-zA-Z]+)')
    fit(df, future_regressor=None, static_regressor=None, regressor_per_series=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length=None, future_regressor=None, just_point_forecast=False, regressor_per_series=None)
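For this wrapper the interesting part is the configuration surface exposed in the signature (model, loss, input_size such as '2ForecastLength', max_steps). A hedged sketch, assuming Nixtla's neuralforecast package is installed; only parameters shown in the signature above are used.

    import numpy as np
    import pandas as pd
    from autots.models.neural_forecast import NeuralForecast

    idx = pd.date_range('2022-01-01', periods=300, freq='D')
    df = pd.DataFrame({'demand': np.sin(np.arange(300) / 7.0) + 20}, index=idx)

    model = NeuralForecast(
        model='LSTM',                  # which underlying neuralforecast model to use
        loss='MQLoss',                 # multi-quantile loss, the default in the signature above
        input_size='2ForecastLength',  # lookback expressed relative to the horizon
        max_steps=100,
        forecast_length=28,
    )
    model.fit(df)
    prediction = model.predict(forecast_length=28)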

autots.models.prophet module

          Facebook’s Prophet

          Since Prophet install can be finicky on Windows, it will be an optional dependency.

class autots.models.prophet.FBProphet(name: str = 'FBProphet', frequency: str = 'infer', prediction_interval: float = 0.9, holiday: bool = False, regression_type: str | None = None, holiday_country: str = 'US', yearly_seasonality='auto', weekly_seasonality='auto', daily_seasonality='auto', growth: str = 'linear', n_changepoints: int = 25, changepoint_prior_scale: float = 0.05, seasonality_mode: str = 'additive', changepoint_range: float = 0.8, seasonality_prior_scale: float = 10.0, holidays_prior_scale: float = 10.0, random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None)
    Bases: ModelObject
    Facebook’s Prophet
    ‘thou shall count to 3, no more, no less, 3 shall be the number thou shall count, and the number of the counting
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int, future_regressor=None, just_point_forecast: bool = False)
        Generate forecast data immediately following dates of index supplied to .fit().
class autots.models.prophet.NeuralProphet(name: str = 'NeuralProphet', frequency: str = 'infer', prediction_interval: float = 0.9, holiday: bool = False, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, growth: str = 'off', n_changepoints: int = 10, changepoints_range: float = 0.9, trend_reg: float = 0, trend_reg_threshold: bool = False, ar_sparsity: float | None = None, yearly_seasonality: str = 'auto', weekly_seasonality: str = 'auto', daily_seasonality: str = 'auto', seasonality_mode: str = 'additive', seasonality_reg: float = 0, n_lags: int = 0, num_hidden_layers: int = 0, d_hidden: int | None = None, learning_rate: float | None = None, loss_func: str = 'Huber', train_speed: int | None = None, normalize: str = 'auto')
    Bases: ModelObject
    Facebook’s Prophet got caught in a net.
    n_jobs is implemented here but it should be set to 1. PyTorch already maxes out cores in all observed cases.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int, future_regressor=None, just_point_forecast: bool = False)
        Generate forecast data immediately following dates of index supplied to .fit().

autots.models.pytorch module

          Created on Tue May 24 13:32:12 2022

          @author: Colin

class autots.models.pytorch.PytorchForecasting(name: str = 'PytorchForecasting', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, n_jobs: int = 1, forecast_length: int = 90, max_epochs: int = 100, batch_size: int = 128, max_encoder_length: int = 12, learning_rate: float = 0.03, hidden_size: int = 32, n_layers: int = 2, dropout: float = 0.1, datepart_method: str = 'simple', add_target_scales: bool = False, lags: dict = {}, target_normalizer: str = 'EncoderNormalizer', model: str = 'TemporalFusionTransformer', quantiles: list = [0.01, 0.1, 0.22, 0.36, 0.5, 0.64, 0.78, 0.9, 0.99], model_kwargs: dict = {}, trainer_kwargs: dict = {}, callbacks: list | None = None, **kwargs)
    Bases: ModelObject
    pytorch-forecasting for the world’s over-obsession of neural nets.
    This is generally going to require more data than most other models.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast=False)
        Generate forecast data immediately following dates of index supplied to .fit().

autots.models.sklearn module

          Sklearn dependent models

          Decision Tree, Elastic Net, Random Forest, MLPRegressor, KNN, Adaboost

class autots.models.sklearn.ComponentAnalysis(name: str = 'ComponentAnalysis', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_components: int = 10, forecast_length: int = 14, model: str = 'GLS', model_parameters: dict = {}, decomposition: str = 'PCA', n_jobs: int = -1)
    Bases: ModelObject
    Forecasting on principal components.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast: bool = False)
        Generate forecast data immediately following dates of .fit().

class autots.models.sklearn.DatepartRegression(name: str = 'DatepartRegression', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, forecast_length: int = 1, n_jobs: int | None = None, regression_model: dict = {'model': 'DecisionTree', 'model_params': {'max_depth': 5, 'min_samples_split': 2}}, datepart_method: str = 'expanded', polynomial_degree: int | None = None, regression_type: str | None = None, **kwargs)
    Bases: ModelObject
    Regression not on the series values but on datetime features.
    fit(df, future_regressor=None, static_regressor=None, regressor_per_series=None)
        Train algorithm given data supplied.
    fit_data(df, future_regressor=None)
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast: bool = False, df=None, regressor_per_series=None)
        Generate forecast data immediately following dates of index supplied to .fit().
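The regression_model dict shown in the DatepartRegression signature selects the underlying sklearn estimator and its parameters. A hedged sketch that keeps the documented 'DecisionTree' entry but changes its parameters; the dict structure comes straight from the signature's default above.

    import numpy as np
    import pandas as pd
    from autots.models.sklearn import DatepartRegression

    idx = pd.date_range('2021-01-01', periods=400, freq='D')
    df = pd.DataFrame(
        {'load': 100 + 10 * np.sin(2 * np.pi * np.arange(400) / 365.25)},
        index=idx,
    )

    model = DatepartRegression(
        datepart_method='expanded',
        regression_model={
            'model': 'DecisionTree',   # same structure as the default shown above
            'model_params': {'max_depth': 8, 'min_samples_split': 4},
        },
        forecast_length=30,
    )
    model.fit(df)
    prediction = model.predict(forecast_length=30)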

class autots.models.sklearn.MultivariateRegression(name: str = 'MultivariateRegression', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', verbose: int = 0, random_seed: int = 2020, forecast_length: int = 7, regression_model: dict = {'model': 'RandomForest', 'model_params': {}}, holiday: bool = False, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int = 7, min_rolling_periods: int = 7, ewm_var_alpha: float | None = None, quantile90_rolling_periods: int | None = None, quantile10_rolling_periods: int | None = None, ewm_alpha: float = 0.5, additional_lag_periods: int | None = None, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, nonzero_last_n: int | None = None, datepart_method: str | None = None, polynomial_degree: int | None = None, window: int = 5, probabilistic: bool = False, scale_full_X: bool = False, quantile_params: dict = {'learning_rate': 0.1, 'max_depth': 20, 'min_samples_leaf': 4, 'min_samples_split': 5, 'n_estimators': 250}, cointegration: str | None = None, cointegration_lag: int = 1, series_hash: bool = False, n_jobs: int = -1, **kwargs)
    Bases: ModelObject
    Regression-framed approach to forecasting using sklearn. A multivariate version of rolling regression: i.e. each series is lagged independently but modeled together.
    base_scaler(df)
    fit(df, future_regressor=None, static_regressor=None, regressor_per_series=None)
        Train algorithm given data supplied.
    fit_data(df, future_regressor=None, static_regressor=None, regressor_per_series=None)
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int | None = None, just_point_forecast: bool = False, future_regressor=None, df=None, regressor_per_series=None)
        Generate forecast data immediately following dates of index supplied to .fit().
    scale_data(df)
    to_origin_space(df, trans_method='forecast', components=False, bounds=False)
        Take transformed outputs back to original feature space.

class autots.models.sklearn.PreprocessingRegression(name: str = 'PreprocessingRegression', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2023, verbose: int = 0, window_size: int = 10, regression_model: dict = {'model': 'RandomForest', 'model_params': {}}, transformation_dict=None, max_history: int | None = None, one_step: bool = False, processed_y: bool = False, normalize_window: bool = False, datepart_method: str = 'common_fourier', forecast_length: int = 28, regression_type: str | None = None, n_jobs: int = -1, **kwargs)
    Bases: ModelObject
    Regression using the last n values as the basis of training data.
    fit(df, future_regressor=None)
        Train algorithm given data supplied.
    fit_data(df, future_regressor=None)
    get_new_params(method: str = 'random')
        Return dict of new parameters for parameter tuning.
    get_params()
        Return dict of current parameters.
    predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast: bool = False, df=None)
        Generate forecast data immediately following dates of .fit().

          Parameters:
          @@ -3120,7 +3128,7 @@

          Submodules
          -class autots.models.sklearn.RollingRegression(name: str = 'RollingRegression', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', verbose: int = 0, random_seed: int = 2020, regression_model: dict = {'model': 'ExtraTrees', 'model_params': {}}, holiday: bool = False, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int = 7, min_rolling_periods: int = 7, ewm_var_alpha: int | None = None, quantile90_rolling_periods: int | None = None, quantile10_rolling_periods: int | None = None, ewm_alpha: float = 0.5, additional_lag_periods: int = 7, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, nonzero_last_n: int | None = None, add_date_part: str | None = None, polynomial_degree: int | None = None, x_transform: str | None = None, window: int | None = None, n_jobs: int = -1, **kwargs)
          +class autots.models.sklearn.RollingRegression(name: str = 'RollingRegression', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', verbose: int = 0, random_seed: int = 2020, regression_model: dict = {'model': 'ExtraTrees', 'model_params': {}}, holiday: bool = False, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int = 7, min_rolling_periods: int = 7, ewm_var_alpha: int | None = None, quantile90_rolling_periods: int | None = None, quantile10_rolling_periods: int | None = None, ewm_alpha: float = 0.5, additional_lag_periods: int = 7, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, nonzero_last_n: int | None = None, add_date_part: str | None = None, polynomial_degree: int | None = None, x_transform: str | None = None, window: int | None = None, n_jobs: int = -1, **kwargs)

          Bases: ModelObject

          General regression-framed approach to forecasting using sklearn.

Who are you who are so wise in the ways of science?

@@ -3138,7 +3146,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied.

          Parameters:
          @@ -3152,19 +3160,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast: bool = False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast: bool = False)

          Generate forecast data immediately following dates of index supplied to .fit().

          Parameters:
          @@ -3185,7 +3193,7 @@

          Submodules
          -class autots.models.sklearn.UnivariateRegression(name: str = 'UnivariateRegression', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', verbose: int = 0, random_seed: int = 2020, forecast_length: int = 7, regression_model: dict = {'model': 'ExtraTrees', 'model_params': {}}, holiday: bool = False, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int = 7, min_rolling_periods: int = 7, ewm_var_alpha: float | None = None, ewm_alpha: float = 0.5, additional_lag_periods: int = 7, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, add_date_part: str | None = None, polynomial_degree: int | None = None, x_transform: str | None = None, window: int | None = None, n_jobs: int = -1, **kwargs)
          +class autots.models.sklearn.UnivariateRegression(name: str = 'UnivariateRegression', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', verbose: int = 0, random_seed: int = 2020, forecast_length: int = 7, regression_model: dict = {'model': 'ExtraTrees', 'model_params': {}}, holiday: bool = False, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int = 7, min_rolling_periods: int = 7, ewm_var_alpha: float | None = None, ewm_alpha: float = 0.5, additional_lag_periods: int = 7, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, add_date_part: str | None = None, polynomial_degree: int | None = None, x_transform: str | None = None, window: int | None = None, n_jobs: int = -1, **kwargs)

          Bases: ModelObject

Regression-framed approach to forecasting using sklearn. A univariate version of rolling regression, i.e., each series is modeled independently.

          @@ -3204,7 +3212,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied.

          Parameters:
          @@ -3218,19 +3226,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int | None = None, just_point_forecast: bool = False, future_regressor=None)
          +predict(forecast_length: int | None = None, just_point_forecast: bool = False, future_regressor=None)

          Generate forecast data immediately following dates of index supplied to .fit().

          Parameters:
          @@ -3252,7 +3260,7 @@

          Submodules
          -class autots.models.sklearn.VectorizedMultiOutputGPR(kernel='rbf', noise_var=10, gamma=0.1, lambda_prime=0.1, p=7)
          +class autots.models.sklearn.VectorizedMultiOutputGPR(kernel='rbf', noise_var=10, gamma=0.1, lambda_prime=0.1, p=7)

          Bases: object

          Gaussian Process Regressor.
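A hedged sketch of the fit/predict calls listed below, assuming plain NumPy arrays are accepted (X of shape (n_samples, n_features), Y of shape (n_samples, n_series)); the toy data is illustrative only.

    import numpy as np
    from autots.models.sklearn import VectorizedMultiOutputGPR

    X = np.arange(60, dtype=float).reshape(-1, 1)                     # single time feature
    Y = np.column_stack([np.sin(X[:, 0] / 5), np.cos(X[:, 0] / 5)])   # two target series

    gpr = VectorizedMultiOutputGPR(kernel="rbf", noise_var=10, gamma=0.1)
    gpr.fit(X, Y)
    point = gpr.predict(X[-5:])   # point predictions for the last five inputs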

          @@ -3269,24 +3277,24 @@

          Submodules
          -fit(X, Y)
          +fit(X, Y)

          -predict(X)
          +predict(X)
          -predict_proba(X)
          +predict_proba(X)

          -class autots.models.sklearn.WindowRegression(name: str = 'WindowRegression', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, window_size: int = 10, regression_model: dict = {'model': 'RandomForest', 'model_params': {}}, input_dim: str = 'univariate', output_dim: str = 'forecast_length', normalize_window: bool = False, shuffle: bool = False, forecast_length: int = 1, max_windows: int = 5000, regression_type: str | None = None, n_jobs: int = -1, **kwargs)
          +class autots.models.sklearn.WindowRegression(name: str = 'WindowRegression', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2022, verbose: int = 0, window_size: int = 10, regression_model: dict = {'model': 'RandomForest', 'model_params': {}}, input_dim: str = 'univariate', output_dim: str = 'forecast_length', normalize_window: bool = False, shuffle: bool = False, forecast_length: int = 1, max_windows: int = 5000, regression_type: str | None = None, n_jobs: int = -1, **kwargs)

          Bases: ModelObject

Regression using the last n values as the basis of training data.

          @@ -3301,7 +3309,7 @@

          Submodules
          -fit(df, future_regressor=None, static_regressor=None)
          +fit(df, future_regressor=None, static_regressor=None)

          Train algorithm given data supplied.

          Parameters:
          @@ -3312,24 +3320,24 @@

          Submodules
          -fit_data(df, future_regressor=None)
          +fit_data(df, future_regressor=None)

          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast: bool = False, df=None)
          +predict(forecast_length: int | None = None, future_regressor=None, just_point_forecast: bool = False, df=None)

          Generate forecast data immediately following dates of .fit().

          Parameters:
          @@ -3350,30 +3358,30 @@

          Submodules
          -autots.models.sklearn.generate_classifier_params(model_dict=None, method='default')
          +autots.models.sklearn.generate_classifier_params(model_dict=None, method='default')

          -autots.models.sklearn.generate_regressor_params(model_dict=None, method='default')
          +autots.models.sklearn.generate_regressor_params(model_dict=None, method='default')

          Generate new parameters for input to regressor.
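For example (the returned dictionary varies by random draw; the keys shown in the comment are an assumption based on the regression_model dicts used elsewhere on this page):

    from autots.models.sklearn import generate_regressor_params

    params = generate_regressor_params(method="default")
    # expected shape: {"model": "<sklearn model name>", "model_params": {...}}
    print(params)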

          -autots.models.sklearn.retrieve_classifier(regression_model: dict = {'model': 'RandomForest', 'model_params': {'bootstrap': False, 'min_samples_leaf': 1, 'n_estimators': 300}}, verbose: int = 0, verbose_bool: bool = False, random_seed: int = 2020, n_jobs: int = 1, multioutput: bool = True)
          +autots.models.sklearn.retrieve_classifier(regression_model: dict = {'model': 'RandomForest', 'model_params': {'bootstrap': False, 'min_samples_leaf': 1, 'n_estimators': 300}}, verbose: int = 0, verbose_bool: bool = False, random_seed: int = 2020, n_jobs: int = 1, multioutput: bool = True)

Convert a model param dict to a model object for classification frameworks.

          -autots.models.sklearn.retrieve_regressor(regression_model: dict = {'model': 'RandomForest', 'model_params': {'bootstrap': False, 'min_samples_leaf': 1, 'n_estimators': 300}}, verbose: int = 0, verbose_bool: bool = False, random_seed: int = 2020, n_jobs: int = 1, multioutput: bool = True)
          +autots.models.sklearn.retrieve_regressor(regression_model: dict = {'model': 'RandomForest', 'model_params': {'bootstrap': False, 'min_samples_leaf': 1, 'n_estimators': 300}}, verbose: int = 0, verbose_bool: bool = False, random_seed: int = 2020, n_jobs: int = 1, multioutput: bool = True)

          Convert a model param dict to model object for regression frameworks.
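A small sketch of the conversion; the exact estimator returned, and whether it is wrapped for multioutput, is not specified here and is assumed.

    from autots.models.sklearn import retrieve_regressor

    regr = retrieve_regressor(
        regression_model={"model": "RandomForest", "model_params": {"n_estimators": 100}},
        random_seed=2020,
        n_jobs=1,
        multioutput=True,
    )
    # regr is expected to behave like a scikit-learn estimator: regr.fit(X, y); regr.predict(X)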

          -autots.models.sklearn.rolling_x_regressor(df, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int | None = None, min_rolling_periods: int | None = None, quantile90_rolling_periods: int | None = None, quantile10_rolling_periods: int | None = None, ewm_alpha: float = 0.5, ewm_var_alpha: float | None = None, additional_lag_periods: int = 7, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, nonzero_last_n: int | None = None, add_date_part: str | None = None, holiday: bool = False, holiday_country: str = 'US', polynomial_degree: int | None = None, window: int | None = None, cointegration: str | None = None, cointegration_lag: int = 1)
          +autots.models.sklearn.rolling_x_regressor(df, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int | None = None, min_rolling_periods: int | None = None, quantile90_rolling_periods: int | None = None, quantile10_rolling_periods: int | None = None, ewm_alpha: float = 0.5, ewm_var_alpha: float | None = None, additional_lag_periods: int = 7, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, nonzero_last_n: int | None = None, add_date_part: str | None = None, holiday: bool = False, holiday_country: str = 'US', polynomial_degree: int | None = None, window: int | None = None, cointegration: str | None = None, cointegration_lag: int = 1)

          Generate more features from initial time series.

          macd_periods ignored if mean_rolling is None.

Returns a dataframe of statistical features. Will need to be shifted by 1 or more to match Y for forecast.

@@ -3383,20 +3391,20 @@
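A sketch of the feature-building and shifting step described above, using illustrative data and only parameters from the documented signature.

    import pandas as pd
    from autots.models.sklearn import rolling_x_regressor

    df = pd.DataFrame(
        {"y": range(60)},
        index=pd.date_range("2023-01-01", periods=60, freq="D"),
    )
    X = rolling_x_regressor(df, mean_rolling_periods=7, std_rolling_periods=7)
    # shift so features at time t-1 line up with the target at time t
    X_train = X.shift(1).iloc[1:]
    y_train = df.iloc[1:]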

          Submodules
          -autots.models.sklearn.rolling_x_regressor_regressor(df, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int | None = None, min_rolling_periods: int | None = None, quantile90_rolling_periods: int | None = None, quantile10_rolling_periods: int | None = None, ewm_alpha: float = 0.5, ewm_var_alpha: float | None = None, additional_lag_periods: int = 7, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, nonzero_last_n: int | None = None, add_date_part: str | None = None, holiday: bool = False, holiday_country: str = 'US', polynomial_degree: int | None = None, window: int | None = None, future_regressor=None, regressor_per_series=None, static_regressor=None, cointegration: str | None = None, cointegration_lag: int = 1, series_id=None)
          +autots.models.sklearn.rolling_x_regressor_regressor(df, mean_rolling_periods: int = 30, macd_periods: int | None = None, std_rolling_periods: int = 7, max_rolling_periods: int | None = None, min_rolling_periods: int | None = None, quantile90_rolling_periods: int | None = None, quantile10_rolling_periods: int | None = None, ewm_alpha: float = 0.5, ewm_var_alpha: float | None = None, additional_lag_periods: int = 7, abs_energy: bool = False, rolling_autocorr_periods: int | None = None, nonzero_last_n: int | None = None, add_date_part: str | None = None, holiday: bool = False, holiday_country: str = 'US', polynomial_degree: int | None = None, window: int | None = None, future_regressor=None, regressor_per_series=None, static_regressor=None, cointegration: str | None = None, cointegration_lag: int = 1, series_id=None)

          Adds in the future_regressor.

          -

          autots.models.statsmodels module

          +

          autots.models.statsmodels module

          Statsmodels based forecasting models.

Statsmodels documentation can be a bit confusing. It seems standard at first, but each model likes to do things differently; for example, exog, exog_oos, and exog_fc can all refer to the same thing.

          -class autots.models.statsmodels.ARDL(name: str = 'ARDL', frequency: str = 'infer', prediction_interval: float = 0.9, lags: int = 1, trend: str = 'c', order: int = 0, causal: bool = False, regression_type: str = 'holiday', holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)
          +class autots.models.statsmodels.ARDL(name: str = 'ARDL', frequency: str = 'infer', prediction_interval: float = 0.9, lags: int = 1, trend: str = 'c', order: int = 0, causal: bool = False, regression_type: str = 'holiday', holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)

          Bases: ModelObject

          ARDL from Statsmodels.

          @@ -3415,7 +3423,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

Train algorithm given data supplied.

          Parameters:
          @@ -3426,19 +3434,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generate forecast data immediately following dates of index supplied to .fit().

          Parameters:
          @@ -3459,7 +3467,7 @@

          Submodules
          -class autots.models.statsmodels.ARIMA(name: str = 'ARIMA', frequency: str = 'infer', prediction_interval: float = 0.9, p: int = 0, d: int = 1, q: int = 0, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)
          +class autots.models.statsmodels.ARIMA(name: str = 'ARIMA', frequency: str = 'infer', prediction_interval: float = 0.9, p: int = 0, d: int = 1, q: int = 0, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)

          Bases: ModelObject

          ARIMA from Statsmodels.
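A minimal fit/predict sketch following the ModelObject pattern used throughout this module; the data is illustrative and the prediction-object attributes follow general AutoTS conventions rather than anything documented here.

    import numpy as np
    import pandas as pd
    from autots.models.statsmodels import ARIMA

    df = pd.DataFrame(
        {"sales": np.random.randn(120).cumsum()},
        index=pd.date_range("2023-01-01", periods=120, freq="D"),
    )
    model = ARIMA(p=1, d=1, q=1, prediction_interval=0.9)
    model.fit(df)
    prediction = model.predict(forecast_length=14)
    point, upper, lower = prediction.forecast, prediction.upper_forecast, prediction.lower_forecast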

          @@ -3478,7 +3486,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

Train algorithm given data supplied.

          Parameters:
          @@ -3489,20 +3497,20 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

Large p, d, q values can be very slow (a p of 30 can take hours).

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generate forecast data immediately following dates of index supplied to .fit().

          Parameters:
          @@ -3523,7 +3531,7 @@

          Submodules
          -class autots.models.statsmodels.DynamicFactor(name: str = 'DynamicFactor', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, k_factors: int = 1, factor_order: int = 0, **kwargs)
          +class autots.models.statsmodels.DynamicFactor(name: str = 'DynamicFactor', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, k_factors: int = 1, factor_order: int = 0, **kwargs)

          Bases: ModelObject

          DynamicFactor from Statsmodels

          @@ -3538,7 +3546,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied.

          Parameters:
          @@ -3549,19 +3557,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -3587,7 +3595,7 @@

          Submodules
          -class autots.models.statsmodels.DynamicFactorMQ(name: str = 'DynamicFactorMQ', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, factors: int = 1, factor_orders: int = 2, factor_multiplicities: int | None = None, idiosyncratic_ar1: bool = False, maxiter: int = 1000, **kwargs)
          +class autots.models.statsmodels.DynamicFactorMQ(name: str = 'DynamicFactorMQ', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, factors: int = 1, factor_orders: int = 2, factor_multiplicities: int | None = None, idiosyncratic_ar1: bool = False, maxiter: int = 1000, **kwargs)

          Bases: ModelObject

          DynamicFactorMQ from Statsmodels

          @@ -3601,7 +3609,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied.

          Parameters:
          @@ -3612,19 +3620,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -3645,7 +3653,7 @@

          Submodules
          -class autots.models.statsmodels.ETS(name: str = 'ETS', frequency: str = 'infer', prediction_interval: float = 0.9, damped_trend: bool = False, trend: str | None = None, seasonal: str | None = None, seasonal_periods: int | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)
          +class autots.models.statsmodels.ETS(name: str = 'ETS', frequency: str = 'infer', prediction_interval: float = 0.9, damped_trend: bool = False, trend: str | None = None, seasonal: str | None = None, seasonal_periods: int | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)

          Bases: ModelObject

          Exponential Smoothing from Statsmodels

          @@ -3663,7 +3671,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied

          Parameters:
          @@ -3674,19 +3682,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -3707,7 +3715,7 @@

          Submodules
          -class autots.models.statsmodels.GLM(name: str = 'GLM', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, regression_type: str | None = None, family='Gaussian', constant: bool = False, verbose: int = 1, n_jobs: int | None = None, **kwargs)
          +class autots.models.statsmodels.GLM(name: str = 'GLM', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, regression_type: str | None = None, family='Gaussian', constant: bool = False, verbose: int = 1, n_jobs: int | None = None, **kwargs)

          Bases: ModelObject

          Simple linear regression from statsmodels

          @@ -3722,7 +3730,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied

          Parameters:
          @@ -3733,19 +3741,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -3766,7 +3774,7 @@

          Submodules
          -class autots.models.statsmodels.GLS(name: str = 'GLS', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, **kwargs)
          +class autots.models.statsmodels.GLS(name: str = 'GLS', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, **kwargs)

          Bases: ModelObject

          Simple linear regression from statsmodels

          @@ -3780,7 +3788,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied

          Parameters:
          @@ -3791,19 +3799,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Returns dict of new parameters for parameter tuning

          -get_params()
          +get_params()

          Return dict of current parameters

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -3824,7 +3832,7 @@

          Submodules
          -class autots.models.statsmodels.Theta(name: str = 'Theta', frequency: str = 'infer', prediction_interval: float = 0.9, deseasonalize: bool = True, use_test: bool = True, difference: bool = False, period: int | None = None, theta: float = 2, use_mle: bool = False, method: str = 'auto', holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)
          +class autots.models.statsmodels.Theta(name: str = 'Theta', frequency: str = 'infer', prediction_interval: float = 0.9, deseasonalize: bool = True, use_test: bool = True, difference: bool = False, period: int | None = None, theta: float = 2, use_mle: bool = False, method: str = 'auto', holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int | None = None, **kwargs)

          Bases: ModelObject

          Theta Model from Statsmodels

          @@ -3839,7 +3847,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied

          Parameters:
          @@ -3850,19 +3858,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -3883,7 +3891,7 @@

          Submodules
          -class autots.models.statsmodels.UnobservedComponents(name: str = 'UnobservedComponents', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int = 1, level: str = 'smooth trend', trend: bool = False, cycle: bool = False, damped_cycle: bool = False, irregular: bool = False, autoregressive: int | None = None, stochastic_cycle: bool = False, stochastic_trend: bool = False, stochastic_level: bool = False, maxiter: int = 100, cov_type: str = 'opg', method: str = 'lbfgs', model_kwargs: dict | None = None, **kwargs)
          +class autots.models.statsmodels.UnobservedComponents(name: str = 'UnobservedComponents', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, n_jobs: int = 1, level: str = 'smooth trend', trend: bool = False, cycle: bool = False, damped_cycle: bool = False, irregular: bool = False, autoregressive: int | None = None, stochastic_cycle: bool = False, stochastic_trend: bool = False, stochastic_level: bool = False, maxiter: int = 100, cov_type: str = 'opg', method: str = 'lbfgs', model_kwargs: dict | None = None, **kwargs)

          Bases: ModelObject

          UnobservedComponents from Statsmodels.

          @@ -3899,7 +3907,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied

          Parameters:
          @@ -3910,19 +3918,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast: bool = False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast: bool = False)

          Generate forecast data immediately following dates of index supplied to .fit().

          Parameters:
          @@ -3943,7 +3951,7 @@

          Submodules
          -class autots.models.statsmodels.VAR(name: str = 'VAR', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, maxlags: int = 15, ic: str = 'fpe', **kwargs)
          +class autots.models.statsmodels.VAR(name: str = 'VAR', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, maxlags: int = 15, ic: str = 'fpe', **kwargs)

          Bases: ModelObject

          VAR from Statsmodels.
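Since VAR is multivariate, a sketch with several jointly modeled series (illustrative data; prediction-object attributes assumed per general AutoTS conventions):

    import numpy as np
    import pandas as pd
    from autots.models.statsmodels import VAR

    idx = pd.date_range("2023-01-01", periods=200, freq="D")
    df = pd.DataFrame(
        {"a": np.random.randn(200).cumsum(), "b": np.random.randn(200).cumsum()},
        index=idx,
    )
    model = VAR(maxlags=15, ic="fpe")
    model.fit(df)
    prediction = model.predict(forecast_length=7)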

          @@ -3958,7 +3966,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied.

          Parameters:
          @@ -3969,19 +3977,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -4002,7 +4010,7 @@

          Submodules
          -class autots.models.statsmodels.VARMAX(name: str = 'VARMAX', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, order: tuple = (1, 0), trend: str = 'c', **kwargs)
          +class autots.models.statsmodels.VARMAX(name: str = 'VARMAX', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, order: tuple = (1, 0), trend: str = 'c', **kwargs)

          Bases: ModelObject

          VARMAX from Statsmodels

          @@ -4017,7 +4025,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied

          Parameters:
          @@ -4028,19 +4036,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generate forecast data immediately following dates of index supplied to .fit().

          Parameters:
          @@ -4061,7 +4069,7 @@

          Submodules
          -class autots.models.statsmodels.VECM(name: str = 'VECM', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, deterministic: str = 'n', k_ar_diff: int = 1, seasons: int = 0, coint_rank: int = 1, **kwargs)
          +class autots.models.statsmodels.VECM(name: str = 'VECM', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, deterministic: str = 'n', k_ar_diff: int = 1, seasons: int = 0, coint_rank: int = 1, **kwargs)

          Bases: ModelObject

          VECM from Statsmodels

          @@ -4076,7 +4084,7 @@

          Submodules
          -fit(df, future_regressor=None)
          +fit(df, future_regressor=None)

          Train algorithm given data supplied.

          Parameters:
          @@ -4087,19 +4095,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=None, just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=None, just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -4120,21 +4128,21 @@

          Submodules
          -autots.models.statsmodels.arima_seek_the_oracle(current_series, args, series)
          +autots.models.statsmodels.arima_seek_the_oracle(current_series, args, series)

          -autots.models.statsmodels.glm_forecast_by_column(current_series, X, Xf, args)
          +autots.models.statsmodels.glm_forecast_by_column(current_series, X, Xf, args)

          Run one series of GLM and return prediction.

          -

          autots.models.tfp module

          +

          autots.models.tfp module

          -class autots.models.tfp.TFPRegression(name: str = 'TFPRegression', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 1, kernel_initializer: str = 'lecun_uniform', optimizer: str = 'adam', loss: str = 'negloglike', epochs: int = 50, batch_size: int = 32, dist: str = 'normal', regression_type: str | None = None)
          +class autots.models.tfp.TFPRegression(name: str = 'TFPRegression', frequency: str = 'infer', prediction_interval: float = 0.9, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 1, kernel_initializer: str = 'lecun_uniform', optimizer: str = 'adam', loss: str = 'negloglike', epochs: int = 50, batch_size: int = 32, dist: str = 'normal', regression_type: str | None = None)

          Bases: ModelObject

          Tensorflow Probability regression.

          @@ -4149,7 +4157,7 @@

          Submodules
          -fit(df, future_regressor=[])
          +fit(df, future_regressor=[])

          Train algorithm given data supplied

          Parameters:
          @@ -4160,19 +4168,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=[], just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=[], just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -4193,7 +4201,7 @@

          Submodules
          -class autots.models.tfp.TFPRegressor(kernel_initializer: str = 'lecun_uniform', optimizer: str = 'adam', loss: str = 'negloglike', epochs: int = 50, batch_size: int = 32, dist: str = 'normal', verbose: int = 1, random_seed: int = 2020)
          +class autots.models.tfp.TFPRegressor(kernel_initializer: str = 'lecun_uniform', optimizer: str = 'adam', loss: str = 'negloglike', epochs: int = 50, batch_size: int = 32, dist: str = 'normal', verbose: int = 1, random_seed: int = 2020)

          Bases: object

          Wrapper for Tensorflow Keras based RNN.
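A rough sketch of the wrapper's fit/predict calls, assuming tensorflow and tensorflow-probability are installed and that X and Y are pandas DataFrames of features and targets; all data here is illustrative and the return format of predict is assumed.

    import numpy as np
    import pandas as pd
    from autots.models.tfp import TFPRegressor

    X = pd.DataFrame(np.random.randn(100, 3), columns=["f1", "f2", "f3"])
    Y = pd.DataFrame(np.random.randn(100, 2), columns=["s1", "s2"])

    tfp_model = TFPRegressor(epochs=10, batch_size=32, dist="normal")
    tfp_model.fit(X, Y)
    pred = tfp_model.predict(X.tail(5), conf_int=0.9)   # return format not documented here; assumed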

          @@ -4213,13 +4221,13 @@

          Submodules
          -fit(X, Y)
          +fit(X, Y)

          Train the model on dataframes of X and Y.

          -predict(X, conf_int: float | None = None)
          +predict(X, conf_int: float | None = None)

          Predict on dataframe of X.

          @@ -4227,7 +4235,7 @@

          Submodules
          -class autots.models.tfp.TensorflowSTS(name: str = 'TensorflowSTS', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, trend: str = 'local', seasonal_periods: int | None = None, ar_order: int | None = None, fit_method: str = 'hmc', num_steps: int = 200)
          +class autots.models.tfp.TensorflowSTS(name: str = 'TensorflowSTS', frequency: str = 'infer', prediction_interval: float = 0.9, regression_type: str | None = None, holiday_country: str = 'US', random_seed: int = 2020, verbose: int = 0, trend: str = 'local', seasonal_periods: int | None = None, ar_order: int | None = None, fit_method: str = 'hmc', num_steps: int = 200)

          Bases: ModelObject

          STS from TensorflowProbability.

          @@ -4242,7 +4250,7 @@

          Submodules
          -fit(df, future_regressor=[])
          +fit(df, future_regressor=[])

          Train algorithm given data supplied.

          Parameters:
          @@ -4253,19 +4261,19 @@

          Submodules
          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length: int, future_regressor=[], just_point_forecast=False)
          +predict(forecast_length: int, future_regressor=[], just_point_forecast=False)

          Generates forecast data immediately following dates of index supplied to .fit()

          Parameters:
          @@ -4286,45 +4294,45 @@

Submodules

-

          autots.models.tide module

          +

          autots.models.tide module

          -class autots.models.tide.TiDE(name: str = 'UnivariateRegression', random_seed=42, frequency='D', learning_rate=0.0009999, transform=False, layer_norm=False, holiday=True, dropout_rate=0.3, batch_size=512, hidden_size=256, num_layers=1, hist_len=720, decoder_output_dim=16, final_decoder_hidden=64, num_split=4, min_num_epochs=0, train_epochs=100, patience=10, epoch_len=None, permute=True, normalize=True, gpu_index=0, n_jobs: int = 'auto', forecast_length: int = 14, prediction_interval: float = 0.9, verbose: int = 0)
          +class autots.models.tide.TiDE(name: str = 'UnivariateRegression', random_seed=42, frequency='D', learning_rate=0.0009999, transform=False, layer_norm=False, holiday=True, dropout_rate=0.3, batch_size=512, hidden_size=256, num_layers=1, hist_len=720, decoder_output_dim=16, final_decoder_hidden=64, num_split=4, min_num_epochs=0, train_epochs=100, patience=10, epoch_len=None, permute=True, normalize=True, gpu_index=0, n_jobs: int = 'auto', forecast_length: int = 14, prediction_interval: float = 0.9, verbose: int = 0)

          Bases: ModelObject

Google Research MLP-based forecasting approach.

          -fit(df=None, num_cov_cols=None, cat_cov_cols=None, future_regressor=None)
          +fit(df=None, num_cov_cols=None, cat_cov_cols=None, future_regressor=None)

          Training TS code.

          -get_new_params(method: str = 'random')
          +get_new_params(method: str = 'random')

          Return dict of new parameters for parameter tuning.

          -get_params()
          +get_params()

          Return dict of current parameters.

          -predict(forecast_length='self', future_regressor=None, mode='test', just_point_forecast=False)
          +predict(forecast_length='self', future_regressor=None, mode='test', just_point_forecast=False)
          -class autots.models.tide.TimeCovariates(datetimes, normalized=True, holiday=False)
          +class autots.models.tide.TimeCovariates(datetimes, normalized=True, holiday=False)

          Bases: object

          Extract all time covariates except for holidays.

          -get_covariates()
          +get_covariates()

          Get all time covariates.
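A short sketch; importing from the tide module is assumed to require its optional TensorFlow dependency, and the structure of the returned covariates is not documented here.

    import pandas as pd
    from autots.models.tide import TimeCovariates

    dti = pd.date_range("2023-01-01", periods=30, freq="D")
    covariates = TimeCovariates(dti, normalized=True, holiday=False).get_covariates()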

          @@ -4332,24 +4340,24 @@

          Submodules
          -class autots.models.tide.TimeSeriesdata(df, num_cov_cols, cat_cov_cols, ts_cols, train_range, val_range, test_range, hist_len, pred_len, batch_size, freq='D', normalize=True, epoch_len=None, holiday=False, permute=True)
          +class autots.models.tide.TimeSeriesdata(df, num_cov_cols, cat_cov_cols, ts_cols, train_range, val_range, test_range, hist_len, pred_len, batch_size, freq='D', normalize=True, epoch_len=None, holiday=False, permute=True)

          Bases: object

          Data loader class.

          -test_val_gen(mode='val')
          +test_val_gen(mode='val')

          Generator for validation/test data.

          -tf_dataset(mode='train')
          +tf_dataset(mode='train')

          Tensorflow Dataset.

          -train_gen()
          +train_gen()

          Generator for training data.

          @@ -4357,42 +4365,42 @@

          Submodules
          -autots.models.tide.get_HOLIDAYS()
          +autots.models.tide.get_HOLIDAYS()

          -autots.models.tide.mae_loss(y_pred, y_true)
          +autots.models.tide.mae_loss(y_pred, y_true)
          -autots.models.tide.mape(y_pred, y_true)
          +autots.models.tide.mape(y_pred, y_true)
          -autots.models.tide.nrmse(y_pred, y_true)
          +autots.models.tide.nrmse(y_pred, y_true)
          -autots.models.tide.rmse(y_pred, y_true)
          +autots.models.tide.rmse(y_pred, y_true)
          -autots.models.tide.smape(y_pred, y_true)
          +autots.models.tide.smape(y_pred, y_true)
          -autots.models.tide.wape(y_pred, y_true)
          +autots.models.tide.wape(y_pred, y_true)

          -

          Module contents

          +

          Module contents

          Model Models

          @@ -4487,21 +4495,5 @@

\ No newline at end of file
diff --git a/docs/build/html/source/autots.templates.html b/docs/build/html/source/autots.templates.html
index 283069f1..33235878 100644
--- a/docs/build/html/source/autots.templates.html
+++ b/docs/build/html/source/autots.templates.html
@@ -1,17 +1,25 @@
autots.templates package — AutoTS 0.6.10 documentation
@@ -33,16 +41,16 @@
          -

          autots.templates package

          +

          autots.templates package

          -

          Submodules

          +

          Submodules

          -

          autots.templates.general module

          +

          autots.templates.general module

          Starting templates for models.

          -autots.templates.general.general_template = Model                                    ModelParameters                           TransformationParameters  Ensemble 1                ARIMA  {"p": 4, "d": 0, "q": 12, "regression_type": n...  {"fillna": "cubic", "transformations": {"0": "...         0 3    AverageValueNaive                                 {"method": "Mean"}  {"fillna": "fake_date", "transformations": {"0...         0 4    AverageValueNaive                                 {"method": "Mean"}  {"fillna": "mean", "transformations": {"0": "C...         0 5    AverageValueNaive                                 {"method": "Mean"}  {"fillna": "rolling_mean_24", "transformations...         0 6   DatepartRegression  {"regression_model": {"model": "DecisionTree",...  {"fillna": "mean", "transformations": {"0": "C...         0 ..                 ...                                                ...                                                ...       ... 68    SeasonalityMotif  {\n            "window": 5, "point_method": "w...  {"fillna": "nearest", "transformations": {"0":...         0 69                TiDE  {\n            "learning_rate": 0.000999999, "...  \n        {"fillna": "ffill", "transformations...         0 70           Cassandra  {"preprocessing_transformation": {"fillna": "f...  {"fillna": "linear", "transformations": {"0": ...         0 71           Cassandra  {"preprocessing_transformation": {"fillna": "m...  {"fillna": "pad", "transformations": {"0": "EW...         0 72      NeuralForecast  {"model": "MLP", "scaler_type": "minmax", "los...  {"fillna": "SeasonalityMotifImputerLinMix", "t...         0  [71 rows x 4 columns]
          +autots.templates.general.general_template = Model  ... Ensemble 1                ARIMA  ...        0 3    AverageValueNaive  ...        0 4    AverageValueNaive  ...        0 5    AverageValueNaive  ...        0 6   DatepartRegression  ...        0 ..                 ...  ...      ... 68    SeasonalityMotif  ...        0 69                TiDE  ...        0 70           Cassandra  ...        0 71           Cassandra  ...        0 72      NeuralForecast  ...        0  [71 rows x 4 columns]

# Basic Template Construction Code
# transformer_max_depth = 6 and transformer_list = “fast”
from autots.evaluator.auto_model import unpack_ensemble_models

@@ -84,7 +92,7 @@

Submodules

-

          Module contents

          +

          Module contents

          Model Templates

          @@ -179,21 +187,5 @@

\ No newline at end of file
diff --git a/docs/build/html/source/autots.tools.html b/docs/build/html/source/autots.tools.html
index cfeee336..8144dee8 100644
--- a/docs/build/html/source/autots.tools.html
+++ b/docs/build/html/source/autots.tools.html
@@ -1,17 +1,25 @@
autots.tools package — AutoTS 0.6.10 documentation
@@ -33,12 +41,12 @@
          -

          autots.tools package

          +

          autots.tools package

          -

          Submodules

          +

          Submodules

          -

          autots.tools.anomaly_utils module

          +

          autots.tools.anomaly_utils module

          Created on Fri Jul 1 15:41:21 2022

          @author: Colin

point, contextual, and collective. Point anomalies are single values

@@ -49,23 +57,23 @@

Submodules

https://arxiv.org/pdf/1802.04431.pdf

          -autots.tools.anomaly_utils.anomaly_df_to_holidays(anomaly_df, actuals=None, anomaly_scores=None, threshold=0.8, min_occurrences=2, splash_threshold=0.65, use_dayofmonth_holidays=True, use_wkdom_holidays=True, use_wkdeom_holidays=False, use_lunar_holidays=False, use_lunar_weekday=False, use_islamic_holidays=False, use_hebrew_holidays=False)
          +autots.tools.anomaly_utils.anomaly_df_to_holidays(anomaly_df, actuals=None, anomaly_scores=None, threshold=0.8, min_occurrences=2, splash_threshold=0.65, use_dayofmonth_holidays=True, use_wkdom_holidays=True, use_wkdeom_holidays=False, use_lunar_holidays=False, use_lunar_weekday=False, use_islamic_holidays=False, use_hebrew_holidays=False)
          -autots.tools.anomaly_utils.anomaly_new_params(method='random')
          +autots.tools.anomaly_utils.anomaly_new_params(method='random')
          -autots.tools.anomaly_utils.create_dates_df(dates)
          +autots.tools.anomaly_utils.create_dates_df(dates)

          Take a pd.DatetimeIndex and create simple date parts.
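For example (the exact output columns are not documented here and are assumed to be simple date parts):

    import pandas as pd
    from autots.tools.anomaly_utils import create_dates_df

    dates = pd.DatetimeIndex(["2023-01-01", "2023-02-14", "2023-12-25"])
    dates_df = create_dates_df(dates)   # simple date parts such as month/day/weekday (assumed)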

          -autots.tools.anomaly_utils.dates_to_holidays(dates, df_cols, style='long', holiday_impacts='value', day_holidays=None, wkdom_holidays=None, wkdeom_holidays=None, lunar_holidays=None, lunar_weekday=None, islamic_holidays=None, hebrew_holidays=None, max_features: int | None = None)
          +autots.tools.anomaly_utils.dates_to_holidays(dates, df_cols, style='long', holiday_impacts='value', day_holidays=None, wkdom_holidays=None, wkdeom_holidays=None, lunar_holidays=None, lunar_weekday=None, islamic_holidays=None, hebrew_holidays=None, max_features: int | None = None)

          Populate date information for a given pd.DatetimeIndex.

          Parameters:
          @@ -86,7 +94,7 @@

          Submodules
          -autots.tools.anomaly_utils.detect_anomalies(df, output, method, transform_dict=None, method_params={}, eval_period=None, n_jobs=1)
          +autots.tools.anomaly_utils.detect_anomalies(df, output, method, transform_dict=None, method_params={}, eval_period=None, n_jobs=1)

          All will return -1 for anomalies.

          Parameters:
          @@ -106,39 +114,39 @@

          Submodules
          -autots.tools.anomaly_utils.holiday_new_params(method='random')
          +autots.tools.anomaly_utils.holiday_new_params(method='random')

          -autots.tools.anomaly_utils.limits_to_anomalies(df, output, upper_limit, lower_limit, method_params=None)
          +autots.tools.anomaly_utils.limits_to_anomalies(df, output, upper_limit, lower_limit, method_params=None)
          -autots.tools.anomaly_utils.loop_sk_outliers(df, method, method_params={}, n_jobs=1)
          +autots.tools.anomaly_utils.loop_sk_outliers(df, method, method_params={}, n_jobs=1)

          Multiprocessing on each series for multivariate outliers with sklearn.

          -autots.tools.anomaly_utils.nonparametric_multivariate(df, output, method_params, n_jobs=1)
          +autots.tools.anomaly_utils.nonparametric_multivariate(df, output, method_params, n_jobs=1)
          -autots.tools.anomaly_utils.sk_outliers(df, method, method_params={})
          +autots.tools.anomaly_utils.sk_outliers(df, method, method_params={})

          scikit-learn outlier methods wrapper.

          -autots.tools.anomaly_utils.values_to_anomalies(df, output, threshold_method, method_params, n_jobs=1)
          +autots.tools.anomaly_utils.values_to_anomalies(df, output, threshold_method, method_params, n_jobs=1)
          -autots.tools.anomaly_utils.zscore_survival_function(df, output='multivariate', method='zscore', distribution='norm', rolling_periods: int = 200, center: bool = True)
          +autots.tools.anomaly_utils.zscore_survival_function(df, output='multivariate', method='zscore', distribution='norm', rolling_periods: int = 200, center: bool = True)

Take a dataframe, generate z-scores, and then generate survival probabilities (smaller = more outlier-like).

          Parameters:
          @@ -159,31 +167,31 @@

Submodules

-

          autots.tools.calendar module

          +

          autots.tools.calendar module

          Calendar conversion functions.

          Includes Lunar, Chinese lunar, and Arabic lunar

          -autots.tools.calendar.gregorian_to_chinese(datetime_index)
          +autots.tools.calendar.gregorian_to_chinese(datetime_index)

          Convert a pandas DatetimeIndex to Chinese Lunar calendar. Potentially has errors.
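For example (the shape of the returned lunar-date structure is not documented here and is assumed to be a DataFrame-like object aligned to the input dates):

    import pandas as pd
    from autots.tools.calendar import gregorian_to_chinese

    dti = pd.date_range("2024-02-01", periods=15, freq="D")
    lunar = gregorian_to_chinese(dti)   # approximate Chinese lunar calendar information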

          -autots.tools.calendar.gregorian_to_christian_lunar(datetime_index)
          +autots.tools.calendar.gregorian_to_christian_lunar(datetime_index)

Convert a pandas DatetimeIndex to Christian Lunar calendar. Approximate; it is not expected to be exact.

          -autots.tools.calendar.gregorian_to_hebrew(dates)
          +autots.tools.calendar.gregorian_to_hebrew(dates)

          Convert pd.Datetimes to a Hebrew date. From pyluach by simlist.

          This is the slowest of the lot and needs to be improved.

          -autots.tools.calendar.gregorian_to_islamic(date, epoch_adjustment=1.5)
          +autots.tools.calendar.gregorian_to_islamic(date, epoch_adjustment=1.5)

          Calculate Islamic dates for pandas DatetimeIndex. Approximately. From convertdate by fitnr.

          Parameters:
          @@ -194,50 +202,50 @@

          Submodules
          -autots.tools.calendar.heb_is_leap(year)
          +autots.tools.calendar.heb_is_leap(year)

          -autots.tools.calendar.lunar_from_lunar(new_moon)
          +autots.tools.calendar.lunar_from_lunar(new_moon)

          Assumes continuous daily data and pre-needed start.

          -autots.tools.calendar.lunar_from_lunar_full(full_moon)
          +autots.tools.calendar.lunar_from_lunar_full(full_moon)

          Assumes continuous daily data and pre-needed start.

          -autots.tools.calendar.to_jd(year, month, day)
          +autots.tools.calendar.to_jd(year, month, day)

          Determine Julian day count from Islamic date. From convertdate by fitnr.

          -

          autots.tools.cointegration module

          +

          autots.tools.cointegration module

          Cointegration

          Johansen heavily based on Statsmodels source code

          BTCD heavily based on D. Barba https://towardsdatascience.com/canonical-decomposition-a-forgotten-method-for-time-series-cointegration-and-beyond-4d1213396da1

          -autots.tools.cointegration.btcd_decompose(p_mat: ndarray, regression_model, max_lag: int = 1, return_eigenvalues=False)
          +autots.tools.cointegration.btcd_decompose(p_mat: ndarray, regression_model, max_lag: int = 1, return_eigenvalues=False)

Calculate decomposition. p_mat is of shape (t, n), wide-style data.

          -autots.tools.cointegration.coint_johansen(endog, det_order=-1, k_ar_diff=1, return_eigenvalues=False)
          +autots.tools.cointegration.coint_johansen(endog, det_order=-1, k_ar_diff=1, return_eigenvalues=False)

          Johansen cointegration test of the cointegration rank of a VECM, abbreviated from Statsmodels

          -autots.tools.cointegration.fourier_series(dates, period, series_order)
          +autots.tools.cointegration.fourier_series(dates, period, series_order)

          Provides Fourier series components with the specified frequency and order.

          @@ -256,17 +264,17 @@

          Submodules
          -autots.tools.cointegration.lagmat(x, maxlag: int, trim='forward', original='ex')
          +autots.tools.cointegration.lagmat(x, maxlag: int, trim='forward', original='ex')

          Create 2d array of lags. Modified from Statsmodels.
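A small sketch using only the documented arguments; behavior is assumed to mirror the Statsmodels original.

    import numpy as np
    from autots.tools.cointegration import lagmat

    x = np.arange(10.0).reshape(-1, 1)
    lags = lagmat(x, maxlag=2, trim="forward", original="ex")   # 2D array of lagged values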

          -

          autots.tools.cpu_count module

          +

          autots.tools.cpu_count module

CPU counter for multiprocessing.

          -autots.tools.cpu_count.cpu_count(modifier: float = 1)
          +autots.tools.cpu_count.cpu_count(modifier: float = 1)

          Find available CPU count, running on both Windows/Linux.

          Attempts to be very conservative:
          -

          autots.tools.fast_kalman module

          +

          autots.tools.fast_kalman module

          From SIMD KALMAN, (c) 2017 Otto Seiskari (MIT License)

          Some other resources that I have found useful:

https://kevinkotze.github.io/ts-4-state-space/

@@ -318,7 +326,7 @@

Submodules

-

          Usage example

          +

          Usage example

import numpy.random
numpy.random.seed(0)

          @@ -383,28 +391,28 @@

          Usage example
          -class autots.tools.fast_kalman.Gaussian(mean, cov)
          +class autots.tools.fast_kalman.Gaussian(mean, cov)

          Bases: object

          -static empty(n_states, n_vars, n_measurements, cov=True)
          +static empty(n_states, n_vars, n_measurements, cov=True)
          -unvectorize_state()
          +unvectorize_state()
          -unvectorize_vars()
          +unvectorize_vars()

          -class autots.tools.fast_kalman.KalmanFilter(state_transition, process_noise, observation_model, observation_noise)
          +class autots.tools.fast_kalman.KalmanFilter(state_transition, process_noise, observation_model, observation_noise)

          Bases: object

The main Kalman filter class providing convenient interfaces to vectorized smoothing and filtering operations on multiple independent

@@ -434,13 +442,13 @@

          Usage example
          -class Result
          +class Result

          Bases: object

          -compute(data, n_test, initial_value=None, initial_covariance=None, smoothed=True, filtered=False, states=True, covariances=True, observations=True, likelihoods=False, gains=False, log_likelihood=False, verbose=False)
          +compute(data, n_test, initial_value=None, initial_covariance=None, smoothed=True, filtered=False, states=True, covariances=True, observations=True, likelihoods=False, gains=False, log_likelihood=False, verbose=False)

          Smoothing, filtering and prediction at the same time. Used internally by other methods, but can also be used directly if, e.g., both smoothed and predicted data is wanted.

          @@ -474,22 +482,22 @@

          Usage example
          -em(data, n_iter=5, initial_value=None, initial_covariance=None, verbose=False)
          +em(data, n_iter=5, initial_value=None, initial_covariance=None, verbose=False)

          -em_observation_noise(result, data, verbose=False)
          +em_observation_noise(result, data, verbose=False)
          -em_process_noise(result, verbose=False)
          +em_process_noise(result, verbose=False)
          -predict(data, n_test, initial_value=None, initial_covariance=None, states=True, observations=True, covariances=True, verbose=False)
          +predict(data, n_test, initial_value=None, initial_covariance=None, states=True, observations=True, covariances=True, verbose=False)

          Filter past data and predict a given number of future values. The data can be given as either of

          @@ -533,7 +541,7 @@

          Usage example
          -predict_next(m, P)
          +predict_next(m, P)

          Single prediction step

          Parameters:
          @@ -551,7 +559,7 @@

          Usage example
          -predict_observation(m, P)
          +predict_observation(m, P)

          Probability distribution of observation \(y\) for a given distribution of \(x\)

          @@ -570,7 +578,7 @@

          Usage example
          -smooth(data, initial_value=None, initial_covariance=None, observations=True, states=True, covariances=True, verbose=False)
          +smooth(data, initial_value=None, initial_covariance=None, observations=True, states=True, covariances=True, verbose=False)

          Smooth given data, which can be either

            @@ -612,7 +620,7 @@

            Usage example
            -smooth_current(m, P, ms, Ps)
            +smooth_current(m, P, ms, Ps)

Single Kalman smoother backwards step

            Parameters:
            @@ -636,7 +644,7 @@

            Usage example
            -update(m, P, y, log_likelihood=False)
            +update(m, P, y, log_likelihood=False)

            Single update step with NaN check.

            Parameters:
            @@ -663,64 +671,64 @@

            Usage example
            -autots.tools.fast_kalman.autoshape(func)
            +autots.tools.fast_kalman.autoshape(func)

            Automatically shape arguments and return values

            -autots.tools.fast_kalman.ddot(A, B)
            +autots.tools.fast_kalman.ddot(A, B)

            Matrix multiplication over last two axes

            -autots.tools.fast_kalman.ddot_t_right(A, B)
            +autots.tools.fast_kalman.ddot_t_right(A, B)

            Matrix multiplication over last 2 axes with right operand transposed

            -autots.tools.fast_kalman.ddot_t_right_old(A, B)
            +autots.tools.fast_kalman.ddot_t_right_old(A, B)

            Matrix multiplication over last 2 axes with right operand transposed

            -autots.tools.fast_kalman.dinv(A)
            +autots.tools.fast_kalman.dinv(A)

            Matrix inverse applied to last two axes

            -autots.tools.fast_kalman.douter(a, b)
            +autots.tools.fast_kalman.douter(a, b)

            Outer product, last two axes

            -autots.tools.fast_kalman.em_initial_state(result, initial_means)
            +autots.tools.fast_kalman.em_initial_state(result, initial_means)
            -autots.tools.fast_kalman.ensure_matrix(x, dim=1)
            +autots.tools.fast_kalman.ensure_matrix(x, dim=1)
            -autots.tools.fast_kalman.holt_winters_damped_matrices(M, alpha, beta, gamma, phi=1.0)
            +autots.tools.fast_kalman.holt_winters_damped_matrices(M, alpha, beta, gamma, phi=1.0)

            Not sure if this is correct. It’s close, at least.

            -autots.tools.fast_kalman.new_kalman_params(method=None, allow_auto=True)
            +autots.tools.fast_kalman.new_kalman_params(method=None, allow_auto=True)
            -autots.tools.fast_kalman.predict(mean, covariance, state_transition, process_noise)
            +autots.tools.fast_kalman.predict(mean, covariance, state_transition, process_noise)

            Kalman filter prediction step

            Parameters:
            @@ -742,7 +750,7 @@

            Usage example
            -autots.tools.fast_kalman.predict_observation(mean, covariance, observation_model, observation_noise)
            +autots.tools.fast_kalman.predict_observation(mean, covariance, observation_model, observation_noise)

            Compute probability distribution of the observation \(y\), given the distribution of \(x\).

            @@ -762,23 +770,23 @@

            Usage example
            -autots.tools.fast_kalman.priv_smooth(posterior_mean, posterior_covariance, state_transition, process_noise, next_smooth_mean, next_smooth_covariance)
            +autots.tools.fast_kalman.priv_smooth(posterior_mean, posterior_covariance, state_transition, process_noise, next_smooth_mean, next_smooth_covariance)

            -autots.tools.fast_kalman.priv_update_with_nan_check(prior_mean, prior_covariance, observation_model, observation_noise, measurement, log_likelihood=False)
            +autots.tools.fast_kalman.priv_update_with_nan_check(prior_mean, prior_covariance, observation_model, observation_noise, measurement, log_likelihood=False)
            -autots.tools.fast_kalman.random_state_space()
            +autots.tools.fast_kalman.random_state_space()

            Return randomly generated statespace models.

            -autots.tools.fast_kalman.smooth(posterior_mean, posterior_covariance, state_transition, process_noise, next_smooth_mean, next_smooth_covariance)
            +autots.tools.fast_kalman.smooth(posterior_mean, posterior_covariance, state_transition, process_noise, next_smooth_mean, next_smooth_covariance)

            Kalman smoother backwards step

            Parameters:
            @@ -803,7 +811,7 @@

            Usage example
            -autots.tools.fast_kalman.update(prior_mean, prior_covariance, observation_model, observation_noise, measurement)
            +autots.tools.fast_kalman.update(prior_mean, prior_covariance, observation_model, observation_noise, measurement)

            Kalman filter update step

            Parameters:
            @@ -829,43 +837,43 @@

            Usage example
            -autots.tools.fast_kalman.update_with_nan_check(prior_mean, prior_covariance, observation_model, observation_noise, measurement)
            +autots.tools.fast_kalman.update_with_nan_check(prior_mean, prior_covariance, observation_model, observation_noise, measurement)

            Kalman filter update with a check for NaN observations. Like update but returns (prior_mean, prior_covariance) if measurement is NaN

          -

          autots.tools.fft module

          +

          autots.tools.fft module

          Created on Mon Oct 9 22:07:37 2023

          @author: colincatlin

          -class autots.tools.fft.FFT(n_harm=10, detrend='linear', freq_range=None)
          +class autots.tools.fft.FFT(n_harm=10, detrend='linear', freq_range=None)

          Bases: object

          -fit(x)
          +fit(x)
          -predict(forecast_length)
          +predict(forecast_length)
          -autots.tools.fft.fourier_extrapolation(x, forecast_length=10, n_harm=10, detrend='linear', freq_range=None)
          +autots.tools.fft.fourier_extrapolation(x, forecast_length=10, n_harm=10, detrend='linear', freq_range=None)
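A minimal sketch of the FFT class above on synthetic data; treating the input as a 2D (time steps, series) numpy array is an assumption here.

import numpy as np
from autots.tools.fft import FFT

x = np.sin(np.arange(300) / 10.0).reshape(-1, 1) + np.random.normal(0, 0.1, (300, 1))
model = FFT(n_harm=10, detrend='linear')
model.fit(x)
forecast = model.predict(30)  # extrapolate the fitted harmonics 30 steps forward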
          -

          autots.tools.hierarchial module

          +

          autots.tools.hierarchial module

          -class autots.tools.hierarchial.hierarchial(grouping_method: str = 'tile', n_groups: int = 5, reconciliation: str = 'mean', grouping_ids: dict | None = None)
          +class autots.tools.hierarchial.hierarchial(grouping_method: str = 'tile', n_groups: int = 5, reconciliation: str = 'mean', grouping_ids: dict | None = None)

          Bases: object

Create hierarchical series, then reconcile.

Currently only performs one-level groupings.

@@ -879,19 +887,19 @@

          Usage example
          -fit(df)
          +fit(df)

          Construct and save object info.

          -reconcile(df)
          +reconcile(df)

          Apply to forecasted data containing bottom and top levels.

          -transform(df)
          +transform(df)

          Apply hierarchy to existing data with bottom levels only.
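A hedged sketch of the fit / transform / reconcile workflow listed above on randomly generated bottom-level series; the grouping and reconciliation arguments follow the constructor defaults, and reconciling the transformed frame directly is an assumption for illustration.

import numpy as np
import pandas as pd
from autots.tools.hierarchial import hierarchial

idx = pd.date_range("2023-01-01", periods=60, freq="D")
df = pd.DataFrame(np.random.rand(60, 6), index=idx,
                  columns=[f"series_{i}" for i in range(6)])
hier = hierarchial(grouping_method="tile", n_groups=3, reconciliation="mean")
hier.fit(df)
expanded = hier.transform(df)          # bottom levels plus constructed top levels
reconciled = hier.reconcile(expanded)  # reconcile data containing bottom and top levels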

          @@ -899,11 +907,11 @@

          Usage example -

          autots.tools.holiday module

          +

          autots.tools.holiday module

          Manage holiday features.

          -autots.tools.holiday.holiday_flag(DTindex, country: str = 'US', encode_holiday_type: bool = False, holidays_subdiv=None)
          +autots.tools.holiday.holiday_flag(DTindex, country: str = 'US', encode_holiday_type: bool = False, holidays_subdiv=None)

Create a 0/1 flag for a given datetime index. Falls back to pandas for US holidays if the holidays package is unavailable.
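For example (a sketch, not from the original docs), flagging US holidays over one year of daily dates:

import pandas as pd
from autots.tools.holiday import holiday_flag

dt_index = pd.date_range("2023-01-01", "2023-12-31", freq="D")
flags = holiday_flag(dt_index, country="US")  # 1 on recognized holidays, 0 otherwise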

          Parameters:
          @@ -923,7 +931,7 @@

          Usage example
          -autots.tools.holiday.query_holidays(DTindex, country: str, encode_holiday_type: bool = False, holidays_subdiv=None)
          +autots.tools.holiday.query_holidays(DTindex, country: str, encode_holiday_type: bool = False, holidays_subdiv=None)

          Query holidays package for dates.

          Parameters:
          @@ -938,11 +946,11 @@

          Usage example -

          autots.tools.impute module

          +

          autots.tools.impute module

          Fill NA.

          -autots.tools.impute.FillNA(df, method: str = 'ffill', window: int = 10)
          +autots.tools.impute.FillNA(df, method: str = 'ffill', window: int = 10)

          Fill NA values using different methods.
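A short sketch of FillNA on a small frame with gaps, using the default 'ffill' method from the signature above:

import numpy as np
import pandas as pd
from autots.tools.impute import FillNA

df = pd.DataFrame(
    {"a": [1.0, np.nan, 3.0, np.nan], "b": [np.nan, 2.0, 2.0, 2.0]},
    index=pd.date_range("2023-01-01", periods=4, freq="D"),
)
filled = FillNA(df, method="ffill", window=10)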

          Parameters:
          @@ -963,11 +971,11 @@

          Usage example
          -class autots.tools.impute.SeasonalityMotifImputer(k: int = 3, datepart_method: str = 'simple_2', distance_metric: str = 'canberra', linear_mixed: bool = False)
          +class autots.tools.impute.SeasonalityMotifImputer(k: int = 3, datepart_method: str = 'simple_2', distance_metric: str = 'canberra', linear_mixed: bool = False)

          Bases: object

          -impute(df)
          +impute(df)

          Infer missing values on input df.

          @@ -975,11 +983,11 @@

          Usage example
          -class autots.tools.impute.SimpleSeasonalityMotifImputer(datepart_method: str = 'simple_2', distance_metric: str = 'canberra', linear_mixed: bool = False, max_iter: int = 100)
          +class autots.tools.impute.SimpleSeasonalityMotifImputer(datepart_method: str = 'simple_2', distance_metric: str = 'canberra', linear_mixed: bool = False, max_iter: int = 100)

          Bases: object

          -impute(df)
          +impute(df)

          Infer missing values on input df.

          @@ -987,13 +995,13 @@

          Usage example
          -autots.tools.impute.biased_ffill(df, mean_weight: float = 1)
          +autots.tools.impute.biased_ffill(df, mean_weight: float = 1)

          Fill NaN with average of last value and mean.

          -autots.tools.impute.fake_date_fill(df, back_method: str = 'slice')
          +autots.tools.impute.fake_date_fill(df, back_method: str = 'slice')

          Numpy vectorized version. Return a dataframe where na values are removed and values shifted forward.

          @@ -1013,7 +1021,7 @@

          Usage example
          -autots.tools.impute.fake_date_fill_old(df, back_method: str = 'slice')
          +autots.tools.impute.fake_date_fill_old(df, back_method: str = 'slice')

          Return a dataframe where na values are removed and values shifted forward.

          Warning

          @@ -1032,93 +1040,93 @@

          Usage example
          -autots.tools.impute.fill_forward(df)
          +autots.tools.impute.fill_forward(df)

          Fill NaN with previous values.

          -autots.tools.impute.fill_forward_alt(df)
          +autots.tools.impute.fill_forward_alt(df)

          Fill NaN with previous values.

          -autots.tools.impute.fill_mean(df)
          +autots.tools.impute.fill_mean(df)
          -autots.tools.impute.fill_mean_old(df)
          +autots.tools.impute.fill_mean_old(df)

          Fill NaN with mean.

          -autots.tools.impute.fill_median(df)
          +autots.tools.impute.fill_median(df)

          Fill nan with median values. Does not work with non-numeric types.

          -autots.tools.impute.fill_median_old(df)
          +autots.tools.impute.fill_median_old(df)

          Fill NaN with median.

          -autots.tools.impute.fill_zero(df)
          +autots.tools.impute.fill_zero(df)

          Fill NaN with zero.

          -autots.tools.impute.fillna_np(array, values)
          +autots.tools.impute.fillna_np(array, values)
          -autots.tools.impute.rolling_mean(df, window: int = 10)
          +autots.tools.impute.rolling_mean(df, window: int = 10)

          Fill NaN with mean of last window values.

          -

          autots.tools.lunar module

          +

          autots.tools.lunar module

          Phases of the moon. Modified from https://stackoverflow.com/a/2531541/9492254 by keturn and earlier from John Walker

          -autots.tools.lunar.dcos(d)
          +autots.tools.lunar.dcos(d)
          -autots.tools.lunar.dsin(d)
          +autots.tools.lunar.dsin(d)
          -autots.tools.lunar.fixangle(a)
          +autots.tools.lunar.fixangle(a)
          -autots.tools.lunar.kepler(m, ecc=0.016718)
          +autots.tools.lunar.kepler(m, ecc=0.016718)

          Solve the equation of Kepler.

          -autots.tools.lunar.moon_phase(datetime_index, epsilon=1e-06, epoch=2444237.905, ecliptic_longitude_epoch=278.83354, ecliptic_longitude_perigee=282.596403, eccentricity=0.016718, moon_mean_longitude_epoch=64.975464, moon_mean_perigee_epoch=349.383063)
          +autots.tools.lunar.moon_phase(datetime_index, epsilon=1e-06, epoch=2444237.905, ecliptic_longitude_epoch=278.83354, ecliptic_longitude_perigee=282.596403, eccentricity=0.016718, moon_mean_longitude_epoch=64.975464, moon_mean_perigee_epoch=349.383063)

Numpy version. Takes a pd.DatetimeIndex and returns moon phase (% illuminated). The epoch can be adjusted slightly (0.5 = half a day) to account for time zones; the default is for the US, while epoch=2444238.5 is generally suitable for Asia.
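A quick sketch of moon_phase with the default US-centric epoch:

import pandas as pd
from autots.tools.lunar import moon_phase

dt_index = pd.date_range("2024-01-01", periods=60, freq="D")
illumination = moon_phase(dt_index)  # percent illuminated per date, per the description above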

          -autots.tools.lunar.moon_phase_df(datetime_index, epoch=2444237.905)
          +autots.tools.lunar.moon_phase_df(datetime_index, epoch=2444237.905)

Convert a pandas DatetimeIndex to moon phases. Note that timezone and hour can matter slightly. The epoch can be adjusted slightly (0.5 = half a day) to account for time zones; 2444237.905 is for US Central, while epoch=2444238.5 is generally suitable for Asia.

          @@ -1126,27 +1134,27 @@

          Usage example
          -autots.tools.lunar.phase_string(p, precision=0.05, new=0.0, first=0.25, full=0.4, last=0.75, nextnew=1.0)
          +autots.tools.lunar.phase_string(p, precision=0.05, new=0.0, first=0.25, full=0.4, last=0.75, nextnew=1.0)

          -autots.tools.lunar.todeg(r)
          +autots.tools.lunar.todeg(r)
          -autots.tools.lunar.torad(d)
          +autots.tools.lunar.torad(d)
          -

          autots.tools.percentile module

          +

          autots.tools.percentile module

          Faster percentile and quantile for numpy

          Entirely from: https://krstn.eu/np.nanpercentile()-there-has-to-be-a-faster-way/

          -autots.tools.percentile.nan_percentile(in_arr, q, method='linear', axis=0, errors='raise')
          +autots.tools.percentile.nan_percentile(in_arr, q, method='linear', axis=0, errors='raise')

Given a 3D array, return the percentiles specified by q. Beware this is only tested for the limited cases required here and will not fully match numpy; the supported arguments are more limited. If errors="rollover", it passes through to np.nanpercentile where arguments are not supported.
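A hedged sketch of nan_percentile on a synthetic 3D array; passing q as a list follows the plural "percentiles" wording above and is otherwise an assumption.

import numpy as np
from autots.tools.percentile import nan_percentile

arr = np.random.rand(100, 5, 3)
arr[::9, 0, 0] = np.nan                           # scatter some missing values
bounds = nan_percentile(arr, q=[10, 90], axis=0)  # 10th and 90th percentiles over axis 0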

          @@ -1154,23 +1162,23 @@

          Usage example
          -autots.tools.percentile.nan_quantile(arr, q, method='linear', axis=0, errors='raise')
          +autots.tools.percentile.nan_quantile(arr, q, method='linear', axis=0, errors='raise')

Same as nan_percentile but accepts q in the range [0, 1]. Supported arguments are more limited. If errors="rollover", it passes through to np.nanpercentile where arguments are not supported.

          -autots.tools.percentile.trimmed_mean(data, percent, axis=0)
          +autots.tools.percentile.trimmed_mean(data, percent, axis=0)
          -

          autots.tools.probabilistic module

          +

          autots.tools.probabilistic module

          Point to Probabilistic

          -autots.tools.probabilistic.Point_to_Probability(train, forecast, prediction_interval=0.9, method: str = 'historic_quantile')
          +autots.tools.probabilistic.Point_to_Probability(train, forecast, prediction_interval=0.9, method: str = 'historic_quantile')

          Data driven placeholder for model error estimation.

          Catlin Point to Probability method (‘a mixture of dark magic and gum disease’)
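A hedged sketch of Point_to_Probability on synthetic data; the (upper, lower) return order shown here is an assumption, not confirmed by the signature above.

import numpy as np
import pandas as pd
from autots.tools.probabilistic import Point_to_Probability

idx = pd.date_range("2023-01-01", periods=120, freq="D")
train = pd.DataFrame({"sales": np.random.gamma(5, 10, 120)}, index=idx)
future_idx = pd.date_range(idx[-1] + pd.Timedelta(days=1), periods=14, freq="D")
forecast = pd.DataFrame({"sales": np.full(14, train["sales"].mean())}, index=future_idx)
upper, lower = Point_to_Probability(
    train, forecast, prediction_interval=0.9, method="historic_quantile"
)  # return order assumed, see note above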

          @@ -1194,7 +1202,7 @@

          Usage example
          -autots.tools.probabilistic.Variable_Point_to_Probability(train, forecast, alpha=0.3, beta=1)
          +autots.tools.probabilistic.Variable_Point_to_Probability(train, forecast, alpha=0.3, beta=1)

          Data driven placeholder for model error estimation.

ErrorRange = beta * (En + alpha * En-1 [cumulative sum of En])

En = abs(0.5 - QTP) * D

@@ -1226,7 +1234,7 @@

          Usage example
          -autots.tools.probabilistic.historic_quantile(df_train, prediction_interval: float = 0.9, nan_flag=None)
          +autots.tools.probabilistic.historic_quantile(df_train, prediction_interval: float = 0.9, nan_flag=None)

          Computes the difference between the median and the prediction interval range in historic data.

          Parameters:
          @@ -1246,33 +1254,33 @@

          Usage example
          -autots.tools.probabilistic.inferred_normal(train, forecast, n: int = 5, prediction_interval: float = 0.9)
          +autots.tools.probabilistic.inferred_normal(train, forecast, n: int = 5, prediction_interval: float = 0.9)

          A corruption of Bayes theorem. It will be sensitive to the transformations of the data.

          -autots.tools.probabilistic.percentileofscore_appliable(x, a, kind='rank')
          +autots.tools.probabilistic.percentileofscore_appliable(x, a, kind='rank')

          -

          autots.tools.profile module

          +

          autots.tools.profile module

          Profiling

          -autots.tools.profile.data_profile(df)
          +autots.tools.profile.data_profile(df)

          Input: a pd DataFrame of columns which are time series, and a datetime index

Output: a pd DataFrame with one column per time series, with rows which are statistics

          -

          autots.tools.regressor module

          +

          autots.tools.regressor module

          -autots.tools.regressor.create_lagged_regressor(df, forecast_length: int, frequency: str = 'infer', scale: bool = True, summarize: str | None = None, backfill: str = 'bfill', n_jobs: str = 'auto', fill_na: str = 'ffill')
          +autots.tools.regressor.create_lagged_regressor(df, forecast_length: int, frequency: str = 'infer', scale: bool = True, summarize: str | None = None, backfill: str = 'bfill', n_jobs: str = 'auto', fill_na: str = 'ffill')

Create a regressor of features lagged by the forecast length. Useful for some models that don’t otherwise use such information.

It is recommended that the .head(forecast_length) of both regressor_train and the training df be dropped.

@@ -1301,7 +1309,7 @@

          Usage example
          -autots.tools.regressor.create_regressor(df, forecast_length, frequency: str = 'infer', holiday_countries: list = ['US'], datepart_method: str = 'simple_binarized', drop_most_recent: int = 0, scale: bool = True, summarize: str = 'auto', backfill: str = 'bfill', n_jobs: str = 'auto', fill_na: str = 'ffill', aggfunc: str = 'first', encode_holiday_type=False, holiday_detector_params={'anomaly_detector_params': {'forecast_params': None, 'method': 'mad', 'method_params': {'alpha': 0.05, 'distribution': 'gamma'}, 'transform_dict': {'fillna': None, 'transformation_params': {'0': {}}, 'transformations': {'0': 'DifferencedTransformer'}}}, 'output': 'univariate', 'splash_threshold': None, 'threshold': 0.8, 'use_dayofmonth_holidays': True, 'use_hebrew_holidays': False, 'use_islamic_holidays': False, 'use_lunar_holidays': False, 'use_lunar_weekday': False, 'use_wkdeom_holidays': False, 'use_wkdom_holidays': True}, holiday_regr_style: str = 'flag', preprocessing_params: dict | None = None)
          +autots.tools.regressor.create_regressor(df, forecast_length, frequency: str = 'infer', holiday_countries: list = ['US'], datepart_method: str = 'simple_binarized', drop_most_recent: int = 0, scale: bool = True, summarize: str = 'auto', backfill: str = 'bfill', n_jobs: str = 'auto', fill_na: str = 'ffill', aggfunc: str = 'first', encode_holiday_type=False, holiday_detector_params={'anomaly_detector_params': {'forecast_params': None, 'method': 'mad', 'method_params': {'alpha': 0.05, 'distribution': 'gamma'}, 'transform_dict': {'fillna': None, 'transformation_params': {'0': {}}, 'transformations': {'0': 'DifferencedTransformer'}}}, 'output': 'univariate', 'splash_threshold': None, 'threshold': 0.8, 'use_dayofmonth_holidays': True, 'use_hebrew_holidays': False, 'use_islamic_holidays': False, 'use_lunar_holidays': False, 'use_lunar_weekday': False, 'use_wkdeom_holidays': False, 'use_wkdom_holidays': True}, holiday_regr_style: str = 'flag', preprocessing_params: dict | None = None)

          Create a regressor from information available in the existing dataset. Components: are lagged data, datepart information, and holiday.

This function has been confusing people. It is NOT necessary for machine learning models; in AutoTS, those internally create more elaborate feature sets separately.

@@ -1343,24 +1351,24 @@
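A hedged sketch of create_regressor for use with regression-capable models; the (regressor_train, regressor_forecast) return pair is assumed here from the lagged-regressor variant above.

import numpy as np
import pandas as pd
from autots.tools.regressor import create_regressor

idx = pd.date_range("2022-01-01", periods=400, freq="D")
df = pd.DataFrame({"y1": np.random.randn(400).cumsum(),
                   "y2": np.random.randn(400).cumsum()}, index=idx)
regressor_train, regressor_forecast = create_regressor(
    df, forecast_length=30, frequency="infer", holiday_countries=["US"]
)
# as with create_lagged_regressor, consider dropping .head(forecast_length)
# of both df and regressor_train before fitting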

          Usage example -

          autots.tools.seasonal module

          +

          autots.tools.seasonal module

          seasonal

          @author: Colin

          -autots.tools.seasonal.create_datepart_components(DTindex, seasonality)
          +autots.tools.seasonal.create_datepart_components(DTindex, seasonality)

Single date part one-hot flags.

          -autots.tools.seasonal.create_seasonality_feature(DTindex, t, seasonality, history_days=None)
          +autots.tools.seasonal.create_seasonality_feature(DTindex, t, seasonality, history_days=None)

          Cassandra-designed feature generator.

          -autots.tools.seasonal.date_part(DTindex, method: str = 'simple', set_index: bool = True, polynomial_degree: int | None = None, holiday_country: str | None = None, holiday_countries_used: bool = True)
          +autots.tools.seasonal.date_part(DTindex, method: str = 'simple', set_index: bool = True, polynomial_degree: int | None = None, holiday_country: str | None = None, holiday_countries_used: bool = True)

          Create date part columns from pd.DatetimeIndex.
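For instance (a sketch), generating binarized date part features for a daily index:

import pandas as pd
from autots.tools.seasonal import date_part

dt_index = pd.date_range("2023-01-01", periods=90, freq="D")
features = date_part(dt_index, method="simple_binarized")  # one row of flags per date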

          Parameters:
          @@ -1387,28 +1395,28 @@

          Usage example
          -autots.tools.seasonal.fourier_df(DTindex, seasonality, order=10, t=None, history_days=None)
          +autots.tools.seasonal.fourier_df(DTindex, seasonality, order=10, t=None, history_days=None)

          -autots.tools.seasonal.fourier_series(t, p=365.25, n=10)
          +autots.tools.seasonal.fourier_series(t, p=365.25, n=10)
          -autots.tools.seasonal.random_datepart(method='random')
          +autots.tools.seasonal.random_datepart(method='random')

          New random parameters for seasonality.

          -autots.tools.seasonal.seasonal_independent_match(DTindex, DTindex_future, k, datepart_method='simple_binarized', distance_metric='canberra', full_sort=False, nan_array=None)
          +autots.tools.seasonal.seasonal_independent_match(DTindex, DTindex_future, k, datepart_method='simple_binarized', distance_metric='canberra', full_sort=False, nan_array=None)
          -autots.tools.seasonal.seasonal_int(include_one: bool = False, small=False, very_small=False)
          +autots.tools.seasonal.seasonal_int(include_one: bool = False, small=False, very_small=False)

          Generate a random integer of typical seasonalities.

          Parameters:
          @@ -1423,16 +1431,16 @@

          Usage example
          -autots.tools.seasonal.seasonal_window_match(DTindex, k, window_size, forecast_length, datepart_method, distance_metric, full_sort=False)
          +autots.tools.seasonal.seasonal_window_match(DTindex, k, window_size, forecast_length, datepart_method, distance_metric, full_sort=False)

          -

          autots.tools.shaping module

          +

          autots.tools.shaping module

          Reshape data.

          -class autots.tools.shaping.NumericTransformer(na_strings: list = ['', ' '], categorical_fillna: str = 'ffill', handle_unknown: str = 'use_encoded_value', downcast: str | None = None, verbose: int = 0)
          +class autots.tools.shaping.NumericTransformer(na_strings: list = ['', ' '], categorical_fillna: str = 'ffill', handle_unknown: str = 'use_encoded_value', downcast: str | None = None, verbose: int = 0)

          Bases: object

          General purpose numeric conversion for pandas dataframes.

All categorical data and levels must be passed to .fit().

@@ -1453,7 +1461,7 @@
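A small sketch of the round trip, assuming the fit_transform and inverse_transform methods documented below:

import pandas as pd
from autots.tools.shaping import NumericTransformer

df = pd.DataFrame({"value": [1.0, 2.0, 3.0], "label": ["a", "b", "a"]},
                  index=pd.date_range("2023-01-01", periods=3, freq="D"))
transformer = NumericTransformer()
numeric_df = transformer.fit_transform(df)            # all-numeric frame
restored = transformer.inverse_transform(numeric_df)  # categories mapped back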

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -1464,7 +1472,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -1475,7 +1483,7 @@

          Usage example
          -inverse_transform(df, convert_dtypes: bool = False)
          +inverse_transform(df, convert_dtypes: bool = False)

Convert numeric back to categorical.

Parameters:
df (pandas.DataFrame) – df

@@ -1485,7 +1493,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Convert categorical dataset to numeric.

          @@ -1493,7 +1501,7 @@

          Usage example
          -autots.tools.shaping.clean_weights(weights, series, verbose=0)
          +autots.tools.shaping.clean_weights(weights, series, verbose=0)

          Polish up series weighting information

          Parameters:
          @@ -1507,7 +1515,7 @@

          Usage example
          -autots.tools.shaping.df_cleanup(df_wide, frequency: str = 'infer', prefill_na: str | None = None, na_tolerance: float = 0.999, drop_data_older_than_periods: int = 100000, drop_most_recent: int = 0, aggfunc: str = 'first', verbose: int = 1)
          +autots.tools.shaping.df_cleanup(df_wide, frequency: str = 'infer', prefill_na: str | None = None, na_tolerance: float = 0.999, drop_data_older_than_periods: int = 100000, drop_most_recent: int = 0, aggfunc: str = 'first', verbose: int = 1)

          Pass cleaning functions through to dataframe.

          Parameters:
          @@ -1532,13 +1540,13 @@

          Usage example
          -autots.tools.shaping.freq_to_timedelta(freq)
          +autots.tools.shaping.freq_to_timedelta(freq)

          Working around pandas limitations.

          -autots.tools.shaping.infer_frequency(df_wide, warn=True, **kwargs)
          +autots.tools.shaping.infer_frequency(df_wide, warn=True, **kwargs)

          Infer the frequency in a slightly more robust way.

          Parameters:
          @@ -1552,7 +1560,7 @@

          Usage example
          -autots.tools.shaping.long_to_wide(df, date_col: str = 'datetime', value_col: str = 'value', id_col: str = 'series_id', aggfunc: str = 'first')
          +autots.tools.shaping.long_to_wide(df, date_col: str = 'datetime', value_col: str = 'value', id_col: str = 'series_id', aggfunc: str = 'first')

Take long-format data and convert it into wide, cleaner data.
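A sketch of long_to_wide on a tiny long-format frame, using the default column names from the signature above:

import pandas as pd
from autots.tools.shaping import long_to_wide

long_df = pd.DataFrame({
    "datetime": pd.to_datetime(["2023-01-01", "2023-01-01", "2023-01-02", "2023-01-02"]),
    "series_id": ["a", "b", "a", "b"],
    "value": [1.0, 10.0, 2.0, 11.0],
})
wide_df = long_to_wide(long_df, date_col="datetime", value_col="value",
                       id_col="series_id", aggfunc="first")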

          Parameters:
          @@ -1580,7 +1588,7 @@

          Usage example
          -autots.tools.shaping.simple_train_test_split(df, forecast_length: int = 10, min_allowed_train_percent: float = 0.3, verbose: int = 1)
          +autots.tools.shaping.simple_train_test_split(df, forecast_length: int = 10, min_allowed_train_percent: float = 0.3, verbose: int = 1)

Uses the last forecast_length periods as the test set and the rest as the train set
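For example (a sketch; the (train, test) return order is assumed from the description):

import numpy as np
import pandas as pd
from autots.tools.shaping import simple_train_test_split

idx = pd.date_range("2023-01-01", periods=100, freq="D")
df = pd.DataFrame({"y": np.arange(100.0)}, index=idx)
train, test = simple_train_test_split(df, forecast_length=10)  # test is the last 10 rows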

          Parameters:
          @@ -1602,12 +1610,12 @@

          Usage example
          -autots.tools.shaping.split_digits_and_non_digits(s)
          +autots.tools.shaping.split_digits_and_non_digits(s)

          -autots.tools.shaping.subset_series(df, weights, n: int = 1000, random_state: int = 2020)
          +autots.tools.shaping.subset_series(df, weights, n: int = 1000, random_state: int = 2020)

          Return a sample of time series.

          Parameters:
          @@ -1622,7 +1630,7 @@

          Usage example
          -autots.tools.shaping.wide_to_3d(wide_arr, seasonality=7, output_shape='gst')
          +autots.tools.shaping.wide_to_3d(wide_arr, seasonality=7, output_shape='gst')

Generates a 3D array (groups/seasonality, series, time steps) from a wide (time step, series) numpy array.
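A small sketch with a 28-step, 3-series array and weekly seasonality:

import numpy as np
from autots.tools.shaping import wide_to_3d

wide = np.random.rand(28, 3)            # (time steps, series)
cube = wide_to_3d(wide, seasonality=7)  # grouped by seasonality per the description above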

          Parameters:
          @@ -1639,16 +1647,16 @@

          Usage example -

          autots.tools.thresholding module

          +

          autots.tools.thresholding module

          Created on Thu Jul 7 10:27:46 2022

          @author: Colin

          -class autots.tools.thresholding.NonparametricThreshold(data, warmup_pts: int = 1, p=0.1, error_buffer=1, z_init=2.5, z_limit=12.0, z_step=0.5, max_contamination=0.25, mean_weight: float = 10, sd_weight: float = 10, anomaly_count_weight: float = 1, inverse: bool = False)
          +class autots.tools.thresholding.NonparametricThreshold(data, warmup_pts: int = 1, p=0.1, error_buffer=1, z_init=2.5, z_limit=12.0, z_step=0.5, max_contamination=0.25, mean_weight: float = 10, sd_weight: float = 10, anomaly_count_weight: float = 1, inverse: bool = False)

          Bases: object

          -compare_to_epsilon(inverse=False)
          +compare_to_epsilon(inverse=False)

          Compare smoothed error values to epsilon (error threshold) and group consecutive errors together into sequences.

          @@ -1663,7 +1671,7 @@

          Usage example
          -find_epsilon(inverse=False)
          +find_epsilon(inverse=False)

          Find the anomaly threshold that maximizes function representing tradeoff between:

          @@ -1683,7 +1691,7 @@

          Usage example
          -prune_anoms(inverse=False)
          +prune_anoms(inverse=False)

          Remove anomalies that don’t meet minimum separation from the next closest anomaly or error value

          @@ -1695,7 +1703,7 @@

          Usage example
          -score_anomalies()
          +score_anomalies()

          Calculate anomaly scores based on max distance from epsilon for each anomalous sequence.

          @@ -1704,23 +1712,23 @@

          Usage example
          -autots.tools.thresholding.consecutive_groups(iterable, ordering=<function <lambda>>)
          +autots.tools.thresholding.consecutive_groups(iterable, ordering=<function <lambda>>)

          Yield groups of consecutive items using itertools.groupby().

          From more_itertools package, see description there for details (circa mid 2022)

          -autots.tools.thresholding.nonparametric(series, method_params)
          +autots.tools.thresholding.nonparametric(series, method_params)

          -

          autots.tools.transform module

          +

          autots.tools.transform module

          Preprocessing data methods.

          -class autots.tools.transform.AlignLastDiff(rows: int = 1, quantile: float = 0.5, decay_span: float | None = None, displacement_rows: int = 1, **kwargs)
          +class autots.tools.transform.AlignLastDiff(rows: int = 1, quantile: float = 0.5, decay_span: float | None = None, displacement_rows: int = 1, **kwargs)

          Bases: EmptyTransformer

          Shift all data relative to the last value(s) of the series. This version aligns based on historic diffs rather than direct values.

          @@ -1735,7 +1743,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -1746,7 +1754,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -1757,13 +1765,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast', adjustment=None)
          +inverse_transform(df, trans_method: str = 'forecast', adjustment=None)

          Return data to original or forecast form.

          Parameters:
          @@ -1777,7 +1785,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -1790,7 +1798,7 @@

          Usage example
          -class autots.tools.transform.AlignLastValue(rows: int = 1, lag: int = 1, method: str = 'additive', strength: float = 1.0, first_value_only: bool = False, **kwargs)
          +class autots.tools.transform.AlignLastValue(rows: int = 1, lag: int = 1, method: str = 'additive', strength: float = 1.0, first_value_only: bool = False, **kwargs)

          Bases: EmptyTransformer

          Shift all data relative to the last value(s) of the series.

          @@ -1805,12 +1813,12 @@

          Usage example
          -static find_centerpoint(df, rows, lag)
          +static find_centerpoint(df, rows, lag)

          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -1821,7 +1829,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -1832,13 +1840,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast', adjustment=None)
          +inverse_transform(df, trans_method: str = 'forecast', adjustment=None)

          Return data to original or forecast form.

          Parameters:
          @@ -1852,7 +1860,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -1865,11 +1873,11 @@

          Usage example
          -class autots.tools.transform.AnomalyRemoval(output='multivariate', method='zscore', transform_dict={'transformation_params': {0: {'datepart_method': 'simple_3', 'regression_model': {'model': 'ElasticNet', 'model_params': {}}}}, 'transformations': {0: 'DatepartRegression'}}, method_params={}, fillna=None, isolated_only=False, n_jobs=1)
          +class autots.tools.transform.AnomalyRemoval(output='multivariate', method='zscore', transform_dict={'transformation_params': {0: {'datepart_method': 'simple_3', 'regression_model': {'model': 'ElasticNet', 'model_params': {}}}}, 'transformations': {0: 'DatepartRegression'}}, method_params={}, fillna=None, isolated_only=False, n_jobs=1)

          Bases: EmptyTransformer

          -fit(df)
          +fit(df)

          All will return -1 for anomalies.

          Parameters:
          @@ -1883,13 +1891,13 @@

          Usage example
          -fit_anomaly_classifier()
          +fit_anomaly_classifier()

          Fit a model to predict if a score is an anomaly.

          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -1900,19 +1908,19 @@

          Usage example
          -static get_new_params(method='random')
          +static get_new_params(method='random')

          Generate new random parameters

          -score_to_anomaly(scores)
          +score_to_anomaly(scores)

          A DecisionTree model, used as models are nonstandard (and nonparametric).

          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -1925,7 +1933,7 @@

          Usage example
          -class autots.tools.transform.BKBandpassFilter(low: int = 6, high: int = 32, K: int = 1, lanczos_factor: int = False, return_diff: int = True, **kwargs)
          +class autots.tools.transform.BKBandpassFilter(low: int = 6, high: int = 32, K: int = 1, lanczos_factor: int = False, return_diff: int = True, **kwargs)

          Bases: EmptyTransformer

More complete implementation of the Baxter-King bandpass filter, based on the successful but somewhat confusing StatsmodelsFilter transformer.

          @@ -1936,7 +1944,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -1947,7 +1955,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -1958,13 +1966,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -1975,7 +1983,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -1988,12 +1996,12 @@

          Usage example
          -class autots.tools.transform.BTCD(regression_model: dict = {'model': 'LinearRegression', 'model_params': {}}, max_lags: int = 1, name: str = 'BTCD', **kwargs)
          +class autots.tools.transform.BTCD(regression_model: dict = {'model': 'LinearRegression', 'model_params': {}}, max_lags: int = 1, name: str = 'BTCD', **kwargs)

          Bases: EmptyTransformer

          Box and Tiao Canonical Decomposition.

          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2004,7 +2012,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2015,13 +2023,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original space.

          Parameters:
          @@ -2032,7 +2040,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2045,7 +2053,7 @@

          Usage example
          -class autots.tools.transform.CenterLastValue(rows: int = 1, **kwargs)
          +class autots.tools.transform.CenterLastValue(rows: int = 1, **kwargs)

          Bases: EmptyTransformer

          Scale all data relative to the last value(s) of the series.

          @@ -2055,7 +2063,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2066,7 +2074,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2077,13 +2085,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -2094,7 +2102,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2107,7 +2115,7 @@

          Usage example
          -class autots.tools.transform.CenterSplit(center: str = 'zero', fillna='linear', suffix: str = '_mdfcrst', **kwargs)
          +class autots.tools.transform.CenterSplit(center: str = 'zero', fillna='linear', suffix: str = '_mdfcrst', **kwargs)

          Bases: EmptyTransformer

          Vaguely Croston inspired approach separating occurrence from magnitude.

          @@ -2120,7 +2128,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2131,7 +2139,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2142,13 +2150,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -2159,7 +2167,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2172,7 +2180,7 @@

          Usage example
          -class autots.tools.transform.ClipOutliers(method: str = 'clip', std_threshold: float = 4, fillna: str | None = None, **kwargs)
          +class autots.tools.transform.ClipOutliers(method: str = 'clip', std_threshold: float = 4, fillna: str | None = None, **kwargs)

          Bases: EmptyTransformer

          PURGE THE OUTLIERS.

          @@ -2186,7 +2194,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2197,7 +2205,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2208,13 +2216,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -2225,7 +2233,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2238,12 +2246,12 @@

          Usage example
          -class autots.tools.transform.Cointegration(det_order: int = -1, k_ar_diff: int = 1, name: str = 'Cointegration', **kwargs)
          +class autots.tools.transform.Cointegration(det_order: int = -1, k_ar_diff: int = 1, name: str = 'Cointegration', **kwargs)

          Bases: EmptyTransformer

          Johansen Cointegration Decomposition.

          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2254,7 +2262,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2265,13 +2273,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original space.

          Parameters:
          @@ -2282,7 +2290,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2295,7 +2303,7 @@

          Usage example
          -class autots.tools.transform.CumSumTransformer(**kwargs)
          +class autots.tools.transform.CumSumTransformer(**kwargs)

          Bases: EmptyTransformer

          Cumulative Sum of Data.

          @@ -2305,7 +2313,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Fits.

          Parameters:
          @@ -2316,7 +2324,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

Fits and Returns Magical DataFrame.

Parameters:
df (pandas.DataFrame) – input dataframe

          @@ -2324,7 +2332,7 @@

          Usage example
          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Returns data to original or forecast form

          Parameters:
          @@ -2340,7 +2348,7 @@

          Usage example
          -transform(df)
          +transform(df)

Returns changed data.

Parameters:
df (pandas.DataFrame) – input dataframe

          @@ -2350,18 +2358,18 @@

          Usage example
          -autots.tools.transform.DatepartRegression
          +autots.tools.transform.DatepartRegression

          alias of DatepartRegressionTransformer

          -class autots.tools.transform.DatepartRegressionTransformer(regression_model: dict = {'model': 'DecisionTree', 'model_params': {'max_depth': 5, 'min_samples_split': 2}}, datepart_method: str = 'expanded', polynomial_degree: int | None = None, transform_dict: dict | None = None, holiday_country: list | None = None, holiday_countries_used: bool = False, n_jobs: int = 1, **kwargs)
          +class autots.tools.transform.DatepartRegressionTransformer(regression_model: dict = {'model': 'DecisionTree', 'model_params': {'max_depth': 5, 'min_samples_split': 2}}, datepart_method: str = 'expanded', polynomial_degree: int | None = None, transform_dict: dict | None = None, holiday_country: list | None = None, holiday_countries_used: bool = False, n_jobs: int = 1, **kwargs)

          Bases: EmptyTransformer

          Remove a regression on datepart from the data. See tools.seasonal.date_part

          -fit(df, regressor=None)
          +fit(df, regressor=None)

          Fits trend for later detrending.

          Parameters:
          @@ -2372,7 +2380,7 @@

          Usage example
          -fit_transform(df, regressor=None)
          +fit_transform(df, regressor=None)

          Fit and Return Detrended DataFrame.

          Parameters:
          @@ -2383,19 +2391,19 @@

          Usage example
          -static get_new_params(method: str = 'random', holiday_countries_used=None)
          +static get_new_params(method: str = 'random', holiday_countries_used=None)

          Generate new random parameters

          -impute(df, regressor=None)
          +impute(df, regressor=None)

Fill missing values. Needs to have the same general pattern of missingness (full rows of NaN only, or scattered NaN) as was present during .fit().

          -inverse_transform(df, regressor=None)
          +inverse_transform(df, regressor=None)

          Return data to original form.

          Parameters:
          @@ -2406,7 +2414,7 @@

          Usage example
          -transform(df, regressor=None)
          +transform(df, regressor=None)

          Return detrended data.

          Parameters:
          @@ -2419,12 +2427,12 @@

          Usage example
          -class autots.tools.transform.Detrend(model: str = 'GLS', phi: float = 1.0, window: int | None = None, transform_dict=None, **kwargs)
          +class autots.tools.transform.Detrend(model: str = 'GLS', phi: float = 1.0, window: int | None = None, transform_dict=None, **kwargs)

          Bases: EmptyTransformer

          Remove a linear trend from the data.
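A hedged sketch of the Detrend round trip on synthetic trending data, using the default GLS model:

import numpy as np
import pandas as pd
from autots.tools.transform import Detrend

idx = pd.date_range("2023-01-01", periods=120, freq="D")
df = pd.DataFrame({"y": np.arange(120.0) + np.random.randn(120)}, index=idx)
detrender = Detrend(model="GLS")
detrended = detrender.fit_transform(df)
restored = detrender.inverse_transform(detrended)  # matches original only when phi == 1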

          -fit(df)
          +fit(df)

          Fits trend for later detrending.

          Parameters:
          @@ -2435,7 +2443,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fit and Return Detrended DataFrame.

          Parameters:
          @@ -2446,13 +2454,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df)
          +inverse_transform(df)

          Return data to original form. Will only match original if phi==1

          @@ -2464,7 +2472,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return detrended data.

          Parameters:
          @@ -2477,11 +2485,11 @@

          Usage example
          -class autots.tools.transform.DiffSmoother(output='multivariate', method=None, transform_dict=None, method_params=None, fillna=None, n_jobs=1, adjustment: int = 2, reverse_alignment=True, isolated_only=False)
          +class autots.tools.transform.DiffSmoother(output='multivariate', method=None, transform_dict=None, method_params=None, fillna=None, n_jobs=1, adjustment: int = 2, reverse_alignment=True, isolated_only=False)

          Bases: EmptyTransformer

          -fit(df)
          +fit(df)

Fit.

Parameters:
df (pandas.DataFrame) – input dataframe

          @@ -2489,7 +2497,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

Fits and Returns Magical DataFrame.

Parameters:
df (pandas.DataFrame) – input dataframe

          @@ -2497,13 +2505,13 @@

          Usage example
          -static get_new_params(method='fast')
          +static get_new_params(method='fast')

          Generate new random parameters

          -transform(df)
          +transform(df)

          Return differenced data.

          Parameters:
          @@ -2516,7 +2524,7 @@

          Usage example
          -class autots.tools.transform.DifferencedTransformer(**kwargs)
          +class autots.tools.transform.DifferencedTransformer(**kwargs)

          Bases: EmptyTransformer

          Difference from lag n value. inverse_transform can only be applied to the original series, or an immediately following forecast

          @@ -2527,7 +2535,7 @@

          Usage example
          -fit(df)
          +fit(df)

Fit.

Parameters:
df (pandas.DataFrame) – input dataframe

          @@ -2535,7 +2543,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

Fits and Returns Magical DataFrame.

Parameters:
df (pandas.DataFrame) – input dataframe

          @@ -2543,7 +2551,7 @@

          Usage example
          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Returns data to original or forecast form

          Parameters:
          @@ -2559,7 +2567,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return differenced data.

          Parameters:
          @@ -2572,7 +2580,7 @@

          Usage example
          -class autots.tools.transform.Discretize(discretization: str = 'center', n_bins: int = 10, nan_flag=False, **kwargs)
          +class autots.tools.transform.Discretize(discretization: str = 'center', n_bins: int = 10, nan_flag=False, **kwargs)

          Bases: EmptyTransformer

          Round/convert data to bins.

          @@ -2591,7 +2599,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2602,7 +2610,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2613,13 +2621,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -2630,7 +2638,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2643,7 +2651,7 @@

          Usage example
          -class autots.tools.transform.EWMAFilter(span: int = 7, **kwargs)
          +class autots.tools.transform.EWMAFilter(span: int = 7, **kwargs)

          Bases: EmptyTransformer

          Irreversible filters of Exponential Weighted Moving Average

          @@ -2653,7 +2661,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fit and Return Detrended DataFrame.

          Parameters:
          @@ -2664,13 +2672,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -transform(df)
          +transform(df)

          Return detrended data.

          Parameters:
          @@ -2683,12 +2691,12 @@

          Usage example
          -class autots.tools.transform.EmptyTransformer(name: str = 'EmptyTransformer', **kwargs)
          +class autots.tools.transform.EmptyTransformer(name: str = 'EmptyTransformer', **kwargs)

          Bases: object

          Base transformer returning raw data.

          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2699,7 +2707,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2710,13 +2718,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -2727,7 +2735,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2740,7 +2748,7 @@

          Usage example
          -class autots.tools.transform.FFTDecomposition(n_harmonics: float = 0.1, detrend: str = 'linear', **kwargs)
          +class autots.tools.transform.FFTDecomposition(n_harmonics: float = 0.1, detrend: str = 'linear', **kwargs)

          Bases: EmptyTransformer

          FFT decomposition, then removal, then extrapolation and addition.

          @@ -2753,7 +2761,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2764,7 +2772,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2775,13 +2783,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -2792,7 +2800,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2805,7 +2813,7 @@

          Usage example
          -class autots.tools.transform.FFTFilter(cutoff: float = 0.1, reverse: bool = False, **kwargs)
          +class autots.tools.transform.FFTFilter(cutoff: float = 0.1, reverse: bool = False, **kwargs)

          Bases: EmptyTransformer

Fit a Fourier transform and keep only the lowest frequencies below the cutoff.

          @@ -2818,7 +2826,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2829,7 +2837,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2840,13 +2848,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -2857,7 +2865,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2870,7 +2878,7 @@

          Usage example
          -class autots.tools.transform.FastICA(**kwargs)
          +class autots.tools.transform.FastICA(**kwargs)

          Bases: EmptyTransformer

sklearn FastICA for signal decomposition; column names need to be stored so output can be restored.

          @@ -2880,7 +2888,7 @@

          Usage example
          -fit(df)
          +fit(df)

          Learn behavior of data to change.

          Parameters:
          @@ -2891,7 +2899,7 @@

          Usage example
          -fit_transform(df)
          +fit_transform(df)

          Fits and Returns Magical DataFrame.

          Parameters:
          @@ -2902,13 +2910,13 @@

          Usage example
          -static get_new_params(method: str = 'random')
          +static get_new_params(method: str = 'random')

          Generate new random parameters

          -inverse_transform(df, trans_method: str = 'forecast')
          +inverse_transform(df, trans_method: str = 'forecast')

          Return data to original or forecast form.

          Parameters:
          @@ -2919,7 +2927,7 @@

          Usage example
          -transform(df)
          +transform(df)

          Return changed data.

          Parameters:
          @@ -2932,7 +2940,7 @@

          Usage example
          -class autots.tools.transform.GeneralTransformer(fillna: str | None = None, transformations: dict = {}, transformation_params: dict = {}, grouping: str | None = None, reconciliation: str | None = None, grouping_ids=None, random_seed: int = 2020, n_jobs: int = 1, holiday_country: list | None = None, verbose: int = 0)
          +class autots.tools.transform.GeneralTransformer(fillna: str | None = None, transformations: dict = {}, transformation_params: dict = {}, grouping: str | None = None, reconciliation: str | None = None, grouping_ids=None, random_seed: int = 2020, n_jobs: int = 1, holiday_country: list | None = None, verbose: int = 0)

          Bases: object

          Remove fillNA and then mathematical transformations.

          Expects a chronologically sorted pandas.DataFrame with a DatetimeIndex, only numeric data, and a ‘wide’ (one column per series) shape.

          @@ -3037,7 +3045,7 @@

          Usage example
          -fill_na(df, window: int = 10)
          +fill_na(df, window: int = 10)
          Parameters:

          -

          autots.tools.window_functions module

          +

          autots.tools.window_functions module

          -autots.tools.window_functions.chunk_reshape(arr, window_size=10, chunk_size=100, sample_fraction=None, random_seed=7734, dtype=<class 'numpy.float32'>)
          +autots.tools.window_functions.chunk_reshape(arr, window_size=10, chunk_size=100, sample_fraction=None, random_seed=7734, dtype=<class 'numpy.float32'>)

          Shifts from (n_records, n_series) to (windows, window_size). Multivariate. More memory efficient, if not quite as fast as x.reshape(-1, x.shape[-1]) for 3D numpy array.

          @@ -4477,18 +4485,18 @@

          Usage example
          -autots.tools.window_functions.last_window(df, window_size: int = 10, input_dim: str = 'univariate', normalize_window: bool = False)
          +autots.tools.window_functions.last_window(df, window_size: int = 10, input_dim: str = 'univariate', normalize_window: bool = False)

          Pandas based function to provide the last window of window_maker.

          -autots.tools.window_functions.np_2d_arange(start=0, stop=3, step=1, num_columns=4)
          +autots.tools.window_functions.np_2d_arange(start=0, stop=3, step=1, num_columns=4)
          -autots.tools.window_functions.retrieve_closest_indices(df, num_indices, forecast_length, window_size: int = 10, distance_metric: str = 'braycurtis', stride_size: int = 1, start_index: int | None = None, include_differenced: bool = False, include_last: bool = True, verbose: int = 0)
          +autots.tools.window_functions.retrieve_closest_indices(df, num_indices, forecast_length, window_size: int = 10, distance_metric: str = 'braycurtis', stride_size: int = 1, start_index: int | None = None, include_differenced: bool = False, include_last: bool = True, verbose: int = 0)

Find the next indices closest to the final segment of forecast_length

          Parameters:
          @@ -4508,7 +4516,7 @@

          Usage example
          -autots.tools.window_functions.rolling_window_view(array, window_shape=(0,), axis=None, writeable=False)
          +autots.tools.window_functions.rolling_window_view(array, window_shape=(0,), axis=None, writeable=False)

          Create a view of array which for every point gives the n-dimensional neighbourhood of size window. New dimensions are added at the end of array or after the corresponding original dimension.

          @@ -4534,13 +4542,13 @@

          Usage example
          -autots.tools.window_functions.sliding_window_view(array, window_shape=(0,), axis=None, writeable=False, **kwargs)
          +autots.tools.window_functions.sliding_window_view(array, window_shape=(0,), axis=None, writeable=False, **kwargs)

          Toggles between numpy and internal version depending on np.__version__.

          -autots.tools.window_functions.window_id_maker(window_size: int, max_steps: int, start_index: int = 0, stride_size: int = 1, skip_size: int = 1)
          +autots.tools.window_functions.window_id_maker(window_size: int, max_steps: int, start_index: int = 0, stride_size: int = 1, skip_size: int = 1)

          Create indices for array of multiple window slices of data

          Parameters:
          @@ -4560,25 +4568,25 @@

          Usage example
          -autots.tools.window_functions.window_lin_reg(x, y, w)
          +autots.tools.window_functions.window_lin_reg(x, y, w)

          From https://stackoverflow.com/questions/70296498/efficient-computation-of-moving-linear-regression-with-numpy-numba/70304475#70304475

          -autots.tools.window_functions.window_lin_reg_mean(x, y, w)
          +autots.tools.window_functions.window_lin_reg_mean(x, y, w)

          From https://stackoverflow.com/questions/70296498/efficient-computation-of-moving-linear-regression-with-numpy-numba/70304475#70304475

          -autots.tools.window_functions.window_lin_reg_mean_no_nan(x, y, w)
          +autots.tools.window_functions.window_lin_reg_mean_no_nan(x, y, w)

          From https://stackoverflow.com/questions/70296498/efficient-computation-of-moving-linear-regression-with-numpy-numba/70304475#70304475

          -autots.tools.window_functions.window_maker(df, window_size: int = 10, input_dim: str = 'univariate', normalize_window: bool = False, shuffle: bool = False, output_dim: str = 'forecast_length', forecast_length: int = 1, max_windows: int = 5000, regression_type: str | None = None, future_regressor=None, random_seed: int = 1234)
          +autots.tools.window_functions.window_maker(df, window_size: int = 10, input_dim: str = 'univariate', normalize_window: bool = False, shuffle: bool = False, output_dim: str = 'forecast_length', forecast_length: int = 1, max_windows: int = 5000, regression_type: str | None = None, future_regressor=None, random_seed: int = 1234)

          Convert a dataset into slices with history and y forecast.

          Parameters:
          @@ -4603,7 +4611,7 @@

          Usage example
          -autots.tools.window_functions.window_maker_2(array, window_size: int, max_steps: int | None = None, start_index: int = 0, stride_size: int = 1, skip_size: int = 1)
          +autots.tools.window_functions.window_maker_2(array, window_size: int, max_steps: int | None = None, start_index: int = 0, stride_size: int = 1, skip_size: int = 1)

          Create array of multiple window slices of data Note that this returns a different orientation than window_maker_3

          @@ -4625,7 +4633,7 @@

          Usage example
          -autots.tools.window_functions.window_maker_3(array, window_size: int, **kwargs)
          +autots.tools.window_functions.window_maker_3(array, window_size: int, **kwargs)

          stride tricks version of window. About 40% faster than window_maker_2 Note that this returns a different orientation than window_maker_2

          @@ -4644,22 +4652,22 @@

          Usage example
          -autots.tools.window_functions.window_sum_mean(x, w, axis=0)
          +autots.tools.window_functions.window_sum_mean(x, w, axis=0)

          -autots.tools.window_functions.window_sum_mean_nan_tail(x, w, axis=0)
          +autots.tools.window_functions.window_sum_mean_nan_tail(x, w, axis=0)
          -autots.tools.window_functions.window_sum_nan_mean(x, w, axis=0)
          +autots.tools.window_functions.window_sum_nan_mean(x, w, axis=0)

          -

          Module contents

          +

          Module contents

          Basic utilities.

          @@ -4753,21 +4761,5 @@

\ No newline at end of file
diff --git a/docs/build/html/source/intro.html b/docs/build/html/source/intro.html
index 477637e4..18861acc 100644
--- a/docs/build/html/source/intro.html
+++ b/docs/build/html/source/intro.html
@@ -1,17 +1,25 @@
 Intro — AutoTS 0.6.10 documentation
@@ -33,10 +41,10 @@
          -

          Intro

          +

          Intro

          -

          AutoTS

          +

          AutoTS

          AutoTS is a time series package for Python designed for rapidly deploying high-accuracy forecasts at scale.

In 2023, AutoTS won the M6 forecasting competition, delivering the highest-performing investment decisions across 12 months of stock market forecasting.

          @@ -51,7 +59,7 @@

Horizontal and mosaic style ensembles are the flagship ensembling types, allowing each series to receive the most accurate possible models while still maintaining scalability.

          A combination of metrics and cross-validation options, the ability to apply subsets and weighting, regressor generation tools, simulation forecasting mode, event risk forecasting, live datasets, template import and export, plotting, and a collection of data shaping parameters round out the available feature set.

          -

          Table of Contents

          +

          Table of Contents

          -

          Tips for Speed and Large Data:

          +

          Tips for Speed and Large Data:

          • Use appropriate model lists, especially the predefined lists:

              @@ -172,7 +180,7 @@

              Tips for Speed and Large Data: -

              How to Contribute:

              +

              How to Contribute:

              • Give feedback on where you find the documentation confusing

              • Use AutoTS and…

                @@ -278,21 +286,5 @@

\ No newline at end of file
diff --git a/docs/build/html/source/modules.html b/docs/build/html/source/modules.html
index bda841f4..02edbd77 100644
--- a/docs/build/html/source/modules.html
+++ b/docs/build/html/source/modules.html
@@ -1,17 +1,25 @@
 autots — AutoTS 0.6.10 documentation
@@ -33,7 +41,7 @@
                -

                autots

                +

                autots

                • autots package
                    @@ -367,21 +375,5 @@

\ No newline at end of file
diff --git a/docs/build/html/source/tutorial.html b/docs/build/html/source/tutorial.html
index 01ca8546..cef15a95 100644
--- a/docs/build/html/source/tutorial.html
+++ b/docs/build/html/source/tutorial.html
@@ -1,17 +1,25 @@
 Tutorial — AutoTS 0.6.10 documentation
@@ -33,12 +41,12 @@
                    -

                    Tutorial

                    +

                    Tutorial

                    -

                    Extended Tutorial

                    +

                    Extended Tutorial

                    -

                    Table of Contents

                    +

                    Table of Contents

                    -

                    A simple example

                    +

                    A simple example

                    # also: _hourly, _daily, _weekly, or _yearly
                     from autots.datasets import load_monthly
                     
                    @@ -78,14 +86,14 @@ 

                    A simple example -

                    Import of data

                    +

                    Import of data

                    There are two shapes/styles of pandas.DataFrame which are accepted. The first is long data, like that out of an aggregated sales-transaction table containing three columns identified to .fit() as date_col {pd.Datetime}, value_col {the numeric or categorical data of interest}, and id_col {id string, if multiple series are provided}. Alternatively, the data may be in a wide format where the index is a pandas.DatetimeIndex, and each column is a distinct data series.

                    If horizontal style ensembles are used, series_ids/column names will be coerced to strings.
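A minimal sketch of fitting long data (the column names and the tiny synthetic frame are made up for illustration; only the date_col/value_col/id_col arguments described above are assumed):

import pandas as pd
from autots import AutoTS

# long data: one row per (date, series) observation
dates = pd.date_range("2023-01-01", periods=90, freq="D")
long_df = pd.DataFrame({
    "datetime": list(dates) * 2,
    "series_id": ["store_a"] * 90 + ["store_b"] * 90,
    "value": range(180),
})

model = AutoTS(forecast_length=14, frequency="infer",
               model_list="superfast", max_generations=1, num_validations=1)
model = model.fit(
    long_df,
    date_col="datetime",
    value_col="value",
    id_col="series_id",
)
print(model.predict().forecast.head())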

                    -

                    You can tailor the process in a few ways…

                    +

                    You can tailor the process in a few ways…

The simplest way to improve accuracy is to increase the number of generations, max_generations=15. Each generation tries new models, taking additional time but improving the accuracy. The nature of genetic algorithms, however, means there is no consistent improvement with each generation, and a large number of generations will often only result in minimal performance gains.

Another approach that may improve accuracy is to set ensemble='all'. The ensemble parameter expects a single string, and can for example be 'simple,dist' or 'horizontal'. As this means storing more details of every model, this takes more time and memory.

A handy parameter for when your data is expected to always be 0 or greater (such as unit sales) is to set no_negatives=True. This forces forecasts to be greater than or equal to 0.
@@ -102,7 +110,7 @@
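Put together, those options might look like this (a sketch; the forecast length is a placeholder):

from autots import AutoTS

model = AutoTS(
    forecast_length=21,
    frequency="infer",
    max_generations=15,   # more generations = longer search, usually better accuracy
    ensemble="all",       # or e.g. "simple,dist" or "horizontal"
    no_negatives=True,    # clip forecasts at zero, e.g. for unit sales
)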

                    You can tailor the process in a few ways… -

                    What to Worry About

                    +

                    What to Worry About

                    There are some basic things to beware of that can commonly lead to poor results:

                    1. Bad data (sudden drops or missing values) in the most recent data is the single most common cause of bad forecasts here. As many models use the most recent data as a jumping off point, error in the most recent data points can have an oversized effect on forecasts.

                    2. @@ -113,7 +121,7 @@

                      What to Worry About -

                      Validation and Cross Validation

                      +

                      Validation and Cross Validation

                      Cross validation helps assure that the optimal model is stable over the dynamics of a time series. Cross validation can be tricky in time series data due to the necessity of preventing data leakage from future data points.

First, all models are initially validated on the most recent piece of data. This is done because the most recent data will generally most closely resemble the forecast future.
@@ -138,7 +146,7 @@

…even cross validation and use one of the other validation methods.
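As a sketch of how the validation settings are passed (the values here are arbitrary):

from autots import AutoTS

model = AutoTS(
    forecast_length=14,
    num_validations=2,              # validations run in addition to the initial evaluation
    validation_method="backwards",  # "even" and "similarity" are among the other options
)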

                    -

                    Another Example:

                    +

                    Another Example:

Here, we are forecasting the traffic along Interstate 94 between Minneapolis and St Paul in Minnesota. This is a great dataset for demonstrating a recommended way of including external variables: include them as additional time series with a lower weighting. Here weather data is included - winter weather and road construction are the major influences on traffic - and it will be forecast alongside the traffic volume. These additional series carry information to models such as RollingRegression, VARMAX, and VECM.

                    Also seen in use here is the model_list.
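A sketch of how that weighting and model list might be passed (the series names and random data below are stand-ins for the real traffic and weather series):

import numpy as np
import pandas as pd
from autots import AutoTS

idx = pd.date_range("2023-01-01", periods=120, freq="D")
wide_df = pd.DataFrame(
    {
        "traffic_volume": np.random.poisson(4000, 120),
        "temperature": np.random.normal(10, 8, 120),
        "precipitation": np.random.exponential(1.0, 120),
    },
    index=idx,
)

# weight the series of interest far above the supporting weather series
weights = {"traffic_volume": 20, "temperature": 1, "precipitation": 1}

model = AutoTS(forecast_length=14, frequency="infer",
               model_list="superfast", max_generations=1)
model = model.fit(wide_df, weights=weights)
print(model.predict().forecast["traffic_volume"].head())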

                    @@ -189,14 +197,14 @@

                    Another Example: -

                    Model Lists

                    +

                    Model Lists

                    By default, most available models are tried. For a more limited subset of models, a custom list can be passed in, or more simply, a string, one of 'probabilistic', 'multivariate', 'fast', 'superfast', or 'all'.

                    A table of all available models is below.

                    On large multivariate series, DynamicFactor and VARMAX can be impractically slow.
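Either form can be passed as model_list (a sketch; the custom list is just a handful of model names from the table below):

from autots import AutoTS

# a predefined list, by name
model = AutoTS(forecast_length=14, model_list="superfast")

# or an explicit custom list
model = AutoTS(
    forecast_length=14,
    model_list=["ETS", "ARIMA", "GLS", "SeasonalNaive", "AverageValueNaive"],
)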

                    -

                    Deployment and Template Import/Export

                    +

                    Deployment and Template Import/Export

                    Take a look at the production_example.py

Many models can be reverse engineered with (relative) simplicity outside of AutoTS by placing the chosen parameters into Statsmodels or another underlying package. Following the model training, the top models can be exported to a .csv or .json file; then on the next run only those models will be tried.
@@ -216,7 +224,7 @@
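The export/import cycle looks roughly like this (a sketch assuming an already fitted model object; the filename and argument values are illustrative):

# after model.fit(...) has completed
model.export_template(
    "production_template.csv",
    models="best",          # keep only the top-performing models
    n=15,
    max_per_model_class=3,
)

# on a later run, start from the saved template instead of a fresh search
from autots import AutoTS
model = AutoTS(forecast_length=14, max_generations=0)
model = model.import_template("production_template.csv", method="only")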

                    Deployment and Template Import/Export -

                    Running Just One Model

                    +

                    Running Just One Model

While the above style of deployment, with evolving templates and cross validation on every run, is the recommended approach, it is also possible to run a single fixed model.

Coming from the deeper internals of AutoTS, this function can only take wide-style data (there is a long_to_wide function available). Data must already be fairly clean: all numeric (or np.nan).
@@ -256,7 +264,7 @@
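A sketch of running one fixed model through model_forecast (the model name, parameters, and synthetic data below are simple placeholders; the data must be wide, numeric, and regular-frequency as described above):

import numpy as np
import pandas as pd
from autots import model_forecast

idx = pd.date_range("2023-01-01", periods=100, freq="D")
df = pd.DataFrame({"sales": np.random.gamma(2.0, 10.0, 100)}, index=idx)

prediction = model_forecast(
    model_name="AverageValueNaive",
    model_param_dict={"method": "Mean"},
    model_transform_dict={
        "fillna": "ffill",
        "transformations": {"0": "DifferencedTransformer"},
        "transformation_params": {"0": {}},
    },
    df_train=df,
    forecast_length=14,
    frequency="infer",
    prediction_interval=0.9,
)
print(prediction.forecast.head())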

                    Running Just One Model -

                    Metrics

                    +

                    Metrics

There are a number of available metrics, all combined together into a ‘Score’ which evaluates the best model. The ‘Score’ that compares models can easily be adjusted by passing in a custom metric weighting dictionary. Higher weighting increases the importance of that metric, while 0 removes that metric from consideration. Weights must be numbers greater than or equal to 0. This weighting is not to be confused with series weighting, which affects how equally any one metric is applied to all the series.
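For example (a sketch; the particular weights are arbitrary and the keys shown are only a subset of the available *_weighting options):

from autots import AutoTS

metric_weighting = {
    "smape_weighting": 5,
    "mae_weighting": 2,
    "rmse_weighting": 2,
    "spl_weighting": 3,         # weight of prediction interval accuracy
    "runtime_weighting": 0.05,  # small nudge toward faster models
}

model = AutoTS(forecast_length=14, metric_weighting=metric_weighting)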

                    @@ -340,7 +348,7 @@

                    Metrics

                -

Hierarchical and Grouped Forecasts

+

Hierarchical and Grouped Forecasts

Hierarchical and grouped forecasting refer to multivariate forecast situations where the individual series are aggregated. A common example of this is product sales forecasting, where individual products are forecast and then also aggregated for a view of demand across all products. Aggregation combines the errors of the individual series, however, potentially resulting in major over- or under-estimation of the overall demand.
@@ -354,7 +362,7 @@

…MLE or iMLE can be used if either underestimation or overestimation, respectively, has been identified as a problem.

                -

                Ensembles

                +

                Ensembles

                Ensemble methods are specified by the ensemble= parameter. It can be either a list or a comma-separated string.

simple style ensembles (labeled ‘BestN’ in templates) are the most recognizable form of ensemble and are the simple average of the specified models, here usually 3 or 5 models. distance style ensembles are two models spliced together: the first model forecasts the first portion of the forecast period, and the second model the remainder. There is no overlap of the models.
@@ -393,14 +401,14 @@
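As a sketch, either form is accepted (the ensemble names shown are among those discussed in this section):

from autots import AutoTS

# comma-separated string...
model = AutoTS(forecast_length=14, ensemble="simple,dist")

# ...or a list
model = AutoTS(forecast_length=14, ensemble=["simple", "horizontal-max"])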

                Ensembles

                -

                Installation and Dependency Versioning

                +

                Installation and Dependency Versioning

                pip install autots

                Some optional packages require installing Visual Studio C compilers if on Windows.

                On Linux systems, apt-get/yum (rather than pip) installs of numpy/pandas may install faster/more stable compilations. Linux may also require sudo apt install build-essential for some packages.

                You can check if your system is using mkl, OpenBLAS, or none with numpy.show_config(). Generally recommended that you double-check this after installing new packages to make sure you haven’t broken the LINPACK connection.

                -

                Requirements:

                +

                Requirements:

                Python >= 3.6
                 numpy
                     >= 1.20 (Sliding Window in Motif and WindowRegression)
                @@ -430,7 +438,7 @@ 

…pip install autots['additional']

                -

                Optional Packages

                +

                Optional Packages

                requests
                 psutil
                 holidays
                @@ -452,7 +460,7 @@ 

                Optional Packages -

                Safest bet for installation:

                +

                Safest bet for installation:

                venv, Anaconda, or Miniforge with some more tips here.

                # create a conda or venv environment
                 conda create -n timeseries python=3.9
                @@ -500,7 +508,7 @@ 

…mamba and conda commands are generally interchangeable. conda env remove -n env_name

                -

Intel conda channel installation (sometimes faster, also more prone to bugs)

+

Intel conda channel installation (sometimes faster, also more prone to bugs)

                https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html

                # create the environment
                 mamba create -n aikit37 python=3.7 intel-aikit-modin pandas statsmodels prophet numexpr bottleneck tqdm holidays lightgbm matplotlib requests tensorflow dpctl -c intel
                @@ -521,7 +529,7 @@ 

Intel conda channel installation (sometimes faster, also more prone to bugs)

                -

                Speed Benchmark

                +

                Speed Benchmark

                from autots.evaluator.benchmark import Benchmark
                 bench = Benchmark()
                 bench.run(n_jobs="auto", times=3)
                @@ -531,9 +539,9 @@ 

                Speed Benchmark -

                Caveats and Advice

                +

                Caveats and Advice

                -

                Mysterious crashes

                +

                Mysterious crashes

                Usually mysterious crashes or hangs (those without clear error messages) occur when the CPU or Memory is overloaded. UnivariateRegression is usually the most prone to these issues, removing it from the model_list may help (by default it is not included in most lists for this reason).

Try setting n_jobs=1 or another low number, which should reduce the load. Also test the ‘superfast’ naive models, which generally have low resource consumption.
@@ -542,18 +550,18 @@

                Mysterious crashes -

                Series IDs really need to be unique (or column names need to be all unique in wide data)

                +

                Series IDs really need to be unique (or column names need to be all unique in wide data)

                Pretty much as it says, if this isn’t true, some odd things may happen that shouldn’t.

                Also if using the Prophet model, you can’t have any series named ‘ds’

                -

                Short Training History

                +

                Short Training History

                How much data is ‘too little’ depends on the seasonality and volatility of the data. Minimal training data most greatly impacts the ability to do proper cross validation. Set num_validations=0 in such cases. Since ensembles are based on the test dataset, it would also be wise to set ensemble=None if num_validations=0.

                -

                Adding regressors and other information

                +

                Adding regressors and other information

future_regressor, to make it clear this is data that will be known with high certainty about the future. Such data about the future is rare; one example might be the number of stores that will be (planned to be) open on each given day in the future when forecasting sales. Generally, using regressors is very helpful for separating ‘organic’ and ‘inorganic’ patterns.
@@ -611,7 +619,7 @@
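A sketch of passing a future_regressor through fit and predict (the regressor here, planned store openings, is a made-up illustration; it must cover the training dates and, separately, the dates being forecast):

import numpy as np
import pandas as pd
from autots import AutoTS

forecast_length = 14
idx = pd.date_range("2023-01-01", periods=120, freq="D")

df = pd.DataFrame({"sales": np.random.gamma(2.0, 50.0, 120)}, index=idx)
# regressor values aligned with the training dates
regr_train = pd.DataFrame({"stores_open": 100 + (np.arange(120) // 30)}, index=idx)
# and, separately, regressor values for the future dates being forecast
future_idx = pd.date_range(idx[-1] + pd.Timedelta("1D"),
                           periods=forecast_length, freq="D")
regr_future = pd.DataFrame({"stores_open": 104}, index=future_idx)

model = AutoTS(forecast_length=forecast_length, frequency="infer",
               model_list="fast", max_generations=1)
model = model.fit(df, future_regressor=regr_train)
prediction = model.predict(future_regressor=regr_future)
print(prediction.forecast.head())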

                Adding regressors and other information -

                Simulation Forecasting

                +

                Simulation Forecasting

Simulation forecasting allows for experimenting with different potential future scenarios to examine their effects on the forecast. This is done here by passing known values of a future_regressor to model .fit and then running .predict with multiple variations on the future values of that regressor. By default in AutoTS, when a future_regressor is supplied, models that can utilize it are tried both with and without the regressor.
@@ -661,7 +669,7 @@
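Continuing the regressor sketch from the previous section, scenario comparison is simply multiple .predict calls with different future regressor values (an illustration, not the only way to structure it):

# two hypothetical futures for the same fitted model
scenario_high = regr_future.assign(stores_open=120)
scenario_low = regr_future.assign(stores_open=90)

forecast_high = model.predict(future_regressor=scenario_high).forecast
forecast_low = model.predict(future_regressor=scenario_low).forecast

# compare the scenarios; the gap is zero if the chosen model ignores the regressor
print((forecast_high - forecast_low).mean())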

…['FBProphet', 'GLM', 'ARDL', 'DatepartRegression'].

                -

                Event Risk Forecasting and Anomaly Detection

                +

                Event Risk Forecasting and Anomaly Detection

                Anomaly (or Outlier) Detection is historic and Event Risk Forecasting is forward looking.

Event Risk Forecasting generates a risk score (0 to 1, but usually close to 0) for a future event exceeding user-specified upper or lower bounds.
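A hedged sketch of the forward-looking piece (the constructor arguments and the two-DataFrame return of .predict are assumed from the EventRiskForecast documentation; limits given as floats are treated here as quantiles of history, and the data is synthetic):

import numpy as np
import pandas as pd
from autots import EventRiskForecast

idx = pd.date_range("2023-01-01", periods=150, freq="D")
df = pd.DataFrame({"demand": np.random.gamma(2.0, 25.0, 150)}, index=idx)

model = EventRiskForecast(
    df,
    forecast_length=14,
    upper_limit=0.95,   # quantile of history used as the upper bound
    lower_limit=0.05,
)
model.fit()
risk_upper, risk_lower = model.predict()
print(risk_upper.head())  # per-date, per-series probability of exceeding the upper bound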

                @@ -773,7 +781,7 @@

                Event Risk Forecasting and Anomaly Detection -

                A Hack for Passing in Parameters (that aren’t otherwise available)

                +

                A Hack for Passing in Parameters (that aren’t otherwise available)

There are a lot of parameters available here, but not all of the options available for a particular parameter are always actually used in generated templates. Usually, very slow options are left out. If you are familiar with a model, you can try manually adding those parameter values in for a run in this way… To clarify, you can’t usually add in entirely new parameters this way, but you can often pass in new choices for existing parameter values.
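A hedged sketch of that approach: export a template, edit the ModelParameters JSON for one row, and re-import it (the Model/ModelParameters/TransformationParameters columns are those found in exported templates; the specific parameter value edited below is purely hypothetical):

import json
import pandas as pd

template = pd.read_csv("production_template.csv")

# pick a row and swap in a parameter value not normally generated
params = json.loads(template.loc[0, "ModelParameters"])
params["some_parameter"] = "a_slower_but_interesting_option"  # hypothetical value
template.loc[0, "ModelParameters"] = json.dumps(params)

template.to_csv("edited_template.csv", index=False)
# then: model = model.import_template("edited_template.csv", method="only")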

                @@ -785,19 +793,19 @@

                A Hack for Passing in Parameters (that aren’t otherwise available)

                -

                Categorical Data

                +

                Categorical Data

Categorical data is handled, but it is handled crudely. For example, optimization metrics do not currently include any categorical accuracy metrics. For categorical data that has a meaningful order (i.e. ‘low’, ‘medium’, ‘high’) it is best for the user to encode that data before passing it in, thus properly capturing the relative sequence (i.e. ‘low’=1, ‘medium’=2, ‘high’=3).
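For ordered categories, a simple mapping before .fit is enough (a sketch):

import pandas as pd

severity = pd.Series(["low", "medium", "high", "medium"])
encoded = severity.map({"low": 1, "medium": 2, "high": 3})
print(encoded.tolist())  # [1, 2, 3, 2]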

                -

                Custom and Unusual Frequencies

                +

                Custom and Unusual Frequencies

                Data must be coercible to a regular frequency. It is recommended the frequency be specified as a datetime offset as per pandas documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects Some models will support a more limited range of frequencies.

                -

                Using the Transformers independently

                +

                Using the Transformers independently

                The transformers expect data only in the wide shape with ascending date. The simplest way to access them is through the GeneralTransformer. This takes dictionaries containing strings of the desired transformers and parameters.
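A minimal sketch of standalone use (the transformer choices are ordinary examples; the dictionaries are keyed by execution order as strings, and trans_method='original' is assumed to reverse the transform over the training period):

import numpy as np
import pandas as pd
from autots.tools.transform import GeneralTransformer

idx = pd.date_range("2023-01-01", periods=60, freq="D")
df = pd.DataFrame({"a": np.arange(60) + np.random.normal(0, 1, 60)}, index=idx)

transformer = GeneralTransformer(
    fillna="ffill",
    transformations={"0": "MinMaxScaler", "1": "DifferencedTransformer"},
    transformation_params={"0": {}, "1": {}},
)
transformed = transformer.fit_transform(df)
restored = transformer.inverse_transform(transformed, trans_method="original")
print(transformed.head())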

                @@ -824,7 +832,7 @@

                Using the Transformers independently -

                Note on ~Regression Models

                +

                Note on ~Regression Models

                The Regression models are WindowRegression, RollingRegression, UnivariateRegression, MultivariateRegression, and DatepartRegression. They are all different ways of reshaping the time series into X and Y for traditional ML and Deep Learning approaches. All draw from the same potential pool of models, mostly sklearn and tensorflow models.

                @@ -839,7 +847,7 @@

                Note on ~Regression Models -

                Models

                +

                Models

                @@ -1399,21 +1407,5 @@

\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index 479d235a..c02c7f4c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -34,7 +34,7 @@
 # ones.
 # Add napoleon to the extensions list
 # 'recommonmark',
-extensions = ['sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'm2r2', 'sphinx.ext.githubpages',]
+extensions = ['sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'm2r2', 'sphinx.ext.githubpages', "sphinxcontrib.googleanalytics"]
 
 source_suffix = ['.rst', '.md']
@@ -64,7 +64,6 @@
 html_theme_options = {
     "show_powered_by": False,
-    'analytics_id': 'G-P2KLF8302E',
     'logo': 'autots_logo.png',
     'description': 'Automated Forecasting',
     "github_user": "winedarksea",
@@ -73,5 +72,13 @@
     "show_related": False,
     "note_bg": "#FFF59C",
 }
+
+# pip install sphinxcontrib-googleanalytics
+googleanalytics_id = "G-P2KLF8302E"
+# this will give a warning but works at least with pydata theme
+html_theme_options["analytics"] = {
+    "google_analytics_id": googleanalytics_id,
+}
+
 # Output file base name for HTML help builder.
 htmlhelp_basename = "autotsdoc"
\ No newline at end of file
