diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 6846cafdfd3..b9422be0685 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.8.0a1 +current_version = 1.8.0b3 parse = (?P<major>[\d]+) # major version number \.(?P<minor>[\d]+) # minor version number \.(?P<patch>[\d]+) # patch version number @@ -35,5 +35,3 @@ first_value = 1 [bumpversion:file:core/setup.py] [bumpversion:file:core/dbt/version.py] - -[bumpversion:file:docker/Dockerfile] diff --git a/.changes/1.8.0-b1.md b/.changes/1.8.0-b1.md new file mode 100644 index 00000000000..069ae318427 --- /dev/null +++ b/.changes/1.8.0-b1.md @@ -0,0 +1,187 @@ +## dbt-core 1.8.0-b1 - February 28, 2024 + +### Breaking Changes + +- Remove adapter.get_compiler interface ([#9148](https://github.com/dbt-labs/dbt-core/issues/9148)) +- Move AdapterLogger to adapters folder ([#9151](https://github.com/dbt-labs/dbt-core/issues/9151)) +- Remove --dry-run flag from 'dbt deps --add-package', in favor of just 'dbt deps --lock' ([#9100](https://github.com/dbt-labs/dbt-core/issues/9100)) +- move event manager setup back to core, remove ref to global EVENT_MANAGER and clean up event manager functions ([#9150](https://github.com/dbt-labs/dbt-core/issues/9150)) +- Remove dbt-tests-adapter and dbt-postgres packages from dbt-core ([#9455](https://github.com/dbt-labs/dbt-core/issues/9455)) + +### Features + +- Initial implementation of unit testing ([#8287](https://github.com/dbt-labs/dbt-core/issues/8287)) +- Unit test manifest artifacts and selection ([#8295](https://github.com/dbt-labs/dbt-core/issues/8295)) +- Support config with tags & meta for unit tests ([#8294](https://github.com/dbt-labs/dbt-core/issues/8294)) +- Allow adapters to include package logs in dbt standard logging ([#7859](https://github.com/dbt-labs/dbt-core/issues/7859)) +- Enable inline csv fixtures in unit tests ([#8626](https://github.com/dbt-labs/dbt-core/issues/8626)) +- Add drop_schema_named macro ([#8025](https://github.com/dbt-labs/dbt-core/issues/8025)) +- migrate utils to common and adapters folders ([#8924](https://github.com/dbt-labs/dbt-core/issues/8924)) +- Move Agate helper client into common ([#8926](https://github.com/dbt-labs/dbt-core/issues/8926)) +- remove usage of dbt.config.PartialProject from dbt/adapters ([#8928](https://github.com/dbt-labs/dbt-core/issues/8928)) +- Add exports to SavedQuery spec ([#8892](https://github.com/dbt-labs/dbt-core/issues/8892)) +- Support unit testing incremental models ([#8422](https://github.com/dbt-labs/dbt-core/issues/8422)) +- Add support of csv file fixtures to unit testing ([#8290](https://github.com/dbt-labs/dbt-core/issues/8290)) +- Remove legacy logger ([#8027](https://github.com/dbt-labs/dbt-core/issues/8027)) +- Unit tests support --defer and state:modified ([#8517](https://github.com/dbt-labs/dbt-core/issues/8517)) +- Support setting export configs hierarchically via saved query and project configs ([#8956](https://github.com/dbt-labs/dbt-core/issues/8956)) +- Support source inputs in unit tests ([#8507](https://github.com/dbt-labs/dbt-core/issues/8507)) +- Use daff to render diff displayed in stdout when unit test fails ([#8558](https://github.com/dbt-labs/dbt-core/issues/8558)) +- Move unit testing to test command ([#8979](https://github.com/dbt-labs/dbt-core/issues/8979)) +- Support --empty flag for schema-only dry runs ([#8971](https://github.com/dbt-labs/dbt-core/issues/8971)) +- Support unit tests in non-root packages ([#8285](https://github.com/dbt-labs/dbt-core/issues/8285)) +- Convert the `tests` config to `data_tests` in both dbt_project.yml and schema files ([#8699](https://github.com/dbt-labs/dbt-core/issues/8699))
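A minimal sketch of the `tests` → `data_tests` rename above, in a schema file; the model and column names are invented for illustration, and the same rename applies under `data_tests:` in dbt_project.yml:

```yaml
# models/schema.yml — the former `tests:` property is now spelled `data_tests:`
models:
  - name: orders            # hypothetical model
    columns:
      - name: order_id
        data_tests:         # previously `tests:`
          - unique
          - not_null
```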
+- Make fixture files full-fledged parts of the manifest and enable partial parsing ([#9067](https://github.com/dbt-labs/dbt-core/issues/9067)) +- Adds support for parsing conversion metric related properties for the semantic layer. ([#9203](https://github.com/dbt-labs/dbt-core/issues/9203)) +- In build command run unit tests before models ([#9128](https://github.com/dbt-labs/dbt-core/issues/9128)) +- Move flags from UserConfig in profiles.yml to flags in dbt_project.yml (sketched below) ([#9183](https://github.com/dbt-labs/dbt-core/issues/9183)) +- Added hook support for `dbt source freshness` ([#5609](https://github.com/dbt-labs/dbt-core/issues/5609)) +- Align with order of unit test output when `actual` differs from `expected` ([#9370](https://github.com/dbt-labs/dbt-core/issues/9370)) +- Added support for external nodes in unit test nodes ([#8944](https://github.com/dbt-labs/dbt-core/issues/8944)) +- Enable unit testing versioned models ([#9344](https://github.com/dbt-labs/dbt-core/issues/9344)) +- Enable list command for unit tests ([#8508](https://github.com/dbt-labs/dbt-core/issues/8508)) +- Integration Test Optimizations ([#9498](https://github.com/dbt-labs/dbt-core/issues/9498)) +- Accelerate integration tests with caching. ([#9498](https://github.com/dbt-labs/dbt-core/issues/9498)) +- Cache environment variables ([#9489](https://github.com/dbt-labs/dbt-core/issues/9489)) +- Support meta at the config level for Metric nodes ([#9441](https://github.com/dbt-labs/dbt-core/issues/9441)) +- Add cache to SavedQuery config ([#9540](https://github.com/dbt-labs/dbt-core/issues/9540)) + +### Fixes + +- For packages installed with tarball method, fetch metadata to resolve nested dependencies ([#8621](https://github.com/dbt-labs/dbt-core/issues/8621)) +- Fix partial parsing not working for semantic model change ([#8859](https://github.com/dbt-labs/dbt-core/issues/8859)) +- Handle unknown `type_code` for model contracts ([#8877](https://github.com/dbt-labs/dbt-core/issues/8877), [#8353](https://github.com/dbt-labs/dbt-core/issues/8353)) +- Rework get_catalog implementation to retain previous adapter interface semantics ([#8846](https://github.com/dbt-labs/dbt-core/issues/8846)) +- Add back contract enforcement for temporary tables on postgres ([#8857](https://github.com/dbt-labs/dbt-core/issues/8857)) +- Add version to fqn when version==0 ([#8836](https://github.com/dbt-labs/dbt-core/issues/8836)) +- Fix cased comparison in catalog-retrieval function. ([#8939](https://github.com/dbt-labs/dbt-core/issues/8939)) +- Catalog queries now assign the correct type to materialized views ([#8864](https://github.com/dbt-labs/dbt-core/issues/8864)) +- Fix compilation exception running empty seed file and support new Integer agate data_type ([#8895](https://github.com/dbt-labs/dbt-core/issues/8895)) +- Make relation filtering None-tolerant for maximal flexibility across adapters. ([#8974](https://github.com/dbt-labs/dbt-core/issues/8974))
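To illustrate the flags move noted above ([#9183]), a minimal `dbt_project.yml` sketch; these keys were previously set under `config:` (UserConfig) in profiles.yml, and the flags shown are common examples rather than an exhaustive list:

```yaml
# dbt_project.yml — project-level behavior flags, moved here from profiles.yml
flags:
  send_anonymous_usage_stats: false
  use_colors: true
  partial_parse: true
```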
+- Update run_results.json from previous versions of dbt to support deferral and rerun from failure ([#9010](https://github.com/dbt-labs/dbt-core/issues/9010)) +- Use MANIFEST.in to recursively include all jinja templates; fixes issue where some templates were not included in the distribution ([#9016](https://github.com/dbt-labs/dbt-core/issues/9016)) +- Fix git repository with subdirectory for Deps ([#9000](https://github.com/dbt-labs/dbt-core/issues/9000)) +- Use seed file from disk for unit testing if rows not specified in YAML config ([#8652](https://github.com/dbt-labs/dbt-core/issues/8652)) +- Fix formatting of tarball information in packages-lock.yml ([#9062](https://github.com/dbt-labs/dbt-core/issues/9062)) +- deps: Lock git packages to commit SHA during resolution (sketched below) ([#9050](https://github.com/dbt-labs/dbt-core/issues/9050)) +- deps: Use PackageRenderer to read package-lock.json ([#9127](https://github.com/dbt-labs/dbt-core/issues/9127)) +- Ensure we produce valid jsonschema schemas for manifest, catalog, run-results, and sources ([#8991](https://github.com/dbt-labs/dbt-core/issues/8991)) +- Get sources working again in dbt docs generate ([#9119](https://github.com/dbt-labs/dbt-core/issues/9119)) +- Fix parsing f-strings in python models ([#6976](https://github.com/dbt-labs/dbt-core/issues/6976)) +- Preserve the value of vars and the --full-refresh flags when using retry. ([#9112](https://github.com/dbt-labs/dbt-core/issues/9112)) +- Support reasonably long unit test names ([#9015](https://github.com/dbt-labs/dbt-core/issues/9015)) +- Fix back-compat parsing for model-level 'tests', source table-level 'tests', and 'tests' defined on model versions ([#9411](https://github.com/dbt-labs/dbt-core/issues/9411)) +- Fix retry command run from CLI ([#9444](https://github.com/dbt-labs/dbt-core/issues/9444)) +- Fix seed and source selection in `dbt docs generate` ([#9161](https://github.com/dbt-labs/dbt-core/issues/9161)) +- Add TestGenerateCatalogWithExternalNodes, include empty nodes in node selection during docs generate ([#9456](https://github.com/dbt-labs/dbt-core/issues/9456)) +- Fix node type plurals in FoundStats log message ([#9464](https://github.com/dbt-labs/dbt-core/issues/9464)) +- Run manifest upgrade preprocessing on any older manifest version, including v11 ([#9487](https://github.com/dbt-labs/dbt-core/issues/9487)) +- Update 'compiled_code' context member logic to route based on command ('clone' or not). Reimplement 'sql' context member as wrapper of 'compiled_code'. ([#9502](https://github.com/dbt-labs/dbt-core/issues/9502)) +- Fix bug where Semantic Layer filter strings are parsed into lists. ([#9507](https://github.com/dbt-labs/dbt-core/issues/9507)) +- Initialize invocation context before test fixtures are built. ([#9489](https://github.com/dbt-labs/dbt-core/issues/9489))
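A sketch of the git-pinning behavior noted above ([#9050]); the package URL and versions are illustrative, and the lock-file layout is simplified:

```yaml
# packages.yml (input)
packages:
  - git: "https://github.com/dbt-labs/dbt-utils.git"
    revision: 1.1.1   # a tag or branch
# package-lock.yml (written by `dbt deps`): the same package, but with
# `revision` resolved to the exact commit SHA behind that tag, so later
# installs stay reproducible even if the tag moves.
```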
+- When patching versioned models, set constraints after config ([#9364](https://github.com/dbt-labs/dbt-core/issues/9364)) +- only include unmodified semantic models in state:modified selection ([#9548](https://github.com/dbt-labs/dbt-core/issues/9548)) +- Set query headers when manifest is passed in to dbtRunner (sketched below) ([#9546](https://github.com/dbt-labs/dbt-core/issues/9546)) +- Store node_info in node associated logging events ([#9557](https://github.com/dbt-labs/dbt-core/issues/9557)) +- Fix Semantic Model Compare node relations ([#9548](https://github.com/dbt-labs/dbt-core/issues/9548)) +- Clearer no-op logging in stubbed SavedQueryRunner ([#9533](https://github.com/dbt-labs/dbt-core/issues/9533)) +- Fix node_info contextvar handling so incorrect node_info doesn't persist ([#8866](https://github.com/dbt-labs/dbt-core/issues/8866)) +- Add target-path to retry ([#8948](https://github.com/dbt-labs/dbt-core/issues/8948)) + +### Docs + +- fix get_custom_database docstring ([dbt-docs/#9003](https://github.com/dbt-labs/dbt-docs/issues/9003)) + +### Under the Hood + +- Added more type annotations. ([#8537](https://github.com/dbt-labs/dbt-core/issues/8537)) +- Add unit testing functional tests ([#8512](https://github.com/dbt-labs/dbt-core/issues/8512)) +- Remove usage of dbt.include.global_project in dbt/adapters ([#8925](https://github.com/dbt-labs/dbt-core/issues/8925)) +- Add a no-op runner for Saved Query ([#8893](https://github.com/dbt-labs/dbt-core/issues/8893)) +- remove dbt.flags.MP_CONTEXT usage in dbt/adapters ([#8967](https://github.com/dbt-labs/dbt-core/issues/8967)) +- Remove usage of dbt.flags.LOG_CACHE_EVENTS in dbt/adapters ([#8969](https://github.com/dbt-labs/dbt-core/issues/8969)) +- Move CatalogRelationTypes test case to the shared test suite to be reused by adapter maintainers ([#8952](https://github.com/dbt-labs/dbt-core/issues/8952)) +- Treat SystemExit as an interrupt if raised during node execution. ([#n/a](https://github.com/dbt-labs/dbt-core/issues/n/a)) +- Removing unused 'documentable' ([#8871](https://github.com/dbt-labs/dbt-core/issues/8871)) +- Remove use of dbt/core exceptions in dbt/adapter ([#8920](https://github.com/dbt-labs/dbt-core/issues/8920)) +- Cache dbt plugin modules to improve integration test performance ([#9029](https://github.com/dbt-labs/dbt-core/issues/9029)) +- Consolidate deferral methods & flags ([#7965](https://github.com/dbt-labs/dbt-core/issues/7965), [#8715](https://github.com/dbt-labs/dbt-core/issues/8715)) +- Fix test_current_timestamp_matches_utc test; allow for MacOS runner system clock variance ([#9057](https://github.com/dbt-labs/dbt-core/issues/9057)) +- Remove usage of dbt.deprecations in dbt/adapters, enable core & adapter-specific event types and protos ([#8927](https://github.com/dbt-labs/dbt-core/issues/8927), [#8918](https://github.com/dbt-labs/dbt-core/issues/8918)) +- Clean up unused adapter folders ([#9123](https://github.com/dbt-labs/dbt-core/issues/9123)) +- Move column constraints into common/contracts, removing another dependency of adapters on core. ([#9024](https://github.com/dbt-labs/dbt-core/issues/9024)) +- Move dbt.semver to dbt.common.semver and update references. ([#9039](https://github.com/dbt-labs/dbt-core/issues/9039))
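For context on the query-header fix above ([#9546]): query headers are rendered from the project's `query-comment` config, which that fix ensures is honored when a prebuilt manifest is handed to dbtRunner. A hedged sketch — the comment string is arbitrary:

```yaml
# dbt_project.yml — rendered into a comment attached to each query dbt runs
query-comment:
  comment: "run by {{ target.user }} via dbtRunner"
  append: true   # place the comment at the end of the SQL rather than the top
```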
+- Move lowercase utils method to common ([#9180](https://github.com/dbt-labs/dbt-core/issues/9180)) +- Remove usages of dbt.clients.jinja in dbt/adapters ([#9205](https://github.com/dbt-labs/dbt-core/issues/9205)) +- Remove usage of dbt.contracts in dbt/adapters ([#9208](https://github.com/dbt-labs/dbt-core/issues/9208)) +- Remove usage of dbt.contracts.graph.nodes.ResultNode in dbt/adapters ([#9214](https://github.com/dbt-labs/dbt-core/issues/9214)) +- Introduce RelationConfig Protocol, consolidate Relation.create_from ([#9215](https://github.com/dbt-labs/dbt-core/issues/9215)) +- remove manifest from adapter.set_relations_cache signature ([#9217](https://github.com/dbt-labs/dbt-core/issues/9217)) +- remove manifest from adapter catalog method signatures ([#9218](https://github.com/dbt-labs/dbt-core/issues/9218)) +- Move BaseConfig, Metadata and various other contract classes from model_config to common/contracts/config ([#8919](https://github.com/dbt-labs/dbt-core/issues/8919)) +- Add MacroResolverProtocol, remove lazy loading of manifest in adapter.execute_macro ([#9244](https://github.com/dbt-labs/dbt-core/issues/9244)) +- pass query header context to MacroQueryStringSetter ([#9249](https://github.com/dbt-labs/dbt-core/issues/9249), [#9250](https://github.com/dbt-labs/dbt-core/issues/9250)) +- add macro_context_generator on adapter ([#9247](https://github.com/dbt-labs/dbt-core/issues/9247)) +- pass mp_context to adapter factory as argument instead of import ([#9025](https://github.com/dbt-labs/dbt-core/issues/9025)) +- have dbt-postgres use RelationConfig protocol for materialized views ([#9292](https://github.com/dbt-labs/dbt-core/issues/9292)) +- move system.py to common as dbt-bigquery relies on it to call gcloud ([#9293](https://github.com/dbt-labs/dbt-core/issues/9293)) +- Reorganizing event definitions to define core events in dbt/events rather than dbt/common ([#9152](https://github.com/dbt-labs/dbt-core/issues/9152)) +- move exceptions used only in dbt/common to dbt/common/exceptions ([#9332](https://github.com/dbt-labs/dbt-core/issues/9332)) +- Remove usage of dbt.adapters.factory in dbt/common ([#9334](https://github.com/dbt-labs/dbt-core/issues/9334)) +- Accept valid_error_names in WarnErrorOptions constructor, remove global usage of event modules ([#9337](https://github.com/dbt-labs/dbt-core/issues/9337)) +- Move result objects to dbt.artifacts ([#9193](https://github.com/dbt-labs/dbt-core/issues/9193)) +- dbt Labs OSS standardization of docs and templates. ([#9252](https://github.com/dbt-labs/dbt-core/issues/9252)) +- Add dbt-common as a dependency and remove dbt/common ([#9357](https://github.com/dbt-labs/dbt-core/issues/9357)) +- move cache exceptions to dbt/adapters ([#9362](https://github.com/dbt-labs/dbt-core/issues/9362)) +- Clean up macro contexts. ([#9422](https://github.com/dbt-labs/dbt-core/issues/9422)) +- Add the @requires.manifest decorator to the retry command. 
([#9426](https://github.com/dbt-labs/dbt-core/issues/9426)) +- Move WritableManifest + Documentation to dbt/artifacts ([#9378](https://github.com/dbt-labs/dbt-core/issues/9378), [#9379](https://github.com/dbt-labs/dbt-core/issues/9379)) +- Define Macro and Group resources in dbt/artifacts ([#9381](https://github.com/dbt-labs/dbt-core/issues/9381), [#9382](https://github.com/dbt-labs/dbt-core/issues/9382)) +- Move `SavedQuery` data definition to `dbt/artifacts` ([#9386](https://github.com/dbt-labs/dbt-core/issues/9386)) +- Migrate data parts of `Metric` node to dbt/artifacts ([#9383](https://github.com/dbt-labs/dbt-core/issues/9383)) +- Move data portion of `SemanticModel` to dbt/artifacts ([#9387](https://github.com/dbt-labs/dbt-core/issues/9387)) +- Move data parts of `Exposure` class to dbt/artifacts ([#9380](https://github.com/dbt-labs/dbt-core/issues/9380)) +- Start using `Mergeable` from dbt-common ([#9505](https://github.com/dbt-labs/dbt-core/issues/9505)) +- Move manifest nodes to artifacts ([#9388](https://github.com/dbt-labs/dbt-core/issues/9388)) +- Move data parts of `SourceDefinition` class to dbt/artifacts ([#9384](https://github.com/dbt-labs/dbt-core/issues/9384)) +- Remove uses of Replaceable class ([#7802](https://github.com/dbt-labs/dbt-core/issues/7802)) +- Make dbt-core compatible with Python 3.12 ([#9007](https://github.com/dbt-labs/dbt-core/issues/9007)) +- Restrict protobuf to major version 4. ([#9566](https://github.com/dbt-labs/dbt-core/issues/9566)) +- Remove references to dbt.tracking and dbt.flags from dbt/artifacts ([#9390](https://github.com/dbt-labs/dbt-core/issues/9390)) +- Implement primary key inference for model nodes ([#9652](https://github.com/dbt-labs/dbt-core/issues/9652)) +- Define UnitTestDefinition resource in dbt/artifacts/resources ([#9667](https://github.com/dbt-labs/dbt-core/issues/9667)) +- Use Manifest instead of WritableManifest in PreviousState and _get_deferred_manifest ([#9567](https://github.com/dbt-labs/dbt-core/issues/9567)) + +### Dependencies + +- Bump actions/checkout from 3 to 4 ([#8781](https://github.com/dbt-labs/dbt-core/pull/8781)) +- Begin using DSI 0.4.x ([#8892](https://github.com/dbt-labs/dbt-core/pull/8892)) +- Update typing-extensions version to >=4.4 ([#9012](https://github.com/dbt-labs/dbt-core/pull/9012)) +- Bump ddtrace from 2.1.7 to 2.3.0 ([#9132](https://github.com/dbt-labs/dbt-core/pull/9132)) +- Bump freezegun from 0.3.12 to 1.3.0 ([#9197](https://github.com/dbt-labs/dbt-core/pull/9197)) +- Bump actions/setup-python from 4 to 5 ([#9267](https://github.com/dbt-labs/dbt-core/pull/9267)) +- Bump actions/download-artifact from 3 to 4 ([#9374](https://github.com/dbt-labs/dbt-core/pull/9374)) +- remove dbt/adapters and add dependency on dbt-adapters ([#9430](https://github.com/dbt-labs/dbt-core/pull/9430)) +- Bump actions/cache from 3 to 4 ([#9471](https://github.com/dbt-labs/dbt-core/pull/9471)) +- Bump peter-evans/create-pull-request from 5 to 6 ([#9552](https://github.com/dbt-labs/dbt-core/pull/9552)) +- Cap dbt-semantic-interfaces version range to <0.6 ([#9671](https://github.com/dbt-labs/dbt-core/pull/9671)) +- bump dbt-common to accept major version 1 ([#9690](https://github.com/dbt-labs/dbt-core/pull/9690)) + +### Security + +- Update Jinja2 to >= 3.1.3 to address CVE-2024-22195 ([#CVE-2024-22195](https://github.com/dbt-labs/dbt-core/pull/CVE-2024-22195)) + +### Contributors +- [@LeoTheGriff](https://github.com/LeoTheGriff) ([#9003](https://github.com/dbt-labs/dbt-core/issues/9003)) +- 
[@WilliamDee](https://github.com/WilliamDee) ([#9203](https://github.com/dbt-labs/dbt-core/issues/9203)) +- [@adamlopez](https://github.com/adamlopez) ([#8621](https://github.com/dbt-labs/dbt-core/issues/8621)) +- [@aliceliu](https://github.com/aliceliu) ([#9652](https://github.com/dbt-labs/dbt-core/issues/9652)) +- [@benmosher](https://github.com/benmosher) ([#n/a](https://github.com/dbt-labs/dbt-core/issues/n/a)) +- [@colin-rogers-dbt](https://github.com/colin-rogers-dbt) ([#8919](https://github.com/dbt-labs/dbt-core/issues/8919)) +- [@courtneyholcomb](https://github.com/courtneyholcomb) ([#9507](https://github.com/dbt-labs/dbt-core/issues/9507)) +- [@l1xnan](https://github.com/l1xnan) ([#9007](https://github.com/dbt-labs/dbt-core/issues/9007)) +- [@mederka](https://github.com/mederka) ([#6976](https://github.com/dbt-labs/dbt-core/issues/6976)) +- [@ofek1weiss](https://github.com/ofek1weiss) ([#5609](https://github.com/dbt-labs/dbt-core/issues/5609)) +- [@peterallenwebb](https://github.com/peterallenwebb) ([#9112](https://github.com/dbt-labs/dbt-core/issues/9112)) +- [@tlento](https://github.com/tlento) ([#9012](https://github.com/dbt-labs/dbt-core/pull/9012), [#9671](https://github.com/dbt-labs/dbt-core/pull/9671)) +- [@tonayya](https://github.com/tonayya) ([#9252](https://github.com/dbt-labs/dbt-core/issues/9252)) diff --git a/.changes/1.8.0-b2.md b/.changes/1.8.0-b2.md new file mode 100644 index 00000000000..e69170a41de --- /dev/null +++ b/.changes/1.8.0-b2.md @@ -0,0 +1,53 @@ +## dbt-core 1.8.0-b2 - April 03, 2024 + +### Features + +- Global config for --target and --profile CLI flags and DBT_TARGET and DBT_PROFILE environment variables. ([#7798](https://github.com/dbt-labs/dbt-core/issues/7798)) +- Allow excluding resource types for build, list, and clone commands, and provide env vars ([#9237](https://github.com/dbt-labs/dbt-core/issues/9237)) +- SourceDefinition.meta represents source-level and table-level meta properties, instead of only table-level ([#9766](https://github.com/dbt-labs/dbt-core/issues/9766)) +- Allow metrics in semantic layer filters. ([#9804](https://github.com/dbt-labs/dbt-core/issues/9804)) + +### Fixes + +- fix lock-file bad indentation ([#9319](https://github.com/dbt-labs/dbt-core/issues/9319)) +- Tighten exception handling to avoid worker thread hangs. ([#9583](https://github.com/dbt-labs/dbt-core/issues/9583)) +- Do not add duplicate input_measures ([#9360](https://github.com/dbt-labs/dbt-core/issues/9360)) +- Throw a ParsingError if a primary key constraint is defined on multiple columns or at both the column and model level. ([#9581](https://github.com/dbt-labs/dbt-core/issues/9581)) +- Bug fix: don't parse Jinja in filters for input metrics or measures. ([#9582](https://github.com/dbt-labs/dbt-core/issues/9582)) +- Fix traceback parsing for exceptions raised due to csv fixtures moved into or out of fixture/subfolders. ([#9570](https://github.com/dbt-labs/dbt-core/issues/9570))
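Several fixes in this release touch unit-test csv fixtures (e.g. [#9570] just above). A minimal sketch of a file-based fixture, with invented test, model, and fixture names, assuming the dbt 1.8 `unit_tests:` schema and fixtures living under `tests/fixtures/`:

```yaml
# models/schema.yml
unit_tests:
  - name: test_valid_emails        # hypothetical unit test
    model: dim_customers           # hypothetical model under test
    given:
      - input: ref('stg_customers')
        format: csv
        fixture: valid_emails      # reads tests/fixtures/valid_emails.csv
    expect:
      format: csv
      fixture: expected_valid_emails
```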
+- Fix partial parsing `KeyError` on deleted schema files ([#8860](https://github.com/dbt-labs/dbt-core/issues/8860)) +- Support saved queries in `dbt list` ([#9532](https://github.com/dbt-labs/dbt-core/issues/9532)) +- include sources in catalog.json when over 100 relations selected for catalog generation ([#9755](https://github.com/dbt-labs/dbt-core/issues/9755)) +- Support overriding macros in packages in unit testing ([#9624](https://github.com/dbt-labs/dbt-core/issues/9624)) +- Handle exceptions for failing on-run-* hooks in source freshness ([#9511](https://github.com/dbt-labs/dbt-core/issues/9511)) +- Validation of unit test parsing for incremental models ([#9593](https://github.com/dbt-labs/dbt-core/issues/9593)) +- Fix use of retry command on command using defer ([#9770](https://github.com/dbt-labs/dbt-core/issues/9770)) +- Ensure `args` is not modified by `dbt.invoke(args)` ([#8938](https://github.com/dbt-labs/dbt-core/issues/8938), [#9787](https://github.com/dbt-labs/dbt-core/issues/9787)) +- Unit test path outputs ([#9608](https://github.com/dbt-labs/dbt-core/issues/9608)) +- Fix assorted source freshness edge cases so the check is run or actionable information is given ([#9078](https://github.com/dbt-labs/dbt-core/issues/9078)) +- Fix Docker release process to account for both historical and current versions of `dbt-postgres` ([#9827](https://github.com/dbt-labs/dbt-core/issues/9827)) + +### Docs + +- Add analytics for dbt.com ([dbt-docs/#430](https://github.com/dbt-labs/dbt-docs/issues/430)) + +### Under the Hood + +- Remove unused key `wildcard` from MethodName enum ([#9641](https://github.com/dbt-labs/dbt-core/issues/9641)) +- Improve dbt CLI speed ([#4627](https://github.com/dbt-labs/dbt-core/issues/4627)) +- Include node_info in various Result events ([#9619](https://github.com/dbt-labs/dbt-core/issues/9619)) + +### Dependencies + +- Bump actions/upload-artifact from 3 to 4 ([#9470](https://github.com/dbt-labs/dbt-core/pull/9470)) +- Restrict protobuf to 4.* versions ([#9566](https://github.com/dbt-labs/dbt-core/pull/9566)) +- Bump codecov/codecov-action from 3 to 4 ([#9659](https://github.com/dbt-labs/dbt-core/pull/9659)) + +### Contributors +- [@asweet](https://github.com/asweet) ([#9641](https://github.com/dbt-labs/dbt-core/issues/9641)) +- [@b-per](https://github.com/b-per) ([#430](https://github.com/dbt-labs/dbt-core/issues/430)) +- [@barton996](https://github.com/barton996) ([#7798](https://github.com/dbt-labs/dbt-core/issues/7798)) +- [@courtneyholcomb](https://github.com/courtneyholcomb) ([#9804](https://github.com/dbt-labs/dbt-core/issues/9804), [#9582](https://github.com/dbt-labs/dbt-core/issues/9582)) +- [@dwreeves](https://github.com/dwreeves) ([#4627](https://github.com/dbt-labs/dbt-core/issues/4627)) +- [@jx2lee](https://github.com/jx2lee) ([#9319](https://github.com/dbt-labs/dbt-core/issues/9319)) +- [@slothkong](https://github.com/slothkong) ([#9570](https://github.com/dbt-labs/dbt-core/issues/9570)) diff --git a/.changes/1.8.0-b3.md b/.changes/1.8.0-b3.md new file mode 100644 index 00000000000..0b9ce6aaaca --- /dev/null +++ b/.changes/1.8.0-b3.md @@ -0,0 +1,48 @@ +## dbt-core 1.8.0-b3 - April 18, 2024 + +### Features + +- Support scrubbing secret vars ([#7247](https://github.com/dbt-labs/dbt-core/issues/7247)) +- Add wildcard support to the group selector method ([#9811](https://github.com/dbt-labs/dbt-core/issues/9811)) +- source freshness precomputes metadata-based freshness in batch, if possible ([#8705](https://github.com/dbt-labs/dbt-core/issues/8705))
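On the batch freshness feature above ([#8705]): when a source table does not set `loaded_at_field`, dbt can take freshness from warehouse metadata, and this change fetches that metadata for many sources at once where the adapter supports it. A hedged sketch with invented source and table names:

```yaml
# models/sources.yml — no loaded_at_field, so freshness comes from
# table metadata, which can now be collected in a single batched call
sources:
  - name: raw                # hypothetical source
    freshness:
      warn_after: {count: 12, period: hour}
      error_after: {count: 24, period: hour}
    tables:
      - name: orders
      - name: customers
```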
+- Better error message when trying to select a disabled model ([#9747](https://github.com/dbt-labs/dbt-core/issues/9747)) +- Support SQL in unit testing fixtures ([#9405](https://github.com/dbt-labs/dbt-core/issues/9405)) + +### Fixes + +- fix configuration of turning test warnings into failures with WARN_ERROR_OPTIONS ([#7761](https://github.com/dbt-labs/dbt-core/issues/7761)) +- Fix conflict with newer versions of Snowplow tracker ([#8719](https://github.com/dbt-labs/dbt-core/issues/8719)) +- Only create the packages-install-path / dbt_packages folder during dbt deps ([#6985](https://github.com/dbt-labs/dbt-core/issues/6985), [#9584](https://github.com/dbt-labs/dbt-core/issues/9584)) +- Exclude password-like fields when considering reparse ([#9795](https://github.com/dbt-labs/dbt-core/issues/9795)) +- Fixed query comments test ([#9860](https://github.com/dbt-labs/dbt-core/issues/9860)) +- Begin warning people about spaces in model names ([#9397](https://github.com/dbt-labs/dbt-core/issues/9397)) +- Disambiguate FreshnessConfigProblem error message ([#9891](https://github.com/dbt-labs/dbt-core/issues/9891)) + +### Under the Hood + +- Remove non dbt.artifacts dbt.* imports from dbt/artifacts ([#9926](https://github.com/dbt-labs/dbt-core/issues/9926)) +- Migrate to using `error_tag` provided by `dbt-common` ([#9914](https://github.com/dbt-labs/dbt-core/issues/9914)) +- Add a test for semantic manifest and move test fixtures needed for it ([#9665](https://github.com/dbt-labs/dbt-core/issues/9665)) + +### Dependencies + +- Relax pathspec upper bound version restriction ([#9373](https://github.com/dbt-labs/dbt-core/issues/9373)) +- Bump python from 3.10.7-slim-bullseye to 3.11.2-slim-bullseye in /docker ([#9687](https://github.com/dbt-labs/dbt-core/issues/9687)) +- Remove duplicate dependency of protobuf in dev-requirements ([#9830](https://github.com/dbt-labs/dbt-core/issues/9830)) +- Bump black from 23.3.0 to >=24.3.0,<25.0 ([#8074](https://github.com/dbt-labs/dbt-core/issues/8074)) + +### Security + +- Bump sqlparse to >=0.5.0, <0.6.0 to address GHSA-2m57-hf25-phgg ([#9951](https://github.com/dbt-labs/dbt-core/issues/9951)) + +### Contributors +- [@SamuelBFavarin](https://github.com/SamuelBFavarin) ([#9747](https://github.com/dbt-labs/dbt-core/issues/9747)) +- [@akurdyukov](https://github.com/akurdyukov) ([#8719](https://github.com/dbt-labs/dbt-core/issues/8719)) +- [@damian3031](https://github.com/damian3031) ([#9860](https://github.com/dbt-labs/dbt-core/issues/9860)) +- [@edgarrmondragon](https://github.com/edgarrmondragon) ([#8719](https://github.com/dbt-labs/dbt-core/issues/8719)) +- [@emmoop](https://github.com/emmoop) ([#9951](https://github.com/dbt-labs/dbt-core/issues/9951)) +- [@heysweet](https://github.com/heysweet) ([#9811](https://github.com/dbt-labs/dbt-core/issues/9811)) +- [@jx2lee](https://github.com/jx2lee) ([#7761](https://github.com/dbt-labs/dbt-core/issues/7761)) +- [@nielspardon](https://github.com/nielspardon) ([#7247](https://github.com/dbt-labs/dbt-core/issues/7247)) +- [@niteshy](https://github.com/niteshy) ([#9830](https://github.com/dbt-labs/dbt-core/issues/9830)) +- [@rzjfr](https://github.com/rzjfr) ([#9373](https://github.com/dbt-labs/dbt-core/issues/9373)) diff --git a/.changes/unreleased/Breaking Changes-20231127-114757.yaml b/.changes/1.8.0/Breaking Changes-20231127-114757.yaml similarity index 100% rename from .changes/unreleased/Breaking Changes-20231127-114757.yaml rename to 
.changes/1.8.0/Breaking Changes-20231127-114757.yaml diff --git a/.changes/unreleased/Breaking Changes-20231128-134356.yaml b/.changes/1.8.0/Breaking Changes-20231128-134356.yaml similarity index 100% rename from .changes/unreleased/Breaking Changes-20231128-134356.yaml rename to .changes/1.8.0/Breaking Changes-20231128-134356.yaml diff --git a/.changes/unreleased/Breaking Changes-20231129-091921.yaml b/.changes/1.8.0/Breaking Changes-20231129-091921.yaml similarity index 100% rename from .changes/unreleased/Breaking Changes-20231129-091921.yaml rename to .changes/1.8.0/Breaking Changes-20231129-091921.yaml diff --git a/.changes/unreleased/Breaking Changes-20231130-135348.yaml b/.changes/1.8.0/Breaking Changes-20231130-135348.yaml similarity index 100% rename from .changes/unreleased/Breaking Changes-20231130-135348.yaml rename to .changes/1.8.0/Breaking Changes-20231130-135348.yaml diff --git a/.changes/unreleased/Breaking Changes-20240130-140550.yaml b/.changes/1.8.0/Breaking Changes-20240130-140550.yaml similarity index 100% rename from .changes/unreleased/Breaking Changes-20240130-140550.yaml rename to .changes/1.8.0/Breaking Changes-20240130-140550.yaml diff --git a/.changes/unreleased/Dependencies-20231005-151848.yaml b/.changes/1.8.0/Dependencies-20231005-151848.yaml similarity index 90% rename from .changes/unreleased/Dependencies-20231005-151848.yaml rename to .changes/1.8.0/Dependencies-20231005-151848.yaml index 2020c3a535a..bca488d6485 100644 --- a/.changes/unreleased/Dependencies-20231005-151848.yaml +++ b/.changes/1.8.0/Dependencies-20231005-151848.yaml @@ -3,4 +3,4 @@ body: "Bump actions/checkout from 3 to 4" time: 2023-10-05T15:18:48.00000Z custom: Author: dependabot[bot] - PR: 8781 + Issue: 8781 diff --git a/.changes/unreleased/Dependencies-20231031-131954.yaml b/.changes/1.8.0/Dependencies-20231031-131954.yaml similarity index 88% rename from .changes/unreleased/Dependencies-20231031-131954.yaml rename to .changes/1.8.0/Dependencies-20231031-131954.yaml index 01c9a80d322..e94d4c2405f 100644 --- a/.changes/unreleased/Dependencies-20231031-131954.yaml +++ b/.changes/1.8.0/Dependencies-20231031-131954.yaml @@ -3,4 +3,4 @@ body: Begin using DSI 0.4.x time: 2023-10-31T13:19:54.750009-07:00 custom: Author: QMalcolm peterallenwebb - PR: "8892" + Issue: "8892" diff --git a/.changes/unreleased/Dependencies-20231106-130051.yaml b/.changes/1.8.0/Dependencies-20231106-130051.yaml similarity index 89% rename from .changes/unreleased/Dependencies-20231106-130051.yaml rename to .changes/1.8.0/Dependencies-20231106-130051.yaml index 8cdb992ab69..6e42d7920b7 100644 --- a/.changes/unreleased/Dependencies-20231106-130051.yaml +++ b/.changes/1.8.0/Dependencies-20231106-130051.yaml @@ -3,4 +3,4 @@ body: Update typing-extensions version to >=4.4 time: 2023-11-06T13:00:51.062386-08:00 custom: Author: tlento - PR: "9012" + Issue: "9012" diff --git a/.changes/unreleased/Dependencies-20231122-001840.yaml b/.changes/1.8.0/Dependencies-20231122-001840.yaml similarity index 90% rename from .changes/unreleased/Dependencies-20231122-001840.yaml rename to .changes/1.8.0/Dependencies-20231122-001840.yaml index 21cd0f012fb..b92795f10d9 100644 --- a/.changes/unreleased/Dependencies-20231122-001840.yaml +++ b/.changes/1.8.0/Dependencies-20231122-001840.yaml @@ -3,4 +3,4 @@ body: "Bump ddtrace from 2.1.7 to 2.3.0" time: 2023-11-22T00:18:40.00000Z custom: Author: dependabot[bot] - PR: 9132 + Issue: 9132 diff --git a/.changes/1.8.0/Dependencies-20231204-000945.yaml 
b/.changes/1.8.0/Dependencies-20231204-000945.yaml new file mode 100644 index 00000000000..a9ff267db30 --- /dev/null +++ b/.changes/1.8.0/Dependencies-20231204-000945.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Bump freezegun from 0.3.12 to 1.3.0" +time: 2023-12-04T00:09:45.00000Z +custom: + Author: dependabot[bot] + Issue: 9197 diff --git a/.changes/unreleased/Dependencies-20231211-005651.yaml b/.changes/1.8.0/Dependencies-20231211-005651.yaml similarity index 90% rename from .changes/unreleased/Dependencies-20231211-005651.yaml rename to .changes/1.8.0/Dependencies-20231211-005651.yaml index cf60935f675..bffcfbb0ade 100644 --- a/.changes/unreleased/Dependencies-20231211-005651.yaml +++ b/.changes/1.8.0/Dependencies-20231211-005651.yaml @@ -3,4 +3,4 @@ body: "Bump actions/setup-python from 4 to 5" time: 2023-12-11T00:56:51.00000Z custom: Author: dependabot[bot] - PR: 9267 + Issue: 9267 diff --git a/.changes/unreleased/Dependencies-20240115-012030.yaml b/.changes/1.8.0/Dependencies-20240115-012030.yaml similarity index 90% rename from .changes/unreleased/Dependencies-20240115-012030.yaml rename to .changes/1.8.0/Dependencies-20240115-012030.yaml index 3de246ad260..d1819c8beac 100644 --- a/.changes/unreleased/Dependencies-20240115-012030.yaml +++ b/.changes/1.8.0/Dependencies-20240115-012030.yaml @@ -3,4 +3,4 @@ body: "Bump actions/download-artifact from 3 to 4" time: 2024-01-15T01:20:30.00000Z custom: Author: dependabot[bot] - PR: 9374 + Issue: 9374 diff --git a/.changes/1.8.0/Dependencies-20240117-100818.yaml b/.changes/1.8.0/Dependencies-20240117-100818.yaml new file mode 100644 index 00000000000..f8f1e65b593 --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240117-100818.yaml @@ -0,0 +1,6 @@ +kind: Dependencies +body: Relax pathspec upper bound version restriction +time: 2024-01-17T10:08:18.009949641+01:00 +custom: + Author: rzjfr + Issue: "9373" diff --git a/.changes/unreleased/Dependencies-20240123-105843.yaml b/.changes/1.8.0/Dependencies-20240123-105843.yaml similarity index 90% rename from .changes/unreleased/Dependencies-20240123-105843.yaml rename to .changes/1.8.0/Dependencies-20240123-105843.yaml index 94fd865e0d0..4c2995c5ce6 100644 --- a/.changes/unreleased/Dependencies-20240123-105843.yaml +++ b/.changes/1.8.0/Dependencies-20240123-105843.yaml @@ -3,4 +3,4 @@ body: remove dbt/adapters and add dependency on dbt-adapters time: 2024-01-23T10:58:43.286952-08:00 custom: Author: colin-rogers-dbt - PR: "9430" + Issue: "9430" diff --git a/.changes/1.8.0/Dependencies-20240129-005734.yaml b/.changes/1.8.0/Dependencies-20240129-005734.yaml new file mode 100644 index 00000000000..90a5ee0c0c3 --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240129-005734.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Bump actions/upload-artifact from 3 to 4" +time: 2024-01-29T00:57:34.00000Z +custom: + Author: dependabot[bot] + Issue: 9470 diff --git a/.changes/unreleased/Dependencies-20240129-005743.yaml b/.changes/1.8.0/Dependencies-20240129-005743.yaml similarity index 90% rename from .changes/unreleased/Dependencies-20240129-005743.yaml rename to .changes/1.8.0/Dependencies-20240129-005743.yaml index 2e650f4f1bc..6e491069322 100644 --- a/.changes/unreleased/Dependencies-20240129-005743.yaml +++ b/.changes/1.8.0/Dependencies-20240129-005743.yaml @@ -3,4 +3,4 @@ body: "Bump actions/cache from 3 to 4" time: 2024-01-29T00:57:43.00000Z custom: Author: dependabot[bot] - PR: 9471 + Issue: 9471 diff --git a/.changes/1.8.0/Dependencies-20240212-011324.yaml 
b/.changes/1.8.0/Dependencies-20240212-011324.yaml new file mode 100644 index 00000000000..200932ca249 --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240212-011324.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Bump peter-evans/create-pull-request from 5 to 6" +time: 2024-02-12T01:13:24.00000Z +custom: + Author: dependabot[bot] + Issue: 9552 diff --git a/.changes/1.8.0/Dependencies-20240222-102947.yaml b/.changes/1.8.0/Dependencies-20240222-102947.yaml new file mode 100644 index 00000000000..78c7a994b35 --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240222-102947.yaml @@ -0,0 +1,6 @@ +kind: Dependencies +body: Restrict protobuf to 4.* versions +time: 2024-02-22T10:29:47.595435-08:00 +custom: + Author: QMalcolm + Issue: "9566" diff --git a/.changes/1.8.0/Dependencies-20240226-004412.yaml b/.changes/1.8.0/Dependencies-20240226-004412.yaml new file mode 100644 index 00000000000..8300a32e086 --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240226-004412.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Bump codecov/codecov-action from 3 to 4" +time: 2024-02-26T00:44:12.00000Z +custom: + Author: dependabot[bot] + Issue: 9659 diff --git a/.changes/1.8.0/Dependencies-20240226-123502.yaml b/.changes/1.8.0/Dependencies-20240226-123502.yaml new file mode 100644 index 00000000000..f1ad49c393d --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240226-123502.yaml @@ -0,0 +1,6 @@ +kind: Dependencies +body: Cap dbt-semantic-interfaces version range to <0.6 +time: 2024-02-26T12:35:02.643779-08:00 +custom: + Author: tlento + Issue: "9671" diff --git a/.changes/1.8.0/Dependencies-20240227-142138.yaml b/.changes/1.8.0/Dependencies-20240227-142138.yaml new file mode 100644 index 00000000000..0b20244d773 --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240227-142138.yaml @@ -0,0 +1,6 @@ +kind: Dependencies +body: Bump python from 3.10.7-slim-bullseye to 3.11.2-slim-bullseye in /docker +time: 2024-02-27T14:21:38.394757-05:00 +custom: + Author: michelleark + Issue: "9687" diff --git a/.changes/1.8.0/Dependencies-20240227-151115.yaml b/.changes/1.8.0/Dependencies-20240227-151115.yaml new file mode 100644 index 00000000000..d9a99d7e3dd --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240227-151115.yaml @@ -0,0 +1,6 @@ +kind: Dependencies +body: bump dbt-common to accept major version 1 +time: 2024-02-27T15:11:15.583604-05:00 +custom: + Author: michelleark + Issue: "9690" diff --git a/.changes/1.8.0/Dependencies-20240331-103917.yaml b/.changes/1.8.0/Dependencies-20240331-103917.yaml new file mode 100644 index 00000000000..c4cb75dd449 --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240331-103917.yaml @@ -0,0 +1,6 @@ +kind: Dependencies +body: Remove duplicate dependency of protobuf in dev-requirements +time: 2024-03-31T10:39:17.432017-07:00 +custom: + Author: niteshy + Issue: "9830" diff --git a/.changes/1.8.0/Dependencies-20240410-183321.yaml b/.changes/1.8.0/Dependencies-20240410-183321.yaml new file mode 100644 index 00000000000..7fb86e98c3b --- /dev/null +++ b/.changes/1.8.0/Dependencies-20240410-183321.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Bump black from 23.3.0 to >=24.3.0,<25.0" +time: 2024-04-10T18:33:21.00000Z +custom: + Author: dependabot[bot] + Issue: 8074 diff --git a/.changes/1.8.0/Docs-20230615-105157.yaml b/.changes/1.8.0/Docs-20230615-105157.yaml new file mode 100644 index 00000000000..327e33b2e03 --- /dev/null +++ b/.changes/1.8.0/Docs-20230615-105157.yaml @@ -0,0 +1,6 @@ +kind: Docs +body: Add analytics for dbt.com +time: 2023-06-15T10:51:57.838991+02:00 +custom: + 
Author: b-per + Issue: "430" diff --git a/.changes/unreleased/Docs-20231106-123157.yaml b/.changes/1.8.0/Docs-20231106-123157.yaml similarity index 100% rename from .changes/unreleased/Docs-20231106-123157.yaml rename to .changes/1.8.0/Docs-20231106-123157.yaml diff --git a/.changes/unreleased/Features-20230802-145011.yaml b/.changes/1.8.0/Features-20230802-145011.yaml similarity index 100% rename from .changes/unreleased/Features-20230802-145011.yaml rename to .changes/1.8.0/Features-20230802-145011.yaml diff --git a/.changes/unreleased/Features-20230828-101825.yaml b/.changes/1.8.0/Features-20230828-101825.yaml similarity index 100% rename from .changes/unreleased/Features-20230828-101825.yaml rename to .changes/1.8.0/Features-20230828-101825.yaml diff --git a/.changes/unreleased/Features-20230906-234741.yaml b/.changes/1.8.0/Features-20230906-234741.yaml similarity index 100% rename from .changes/unreleased/Features-20230906-234741.yaml rename to .changes/1.8.0/Features-20230906-234741.yaml diff --git a/.changes/unreleased/Features-20230915-123733.yaml b/.changes/1.8.0/Features-20230915-123733.yaml similarity index 100% rename from .changes/unreleased/Features-20230915-123733.yaml rename to .changes/1.8.0/Features-20230915-123733.yaml diff --git a/.changes/unreleased/Features-20230928-163205.yaml b/.changes/1.8.0/Features-20230928-163205.yaml similarity index 100% rename from .changes/unreleased/Features-20230928-163205.yaml rename to .changes/1.8.0/Features-20230928-163205.yaml diff --git a/.changes/unreleased/Features-20231017-143620.yaml b/.changes/1.8.0/Features-20231017-143620.yaml similarity index 100% rename from .changes/unreleased/Features-20231017-143620.yaml rename to .changes/1.8.0/Features-20231017-143620.yaml diff --git a/.changes/unreleased/Features-20231026-110821.yaml b/.changes/1.8.0/Features-20231026-110821.yaml similarity index 100% rename from .changes/unreleased/Features-20231026-110821.yaml rename to .changes/1.8.0/Features-20231026-110821.yaml diff --git a/.changes/unreleased/Features-20231026-123556.yaml b/.changes/1.8.0/Features-20231026-123556.yaml similarity index 100% rename from .changes/unreleased/Features-20231026-123556.yaml rename to .changes/1.8.0/Features-20231026-123556.yaml diff --git a/.changes/unreleased/Features-20231026-123913.yaml b/.changes/1.8.0/Features-20231026-123913.yaml similarity index 100% rename from .changes/unreleased/Features-20231026-123913.yaml rename to .changes/1.8.0/Features-20231026-123913.yaml diff --git a/.changes/unreleased/Features-20231031-132022.yaml b/.changes/1.8.0/Features-20231031-132022.yaml similarity index 100% rename from .changes/unreleased/Features-20231031-132022.yaml rename to .changes/1.8.0/Features-20231031-132022.yaml diff --git a/.changes/unreleased/Features-20231101-101845.yaml b/.changes/1.8.0/Features-20231101-101845.yaml similarity index 100% rename from .changes/unreleased/Features-20231101-101845.yaml rename to .changes/1.8.0/Features-20231101-101845.yaml diff --git a/.changes/unreleased/Features-20231106-194752.yaml b/.changes/1.8.0/Features-20231106-194752.yaml similarity index 100% rename from .changes/unreleased/Features-20231106-194752.yaml rename to .changes/1.8.0/Features-20231106-194752.yaml diff --git a/.changes/unreleased/Features-20231107-135635.yaml b/.changes/1.8.0/Features-20231107-135635.yaml similarity index 100% rename from .changes/unreleased/Features-20231107-135635.yaml rename to .changes/1.8.0/Features-20231107-135635.yaml diff --git 
a/.changes/unreleased/Features-20231107-231006.yaml b/.changes/1.8.0/Features-20231107-231006.yaml similarity index 100% rename from .changes/unreleased/Features-20231107-231006.yaml rename to .changes/1.8.0/Features-20231107-231006.yaml diff --git a/.changes/unreleased/Features-20231110-154255.yaml b/.changes/1.8.0/Features-20231110-154255.yaml similarity index 100% rename from .changes/unreleased/Features-20231110-154255.yaml rename to .changes/1.8.0/Features-20231110-154255.yaml diff --git a/.changes/unreleased/Features-20231111-191150.yaml b/.changes/1.8.0/Features-20231111-191150.yaml similarity index 100% rename from .changes/unreleased/Features-20231111-191150.yaml rename to .changes/1.8.0/Features-20231111-191150.yaml diff --git a/.changes/unreleased/Features-20231114-101555.yaml b/.changes/1.8.0/Features-20231114-101555.yaml similarity index 100% rename from .changes/unreleased/Features-20231114-101555.yaml rename to .changes/1.8.0/Features-20231114-101555.yaml diff --git a/.changes/1.8.0/Features-20231115-092005.yaml b/.changes/1.8.0/Features-20231115-092005.yaml new file mode 100644 index 00000000000..6f156764aff --- /dev/null +++ b/.changes/1.8.0/Features-20231115-092005.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Global config for --target and --profile CLI flags and DBT_TARGET and DBT_PROFILE environment variables. +time: 2023-11-15T09:20:05.12461Z +custom: + Author: barton996 + Issue: "7798" diff --git a/.changes/unreleased/Features-20231116-144006.yaml b/.changes/1.8.0/Features-20231116-144006.yaml similarity index 100% rename from .changes/unreleased/Features-20231116-144006.yaml rename to .changes/1.8.0/Features-20231116-144006.yaml diff --git a/.changes/unreleased/Features-20231116-234049.yaml b/.changes/1.8.0/Features-20231116-234049.yaml similarity index 100% rename from .changes/unreleased/Features-20231116-234049.yaml rename to .changes/1.8.0/Features-20231116-234049.yaml diff --git a/.changes/unreleased/Features-20231130-130948.yaml b/.changes/1.8.0/Features-20231130-130948.yaml similarity index 100% rename from .changes/unreleased/Features-20231130-130948.yaml rename to .changes/1.8.0/Features-20231130-130948.yaml diff --git a/.changes/unreleased/Features-20231205-131717.yaml b/.changes/1.8.0/Features-20231205-131717.yaml similarity index 100% rename from .changes/unreleased/Features-20231205-131717.yaml rename to .changes/1.8.0/Features-20231205-131717.yaml diff --git a/.changes/unreleased/Features-20231205-200447.yaml b/.changes/1.8.0/Features-20231205-200447.yaml similarity index 100% rename from .changes/unreleased/Features-20231205-200447.yaml rename to .changes/1.8.0/Features-20231205-200447.yaml diff --git a/.changes/unreleased/Features-20231206-181458.yaml b/.changes/1.8.0/Features-20231206-181458.yaml similarity index 100% rename from .changes/unreleased/Features-20231206-181458.yaml rename to .changes/1.8.0/Features-20231206-181458.yaml diff --git a/.changes/unreleased/Features-20231212-150556.yaml b/.changes/1.8.0/Features-20231212-150556.yaml similarity index 100% rename from .changes/unreleased/Features-20231212-150556.yaml rename to .changes/1.8.0/Features-20231212-150556.yaml diff --git a/.changes/unreleased/Features-20231218-195854.yaml b/.changes/1.8.0/Features-20231218-195854.yaml similarity index 100% rename from .changes/unreleased/Features-20231218-195854.yaml rename to .changes/1.8.0/Features-20231218-195854.yaml diff --git a/.changes/unreleased/Features-20231231-171205.yaml b/.changes/1.8.0/Features-20231231-171205.yaml similarity index 100% 
rename from .changes/unreleased/Features-20231231-171205.yaml rename to .changes/1.8.0/Features-20231231-171205.yaml diff --git a/.changes/unreleased/Features-20240118-135651.yaml b/.changes/1.8.0/Features-20240118-135651.yaml similarity index 100% rename from .changes/unreleased/Features-20240118-135651.yaml rename to .changes/1.8.0/Features-20240118-135651.yaml diff --git a/.changes/unreleased/Features-20240119-101335.yaml b/.changes/1.8.0/Features-20240119-101335.yaml similarity index 100% rename from .changes/unreleased/Features-20240119-101335.yaml rename to .changes/1.8.0/Features-20240119-101335.yaml diff --git a/.changes/unreleased/Features-20240122-145854.yaml b/.changes/1.8.0/Features-20240122-145854.yaml similarity index 100% rename from .changes/unreleased/Features-20240122-145854.yaml rename to .changes/1.8.0/Features-20240122-145854.yaml diff --git a/.changes/unreleased/Features-20240129-114753.yaml b/.changes/1.8.0/Features-20240129-114753.yaml similarity index 100% rename from .changes/unreleased/Features-20240129-114753.yaml rename to .changes/1.8.0/Features-20240129-114753.yaml diff --git a/.changes/unreleased/Features-20240131-153535.yaml b/.changes/1.8.0/Features-20240131-153535.yaml similarity index 100% rename from .changes/unreleased/Features-20240131-153535.yaml rename to .changes/1.8.0/Features-20240131-153535.yaml diff --git a/.changes/unreleased/Features-20240201-154956.yaml b/.changes/1.8.0/Features-20240201-154956.yaml similarity index 100% rename from .changes/unreleased/Features-20240201-154956.yaml rename to .changes/1.8.0/Features-20240201-154956.yaml diff --git a/.changes/unreleased/Features-20240202-112644.yaml b/.changes/1.8.0/Features-20240202-112644.yaml similarity index 100% rename from .changes/unreleased/Features-20240202-112644.yaml rename to .changes/1.8.0/Features-20240202-112644.yaml diff --git a/.changes/unreleased/Features-20240215-120811.yaml b/.changes/1.8.0/Features-20240215-120811.yaml similarity index 100% rename from .changes/unreleased/Features-20240215-120811.yaml rename to .changes/1.8.0/Features-20240215-120811.yaml diff --git a/.changes/unreleased/Features-20240215-145814.yaml b/.changes/1.8.0/Features-20240215-145814.yaml similarity index 100% rename from .changes/unreleased/Features-20240215-145814.yaml rename to .changes/1.8.0/Features-20240215-145814.yaml diff --git a/.changes/1.8.0/Features-20240307-153622.yaml b/.changes/1.8.0/Features-20240307-153622.yaml new file mode 100644 index 00000000000..80886a82c9b --- /dev/null +++ b/.changes/1.8.0/Features-20240307-153622.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Support scrubbing secret vars +time: 2024-03-07T15:36:22.754627+01:00 +custom: + Author: nielspardon + Issue: "7247" diff --git a/.changes/1.8.0/Features-20240312-140407.yaml b/.changes/1.8.0/Features-20240312-140407.yaml new file mode 100644 index 00000000000..a73c3bc1c85 --- /dev/null +++ b/.changes/1.8.0/Features-20240312-140407.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Allow excluding resource types for build, list, and clone commands, and provide env vars +time: 2024-03-12T14:04:07.086017-04:00 +custom: + Author: gshank + Issue: "9237" diff --git a/.changes/1.8.0/Features-20240315-161209.yaml b/.changes/1.8.0/Features-20240315-161209.yaml new file mode 100644 index 00000000000..4a428b973db --- /dev/null +++ b/.changes/1.8.0/Features-20240315-161209.yaml @@ -0,0 +1,7 @@ +kind: Features +body: SourceDefinition.meta represents source-level and table-level meta properties, + instead of only table-level +time: 
2024-03-15T16:12:09.789935-04:00 +custom: + Author: michelleark + Issue: "9766" diff --git a/.changes/1.8.0/Features-20240322-103124.yaml b/.changes/1.8.0/Features-20240322-103124.yaml new file mode 100644 index 00000000000..735dd44bdbc --- /dev/null +++ b/.changes/1.8.0/Features-20240322-103124.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Allow metrics in semantic layer filters. +time: 2024-03-22T10:31:24.76978-07:00 +custom: + Author: courtneyholcomb + Issue: "9804" diff --git a/.changes/1.8.0/Features-20240323-201230.yaml b/.changes/1.8.0/Features-20240323-201230.yaml new file mode 100644 index 00000000000..3f981ecc7b3 --- /dev/null +++ b/.changes/1.8.0/Features-20240323-201230.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Add wildcard support to the group selector method +time: 2024-03-23T20:12:30.715975-04:00 +custom: + Author: heysweet + Issue: "9811" diff --git a/.changes/1.8.0/Features-20240404-170728.yaml b/.changes/1.8.0/Features-20240404-170728.yaml new file mode 100644 index 00000000000..6db7735acbc --- /dev/null +++ b/.changes/1.8.0/Features-20240404-170728.yaml @@ -0,0 +1,6 @@ +kind: Features +body: 'source freshness precomputes metadata-based freshness in batch, if possible ' +time: 2024-04-04T17:07:28.717868-07:00 +custom: + Author: michelleark + Issue: "8705" diff --git a/.changes/1.8.0/Features-20240405-175733.yaml b/.changes/1.8.0/Features-20240405-175733.yaml new file mode 100644 index 00000000000..0346361fc15 --- /dev/null +++ b/.changes/1.8.0/Features-20240405-175733.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Better error message when trying to select a disabled model +time: 2024-04-05T17:57:33.047963+02:00 +custom: + Author: SamuelBFavarin + Issue: "9747" diff --git a/.changes/1.8.0/Features-20240408-094132.yaml b/.changes/1.8.0/Features-20240408-094132.yaml new file mode 100644 index 00000000000..0b7a251e926 --- /dev/null +++ b/.changes/1.8.0/Features-20240408-094132.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Support SQL in unit testing fixtures +time: 2024-04-08T09:41:32.15936-04:00 +custom: + Author: gshank + Issue: "9405" diff --git a/.changes/unreleased/Fixes-20231013-130943.yaml b/.changes/1.8.0/Fixes-20231013-130943.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231013-130943.yaml rename to .changes/1.8.0/Fixes-20231013-130943.yaml diff --git a/.changes/unreleased/Fixes-20231016-163953.yaml b/.changes/1.8.0/Fixes-20231016-163953.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231016-163953.yaml rename to .changes/1.8.0/Fixes-20231016-163953.yaml diff --git a/.changes/unreleased/Fixes-20231024-110151.yaml b/.changes/1.8.0/Fixes-20231024-110151.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231024-110151.yaml rename to .changes/1.8.0/Fixes-20231024-110151.yaml diff --git a/.changes/unreleased/Fixes-20231024-145504.yaml b/.changes/1.8.0/Fixes-20231024-145504.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231024-145504.yaml rename to .changes/1.8.0/Fixes-20231024-145504.yaml diff --git a/.changes/unreleased/Fixes-20231024-155400.yaml b/.changes/1.8.0/Fixes-20231024-155400.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231024-155400.yaml rename to .changes/1.8.0/Fixes-20231024-155400.yaml diff --git a/.changes/unreleased/Fixes-20231026-002536.yaml b/.changes/1.8.0/Fixes-20231026-002536.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231026-002536.yaml rename to .changes/1.8.0/Fixes-20231026-002536.yaml diff --git 
a/.changes/unreleased/Fixes-20231030-093734.yaml b/.changes/1.8.0/Fixes-20231030-093734.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231030-093734.yaml rename to .changes/1.8.0/Fixes-20231030-093734.yaml diff --git a/.changes/unreleased/Fixes-20231031-005345.yaml b/.changes/1.8.0/Fixes-20231031-005345.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231031-005345.yaml rename to .changes/1.8.0/Fixes-20231031-005345.yaml diff --git a/.changes/unreleased/Fixes-20231031-144837.yaml b/.changes/1.8.0/Fixes-20231031-144837.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231031-144837.yaml rename to .changes/1.8.0/Fixes-20231031-144837.yaml diff --git a/.changes/unreleased/Fixes-20231101-155824.yaml b/.changes/1.8.0/Fixes-20231101-155824.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231101-155824.yaml rename to .changes/1.8.0/Fixes-20231101-155824.yaml diff --git a/.changes/unreleased/Fixes-20231106-155933.yaml b/.changes/1.8.0/Fixes-20231106-155933.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231106-155933.yaml rename to .changes/1.8.0/Fixes-20231106-155933.yaml diff --git a/.changes/unreleased/Fixes-20231107-092358.yaml b/.changes/1.8.0/Fixes-20231107-092358.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231107-092358.yaml rename to .changes/1.8.0/Fixes-20231107-092358.yaml diff --git a/.changes/unreleased/Fixes-20231107-094130.yaml b/.changes/1.8.0/Fixes-20231107-094130.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231107-094130.yaml rename to .changes/1.8.0/Fixes-20231107-094130.yaml diff --git a/.changes/unreleased/Fixes-20231113-114956.yaml b/.changes/1.8.0/Fixes-20231113-114956.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231113-114956.yaml rename to .changes/1.8.0/Fixes-20231113-114956.yaml diff --git a/.changes/unreleased/Fixes-20231113-154535.yaml b/.changes/1.8.0/Fixes-20231113-154535.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231113-154535.yaml rename to .changes/1.8.0/Fixes-20231113-154535.yaml diff --git a/.changes/unreleased/Fixes-20231127-154310.yaml b/.changes/1.8.0/Fixes-20231127-154310.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231127-154310.yaml rename to .changes/1.8.0/Fixes-20231127-154310.yaml diff --git a/.changes/unreleased/Fixes-20231127-154347.yaml b/.changes/1.8.0/Fixes-20231127-154347.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231127-154347.yaml rename to .changes/1.8.0/Fixes-20231127-154347.yaml diff --git a/.changes/unreleased/Fixes-20231127-165244.yaml b/.changes/1.8.0/Fixes-20231127-165244.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231127-165244.yaml rename to .changes/1.8.0/Fixes-20231127-165244.yaml diff --git a/.changes/unreleased/Fixes-20231128-102111.yaml b/.changes/1.8.0/Fixes-20231128-102111.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231128-102111.yaml rename to .changes/1.8.0/Fixes-20231128-102111.yaml diff --git a/.changes/unreleased/Fixes-20231128-155225.yaml b/.changes/1.8.0/Fixes-20231128-155225.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231128-155225.yaml rename to .changes/1.8.0/Fixes-20231128-155225.yaml diff --git a/.changes/unreleased/Fixes-20231213-220449.yaml b/.changes/1.8.0/Fixes-20231213-220449.yaml similarity index 100% rename from .changes/unreleased/Fixes-20231213-220449.yaml rename to .changes/1.8.0/Fixes-20231213-220449.yaml diff --git 
a/.changes/1.8.0/Fixes-20240106-003649.yaml b/.changes/1.8.0/Fixes-20240106-003649.yaml new file mode 100644 index 00000000000..b41086fb4d7 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240106-003649.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: fix lock-file bad indentation +time: 2024-01-06T00:36:49.547533+09:00 +custom: + Author: jx2lee + Issue: "9319" diff --git a/.changes/1.8.0/Fixes-20240108-232035.yaml b/.changes/1.8.0/Fixes-20240108-232035.yaml new file mode 100644 index 00000000000..227332f7af1 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240108-232035.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: fix configuration of turning test warnings into failures with WARN_ERROR_OPTIONS +time: 2024-01-08T23:20:35.339102+09:00 +custom: + Author: jx2lee + Issue: "7761" diff --git a/.changes/unreleased/Fixes-20240115-165310.yaml b/.changes/1.8.0/Fixes-20240115-165310.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240115-165310.yaml rename to .changes/1.8.0/Fixes-20240115-165310.yaml diff --git a/.changes/unreleased/Fixes-20240119-215214.yaml b/.changes/1.8.0/Fixes-20240119-215214.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240119-215214.yaml rename to .changes/1.8.0/Fixes-20240119-215214.yaml diff --git a/.changes/unreleased/Fixes-20240124-142522.yaml b/.changes/1.8.0/Fixes-20240124-142522.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240124-142522.yaml rename to .changes/1.8.0/Fixes-20240124-142522.yaml diff --git a/.changes/unreleased/Fixes-20240125-155641.yaml b/.changes/1.8.0/Fixes-20240125-155641.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240125-155641.yaml rename to .changes/1.8.0/Fixes-20240125-155641.yaml diff --git a/.changes/unreleased/Fixes-20240125-182243.yaml b/.changes/1.8.0/Fixes-20240125-182243.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240125-182243.yaml rename to .changes/1.8.0/Fixes-20240125-182243.yaml diff --git a/.changes/unreleased/Fixes-20240126-134234.yaml b/.changes/1.8.0/Fixes-20240126-134234.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240126-134234.yaml rename to .changes/1.8.0/Fixes-20240126-134234.yaml diff --git a/.changes/unreleased/Fixes-20240130-124135.yaml b/.changes/1.8.0/Fixes-20240130-124135.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240130-124135.yaml rename to .changes/1.8.0/Fixes-20240130-124135.yaml diff --git a/.changes/unreleased/Fixes-20240201-124701.yaml b/.changes/1.8.0/Fixes-20240201-124701.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240201-124701.yaml rename to .changes/1.8.0/Fixes-20240201-124701.yaml diff --git a/.changes/1.8.0/Fixes-20240201-164407.yaml b/.changes/1.8.0/Fixes-20240201-164407.yaml new file mode 100644 index 00000000000..a156a2a7dd5 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240201-164407.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Fix bug where Semantic Layer filter strings are parsed into lists. 
+time: 2024-02-01T16:44:07.697777-08:00 +custom: + Author: courtneyholcomb + Issue: "9507" diff --git a/.changes/1.8.0/Fixes-20240206-152435.yaml b/.changes/1.8.0/Fixes-20240206-152435.yaml new file mode 100644 index 00000000000..d5cecf873fd --- /dev/null +++ b/.changes/1.8.0/Fixes-20240206-152435.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Fix conflict with newer versions of Snowplow tracker +time: 2024-02-06T15:24:35.778891-06:00 +custom: + Author: edgarrmondragon akurdyukov + Issue: "8719" diff --git a/.changes/unreleased/Fixes-20240206-161331.yaml b/.changes/1.8.0/Fixes-20240206-161331.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240206-161331.yaml rename to .changes/1.8.0/Fixes-20240206-161331.yaml diff --git a/.changes/unreleased/Fixes-20240207-150223.yaml b/.changes/1.8.0/Fixes-20240207-150223.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240207-150223.yaml rename to .changes/1.8.0/Fixes-20240207-150223.yaml diff --git a/.changes/unreleased/Fixes-20240209-170146.yaml b/.changes/1.8.0/Fixes-20240209-170146.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240209-170146.yaml rename to .changes/1.8.0/Fixes-20240209-170146.yaml diff --git a/.changes/unreleased/Fixes-20240212-144733.yaml b/.changes/1.8.0/Fixes-20240212-144733.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240212-144733.yaml rename to .changes/1.8.0/Fixes-20240212-144733.yaml diff --git a/.changes/unreleased/Fixes-20240212-154728.yaml b/.changes/1.8.0/Fixes-20240212-154728.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240212-154728.yaml rename to .changes/1.8.0/Fixes-20240212-154728.yaml diff --git a/.changes/unreleased/Fixes-20240212-165619.yaml b/.changes/1.8.0/Fixes-20240212-165619.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240212-165619.yaml rename to .changes/1.8.0/Fixes-20240212-165619.yaml diff --git a/.changes/1.8.0/Fixes-20240216-145632.yaml b/.changes/1.8.0/Fixes-20240216-145632.yaml new file mode 100644 index 00000000000..a02027f66a5 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240216-145632.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Tighten exception handling to avoid worker thread hangs. 
+time: 2024-02-16T14:56:32.858967-05:00 +custom: + Author: peterallenwebb + Issue: "9583" diff --git a/.changes/1.8.0/Fixes-20240220-165453.yaml b/.changes/1.8.0/Fixes-20240220-165453.yaml new file mode 100644 index 00000000000..11dad8f558c --- /dev/null +++ b/.changes/1.8.0/Fixes-20240220-165453.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Clearer no-op logging in stubbed SavedQueryRunner +time: 2024-02-20T16:54:53.623096-05:00 +custom: + Author: jtcohen6 + Issue: "9533" diff --git a/.changes/1.8.0/Fixes-20240222-100958.yaml b/.changes/1.8.0/Fixes-20240222-100958.yaml new file mode 100644 index 00000000000..1fb2ff46c6f --- /dev/null +++ b/.changes/1.8.0/Fixes-20240222-100958.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Fix node_info contextvar handling so incorrect node_info doesn't persist +time: 2024-02-22T10:09:58.122809-05:00 +custom: + Author: gshank + Issue: "8866" diff --git a/.changes/1.8.0/Fixes-20240223-162107.yaml b/.changes/1.8.0/Fixes-20240223-162107.yaml new file mode 100644 index 00000000000..446cf6d077a --- /dev/null +++ b/.changes/1.8.0/Fixes-20240223-162107.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Add target-path to retry +time: 2024-02-23T16:21:07.83639Z +custom: + Author: aranke + Issue: "8948" diff --git a/.changes/1.8.0/Fixes-20240226-173227.yaml b/.changes/1.8.0/Fixes-20240226-173227.yaml new file mode 100644 index 00000000000..fa1bf0ab8cf --- /dev/null +++ b/.changes/1.8.0/Fixes-20240226-173227.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Do not add duplicate input_measures +time: 2024-02-26T17:32:27.837427-05:00 +custom: + Author: gshank + Issue: "9360" diff --git a/.changes/1.8.0/Fixes-20240228-135928.yaml b/.changes/1.8.0/Fixes-20240228-135928.yaml new file mode 100644 index 00000000000..3b908608ef6 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240228-135928.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Throw a ParsingError if a primary key constraint is defined on multiple columns + or at both the column and model level. +time: 2024-02-28T13:59:28.728561-06:00 +custom: + Author: emmyoop + Issue: "9581" diff --git a/.changes/1.8.0/Fixes-20240229-114207.yaml b/.changes/1.8.0/Fixes-20240229-114207.yaml new file mode 100644 index 00000000000..aebb857f523 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240229-114207.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: 'Bug fix: don''t parse Jinja in filters for input metrics or measures.' +time: 2024-02-29T11:42:07.259143-08:00 +custom: + Author: courtneyholcomb + Issue: "9582" diff --git a/.changes/1.8.0/Fixes-20240301-000355.yaml b/.changes/1.8.0/Fixes-20240301-000355.yaml new file mode 100644 index 00000000000..7172982750f --- /dev/null +++ b/.changes/1.8.0/Fixes-20240301-000355.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Fix traceback parsing for exceptions raised due to csv fixtures moved into or + out of fixture/subfolders. 
+time: 2024-03-01T00:03:55.753473609+01:00 +custom: + Author: slothkong + Issue: "9570" diff --git a/.changes/1.8.0/Fixes-20240301-135536.yaml b/.changes/1.8.0/Fixes-20240301-135536.yaml new file mode 100644 index 00000000000..2a96bd7eeec --- /dev/null +++ b/.changes/1.8.0/Fixes-20240301-135536.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Fix partial parsing `KeyError` on deleted schema files +time: 2024-03-01T13:55:36.533176-08:00 +custom: + Author: QMalcolm + Issue: "8860" diff --git a/.changes/1.8.0/Fixes-20240307-142459.yaml b/.changes/1.8.0/Fixes-20240307-142459.yaml new file mode 100644 index 00000000000..14c08da2816 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240307-142459.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Support saved queries in `dbt list` +time: 2024-03-07T14:24:59.530072-05:00 +custom: + Author: QMalcolm jtcohen6 + Issue: "9532" diff --git a/.changes/1.8.0/Fixes-20240312-165357.yaml b/.changes/1.8.0/Fixes-20240312-165357.yaml new file mode 100644 index 00000000000..7a391118015 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240312-165357.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: include sources in catalog.json when over 100 relations selected for catalog + generation +time: 2024-03-12T16:53:57.714118-04:00 +custom: + Author: michelleark + Issue: "9755" diff --git a/.changes/1.8.0/Fixes-20240315-145538.yaml b/.changes/1.8.0/Fixes-20240315-145538.yaml new file mode 100644 index 00000000000..8723734db86 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240315-145538.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Support overriding macros in packages in unit testing +time: 2024-03-15T14:55:38.958553-04:00 +custom: + Author: michelleark + Issue: "9624" diff --git a/.changes/1.8.0/Fixes-20240316-231152.yaml b/.changes/1.8.0/Fixes-20240316-231152.yaml new file mode 100644 index 00000000000..725d8bbc3c5 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240316-231152.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Handle exceptions for failing on-run-* hooks in source freshness +time: 2024-03-16T23:11:52.819014-07:00 +custom: + Author: aranke + Issue: "9511" diff --git a/.changes/1.8.0/Fixes-20240317-005611.yaml b/.changes/1.8.0/Fixes-20240317-005611.yaml new file mode 100644 index 00000000000..0878779174a --- /dev/null +++ b/.changes/1.8.0/Fixes-20240317-005611.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: 'Validation of unit test parsing for incremental models' +time: 2024-03-17T00:56:11.855232-07:00 +custom: + Author: aranke + Issue: "9593" diff --git a/.changes/1.8.0/Fixes-20240318-153338.yaml b/.changes/1.8.0/Fixes-20240318-153338.yaml new file mode 100644 index 00000000000..c1328ce8957 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240318-153338.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Fix use of retry command on command using defer +time: 2024-03-18T15:33:38.90058-04:00 +custom: + Author: gshank + Issue: "9770" diff --git a/.changes/1.8.0/Fixes-20240323-122018.yaml b/.changes/1.8.0/Fixes-20240323-122018.yaml new file mode 100644 index 00000000000..a165511283c --- /dev/null +++ b/.changes/1.8.0/Fixes-20240323-122018.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Make `args` variable to be un-modified by `dbt.invoke(args)` +time: 2024-03-23T12:20:18.170948-06:00 +custom: + Author: dbeatty10 + Issue: 8938 9787 diff --git a/.changes/1.8.0/Fixes-20240323-124558.yaml b/.changes/1.8.0/Fixes-20240323-124558.yaml new file mode 100644 index 00000000000..b36173325ba --- /dev/null +++ b/.changes/1.8.0/Fixes-20240323-124558.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Only create the packages-install-path / dbt_packages folder during dbt deps 
+time: 2024-03-23T12:45:58.159017-06:00 +custom: + Author: dbeatty10 + Issue: 6985 9584 diff --git a/.changes/1.8.0/Fixes-20240326-003411.yaml b/.changes/1.8.0/Fixes-20240326-003411.yaml new file mode 100644 index 00000000000..f5b5fe9e095 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240326-003411.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Unit test path outputs +time: 2024-03-26T00:34:11.162594Z +custom: + Author: aranke + Issue: "9608" diff --git a/.changes/1.8.0/Fixes-20240326-162100.yaml b/.changes/1.8.0/Fixes-20240326-162100.yaml new file mode 100644 index 00000000000..f4c181dbb31 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240326-162100.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Fix assorted source freshness edge cases so check is run or actionable information + is given +time: 2024-03-26T16:21:00.008936-07:00 +custom: + Author: QMalcolm + Issue: "9078" diff --git a/.changes/1.8.0/Fixes-20240327-150013.yaml b/.changes/1.8.0/Fixes-20240327-150013.yaml new file mode 100644 index 00000000000..f988dd5c1ab --- /dev/null +++ b/.changes/1.8.0/Fixes-20240327-150013.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: 'Fix Docker release process to account for both historical and current versions + of `dbt-postgres`' +time: 2024-03-27T15:00:13.388268-04:00 +custom: + Author: mikealfare + Issue: "9827" diff --git a/.changes/1.8.0/Fixes-20240402-135556.yaml b/.changes/1.8.0/Fixes-20240402-135556.yaml new file mode 100644 index 00000000000..b6ba62fc0f7 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240402-135556.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Exclude password-like fields for considering reparse +time: 2024-04-02T13:55:56.169953-07:00 +custom: + Author: ChenyuLInx + Issue: "9795" diff --git a/.changes/1.8.0/Fixes-20240408-130646.yaml b/.changes/1.8.0/Fixes-20240408-130646.yaml new file mode 100644 index 00000000000..9aeaa94a27c --- /dev/null +++ b/.changes/1.8.0/Fixes-20240408-130646.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Fixed query comments test +time: 2024-04-08T13:06:46.648144+02:00 +custom: + Author: damian3031 + Issue: "9860" diff --git a/.changes/1.8.0/Fixes-20240409-233347.yaml b/.changes/1.8.0/Fixes-20240409-233347.yaml new file mode 100644 index 00000000000..db929c16af0 --- /dev/null +++ b/.changes/1.8.0/Fixes-20240409-233347.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Begin warning people about spaces in model names +time: 2024-04-09T23:33:47.850166-07:00 +custom: + Author: QMalcolm + Issue: "9397" diff --git a/.changes/1.8.0/Fixes-20240412-095718.yaml b/.changes/1.8.0/Fixes-20240412-095718.yaml new file mode 100644 index 00000000000..98fb9333f8d --- /dev/null +++ b/.changes/1.8.0/Fixes-20240412-095718.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Disambiguate FreshnessConfigProblem error message +time: 2024-04-12T09:57:18.417882-07:00 +custom: + Author: michelleark + Issue: "9891" diff --git a/.changes/1.8.0/Security-20240222-152445.yaml b/.changes/1.8.0/Security-20240222-152445.yaml new file mode 100644 index 00000000000..e21e013310e --- /dev/null +++ b/.changes/1.8.0/Security-20240222-152445.yaml @@ -0,0 +1,6 @@ +kind: Security +body: Update Jinja2 to >= 3.1.3 to address CVE-2024-22195 +time: 2024-02-22T15:24:45.158305-08:00 +custom: + Author: QMalcolm + Issue: 9638 diff --git a/.changes/1.8.0/Security-20240417-141316.yaml b/.changes/1.8.0/Security-20240417-141316.yaml new file mode 100644 index 00000000000..6611cafb443 --- /dev/null +++ b/.changes/1.8.0/Security-20240417-141316.yaml @@ -0,0 +1,6 @@ +kind: Security +body: Bump sqlparse to >=0.5.0, <0.6.0 to address GHSA-2m57-hf25-phgg +time:
2024-04-17T14:13:16.896353-05:00 +custom: + Author: emmyoop + Issue: "9951" diff --git a/.changes/unreleased/Under the Hood-20230831-164435.yaml b/.changes/1.8.0/Under the Hood-20230831-164435.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20230831-164435.yaml rename to .changes/1.8.0/Under the Hood-20230831-164435.yaml diff --git a/.changes/unreleased/Under the Hood-20230912-190506.yaml b/.changes/1.8.0/Under the Hood-20230912-190506.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20230912-190506.yaml rename to .changes/1.8.0/Under the Hood-20230912-190506.yaml diff --git a/.changes/unreleased/Under the Hood-20231026-184953.yaml b/.changes/1.8.0/Under the Hood-20231026-184953.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231026-184953.yaml rename to .changes/1.8.0/Under the Hood-20231026-184953.yaml diff --git a/.changes/unreleased/Under the Hood-20231027-140048.yaml b/.changes/1.8.0/Under the Hood-20231027-140048.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231027-140048.yaml rename to .changes/1.8.0/Under the Hood-20231027-140048.yaml diff --git a/.changes/unreleased/Under the Hood-20231101-102758.yaml b/.changes/1.8.0/Under the Hood-20231101-102758.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231101-102758.yaml rename to .changes/1.8.0/Under the Hood-20231101-102758.yaml diff --git a/.changes/unreleased/Under the Hood-20231101-173124.yaml b/.changes/1.8.0/Under the Hood-20231101-173124.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231101-173124.yaml rename to .changes/1.8.0/Under the Hood-20231101-173124.yaml diff --git a/.changes/unreleased/Under the Hood-20231103-195222.yaml b/.changes/1.8.0/Under the Hood-20231103-195222.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231103-195222.yaml rename to .changes/1.8.0/Under the Hood-20231103-195222.yaml diff --git a/.changes/unreleased/Under the Hood-20231106-080422.yaml b/.changes/1.8.0/Under the Hood-20231106-080422.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231106-080422.yaml rename to .changes/1.8.0/Under the Hood-20231106-080422.yaml diff --git a/.changes/unreleased/Under the Hood-20231106-105730.yaml b/.changes/1.8.0/Under the Hood-20231106-105730.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231106-105730.yaml rename to .changes/1.8.0/Under the Hood-20231106-105730.yaml diff --git a/.changes/unreleased/Under the Hood-20231107-135728.yaml b/.changes/1.8.0/Under the Hood-20231107-135728.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231107-135728.yaml rename to .changes/1.8.0/Under the Hood-20231107-135728.yaml diff --git a/.changes/unreleased/Under the Hood-20231107-191546.yaml b/.changes/1.8.0/Under the Hood-20231107-191546.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231107-191546.yaml rename to .changes/1.8.0/Under the Hood-20231107-191546.yaml diff --git a/.changes/unreleased/Under the Hood-20231108-163613.yaml b/.changes/1.8.0/Under the Hood-20231108-163613.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231108-163613.yaml rename to .changes/1.8.0/Under the Hood-20231108-163613.yaml diff --git a/.changes/unreleased/Under the Hood-20231111-175350.yaml b/.changes/1.8.0/Under the Hood-20231111-175350.yaml similarity index 100% rename from .changes/unreleased/Under the
Hood-20231111-175350.yaml rename to .changes/1.8.0/Under the Hood-20231111-175350.yaml diff --git a/.changes/unreleased/Under the Hood-20231116-174251.yaml b/.changes/1.8.0/Under the Hood-20231116-174251.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231116-174251.yaml rename to .changes/1.8.0/Under the Hood-20231116-174251.yaml diff --git a/.changes/unreleased/Under the Hood-20231120-134735.yaml b/.changes/1.8.0/Under the Hood-20231120-134735.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231120-134735.yaml rename to .changes/1.8.0/Under the Hood-20231120-134735.yaml diff --git a/.changes/unreleased/Under the Hood-20231120-183214.yaml b/.changes/1.8.0/Under the Hood-20231120-183214.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231120-183214.yaml rename to .changes/1.8.0/Under the Hood-20231120-183214.yaml diff --git a/.changes/unreleased/Under the Hood-20231128-170732.yaml b/.changes/1.8.0/Under the Hood-20231128-170732.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231128-170732.yaml rename to .changes/1.8.0/Under the Hood-20231128-170732.yaml diff --git a/.changes/unreleased/Under the Hood-20231130-135432.yaml b/.changes/1.8.0/Under the Hood-20231130-135432.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231130-135432.yaml rename to .changes/1.8.0/Under the Hood-20231130-135432.yaml diff --git a/.changes/unreleased/Under the Hood-20231205-093544.yaml b/.changes/1.8.0/Under the Hood-20231205-093544.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231205-093544.yaml rename to .changes/1.8.0/Under the Hood-20231205-093544.yaml diff --git a/.changes/unreleased/Under the Hood-20231205-120559.yaml b/.changes/1.8.0/Under the Hood-20231205-120559.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231205-120559.yaml rename to .changes/1.8.0/Under the Hood-20231205-120559.yaml diff --git a/.changes/unreleased/Under the Hood-20231205-165812.yaml b/.changes/1.8.0/Under the Hood-20231205-165812.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231205-165812.yaml rename to .changes/1.8.0/Under the Hood-20231205-165812.yaml diff --git a/.changes/unreleased/Under the Hood-20231205-170725.yaml b/.changes/1.8.0/Under the Hood-20231205-170725.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231205-170725.yaml rename to .changes/1.8.0/Under the Hood-20231205-170725.yaml diff --git a/.changes/unreleased/Under the Hood-20231205-185022.yaml b/.changes/1.8.0/Under the Hood-20231205-185022.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231205-185022.yaml rename to .changes/1.8.0/Under the Hood-20231205-185022.yaml diff --git a/.changes/unreleased/Under the Hood-20231205-235830.yaml b/.changes/1.8.0/Under the Hood-20231205-235830.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231205-235830.yaml rename to .changes/1.8.0/Under the Hood-20231205-235830.yaml diff --git a/.changes/unreleased/Under the Hood-20231206-000343.yaml b/.changes/1.8.0/Under the Hood-20231206-000343.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231206-000343.yaml rename to .changes/1.8.0/Under the Hood-20231206-000343.yaml diff --git a/.changes/unreleased/Under the Hood-20231207-111554.yaml b/.changes/1.8.0/Under the Hood-20231207-111554.yaml similarity index 100% rename from .changes/unreleased/Under the 
Hood-20231207-111554.yaml rename to .changes/1.8.0/Under the Hood-20231207-111554.yaml diff --git a/.changes/unreleased/Under the Hood-20231207-224139.yaml b/.changes/1.8.0/Under the Hood-20231207-224139.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231207-224139.yaml rename to .changes/1.8.0/Under the Hood-20231207-224139.yaml diff --git a/.changes/unreleased/Under the Hood-20231208-004854.yaml b/.changes/1.8.0/Under the Hood-20231208-004854.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231208-004854.yaml rename to .changes/1.8.0/Under the Hood-20231208-004854.yaml diff --git a/.changes/unreleased/Under the Hood-20231212-154842.yaml b/.changes/1.8.0/Under the Hood-20231212-154842.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231212-154842.yaml rename to .changes/1.8.0/Under the Hood-20231212-154842.yaml diff --git a/.changes/unreleased/Under the Hood-20231214-122134.yaml b/.changes/1.8.0/Under the Hood-20231214-122134.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231214-122134.yaml rename to .changes/1.8.0/Under the Hood-20231214-122134.yaml diff --git a/.changes/unreleased/Under the Hood-20231214-164107.yaml b/.changes/1.8.0/Under the Hood-20231214-164107.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20231214-164107.yaml rename to .changes/1.8.0/Under the Hood-20231214-164107.yaml diff --git a/.changes/unreleased/Under the Hood-20240103-145843.yaml b/.changes/1.8.0/Under the Hood-20240103-145843.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240103-145843.yaml rename to .changes/1.8.0/Under the Hood-20240103-145843.yaml diff --git a/.changes/unreleased/Under the Hood-20240104-133249.yaml b/.changes/1.8.0/Under the Hood-20240104-133249.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240104-133249.yaml rename to .changes/1.8.0/Under the Hood-20240104-133249.yaml diff --git a/.changes/unreleased/Under the Hood-20240104-135849.yaml b/.changes/1.8.0/Under the Hood-20240104-135849.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240104-135849.yaml rename to .changes/1.8.0/Under the Hood-20240104-135849.yaml diff --git a/.changes/unreleased/Under the Hood-20240104-165248.yaml b/.changes/1.8.0/Under the Hood-20240104-165248.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240104-165248.yaml rename to .changes/1.8.0/Under the Hood-20240104-165248.yaml diff --git a/.changes/unreleased/Under the Hood-20240108-160140.yaml b/.changes/1.8.0/Under the Hood-20240108-160140.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240108-160140.yaml rename to .changes/1.8.0/Under the Hood-20240108-160140.yaml diff --git a/.changes/unreleased/Under the Hood-20240109-091856.yaml b/.changes/1.8.0/Under the Hood-20240109-091856.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240109-091856.yaml rename to .changes/1.8.0/Under the Hood-20240109-091856.yaml diff --git a/.changes/unreleased/Under the Hood-20240110-105734.yaml b/.changes/1.8.0/Under the Hood-20240110-105734.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240110-105734.yaml rename to .changes/1.8.0/Under the Hood-20240110-105734.yaml diff --git a/.changes/unreleased/Under the Hood-20240110-161723.yaml b/.changes/1.8.0/Under the Hood-20240110-161723.yaml similarity index 100% rename from .changes/unreleased/Under the 
Hood-20240110-161723.yaml rename to .changes/1.8.0/Under the Hood-20240110-161723.yaml diff --git a/.changes/unreleased/Under the Hood-20240122-165446.yaml b/.changes/1.8.0/Under the Hood-20240122-165446.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240122-165446.yaml rename to .changes/1.8.0/Under the Hood-20240122-165446.yaml diff --git a/.changes/unreleased/Under the Hood-20240123-114855.yaml b/.changes/1.8.0/Under the Hood-20240123-114855.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240123-114855.yaml rename to .changes/1.8.0/Under the Hood-20240123-114855.yaml diff --git a/.changes/unreleased/Under the Hood-20240123-142256.yaml b/.changes/1.8.0/Under the Hood-20240123-142256.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240123-142256.yaml rename to .changes/1.8.0/Under the Hood-20240123-142256.yaml diff --git a/.changes/unreleased/Under the Hood-20240125-095453.yaml b/.changes/1.8.0/Under the Hood-20240125-095453.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240125-095453.yaml rename to .changes/1.8.0/Under the Hood-20240125-095453.yaml diff --git a/.changes/unreleased/Under the Hood-20240126-164038.yaml b/.changes/1.8.0/Under the Hood-20240126-164038.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240126-164038.yaml rename to .changes/1.8.0/Under the Hood-20240126-164038.yaml diff --git a/.changes/unreleased/Under the Hood-20240129-130549.yaml b/.changes/1.8.0/Under the Hood-20240129-130549.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240129-130549.yaml rename to .changes/1.8.0/Under the Hood-20240129-130549.yaml diff --git a/.changes/unreleased/Under the Hood-20240129-163800.yaml b/.changes/1.8.0/Under the Hood-20240129-163800.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240129-163800.yaml rename to .changes/1.8.0/Under the Hood-20240129-163800.yaml diff --git a/.changes/unreleased/Under the Hood-20240130-161637.yaml b/.changes/1.8.0/Under the Hood-20240130-161637.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240130-161637.yaml rename to .changes/1.8.0/Under the Hood-20240130-161637.yaml diff --git a/.changes/unreleased/Under the Hood-20240201-125416.yaml b/.changes/1.8.0/Under the Hood-20240201-125416.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240201-125416.yaml rename to .changes/1.8.0/Under the Hood-20240201-125416.yaml diff --git a/.changes/1.8.0/Under the Hood-20240207-122342.yaml b/.changes/1.8.0/Under the Hood-20240207-122342.yaml new file mode 100644 index 00000000000..f2e4a0ed3fe --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240207-122342.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Move manifest nodes to artifacts +time: 2024-02-07T12:23:42.909049-05:00 +custom: + Author: gshank + Issue: "9388" diff --git a/.changes/unreleased/Under the Hood-20240208-120620.yaml b/.changes/1.8.0/Under the Hood-20240208-120620.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240208-120620.yaml rename to .changes/1.8.0/Under the Hood-20240208-120620.yaml diff --git a/.changes/unreleased/Under the Hood-20240216-104002.yaml b/.changes/1.8.0/Under the Hood-20240216-104002.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20240216-104002.yaml rename to .changes/1.8.0/Under the Hood-20240216-104002.yaml diff --git a/.changes/1.8.0/Under the Hood-20240221-104518.yaml 
b/.changes/1.8.0/Under the Hood-20240221-104518.yaml new file mode 100644 index 00000000000..56c077fcd1c --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240221-104518.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Restrict protobuf to major version 4. +time: 2024-02-21T10:45:18.315195-05:00 +custom: + Author: peterallenwebb + Issue: "9566" diff --git a/.changes/1.8.0/Under the Hood-20240221-145058.yaml b/.changes/1.8.0/Under the Hood-20240221-145058.yaml new file mode 100644 index 00000000000..a847bb68c53 --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240221-145058.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Make dbt-core compatible with Python 3.12 +time: 2024-02-21T14:50:58.983559Z +custom: + Author: l1xnan aranke + Issue: "9007" diff --git a/.changes/1.8.0/Under the Hood-20240222-115245.yaml b/.changes/1.8.0/Under the Hood-20240222-115245.yaml new file mode 100644 index 00000000000..a2d1bbcac10 --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240222-115245.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Remove references to dbt.tracking and dbt.flags from dbt/artifacts +time: 2024-02-22T11:52:45.044853-06:00 +custom: + Author: emmyoop + Issue: "9390" diff --git a/.changes/1.8.0/Under the Hood-20240223-092330.yaml b/.changes/1.8.0/Under the Hood-20240223-092330.yaml new file mode 100644 index 00000000000..71e5903b4ad --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240223-092330.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Remove unused key `wildcard` from MethodName enum +time: 2024-02-23T09:23:30.029245-05:00 +custom: + Author: asweet + Issue: "9641" diff --git a/.changes/1.8.0/Under the Hood-20240223-115021.yaml b/.changes/1.8.0/Under the Hood-20240223-115021.yaml new file mode 100644 index 00000000000..ccc1a381124 --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240223-115021.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Implement primary key inference for model nodes +time: 2024-02-23T11:50:21.257494-08:00 +custom: + Author: aliceliu + Issue: "9652" diff --git a/.changes/1.8.0/Under the Hood-20240226-141038.yaml b/.changes/1.8.0/Under the Hood-20240226-141038.yaml new file mode 100644 index 00000000000..6ea389b997e --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240226-141038.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Define UnitTestDefinition resource in dbt/artifacts/resources +time: 2024-02-26T14:10:38.807154-05:00 +custom: + Author: michelleark + Issue: "9667" diff --git a/.changes/1.8.0/Under the Hood-20240226-184258.yaml b/.changes/1.8.0/Under the Hood-20240226-184258.yaml new file mode 100644 index 00000000000..06c0f5e029a --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240226-184258.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Use Manifest instead of WritableManifest in PreviousState and _get_deferred_manifest +time: 2024-02-26T18:42:58.740808-05:00 +custom: + Author: michelleark + Issue: "9567" diff --git a/.changes/1.8.0/Under the Hood-20240309-141054.yaml b/.changes/1.8.0/Under the Hood-20240309-141054.yaml new file mode 100644 index 00000000000..4dff658a8c1 --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240309-141054.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Improve dbt CLI speed +time: 2024-03-09T14:10:54.549618-05:00 +custom: + Author: dwreeves + Issue: "4627" diff --git a/.changes/1.8.0/Under the Hood-20240325-172059.yaml b/.changes/1.8.0/Under the Hood-20240325-172059.yaml new file mode 100644 index 00000000000..c53e1d390cb --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240325-172059.yaml @@ 
-0,0 +1,6 @@ +kind: Under the Hood +body: Include node_info in various Result events +time: 2024-03-25T17:20:59.445718-04:00 +custom: + Author: gshank + Issue: "9619" diff --git a/.changes/1.8.0/Under the Hood-20240412-132000.yaml b/.changes/1.8.0/Under the Hood-20240412-132000.yaml new file mode 100644 index 00000000000..794e9ca287b --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240412-132000.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Remove non dbt.artifacts dbt.* imports from dbt/artifacts +time: 2024-04-12T13:20:00.017737-07:00 +custom: + Author: michelleark + Issue: "9926" diff --git a/.changes/1.8.0/Under the Hood-20240412-134502.yaml b/.changes/1.8.0/Under the Hood-20240412-134502.yaml new file mode 100644 index 00000000000..62d1ebb859b --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240412-134502.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Migrate to using `error_tag` provided by `dbt-common` +time: 2024-04-12T13:45:02.879023-07:00 +custom: + Author: QMalcolm + Issue: "9914" diff --git a/.changes/1.8.0/Under the Hood-20240416-150030.yaml b/.changes/1.8.0/Under the Hood-20240416-150030.yaml new file mode 100644 index 00000000000..b57a01a6cc6 --- /dev/null +++ b/.changes/1.8.0/Under the Hood-20240416-150030.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Add a test for semantic manifest and move test fixtures needed for it +time: 2024-04-16T15:00:30.614286-07:00 +custom: + Author: ChenyuLInx + Issue: "9665" diff --git a/.changes/unreleased/Features-20240422-173703.yaml b/.changes/unreleased/Features-20240422-173703.yaml new file mode 100644 index 00000000000..3c957af40c1 --- /dev/null +++ b/.changes/unreleased/Features-20240422-173703.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Add require_explicit_package_overrides_for_builtin_materializations to dbt_project.yml flags, which can be used to opt-out of overriding built-in materializations from packages +time: 2024-04-22T17:37:03.892268-04:00 +custom: + Author: michelleark + Issue: "10007" diff --git a/.changes/unreleased/Fixes-20240410-181741.yaml b/.changes/unreleased/Fixes-20240410-181741.yaml new file mode 100644 index 00000000000..66ec5e7d373 --- /dev/null +++ b/.changes/unreleased/Fixes-20240410-181741.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Add NodeRelation to SavedQuery Export +time: 2024-04-10T18:17:41.42533+01:00 +custom: + Author: aranke + Issue: "9534" diff --git a/.changes/unreleased/Fixes-20240423-120112.yaml b/.changes/unreleased/Fixes-20240423-120112.yaml new file mode 100644 index 00000000000..59e763451dc --- /dev/null +++ b/.changes/unreleased/Fixes-20240423-120112.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Simplify error message if test severity isn't 'warn' or 'error' +time: 2024-04-23T12:01:12.374904+02:00 +custom: + Author: aranke + Issue: "9715" diff --git a/.changes/unreleased/Under the Hood-20240418-172528.yaml b/.changes/unreleased/Under the Hood-20240418-172528.yaml new file mode 100644 index 00000000000..50743d8a755 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20240418-172528.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Raise deprecation warning if installed package overrides built-in materialization +time: 2024-04-18T17:25:28.37886-04:00 +custom: + Author: michelleark + Issue: "9971" diff --git a/.changie.yaml b/.changie.yaml index ba0590da8d3..23e802f190d 100644 --- a/.changie.yaml +++ b/.changie.yaml @@ -31,43 +31,7 @@ kinds: - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}}) - label: Under the Hood - label: Dependencies 
- changeFormat: |- - {{- $PRList := list }} - {{- $changes := splitList " " $.Custom.PR }} - {{- range $pullrequest := $changes }} - {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }} - {{- $PRList = append $PRList $changeLink }} - {{- end -}} - - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}}) - skipGlobalChoices: true - additionalChoices: - - key: Author - label: GitHub Username(s) (separated by a single space if multiple) - type: string - minLength: 3 - - key: PR - label: GitHub Pull Request Number (separated by a single space if multiple) - type: string - minLength: 1 - label: Security - changeFormat: |- - {{- $PRList := list }} - {{- $changes := splitList " " $.Custom.PR }} - {{- range $pullrequest := $changes }} - {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }} - {{- $PRList = append $PRList $changeLink }} - {{- end -}} - - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}}) - skipGlobalChoices: true - additionalChoices: - - key: Author - label: GitHub Username(s) (separated by a single space if multiple) - type: string - minLength: 3 - - key: PR - label: GitHub Pull Request Number (separated by a single space if multiple) - type: string - minLength: 1 newlines: afterChangelogHeader: 1 @@ -106,18 +70,10 @@ footerFormat: | {{- $changeList := splitList " " $change.Custom.Author }} {{- $IssueList := list }} {{- $changeLink := $change.Kind }} - {{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }} - {{- $changes := splitList " " $change.Custom.PR }} - {{- range $issueNbr := $changes }} - {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $issueNbr }} - {{- $IssueList = append $IssueList $changeLink }} - {{- end -}} - {{- else }} - {{- $changes := splitList " " $change.Custom.Issue }} - {{- range $issueNbr := $changes }} - {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }} - {{- $IssueList = append $IssueList $changeLink }} - {{- end -}} + {{- $changes := splitList " " $change.Custom.Issue }} + {{- range $issueNbr := $changes }} + {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }} + {{- $IssueList = append $IssueList $changeLink }} {{- end }} {{- /* check if this contributor has other changes associated with them already */}} {{- if hasKey $contributorDict $author }} diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 935053cd93d..698a20b0539 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -13,31 +13,6 @@ # the core team as a whole will be assigned * @dbt-labs/core-team -### ADAPTERS - -# Adapter interface ("base" + "sql" adapter defaults, cache) -/core/dbt/adapters @dbt-labs/core-adapters - -# Global project (default macros + materializations), starter project -/core/dbt/include @dbt-labs/core-adapters - -# Postgres plugin -/plugins/ @dbt-labs/core-adapters -/plugins/postgres/setup.py @dbt-labs/core-adapters - -# Functional tests for adapter plugins -/tests/adapter @dbt-labs/core-adapters - -### TESTS - -# Overlapping ownership for vast majority of unit + functional tests - -# Perf regression testing framework -# This excludes the test project files itself since those aren't specific -# framework changes (excluded by not setting an owner next to it- no owner) -/performance @nathaniel-may 
-/performance/projects - ### ARTIFACTS /schemas/dbt @dbt-labs/cloud-artifacts diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 1cbfb7a66a0..b20069df9b6 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -35,6 +35,6 @@ jobs: github.event.pull_request.merged && contains(github.event.label.name, 'backport') steps: - - uses: tibdex/backport@v2.0.3 + - uses: tibdex/backport@v2.0.4 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/bot-changelog.yml b/.github/workflows/bot-changelog.yml index cc545482dd8..c85343ea333 100644 --- a/.github/workflows/bot-changelog.yml +++ b/.github/workflows/bot-changelog.yml @@ -56,4 +56,4 @@ jobs: commit_message: "Add automated changelog yaml from template for bot PR" changie_kind: ${{ matrix.changie_kind }} label: ${{ matrix.label }} - custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n PR: ${{ github.event.pull_request.number }}" + custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n Issue: ${{ github.event.pull_request.number }}" diff --git a/.github/workflows/check-artifact-changes.yml b/.github/workflows/check-artifact-changes.yml index 8ab2abcfd74..ce4cb4b4ca0 100644 --- a/.github/workflows/check-artifact-changes.yml +++ b/.github/workflows/check-artifact-changes.yml @@ -10,6 +10,7 @@ on: jobs: check-artifact-changes: runs-on: ubuntu-latest + if: ${{ !contains(github.event.pull_request.labels.*.name, 'artifact_minor_upgrade') }} steps: - name: Checkout code uses: actions/checkout@v4 @@ -17,26 +18,24 @@ jobs: fetch-depth: 0 - name: Check for changes in core/dbt/artifacts - id: check_changes - run: | - # Check for changes in dbt/artifacts - CHANGED_FILES=$(git diff --name-only origin/${GITHUB_BASE_REF} origin/${GITHUB_HEAD_REF} | grep 'core/dbt/artifacts/') - if [ ! -z "$CHANGED_FILES" ]; then - echo "CHANGED=true" >> $GITHUB_ENV - echo "Changed files in core/dbt/artifacts:" - echo "$CHANGED_FILES" - else - echo "CHANGED=false" >> $GITHUB_ENV - fi + # https://github.com/marketplace/actions/paths-changes-filter + uses: dorny/paths-filter@v3 + id: check_artifact_changes + with: + filters: | + artifacts_changed: + - 'core/dbt/artifacts/**' + list-files: shell - name: Fail CI if artifacts have changed - if: env.CHANGED == 'true' && !contains(github.event.pull_request.labels.*.name, 'artifact_minor_upgrade') + if: steps.check_artifact_changes.outputs.artifacts_changed == 'true' run: | echo "CI failure: Artifact changes checked in core/dbt/artifacts directory." + echo "Files changed: ${{ steps.check_artifact_changes.outputs.artifacts_changed_files }}" echo "To bypass this check, confirm that the change is not breaking (https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/artifacts/README.md#breaking-changes) and add the 'artifact_minor_upgrade' label to the PR." exit 1 - name: CI check passed - if: env.CHANGED == 'false' || contains(github.event.pull_request.labels.*.name, 'artifact_minor_upgrade') + if: steps.check_artifact_changes.outputs.artifacts_changed == 'false' run: | - echo "No prohibited artifact changes checked in core/dbt/artifacts, or 'artifact_minor_upgrade' label found. CI check passed." + echo "No prohibited artifact changes found in core/dbt/artifacts. CI check passed." 
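The dorny/paths-filter step above replaces the hand-rolled git diff check. A rough local equivalent for previewing what the filter will match (a sketch only, not part of this change; it assumes origin/main stands in for the PR's base ref):

    # List files under core/dbt/artifacts/ that differ between the merge-base with
    # main and the current HEAD; any output here is what would set
    # artifacts_changed=true and trip the "Fail CI if artifacts have changed" step.
    git fetch origin main
    git diff --name-only origin/main...HEAD -- core/dbt/artifacts/
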
diff --git a/.github/workflows/community-label.yml b/.github/workflows/community-label.yml new file mode 100644 index 00000000000..3b75a3c14e7 --- /dev/null +++ b/.github/workflows/community-label.yml @@ -0,0 +1,39 @@ +# **what?** +# Label a PR with a `community` label when a PR is opened by a user outside core/adapters + +# **why?** +# To streamline triage and ensure that community contributions are recognized and prioritized + +# **when?** +# When a PR is opened, not in draft or moved from draft to ready for review + + +name: Label community PRs + +on: + # have to use pull_request_target since community PRs come from forks + pull_request_target: + types: [opened, ready_for_review] + +defaults: + run: + shell: bash + +permissions: + pull-requests: write # labels PRs + contents: read # reads team membership + +jobs: + open_issues: + # If this PR already has the community label, no need to relabel it + # If this PR is opened and not draft, determine if it needs to be labeled + # if the PR is converted out of draft, determine if it needs to be labeled + if: | + (!contains(github.event.pull_request.labels.*.name, 'community') && + (github.event.action == 'opened' && github.event.pull_request.draft == false ) || + github.event.action == 'ready_for_review' ) + uses: dbt-labs/actions/.github/workflows/label-community.yml@main + with: + github_team: 'core-group' + label: 'community' + secrets: inherit diff --git a/.github/workflows/docs-issue.yml b/.github/workflows/docs-issue.yml index 00a098df827..a0c093832fc 100644 --- a/.github/workflows/docs-issue.yml +++ b/.github/workflows/docs-issue.yml @@ -1,19 +1,18 @@ # **what?** -# Open an issue in docs.getdbt.com when a PR is labeled `user docs` +# Open an issue in docs.getdbt.com when an issue is labeled `user docs` and closed as completed # **why?** # To reduce barriers for keeping docs up to date # **when?** -# When a PR is labeled `user docs` and is merged. Runs on pull_request_target to run off the workflow already merged, -# not the workflow that existed on the PR branch. This allows old PRs to get comments. +# When an issue is labeled `user docs` and is closed as completed. Can be labeled before or after the issue is closed. -name: Open issues in docs.getdbt.com repo when a PR is labeled -run-name: "Open an issue in docs.getdbt.com for PR #${{ github.event.pull_request.number }}" +name: Open issues in docs.getdbt.com repo when an issue is labeled +run-name: "Open an issue in docs.getdbt.com for issue #${{ github.event.issue.number }}" on: - pull_request_target: + issues: types: [labeled, closed] defaults: @@ -21,23 +20,22 @@ defaults: shell: bash permissions: - issues: write # opens new issues - pull-requests: write # comments on PRs - + issues: write # comments on issues jobs: open_issues: - # we only want to run this when the PR has been merged or the label in the labeled event is `user docs`. Otherwise it runs the + # we only want to run this when the issue is closed as completed and the label `user docs` has been assigned. + # If this logic does not exist in this workflow, it runs the # risk of duplication of issues being created due to merge and label both triggering this workflow to run and neither having # generated the comment before the other runs. This lives here instead of the shared workflow because this is where we # decide if it should run or not.
if: | - (github.event.pull_request.merged == true) && - ((github.event.action == 'closed' && contains( github.event.pull_request.labels.*.name, 'user docs')) || - (github.event.action == 'labeled' && github.event.label.name == 'user docs')) + (github.event.issue.state == 'closed' && + github.event.issue.state_reason == 'completed' && + contains( github.event.issue.labels.*.name, 'user docs')) uses: dbt-labs/actions/.github/workflows/open-issue-in-repo.yml@main with: issue_repository: "dbt-labs/docs.getdbt.com" - issue_title: "Docs Changes Needed from ${{ github.event.repository.name }} PR #${{ github.event.pull_request.number }}" + issue_title: "Docs Changes Needed from ${{ github.event.repository.name }} Issue #${{ github.event.issue.number }}" issue_body: "At a minimum, update body to include a link to the page on docs.getdbt.com requiring updates and what part(s) of the page you would like to see updated." secrets: inherit diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1819146369a..fc9001a8fa1 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -74,7 +74,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ] env: TOXENV: "unit" @@ -107,7 +107,7 @@ jobs: - name: Upload Unit Test Coverage to Codecov if: ${{ matrix.python-version == '3.11' }} - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} flags: unit @@ -157,7 +157,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] + python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ] os: [ubuntu-20.04] split-group: ${{ fromJson(needs.integration-metadata.outputs.split-groups) }} include: ${{ fromJson(needs.integration-metadata.outputs.include) }} @@ -213,15 +213,15 @@ jobs: CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: always() with: - name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }} + name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.split-group }}_${{ steps.date.outputs.date }} path: ./logs - name: Upload Integration Test Coverage to Codecov if: ${{ matrix.python-version == '3.11' }} - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} flags: integration @@ -288,7 +288,7 @@ jobs: - name: Install source distributions # ignore dbt-1.0.0, which intentionally raises an error when installed from source run: | - find ./dist/dbt-[a-z]*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/ + find ./dist/*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/ - name: Check source distributions run: | diff --git a/.github/workflows/model_performance.yml b/.github/workflows/model_performance.yml index 2801d586256..8d238ac574e 100644 --- a/.github/workflows/model_performance.yml +++ b/.github/workflows/model_performance.yml @@ -195,7 +195,7 @@ jobs: - name: '[DEBUG] ls baseline directory after run' run: ls -R performance/baselines/ - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: baseline path: performance/baselines/${{ needs.set-variables.outputs.release_id }}/ @@ -253,7 +253,7 @@ jobs: push: 'origin origin/${{ matrix.target-branch }}' - 
name: Create Pull Request - uses: peter-evans/create-pull-request@v5 + uses: peter-evans/create-pull-request@v6 with: author: 'Github Build Bot ' base: ${{ matrix.base-branch }} diff --git a/.github/workflows/nightly-release.yml b/.github/workflows/nightly-release.yml index c6b5dafaca4..d4f2e5bab15 100644 --- a/.github/workflows/nightly-release.yml +++ b/.github/workflows/nightly-release.yml @@ -20,6 +20,7 @@ on: permissions: contents: write # this is the permission that allows creating a new release + packages: write # this is the permission that allows Docker release defaults: run: diff --git a/.github/workflows/release-docker.yml b/.github/workflows/release-docker.yml deleted file mode 100644 index 63248784fc8..00000000000 --- a/.github/workflows/release-docker.yml +++ /dev/null @@ -1,100 +0,0 @@ -# **what?** -# This workflow will generate a series of docker images for dbt and push them to the github container registry -# -# **why?** -# Docker images for dbt are used in a number of important places throughout the dbt ecosystem. -# This is how we keep those images up-to-date. -# -# **when?** -# This is triggered manually -name: Docker release - -permissions: - packages: write - -on: - workflow_dispatch: - inputs: - package: - description: The package to release - type: choice - options: - - dbt-core - - dbt-bigquery - - dbt-postgres - - dbt-redshift - - dbt-snowflake - - dbt-spark - required: true - version_number: - description: The version number to release as a SemVer (e.g. 1.0.0b1, without `latest` or `v`) - required: true - dry_run: - description: Dry Run (don't publish) - type: boolean - default: false - -jobs: - version_metadata: - name: Get version metadata - runs-on: ubuntu-latest - outputs: - fully_qualified_tags: ${{ steps.tags.outputs.fully_qualified_tags }} - steps: - - name: Check out the repo - uses: actions/checkout@v4 - - - name: Get the tags to publish - id: tags - uses: ./.github/actions/latest-wrangler - with: - package_name: ${{ inputs.package }} - new_version: ${{ inputs.version_number }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - setup_image_builder: - name: Set up Docker image builder - runs-on: ubuntu-latest - needs: [version_metadata] - steps: - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - build_and_push: - name: Build images and push to GHCR - runs-on: ubuntu-latest - needs: [setup_image_builder, version_metadata] - steps: - - name: Get docker build arg - id: build_arg - run: | - BUILD_ARG_NAME=$(echo ${{ inputs.package }} | sed 's/\-/_/g') - BUILD_ARG_VALUE=$(echo ${{ inputs.package }} | sed 's/postgres/core/g') - echo "name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT - echo "value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT - - - name: Log in to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Log publishing configuration - shell: bash - run: | - echo Package: ${{ inputs.package }} - echo Version: ${{ inputs.version_number }} - echo Tags: ${{ needs.version_metadata.outputs.fully_qualified_tags }} - echo Build Arg Name: ${{ steps.build_arg.outputs.name }} - echo Build Arg Value: ${{ steps.build_arg.outputs.value }} - - - name: Build and push `${{ inputs.package }}` - if: ${{ !inputs.dry_run }} - uses: docker/build-push-action@v5 - with: - file: docker/Dockerfile - push: True - target: ${{ inputs.package }} - build-args: ${{ steps.build_arg.outputs.name }}_ref=${{ steps.build_arg.outputs.value }}@v${{ inputs.version_number }} - tags: ${{ 
needs.version_metadata.outputs.fully_qualified_tags }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3b9cd73baa0..116dee7cd74 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -7,6 +7,7 @@ # - run unit and integration tests against given commit; # - build and package that SHA; # - release it to GitHub and PyPI with that specific build; +# - release it to Docker # # **why?** # Ensure an automated and tested release process @@ -14,7 +15,8 @@ # **when?** # This workflow can be run manually on demand or can be called by other workflows -name: Release to GitHub and PyPI +name: "Release to GitHub, PyPI & Docker" +run-name: "Release ${{ inputs.version_number }} to GitHub, PyPI & Docker" on: workflow_dispatch: @@ -37,6 +39,11 @@ on: type: boolean default: false required: false + only_docker: + description: "Only release Docker image, skip GitHub & PyPI" + type: boolean + default: false + required: false workflow_call: inputs: target_branch: @@ -79,6 +86,7 @@ jobs: echo The release version number: ${{ inputs.version_number }} echo Test run: ${{ inputs.test_run }} echo Nightly release: ${{ inputs.nightly_release }} + echo Only Docker: ${{ inputs.only_docker }} - name: "Checkout target branch" uses: actions/checkout@v4 @@ -97,6 +105,7 @@ jobs: bump-version-generate-changelog: name: Bump package version, Generate changelog needs: [job-setup] + if: ${{ !inputs.only_docker }} uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main @@ -112,7 +121,7 @@ jobs: log-outputs-bump-version-generate-changelog: name: "[Log output] Bump package version, Generate changelog" - if: ${{ !failure() && !cancelled() }} + if: ${{ !failure() && !cancelled() && !inputs.only_docker }} needs: [bump-version-generate-changelog] @@ -126,7 +135,7 @@ jobs: build-test-package: name: Build, Test, Package - if: ${{ !failure() && !cancelled() }} + if: ${{ !failure() && !cancelled() && !inputs.only_docker }} needs: [job-setup, bump-version-generate-changelog] uses: dbt-labs/dbt-release/.github/workflows/build.yml@main @@ -147,7 +156,7 @@ jobs: github-release: name: GitHub Release - if: ${{ !failure() && !cancelled() }} + if: ${{ !failure() && !cancelled() && !inputs.only_docker }} needs: [bump-version-generate-changelog, build-test-package] @@ -174,6 +183,51 @@ jobs: PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }} + determine-docker-package: + # dbt-postgres exists within dbt-core for versions 1.7 and earlier but is a separate package for 1.8 and later. 
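The version gate described in the comment above reduces to a small branch on the parsed minor version. A standalone bash sketch of that logic (the hard-coded minor is an assumption, standing in for the parse-semver action output used below):

    # dbt-postgres ships separately from 1.8 onward, so only dbt-core gets a Docker
    # release; for 1.7 and earlier, both images are built from this repo.
    minor=8   # assumed stand-in for steps.semver.outputs.minor
    if [ "$minor" -ge 8 ]; then
      matrix='{"package":["dbt-core"]}'
    else
      matrix='{"package":["dbt-core","dbt-postgres"]}'
    fi
    echo "matrix=$matrix"   # consumed by the docker-release job via fromJson()
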
+ # determine if we need to release dbt-core or both dbt-core and dbt-postgres + name: Determine Docker Package + if: ${{ !failure() && !cancelled() }} + runs-on: ubuntu-latest + needs: [pypi-release] + outputs: + matrix: ${{ steps.determine-docker-package.outputs.matrix }} + steps: + - name: "Audit Version And Parse Into Parts" + id: semver + uses: dbt-labs/actions/parse-semver@v1.1.0 + with: + version: ${{ inputs.version_number }} + + - name: "Determine Packages to Release" + id: determine-docker-package + run: | + if [ ${{ steps.semver.outputs.minor }} -ge 8 ]; then + json_output={\"package\":[\"dbt-core\"]} + else + json_output={\"package\":[\"dbt-core\",\"dbt-postgres\"]} + fi + echo "matrix=$json_output" >> $GITHUB_OUTPUT + + docker-release: + name: "Docker Release for ${{ matrix.package }}" + needs: [determine-docker-package] + # We cannot release to docker on a test run because it uses the tag in GitHub as + # what we need to release but draft releases don't actually tag the commit so it + # finds nothing to release + if: ${{ !failure() && !cancelled() && (!inputs.test_run || inputs.only_docker) }} + strategy: + matrix: ${{fromJson(needs.determine-docker-package.outputs.matrix)}} + + permissions: + packages: write + + uses: dbt-labs/dbt-release/.github/workflows/release-docker.yml@main + with: + package: ${{ matrix.package }} + version_number: ${{ inputs.version_number }} + test_run: ${{ inputs.test_run }} + slack-notification: name: Slack Notification if: ${{ failure() && (!inputs.test_run || inputs.nightly_release) }} @@ -184,6 +238,7 @@ jobs: build-test-package, github-release, pypi-release, + docker-release, ] uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main diff --git a/.github/workflows/schema-check.yml b/.github/workflows/schema-check.yml index 425c3db7d2e..18d8f1b8830 100644 --- a/.github/workflows/schema-check.yml +++ b/.github/workflows/schema-check.yml @@ -26,7 +26,7 @@ permissions: read-all env: LATEST_SCHEMA_PATH: ${{ github.workspace }}/new_schemas - SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}//schema_schanges.txt + SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}/schema_schanges.txt DBT_REPO_DIRECTORY: ${{ github.workspace }}/dbt SCHEMA_REPO_DIRECTORY: ${{ github.workspace }}/schemas.getdbt.com @@ -83,7 +83,7 @@ jobs: fi - name: Upload schema diff - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: ${{ failure() }} with: name: 'schema_schanges.txt' diff --git a/CHANGELOG.md b/CHANGELOG.md index 9acba2135bd..8c4e9dcf37f 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,298 @@ - "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version. - Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). 
For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry) +## dbt-core 1.8.0-b3 - April 18, 2024 + +### Features + +- Support scrubbing secret vars ([#7247](https://github.com/dbt-labs/dbt-core/issues/7247)) +- Add wildcard support to the group selector method ([#9811](https://github.com/dbt-labs/dbt-core/issues/9811)) +- source freshness precomputes metadata-based freshness in batch, if possible ([#8705](https://github.com/dbt-labs/dbt-core/issues/8705)) +- Better error message when trying to select a disabled model ([#9747](https://github.com/dbt-labs/dbt-core/issues/9747)) +- Support SQL in unit testing fixtures ([#9405](https://github.com/dbt-labs/dbt-core/issues/9405)) + +### Fixes + +- fix configuration of turning test warnings into failures with WARN_ERROR_OPTIONS ([#7761](https://github.com/dbt-labs/dbt-core/issues/7761)) +- Fix conflict with newer versions of Snowplow tracker ([#8719](https://github.com/dbt-labs/dbt-core/issues/8719)) +- Only create the packages-install-path / dbt_packages folder during dbt deps ([#6985](https://github.com/dbt-labs/dbt-core/issues/6985), [#9584](https://github.com/dbt-labs/dbt-core/issues/9584)) +- Exclude password-like fields when considering reparse ([#9795](https://github.com/dbt-labs/dbt-core/issues/9795)) +- Fixed query comments test ([#9860](https://github.com/dbt-labs/dbt-core/issues/9860)) +- Begin warning people about spaces in model names ([#9397](https://github.com/dbt-labs/dbt-core/issues/9397)) +- Disambiguate FreshnessConfigProblem error message ([#9891](https://github.com/dbt-labs/dbt-core/issues/9891)) + +### Under the Hood + +- Remove non dbt.artifacts dbt.* imports from dbt/artifacts ([#9926](https://github.com/dbt-labs/dbt-core/issues/9926)) +- Migrate to using `error_tag` provided by `dbt-common` ([#9914](https://github.com/dbt-labs/dbt-core/issues/9914)) +- Add a test for semantic manifest and move test fixtures needed for it ([#9665](https://github.com/dbt-labs/dbt-core/issues/9665)) + +### Dependencies + +- Relax pathspec upper bound version restriction ([#9373](https://github.com/dbt-labs/dbt-core/issues/9373)) +- Bump python from 3.10.7-slim-bullseye to 3.11.2-slim-bullseye in /docker ([#9687](https://github.com/dbt-labs/dbt-core/issues/9687)) +- Remove duplicate dependency of protobuf in dev-requirements ([#9830](https://github.com/dbt-labs/dbt-core/issues/9830)) +- Bump black from 23.3.0 to >=24.3.0,<25.0 ([#8074](https://github.com/dbt-labs/dbt-core/issues/8074)) + +### Security + +- Bump sqlparse to >=0.5.0, <0.6.0 to address GHSA-2m57-hf25-phgg ([#9951](https://github.com/dbt-labs/dbt-core/issues/9951)) + +### Contributors +- [@SamuelBFavarin](https://github.com/SamuelBFavarin) ([#9747](https://github.com/dbt-labs/dbt-core/issues/9747)) +- [@akurdyukov](https://github.com/akurdyukov) ([#8719](https://github.com/dbt-labs/dbt-core/issues/8719)) +- [@damian3031](https://github.com/damian3031) ([#9860](https://github.com/dbt-labs/dbt-core/issues/9860)) +- [@edgarrmondragon](https://github.com/edgarrmondragon) ([#8719](https://github.com/dbt-labs/dbt-core/issues/8719)) +- [@emmoop](https://github.com/emmoop) ([#9951](https://github.com/dbt-labs/dbt-core/issues/9951)) +- [@heysweet](https://github.com/heysweet) ([#9811](https://github.com/dbt-labs/dbt-core/issues/9811)) +- [@jx2lee](https://github.com/jx2lee) ([#7761](https://github.com/dbt-labs/dbt-core/issues/7761)) +- [@nielspardon](https://github.com/nielspardon)
([#7247](https://github.com/dbt-labs/dbt-core/issues/7247)) +- [@niteshy](https://github.com/niteshy) ([#9830](https://github.com/dbt-labs/dbt-core/issues/9830)) +- [@rzjfr](https://github.com/rzjfr) ([#9373](https://github.com/dbt-labs/dbt-core/issues/9373)) + + +## dbt-core 1.8.0-b2 - April 03, 2024 + +### Features + +- Global config for --target and --profile CLI flags and DBT_TARGET and DBT_PROFILE environment variables. ([#7798](https://github.com/dbt-labs/dbt-core/issues/7798)) +- Allow excluding resource types for build, list, and clone commands, and provide env vars ([#9237](https://github.com/dbt-labs/dbt-core/issues/9237)) +- SourceDefinition.meta represents source-level and table-level meta properties, instead of only table-level ([#9766](https://github.com/dbt-labs/dbt-core/issues/9766)) +- Allow metrics in semantic layer filters. ([#9804](https://github.com/dbt-labs/dbt-core/issues/9804)) + +### Fixes + +- fix lock-file bad indentation ([#9319](https://github.com/dbt-labs/dbt-core/issues/9319)) +- Tighten exception handling to avoid worker thread hangs. ([#9583](https://github.com/dbt-labs/dbt-core/issues/9583)) +- Do not add duplicate input_measures ([#9360](https://github.com/dbt-labs/dbt-core/issues/9360)) +- Throw a ParsingError if a primary key constraint is defined on multiple columns or at both the column and model level. ([#9581](https://github.com/dbt-labs/dbt-core/issues/9581)) +- Bug fix: don't parse Jinja in filters for input metrics or measures. ([#9582](https://github.com/dbt-labs/dbt-core/issues/9582)) +- Fix traceback parsing for exceptions raised due to csv fixtures moved into or out of fixture/subfolders. ([#9570](https://github.com/dbt-labs/dbt-core/issues/9570)) +- Fix partial parsing `KeyError` on deleted schema files ([#8860](https://github.com/dbt-labs/dbt-core/issues/8860)) +- Support saved queries in `dbt list` ([#9532](https://github.com/dbt-labs/dbt-core/issues/9532)) +- include sources in catalog.json when over 100 relations selected for catalog generation ([#9755](https://github.com/dbt-labs/dbt-core/issues/9755)) +- Support overriding macros in packages in unit testing ([#9624](https://github.com/dbt-labs/dbt-core/issues/9624)) +- Handle exceptions for failing on-run-* hooks in source freshness ([#9511](https://github.com/dbt-labs/dbt-core/issues/9511)) +- Validation of unit test parsing for incremental models ([#9593](https://github.com/dbt-labs/dbt-core/issues/9593)) +- Fix use of retry command on command using defer ([#9770](https://github.com/dbt-labs/dbt-core/issues/9770)) +- Ensure `dbt.invoke(args)` does not modify the `args` variable ([#8938](https://github.com/dbt-labs/dbt-core/issues/8938), [#9787](https://github.com/dbt-labs/dbt-core/issues/9787)) +- Unit test path outputs ([#9608](https://github.com/dbt-labs/dbt-core/issues/9608)) +- Fix assorted source freshness edge cases so check is run or actionable information is given ([#9078](https://github.com/dbt-labs/dbt-core/issues/9078)) +- Fix Docker release process to account for both historical and current versions of `dbt-postgres` ([#9827](https://github.com/dbt-labs/dbt-core/issues/9827)) + +### Docs + +- Add analytics for dbt.com ([dbt-docs/#430](https://github.com/dbt-labs/dbt-docs/issues/430)) + +### Under the Hood + +- Remove unused key `wildcard` from MethodName enum ([#9641](https://github.com/dbt-labs/dbt-core/issues/9641)) +- Improve dbt CLI speed ([#4627](https://github.com/dbt-labs/dbt-core/issues/4627)) +- Include node_info in various Result events
([#9619](https://github.com/dbt-labs/dbt-core/issues/9619)) + +### Dependencies + +- Bump actions/upload-artifact from 3 to 4 ([#9470](https://github.com/dbt-labs/dbt-core/pull/9470)) +- Restrict protobuf to 4.* versions ([#9566](https://github.com/dbt-labs/dbt-core/pull/9566)) +- Bump codecov/codecov-action from 3 to 4 ([#9659](https://github.com/dbt-labs/dbt-core/pull/9659)) + +### Contributors +- [@asweet](https://github.com/asweet) ([#9641](https://github.com/dbt-labs/dbt-core/issues/9641)) +- [@b-per](https://github.com/b-per) ([dbt-docs/#430](https://github.com/dbt-labs/dbt-docs/issues/430)) +- [@barton996](https://github.com/barton996) ([#7798](https://github.com/dbt-labs/dbt-core/issues/7798)) +- [@courtneyholcomb](https://github.com/courtneyholcomb) ([#9804](https://github.com/dbt-labs/dbt-core/issues/9804), [#9582](https://github.com/dbt-labs/dbt-core/issues/9582)) +- [@dwreeves](https://github.com/dwreeves) ([#4627](https://github.com/dbt-labs/dbt-core/issues/4627)) +- [@jx2lee](https://github.com/jx2lee) ([#9319](https://github.com/dbt-labs/dbt-core/issues/9319)) +- [@slothkong](https://github.com/slothkong) ([#9570](https://github.com/dbt-labs/dbt-core/issues/9570)) + +## dbt-core 1.8.0-b1 - February 28, 2024 + +### Breaking Changes + +- Remove adapter.get_compiler interface ([#9148](https://github.com/dbt-labs/dbt-core/issues/9148)) +- Move AdapterLogger to adapters folder ([#9151](https://github.com/dbt-labs/dbt-core/issues/9151)) +- Rm --dry-run flag from 'dbt deps --add-package', in favor of just 'dbt deps --lock' ([#9100](https://github.com/dbt-labs/dbt-core/issues/9100)) +- move event manager setup back to core, remove ref to global EVENT_MANAGER and clean up event manager functions ([#9150](https://github.com/dbt-labs/dbt-core/issues/9150)) +- Remove dbt-tests-adapter and dbt-postgres packages from dbt-core ([#9455](https://github.com/dbt-labs/dbt-core/issues/9455)) + +### Features + +- Initial implementation of unit testing ([#8287](https://github.com/dbt-labs/dbt-core/issues/8287)) +- Unit test manifest artifacts and selection ([#8295](https://github.com/dbt-labs/dbt-core/issues/8295)) +- Support config with tags & meta for unit tests ([#8294](https://github.com/dbt-labs/dbt-core/issues/8294)) +- Allow adapters to include package logs in dbt standard logging ([#7859](https://github.com/dbt-labs/dbt-core/issues/7859)) +- Enable inline csv fixtures in unit tests ([#8626](https://github.com/dbt-labs/dbt-core/issues/8626)) +- Add drop_schema_named macro ([#8025](https://github.com/dbt-labs/dbt-core/issues/8025)) +- migrate utils to common and adapters folders ([#8924](https://github.com/dbt-labs/dbt-core/issues/8924)) +- Move Agate helper client into common ([#8926](https://github.com/dbt-labs/dbt-core/issues/8926)) +- remove usage of dbt.config.PartialProject from dbt/adapters ([#8928](https://github.com/dbt-labs/dbt-core/issues/8928)) +- Add exports to SavedQuery spec ([#8892](https://github.com/dbt-labs/dbt-core/issues/8892)) +- Support unit testing incremental models ([#8422](https://github.com/dbt-labs/dbt-core/issues/8422)) +- Add support of csv file fixtures to unit testing ([#8290](https://github.com/dbt-labs/dbt-core/issues/8290)) +- Remove legacy logger ([#8027](https://github.com/dbt-labs/dbt-core/issues/8027)) +- Unit tests support --defer and state:modified ([#8517](https://github.com/dbt-labs/dbt-core/issues/8517)) +- Support setting export configs hierarchically via saved query and project configs ([#8956](https://github.com/dbt-labs/dbt-core/issues/8956)) +- 
Support source inputs in unit tests ([#8507](https://github.com/dbt-labs/dbt-core/issues/8507)) +- Use daff to render diff displayed in stdout when unit test fails ([#8558](https://github.com/dbt-labs/dbt-core/issues/8558)) +- Move unit testing to test command ([#8979](https://github.com/dbt-labs/dbt-core/issues/8979)) +- Support --empty flag for schema-only dry runs ([#8971](https://github.com/dbt-labs/dbt-core/issues/8971)) +- Support unit tests in non-root packages ([#8285](https://github.com/dbt-labs/dbt-core/issues/8285)) +- Convert the `tests` config to `data_tests` in both dbt_project.yml and schema files. ([#8699](https://github.com/dbt-labs/dbt-core/issues/8699)) +- Make fixture files full-fledged parts of the manifest and enable partial parsing ([#9067](https://github.com/dbt-labs/dbt-core/issues/9067)) +- Adds support for parsing conversion metric related properties for the semantic layer. ([#9203](https://github.com/dbt-labs/dbt-core/issues/9203)) +- In build command run unit tests before models ([#9128](https://github.com/dbt-labs/dbt-core/issues/9128)) +- Move flags from UserConfig in profiles.yml to flags in dbt_project.yml ([#9183](https://github.com/dbt-labs/dbt-core/issues/9183)) +- Added hook support for `dbt source freshness` ([#5609](https://github.com/dbt-labs/dbt-core/issues/5609)) +- Align with order of unit test output when `actual` differs from `expected` ([#9370](https://github.com/dbt-labs/dbt-core/issues/9370)) +- Added support for external nodes in unit test nodes ([#8944](https://github.com/dbt-labs/dbt-core/issues/8944)) +- Enable unit testing versioned models ([#9344](https://github.com/dbt-labs/dbt-core/issues/9344)) +- Enable list command for unit tests ([#8508](https://github.com/dbt-labs/dbt-core/issues/8508)) +- Integration Test Optimizations ([#9498](https://github.com/dbt-labs/dbt-core/issues/9498)) +- Accelerate integration tests with caching. ([#9498](https://github.com/dbt-labs/dbt-core/issues/9498)) +- Cache environment variables ([#9489](https://github.com/dbt-labs/dbt-core/issues/9489)) +- Support meta at the config level for Metric nodes ([#9441](https://github.com/dbt-labs/dbt-core/issues/9441)) +- Add cache to SavedQuery config ([#9540](https://github.com/dbt-labs/dbt-core/issues/9540)) + +### Fixes + +- For packages installed with tarball method, fetch metadata to resolve nested dependencies ([#8621](https://github.com/dbt-labs/dbt-core/issues/8621)) +- Fix partial parsing not working for semantic model change ([#8859](https://github.com/dbt-labs/dbt-core/issues/8859)) +- Handle unknown `type_code` for model contracts ([#8877](https://github.com/dbt-labs/dbt-core/issues/8877), [#8353](https://github.com/dbt-labs/dbt-core/issues/8353)) +- Rework get_catalog implementation to retain previous adapter interface semantics ([#8846](https://github.com/dbt-labs/dbt-core/issues/8846)) +- Add back contract enforcement for temporary tables on postgres ([#8857](https://github.com/dbt-labs/dbt-core/issues/8857)) +- Add version to fqn when version==0 ([#8836](https://github.com/dbt-labs/dbt-core/issues/8836)) +- Fix cased comparison in catalog-retrieval function.
([#8939](https://github.com/dbt-labs/dbt-core/issues/8939)) +- Catalog queries now assign the correct type to materialized views ([#8864](https://github.com/dbt-labs/dbt-core/issues/8864)) +- Fix compilation exception running empty seed file and support new Integer agate data_type ([#8895](https://github.com/dbt-labs/dbt-core/issues/8895)) +- Make relation filtering None-tolerant for maximal flexibility across adapters. ([#8974](https://github.com/dbt-labs/dbt-core/issues/8974)) +- Update run_results.json from previous versions of dbt to support deferral and rerun from failure ([#9010](https://github.com/dbt-labs/dbt-core/issues/9010)) +- Use MANIFEST.in to recursively include all jinja templates; fixes issue where some templates were not included in the distribution ([#9016](https://github.com/dbt-labs/dbt-core/issues/9016)) +- Fix git repository with subdirectory for Deps ([#9000](https://github.com/dbt-labs/dbt-core/issues/9000)) +- Use seed file from disk for unit testing if rows not specified in YAML config ([#8652](https://github.com/dbt-labs/dbt-core/issues/8652)) +- Fix formatting of tarball information in packages-lock.yml ([#9062](https://github.com/dbt-labs/dbt-core/issues/9062)) +- deps: Lock git packages to commit SHA during resolution ([#9050](https://github.com/dbt-labs/dbt-core/issues/9050)) +- deps: Use PackageRenderer to read package-lock.json ([#9127](https://github.com/dbt-labs/dbt-core/issues/9127)) +- Ensure we produce valid jsonschema schemas for manifest, catalog, run-results, and sources ([#8991](https://github.com/dbt-labs/dbt-core/issues/8991)) +- Get sources working again in dbt docs generate ([#9119](https://github.com/dbt-labs/dbt-core/issues/9119)) +- Fix parsing f-strings in python models ([#6976](https://github.com/dbt-labs/dbt-core/issues/6976)) +- Preserve the value of vars and the --full-refresh flags when using retry. ([#9112](https://github.com/dbt-labs/dbt-core/issues/9112)) +- Support reasonably long unit test names ([#9015](https://github.com/dbt-labs/dbt-core/issues/9015)) +- Fix back-compat parsing for model-level 'tests', source table-level 'tests', and 'tests' defined on model versions ([#9411](https://github.com/dbt-labs/dbt-core/issues/9411)) +- Fix retry command run from CLI ([#9444](https://github.com/dbt-labs/dbt-core/issues/9444)) +- Fix seed and source selection in `dbt docs generate` ([#9161](https://github.com/dbt-labs/dbt-core/issues/9161)) +- Add TestGenerateCatalogWithExternalNodes, include empty nodes in node selection during docs generate ([#9456](https://github.com/dbt-labs/dbt-core/issues/9456)) +- Fix node type plurals in FoundStats log message ([#9464](https://github.com/dbt-labs/dbt-core/issues/9464)) +- Run manifest upgrade preprocessing on any older manifest version, including v11 ([#9487](https://github.com/dbt-labs/dbt-core/issues/9487)) +- Update 'compiled_code' context member logic to route based on command ('clone' or not). Reimplement 'sql' context member as wrapper of 'compiled_code'. ([#9502](https://github.com/dbt-labs/dbt-core/issues/9502)) +- Fix bug where Semantic Layer filter strings are parsed into lists. ([#9507](https://github.com/dbt-labs/dbt-core/issues/9507)) +- Initialize invocation context before test fixtures are built. 
([#9489](https://github.com/dbt-labs/dbt-core/issues/9489)) +- When patching versioned models, set constraints after config ([#9364](https://github.com/dbt-labs/dbt-core/issues/9364)) +- only include unmodified semantic models in state:modified selection ([#9548](https://github.com/dbt-labs/dbt-core/issues/9548)) +- Set query headers when manifest is passed in to dbtRunner ([#9546](https://github.com/dbt-labs/dbt-core/issues/9546)) +- Store node_info in node associated logging events ([#9557](https://github.com/dbt-labs/dbt-core/issues/9557)) +- Fix Semantic Model Compare node relations ([#9548](https://github.com/dbt-labs/dbt-core/issues/9548)) +- Clearer no-op logging in stubbed SavedQueryRunner ([#9533](https://github.com/dbt-labs/dbt-core/issues/9533)) +- Fix node_info contextvar handling so incorrect node_info doesn't persist ([#8866](https://github.com/dbt-labs/dbt-core/issues/8866)) +- Add target-path to retry ([#8948](https://github.com/dbt-labs/dbt-core/issues/8948)) + +### Docs + +- fix get_custom_database docstring ([dbt-docs/#9003](https://github.com/dbt-labs/dbt-docs/issues/9003)) + +### Under the Hood + +- Added more type annotations. ([#8537](https://github.com/dbt-labs/dbt-core/issues/8537)) +- Add unit testing functional tests ([#8512](https://github.com/dbt-labs/dbt-core/issues/8512)) +- Remove usage of dbt.include.global_project in dbt/adapters ([#8925](https://github.com/dbt-labs/dbt-core/issues/8925)) +- Add a no-op runner for Saved Query ([#8893](https://github.com/dbt-labs/dbt-core/issues/8893)) +- remove dbt.flags.MP_CONTEXT usage in dbt/adapters ([#8967](https://github.com/dbt-labs/dbt-core/issues/8967)) +- Remove usage of dbt.flags.LOG_CACHE_EVENTS in dbt/adapters ([#8969](https://github.com/dbt-labs/dbt-core/issues/8969)) +- Move CatalogRelationTypes test case to the shared test suite to be reused by adapter maintainers ([#8952](https://github.com/dbt-labs/dbt-core/issues/8952)) +- Treat SystemExit as an interrupt if raised during node execution. ([#n/a](https://github.com/dbt-labs/dbt-core/issues/n/a)) +- Removing unused 'documentable' ([#8871](https://github.com/dbt-labs/dbt-core/issues/8871)) +- Remove use of dbt/core exceptions in dbt/adapter ([#8920](https://github.com/dbt-labs/dbt-core/issues/8920)) +- Cache dbt plugin modules to improve integration test performance ([#9029](https://github.com/dbt-labs/dbt-core/issues/9029)) +- Consolidate deferral methods & flags ([#7965](https://github.com/dbt-labs/dbt-core/issues/7965), [#8715](https://github.com/dbt-labs/dbt-core/issues/8715)) +- Fix test_current_timestamp_matches_utc test; allow for MacOS runner system clock variance ([#9057](https://github.com/dbt-labs/dbt-core/issues/9057)) +- Remove usage of dbt.deprecations in dbt/adapters, enable core & adapter-specific event types and protos ([#8927](https://github.com/dbt-labs/dbt-core/issues/8927), [#8918](https://github.com/dbt-labs/dbt-core/issues/8918)) +- Clean up unused adapter folders ([#9123](https://github.com/dbt-labs/dbt-core/issues/9123)) +- Move column constraints into common/contracts, removing another dependency of adapters on core. ([#9024](https://github.com/dbt-labs/dbt-core/issues/9024)) +- Move dbt.semver to dbt.common.semver and update references.
([#9039](https://github.com/dbt-labs/dbt-core/issues/9039)) +- Move lowercase utils method to common ([#9180](https://github.com/dbt-labs/dbt-core/issues/9180)) +- Remove usages of dbt.clients.jinja in dbt/adapters ([#9205](https://github.com/dbt-labs/dbt-core/issues/9205)) +- Remove usage of dbt.contracts in dbt/adapters ([#9208](https://github.com/dbt-labs/dbt-core/issues/9208)) +- Remove usage of dbt.contracts.graph.nodes.ResultNode in dbt/adapters ([#9214](https://github.com/dbt-labs/dbt-core/issues/9214)) +- Introduce RelationConfig Protocol, consolidate Relation.create_from ([#9215](https://github.com/dbt-labs/dbt-core/issues/9215)) +- remove manifest from adapter.set_relations_cache signature ([#9217](https://github.com/dbt-labs/dbt-core/issues/9217)) +- remove manifest from adapter catalog method signatures ([#9218](https://github.com/dbt-labs/dbt-core/issues/9218)) +- Move BaseConfig, Metadata and various other contract classes from model_config to common/contracts/config ([#8919](https://github.com/dbt-labs/dbt-core/issues/8919)) +- Add MacroResolverProtocol, remove lazy loading of manifest in adapter.execute_macro ([#9244](https://github.com/dbt-labs/dbt-core/issues/9244)) +- pass query header context to MacroQueryStringSetter ([#9249](https://github.com/dbt-labs/dbt-core/issues/9249), [#9250](https://github.com/dbt-labs/dbt-core/issues/9250)) +- add macro_context_generator on adapter ([#9247](https://github.com/dbt-labs/dbt-core/issues/9247)) +- pass mp_context to adapter factory as argument instead of import ([#9025](https://github.com/dbt-labs/dbt-core/issues/9025)) +- have dbt-postgres use RelationConfig protocol for materialized views ([#9292](https://github.com/dbt-labs/dbt-core/issues/9292)) +- move system.py to common as dbt-bigquery relies on it to call gcloud ([#9293](https://github.com/dbt-labs/dbt-core/issues/9293)) +- Reorganizing event definitions to define core events in dbt/events rather than dbt/common ([#9152](https://github.com/dbt-labs/dbt-core/issues/9152)) +- move exceptions used only in dbt/common to dbt/common/exceptions ([#9332](https://github.com/dbt-labs/dbt-core/issues/9332)) +- Remove usage of dbt.adapters.factory in dbt/common ([#9334](https://github.com/dbt-labs/dbt-core/issues/9334)) +- Accept valid_error_names in WarnErrorOptions constructor, remove global usage of event modules ([#9337](https://github.com/dbt-labs/dbt-core/issues/9337)) +- Move result objects to dbt.artifacts ([#9193](https://github.com/dbt-labs/dbt-core/issues/9193)) +- dbt Labs OSS standardization of docs and templates. ([#9252](https://github.com/dbt-labs/dbt-core/issues/9252)) +- Add dbt-common as a dependency and remove dbt/common ([#9357](https://github.com/dbt-labs/dbt-core/issues/9357)) +- move cache exceptions to dbt/adapters ([#9362](https://github.com/dbt-labs/dbt-core/issues/9362)) +- Clean up macro contexts. ([#9422](https://github.com/dbt-labs/dbt-core/issues/9422)) +- Add the @requires.manifest decorator to the retry command.
([#9426](https://github.com/dbt-labs/dbt-core/issues/9426)) +- Move WritableManifest + Documentation to dbt/artifacts ([#9378](https://github.com/dbt-labs/dbt-core/issues/9378), [#9379](https://github.com/dbt-labs/dbt-core/issues/9379)) +- Define Macro and Group resources in dbt/artifacts ([#9381](https://github.com/dbt-labs/dbt-core/issues/9381), [#9382](https://github.com/dbt-labs/dbt-core/issues/9382)) +- Move `SavedQuery` data definition to `dbt/artifacts` ([#9386](https://github.com/dbt-labs/dbt-core/issues/9386)) +- Migrate data parts of `Metric` node to dbt/artifacts ([#9383](https://github.com/dbt-labs/dbt-core/issues/9383)) +- Move data portion of `SemanticModel` to dbt/artifacts ([#9387](https://github.com/dbt-labs/dbt-core/issues/9387)) +- Move data parts of `Exposure` class to dbt/artifacts ([#9380](https://github.com/dbt-labs/dbt-core/issues/9380)) +- Start using `Mergeable` from dbt-common ([#9505](https://github.com/dbt-labs/dbt-core/issues/9505)) +- Move manifest nodes to artifacts ([#9388](https://github.com/dbt-labs/dbt-core/issues/9388)) +- Move data parts of `SourceDefinition` class to dbt/artifacts ([#9384](https://github.com/dbt-labs/dbt-core/issues/9384)) +- Remove uses of Replaceable class ([#7802](https://github.com/dbt-labs/dbt-core/issues/7802)) +- Make dbt-core compatible with Python 3.12 ([#9007](https://github.com/dbt-labs/dbt-core/issues/9007)) +- Restrict protobuf to major version 4. ([#9566](https://github.com/dbt-labs/dbt-core/issues/9566)) +- Remove references to dbt.tracking and dbt.flags from dbt/artifacts ([#9390](https://github.com/dbt-labs/dbt-core/issues/9390)) +- Implement primary key inference for model nodes ([#9652](https://github.com/dbt-labs/dbt-core/issues/9652)) +- Define UnitTestDefinition resource in dbt/artifacts/resources ([#9667](https://github.com/dbt-labs/dbt-core/issues/9667)) +- Use Manifest instead of WritableManifest in PreviousState and _get_deferred_manifest ([#9567](https://github.com/dbt-labs/dbt-core/issues/9567)) + +### Dependencies + +- Bump actions/checkout from 3 to 4 ([#8781](https://github.com/dbt-labs/dbt-core/pull/8781)) +- Begin using DSI 0.4.x ([#8892](https://github.com/dbt-labs/dbt-core/pull/8892)) +- Update typing-extensions version to >=4.4 ([#9012](https://github.com/dbt-labs/dbt-core/pull/9012)) +- Bump ddtrace from 2.1.7 to 2.3.0 ([#9132](https://github.com/dbt-labs/dbt-core/pull/9132)) +- Bump freezegun from 0.3.12 to 1.3.0 ([#9197](https://github.com/dbt-labs/dbt-core/pull/9197)) +- Bump actions/setup-python from 4 to 5 ([#9267](https://github.com/dbt-labs/dbt-core/pull/9267)) +- Bump actions/download-artifact from 3 to 4 ([#9374](https://github.com/dbt-labs/dbt-core/pull/9374)) +- remove dbt/adapters and add dependency on dbt-adapters ([#9430](https://github.com/dbt-labs/dbt-core/pull/9430)) +- Bump actions/cache from 3 to 4 ([#9471](https://github.com/dbt-labs/dbt-core/pull/9471)) +- Bump peter-evans/create-pull-request from 5 to 6 ([#9552](https://github.com/dbt-labs/dbt-core/pull/9552)) +- Cap dbt-semantic-interfaces version range to <0.6 ([#9671](https://github.com/dbt-labs/dbt-core/pull/9671)) +- bump dbt-common to accept major version 1 ([#9690](https://github.com/dbt-labs/dbt-core/pull/9690)) + +### Security + +- Update Jinja2 to >= 3.1.3 to address CVE-2024-22195 + +### Contributors +- [@LeoTheGriff](https://github.com/LeoTheGriff) ([#9003](https://github.com/dbt-labs/dbt-core/issues/9003)) +- 
[@WilliamDee](https://github.com/WilliamDee) ([#9203](https://github.com/dbt-labs/dbt-core/issues/9203)) +- [@adamlopez](https://github.com/adamlopez) ([#8621](https://github.com/dbt-labs/dbt-core/issues/8621)) +- [@aliceliu](https://github.com/aliceliu) ([#9652](https://github.com/dbt-labs/dbt-core/issues/9652)) +- [@benmosher](https://github.com/benmosher) ([#n/a](https://github.com/dbt-labs/dbt-core/issues/n/a)) +- [@colin-rogers-dbt](https://github.com/colin-rogers-dbt) ([#8919](https://github.com/dbt-labs/dbt-core/issues/8919)) +- [@courtneyholcomb](https://github.com/courtneyholcomb) ([#9507](https://github.com/dbt-labs/dbt-core/issues/9507)) +- [@l1xnan](https://github.com/l1xnan) ([#9007](https://github.com/dbt-labs/dbt-core/issues/9007)) +- [@mederka](https://github.com/mederka) ([#6976](https://github.com/dbt-labs/dbt-core/issues/6976)) +- [@ofek1weiss](https://github.com/ofek1weiss) ([#5609](https://github.com/dbt-labs/dbt-core/issues/5609)) +- [@peterallenwebb](https://github.com/peterallenwebb) ([#9112](https://github.com/dbt-labs/dbt-core/issues/9112)) +- [@tlento](https://github.com/tlento) ([#9012](https://github.com/dbt-labs/dbt-core/pull/9012), [#9671](https://github.com/dbt-labs/dbt-core/pull/9671)) +- [@tonayya](https://github.com/tonayya) ([#9252](https://github.com/dbt-labs/dbt-core/issues/9252)) + ## Previous Releases For information on prior major and minor releases, see their changelogs: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d33279543f2..898594e4860 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -170,9 +170,9 @@ Finally, you can also run a specific test or group of tests using [`pytest`](htt ```sh # run all unit tests in a file -python3 -m pytest tests/unit/test_graph.py +python3 -m pytest tests/unit/test_base_column.py # run a specific unit test -python3 -m pytest tests/unit/test_graph.py::GraphTest::test__dependency_list +python3 -m pytest tests/unit/test_base_column.py::TestNumericType::test__numeric_type # run specific Postgres functional tests python3 -m pytest tests/functional/sources ``` diff --git a/Makefile b/Makefile index 90b54578aae..bd8b3be57f6 100644 --- a/Makefile +++ b/Makefile @@ -30,14 +30,19 @@ CI_FLAGS =\ .PHONY: dev_req dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies. @\ - pip install -r dev-requirements.txt - pip install -r editable-requirements.txt + pip install -r dev-requirements.txt -r editable-requirements.txt .PHONY: dev dev: dev_req ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit.
@\ pre-commit install +.PHONY: dev-uninstall +dev-uninstall: ## Uninstall all packages in venv except for build tools + @\ + pip freeze | grep -v "^-e" | cut -d "@" -f1 | xargs pip uninstall -y; \ + pip uninstall -y dbt-core + .PHONY: core_proto_types core_proto_types: ## generates google protobuf python file from core_types.proto protoc -I=./core/dbt/events --python_out=./core/dbt/events ./core/dbt/events/core_types.proto diff --git a/core/MANIFEST.in b/core/MANIFEST.in index 595aea2a8a8..2fe0583d285 100644 --- a/core/MANIFEST.in +++ b/core/MANIFEST.in @@ -1,3 +1,3 @@ -recursive-include dbt/adapters/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore +recursive-include dbt/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore include dbt/py.typed recursive-include dbt/task/docs *.html diff --git a/core/dbt/artifacts/exceptions/__init__.py b/core/dbt/artifacts/exceptions/__init__.py new file mode 100644 index 00000000000..ad8d4ae51b7 --- /dev/null +++ b/core/dbt/artifacts/exceptions/__init__.py @@ -0,0 +1 @@ +from dbt.artifacts.exceptions.schemas import IncompatibleSchemaError diff --git a/core/dbt/artifacts/exceptions/schemas.py b/core/dbt/artifacts/exceptions/schemas.py new file mode 100644 index 00000000000..c9f1b0e151f --- /dev/null +++ b/core/dbt/artifacts/exceptions/schemas.py @@ -0,0 +1,31 @@ +from typing import Optional + +from dbt_common.exceptions import DbtRuntimeError + + +class IncompatibleSchemaError(DbtRuntimeError): + def __init__(self, expected: str, found: Optional[str] = None) -> None: + self.expected = expected + self.found = found + self.filename = "input file" + + super().__init__(msg=self.get_message()) + + def add_filename(self, filename: str): + self.filename = filename + self.msg = self.get_message() + + def get_message(self) -> str: + found_str = "nothing" + if self.found is not None: + found_str = f'"{self.found}"' + + msg = ( + f'Expected a schema version of "{self.expected}" in ' + f"{self.filename}, but found {found_str}. Are you running with a " + f"different version of dbt?" 
+ ) + return msg + + CODE = 10014 + MESSAGE = "Incompatible Schema" diff --git a/core/dbt/artifacts/resources/__init__.py b/core/dbt/artifacts/resources/__init__.py index 7618a3bc12d..d937828e6d5 100644 --- a/core/dbt/artifacts/resources/__init__.py +++ b/core/dbt/artifacts/resources/__init__.py @@ -1,16 +1,32 @@ -from dbt.artifacts.resources.base import BaseResource, GraphResource +from dbt.artifacts.resources.base import BaseResource, GraphResource, FileHash, Docs # alias to latest resource definitions from dbt.artifacts.resources.v1.components import ( - ColumnInfo, DependsOn, - FreshnessThreshold, - HasRelationMetadata, NodeVersion, - Quoting, RefArgs, + HasRelationMetadata, + ParsedResourceMandatory, + ParsedResource, + ColumnInfo, + CompiledResource, + InjectedCTE, + Contract, + DeferRelation, + FreshnessThreshold, + Quoting, Time, ) +from dbt.artifacts.resources.v1.analysis import Analysis +from dbt.artifacts.resources.v1.hook import HookNode +from dbt.artifacts.resources.v1.model import Model, ModelConfig +from dbt.artifacts.resources.v1.sql_operation import SqlOperation +from dbt.artifacts.resources.v1.seed import Seed, SeedConfig +from dbt.artifacts.resources.v1.singular_test import SingularTest +from dbt.artifacts.resources.v1.generic_test import GenericTest, TestMetadata +from dbt.artifacts.resources.v1.snapshot import Snapshot, SnapshotConfig + + from dbt.artifacts.resources.v1.documentation import Documentation from dbt.artifacts.resources.v1.exposure import ( Exposure, @@ -19,7 +35,6 @@ MaturityType, ) from dbt.artifacts.resources.v1.macro import Macro, MacroDependsOn, MacroArgument -from dbt.artifacts.resources.v1.docs import Docs from dbt.artifacts.resources.v1.group import Group from dbt.artifacts.resources.v1.metric import ( ConstantPropertyInput, @@ -59,10 +74,28 @@ SemanticModel, SemanticModelConfig, ) + +from dbt.artifacts.resources.v1.config import ( + NodeAndTestConfig, + NodeConfig, + TestConfig, + Hook, +) + from dbt.artifacts.resources.v1.source_definition import ( + SourceConfig, ExternalPartition, ExternalTable, SourceDefinition, ParsedSourceMandatory, - SourceConfig, +) + +from dbt.artifacts.resources.v1.unit_test_definition import ( + UnitTestConfig, + UnitTestDefinition, + UnitTestInputFixture, + UnitTestOutputFixture, + UnitTestOverrides, + UnitTestNodeVersions, + UnitTestFormat, ) diff --git a/core/dbt/artifacts/resources/base.py b/core/dbt/artifacts/resources/base.py index 06fecf57b26..dd66aa97d72 100644 --- a/core/dbt/artifacts/resources/base.py +++ b/core/dbt/artifacts/resources/base.py @@ -1,6 +1,7 @@ from dataclasses import dataclass from dbt_common.dataclass_schema import dbtClassMixin -from typing import List +from typing import List, Optional +import hashlib from dbt.artifacts.resources.types import NodeType @@ -18,3 +19,49 @@ class BaseResource(dbtClassMixin): @dataclass class GraphResource(BaseResource): fqn: List[str] + + +@dataclass +class FileHash(dbtClassMixin): + name: str # the hash type name + checksum: str # the hashlib.hash_type().hexdigest() of the file contents + + @classmethod + def empty(cls): + return FileHash(name="none", checksum="") + + @classmethod + def path(cls, path: str): + return FileHash(name="path", checksum=path) + + def __eq__(self, other): + if not isinstance(other, FileHash): + return NotImplemented + + if self.name == "none" or self.name != other.name: + return False + + return self.checksum == other.checksum + + def compare(self, contents: str) -> bool: + """Compare the file contents with the given hash""" + if 
self.name == "none": + return False + + return self.from_contents(contents, name=self.name) == self.checksum + + @classmethod + def from_contents(cls, contents: str, name="sha256") -> "FileHash": + """Create a file hash from the given file contents. The hash is always + the utf-8 encoding of the contents given, because dbt only reads files + as utf-8. + """ + data = contents.encode("utf-8") + checksum = hashlib.new(name, data).hexdigest() + return cls(name=name, checksum=checksum) + + +@dataclass +class Docs(dbtClassMixin): + show: bool = True + node_color: Optional[str] = None diff --git a/core/dbt/artifacts/resources/types.py b/core/dbt/artifacts/resources/types.py index af1383e834a..c0ab5341e4c 100644 --- a/core/dbt/artifacts/resources/types.py +++ b/core/dbt/artifacts/resources/types.py @@ -56,6 +56,11 @@ class ModelLanguage(StrEnum): sql = "sql" +class ModelHookType(StrEnum): + PreHook = "pre-hook" + PostHook = "post-hook" + + class TimePeriod(StrEnum): minute = "minute" hour = "hour" diff --git a/core/dbt/artifacts/resources/v1/analysis.py b/core/dbt/artifacts/resources/v1/analysis.py new file mode 100644 index 00000000000..60f90e61576 --- /dev/null +++ b/core/dbt/artifacts/resources/v1/analysis.py @@ -0,0 +1,9 @@ +from dbt.artifacts.resources.v1.components import CompiledResource +from typing import Literal +from dataclasses import dataclass +from dbt.artifacts.resources.types import NodeType + + +@dataclass +class Analysis(CompiledResource): + resource_type: Literal[NodeType.Analysis] diff --git a/core/dbt/artifacts/resources/v1/components.py b/core/dbt/artifacts/resources/v1/components.py index 1aa29086680..cb387304260 100644 --- a/core/dbt/artifacts/resources/v1/components.py +++ b/core/dbt/artifacts/resources/v1/components.py @@ -1,17 +1,29 @@ +import time from dataclasses import dataclass, field -from datetime import timedelta -from dbt.artifacts.resources.types import TimePeriod -from dbt.artifacts.resources.v1.macro import MacroDependsOn +from dbt.artifacts.resources.base import GraphResource, FileHash, Docs +from dbt.artifacts.resources.v1.config import NodeConfig +from dbt_common.dataclass_schema import dbtClassMixin, ExtensibleDbtClassMixin from dbt_common.contracts.config.properties import AdditionalPropertiesMixin from dbt_common.contracts.constraints import ColumnLevelConstraint +from typing import Dict, List, Optional, Union, Any +from datetime import timedelta +from dbt.artifacts.resources.types import TimePeriod from dbt_common.contracts.util import Mergeable -from dbt_common.dataclass_schema import dbtClassMixin, ExtensibleDbtClassMixin -from typing import Any, Dict, List, Optional, Union NodeVersion = Union[str, float] +@dataclass +class MacroDependsOn(dbtClassMixin): + macros: List[str] = field(default_factory=list) + + # 'in' on lists is O(n) so this is O(n^2) for # of macros + def add_macro(self, value: str): + if value not in self.macros: + self.macros.append(value) + + @dataclass class DependsOn(MacroDependsOn): nodes: List[str] = field(default_factory=list) @@ -56,6 +68,21 @@ class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin): _extra: Dict[str, Any] = field(default_factory=dict) +@dataclass +class InjectedCTE(dbtClassMixin): + """Used in CompiledNodes as part of ephemeral model processing""" + + id: str + sql: str + + +@dataclass +class Contract(dbtClassMixin): + enforced: bool = False + alias_types: bool = True + checksum: Optional[str] = None + + @dataclass class Quoting(dbtClassMixin, Mergeable): database: Optional[bool] = None @@ -121,3 
+148,74 @@ def quoting_dict(self) -> Dict[str, bool]: return self.quoting.to_dict(omit_none=True) else: return {} + + +@dataclass +class DeferRelation(HasRelationMetadata): + alias: str + relation_name: Optional[str] + + @property + def identifier(self): + return self.alias + + +@dataclass +class ParsedResourceMandatory(GraphResource, HasRelationMetadata): + alias: str + checksum: FileHash + config: NodeConfig = field(default_factory=NodeConfig) + + @property + def identifier(self): + return self.alias + + +@dataclass +class ParsedResource(ParsedResourceMandatory): + tags: List[str] = field(default_factory=list) + description: str = field(default="") + columns: Dict[str, ColumnInfo] = field(default_factory=dict) + meta: Dict[str, Any] = field(default_factory=dict) + group: Optional[str] = None + docs: Docs = field(default_factory=Docs) + patch_path: Optional[str] = None + build_path: Optional[str] = None + unrendered_config: Dict[str, Any] = field(default_factory=dict) + created_at: float = field(default_factory=lambda: time.time()) + config_call_dict: Dict[str, Any] = field(default_factory=dict) + relation_name: Optional[str] = None + raw_code: str = "" + + +@dataclass +class CompiledResource(ParsedResource): + """Contains attributes necessary for SQL files and nodes with refs, sources, etc, + so all ManifestNodes except SeedNode.""" + + language: str = "sql" + refs: List[RefArgs] = field(default_factory=list) + sources: List[List[str]] = field(default_factory=list) + metrics: List[List[str]] = field(default_factory=list) + depends_on: DependsOn = field(default_factory=DependsOn) + compiled_path: Optional[str] = None + compiled: bool = False + compiled_code: Optional[str] = None + extra_ctes_injected: bool = False + extra_ctes: List[InjectedCTE] = field(default_factory=list) + _pre_injected_sql: Optional[str] = None + contract: Contract = field(default_factory=Contract) + + def __post_serialize__(self, dct): + dct = super().__post_serialize__(dct) + if "_pre_injected_sql" in dct: + del dct["_pre_injected_sql"] + # Remove compiled attributes + if "compiled" in dct and dct["compiled"] is False: + del dct["compiled"] + del dct["extra_ctes_injected"] + del dct["extra_ctes"] + # "omit_none" means these might not be in the dictionary + if "compiled_code" in dct: + del dct["compiled_code"] + return dct diff --git a/core/dbt/artifacts/resources/v1/config.py b/core/dbt/artifacts/resources/v1/config.py new file mode 100644 index 00000000000..28b9879584c --- /dev/null +++ b/core/dbt/artifacts/resources/v1/config.py @@ -0,0 +1,263 @@ +import re + +from dbt_common.dataclass_schema import dbtClassMixin, ValidationError +from typing import Optional, List, Any, Dict, Union +from typing_extensions import Annotated +from dataclasses import dataclass, field +from dbt_common.contracts.config.base import ( + BaseConfig, + CompareBehavior, + MergeBehavior, +) +from dbt_common.contracts.config.metadata import Metadata, ShowBehavior +from dbt_common.contracts.config.materialization import OnConfigurationChangeOption +from dbt.artifacts.resources.base import Docs +from dbt.artifacts.resources.types import ModelHookType +from dbt.artifacts.utils.validation import validate_color +from dbt import hooks +from mashumaro.jsonschema.annotations import Pattern + + +def list_str() -> List[str]: + return [] + + +class Severity(str): + pass + + +def metas(*metas: Metadata) -> Dict[str, Any]: + existing: Dict[str, Any] = {} + for m in metas: + existing = m.meta(existing) + return existing + + +@dataclass +class 
ContractConfig(dbtClassMixin): + enforced: bool = False + alias_types: bool = True + + +@dataclass +class Hook(dbtClassMixin): + sql: str + transaction: bool = True + index: Optional[int] = None + + +@dataclass +class NodeAndTestConfig(BaseConfig): + enabled: bool = True + # these fields are included in serialized output, but are not part of + # config comparison (they are part of database_representation) + alias: Optional[str] = field( + default=None, + metadata=CompareBehavior.Exclude.meta(), + ) + schema: Optional[str] = field( + default=None, + metadata=CompareBehavior.Exclude.meta(), + ) + database: Optional[str] = field( + default=None, + metadata=CompareBehavior.Exclude.meta(), + ) + tags: Union[List[str], str] = field( + default_factory=list_str, + metadata=metas(ShowBehavior.Hide, MergeBehavior.Append, CompareBehavior.Exclude), + ) + meta: Dict[str, Any] = field( + default_factory=dict, + metadata=MergeBehavior.Update.meta(), + ) + group: Optional[str] = field( + default=None, + metadata=CompareBehavior.Exclude.meta(), + ) + + +@dataclass +class NodeConfig(NodeAndTestConfig): + # Note: if any new fields are added with MergeBehavior, also update the + # 'mergebehavior' dictionary + materialized: str = "view" + incremental_strategy: Optional[str] = None + persist_docs: Dict[str, Any] = field(default_factory=dict) + post_hook: List[Hook] = field( + default_factory=list, + metadata={"merge": MergeBehavior.Append, "alias": "post-hook"}, + ) + pre_hook: List[Hook] = field( + default_factory=list, + metadata={"merge": MergeBehavior.Append, "alias": "pre-hook"}, + ) + quoting: Dict[str, Any] = field( + default_factory=dict, + metadata=MergeBehavior.Update.meta(), + ) + # This is actually only used by seeds. Should it be available to others? + # That would be a breaking change! + column_types: Dict[str, Any] = field( + default_factory=dict, + metadata=MergeBehavior.Update.meta(), + ) + full_refresh: Optional[bool] = None + # 'unique_key' doesn't use 'Optional' because typing.get_type_hints was + # sometimes getting the Union order wrong, causing serialization failures. + unique_key: Union[str, List[str], None] = None + on_schema_change: Optional[str] = "ignore" + on_configuration_change: OnConfigurationChangeOption = field( + default_factory=OnConfigurationChangeOption.default + ) + grants: Dict[str, Any] = field( + default_factory=dict, metadata=MergeBehavior.DictKeyAppend.meta() + ) + packages: List[str] = field( + default_factory=list, + metadata=MergeBehavior.Append.meta(), + ) + docs: Docs = field( + default_factory=Docs, + metadata=MergeBehavior.Update.meta(), + ) + contract: ContractConfig = field( + default_factory=ContractConfig, + metadata=MergeBehavior.Update.meta(), + ) + + def __post_init__(self): + # we validate that node_color has a suitable value to prevent dbt-docs from crashing + if self.docs.node_color: + node_color = self.docs.node_color + if not validate_color(node_color): + raise ValidationError( + f"Invalid color name for docs.node_color: {node_color}. " + "It is neither a valid HTML color name nor a valid HEX code." + ) + + if ( + self.contract.enforced + and self.materialized == "incremental" + and self.on_schema_change not in ("append_new_columns", "fail") + ): + raise ValidationError( + f"Invalid value for on_schema_change: {self.on_schema_change}. 
Models " + "materialized as incremental with contracts enabled must set " + "on_schema_change to 'append_new_columns' or 'fail'" + ) + + @classmethod + def __pre_deserialize__(cls, data): + data = super().__pre_deserialize__(data) + for key in ModelHookType: + if key in data: + data[key] = [hooks.get_hook_dict(h) for h in data[key]] + return data + + +SEVERITY_PATTERN = r"^([Ww][Aa][Rr][Nn]|[Ee][Rr][Rr][Oo][Rr])$" + + +@dataclass +class TestConfig(NodeAndTestConfig): + __test__ = False + + # this is repeated because of a different default + schema: Optional[str] = field( + default="dbt_test__audit", + metadata=CompareBehavior.Exclude.meta(), + ) + materialized: str = "test" + # Annotated is used by mashumaro for jsonschema generation + severity: Annotated[Severity, Pattern(SEVERITY_PATTERN)] = Severity("ERROR") + store_failures: Optional[bool] = None + store_failures_as: Optional[str] = None + where: Optional[str] = None + limit: Optional[int] = None + fail_calc: str = "count(*)" + warn_if: str = "!= 0" + error_if: str = "!= 0" + + def __post_init__(self): + """ + The presence of a setting for `store_failures_as` overrides any existing setting for `store_failures`, + regardless of level of granularity. If `store_failures_as` is not set, then `store_failures` takes effect. + At the time of implementation, `store_failures = True` would always create a table; the user could not + configure this. Hence, if `store_failures = True` and `store_failures_as` is not specified, then it + should be set to "table" to mimic the existing functionality. + + A side effect of this overriding functionality is that `store_failures_as="view"` at the project + level cannot be turned off at the model level without setting both `store_failures_as` and + `store_failures`. The former would cascade down and override `store_failures=False`. The proposal + is to include "ephemeral" as a value for `store_failures_as`, which effectively sets + `store_failures=False`. + + The exception handling for this is tricky. If we raise an exception here, the entire run fails at + parse time. We would rather well-formed models run successfully, leaving only exceptions to be rerun + if necessary. Hence, the exception needs to be raised in the test materialization. In order to do so, + we need to make sure that we go down the `store_failures = True` route with the invalid setting for + `store_failures_as`. This results in the `.get()` defaulted to `True` below, instead of a normal + dictionary lookup as is done in the `if` block. Refer to the test materialization for the + exception that is raise as a result of an invalid value. + + The intention of this block is to behave as if `store_failures_as` is the only setting, + but still allow for backwards compatibility for `store_failures`. + See https://github.com/dbt-labs/dbt-core/issues/6914 for more information. 
+ """ + + # if `store_failures_as` is not set, it gets set by `store_failures` + # the settings below mimic existing behavior prior to `store_failures_as` + get_store_failures_as_map = { + True: "table", + False: "ephemeral", + None: None, + } + + # if `store_failures_as` is set, it dictates what `store_failures` gets set to + # the settings below overrides whatever `store_failures` is set to by the user + get_store_failures_map = { + "ephemeral": False, + "table": True, + "view": True, + } + + if self.store_failures_as is None: + self.store_failures_as = get_store_failures_as_map[self.store_failures] + else: + self.store_failures = get_store_failures_map.get(self.store_failures_as, True) + + @classmethod + def same_contents(cls, unrendered: Dict[str, Any], other: Dict[str, Any]) -> bool: + """This is like __eq__, except it explicitly checks certain fields.""" + modifiers = [ + "severity", + "where", + "limit", + "fail_calc", + "warn_if", + "error_if", + "store_failures", + "store_failures_as", + ] + + seen = set() + for _, target_name in cls._get_fields(): + key = target_name + seen.add(key) + if key in modifiers: + if not cls.compare_key(unrendered, other, key): + return False + return True + + @classmethod + def validate(cls, data): + if data.get("severity") and not re.match(SEVERITY_PATTERN, data.get("severity")): + raise ValidationError( + f"Severity must be either 'warn' or 'error'. Got '{data.get('severity')}'" + ) + + super().validate(data) + + if data.get("materialized") and data.get("materialized") != "test": + raise ValidationError("A test must have a materialized value of 'test'") diff --git a/core/dbt/artifacts/resources/v1/docs.py b/core/dbt/artifacts/resources/v1/docs.py deleted file mode 100644 index b016320a249..00000000000 --- a/core/dbt/artifacts/resources/v1/docs.py +++ /dev/null @@ -1,9 +0,0 @@ -from dataclasses import dataclass -from dbt_common.dataclass_schema import dbtClassMixin -from typing import Optional - - -@dataclass -class Docs(dbtClassMixin): - show: bool = True - node_color: Optional[str] = None diff --git a/core/dbt/artifacts/resources/v1/generic_test.py b/core/dbt/artifacts/resources/v1/generic_test.py new file mode 100644 index 00000000000..b24be584b3a --- /dev/null +++ b/core/dbt/artifacts/resources/v1/generic_test.py @@ -0,0 +1,30 @@ +from dataclasses import dataclass, field +from typing import Optional, Any, Dict, Literal +from dbt_common.dataclass_schema import dbtClassMixin +from dbt.artifacts.resources.types import NodeType +from dbt.artifacts.resources.v1.config import TestConfig +from dbt.artifacts.resources.v1.components import CompiledResource + + +@dataclass +class TestMetadata(dbtClassMixin): + __test__ = False + + name: str = "test" # dummy default to allow default in GenericTestNode. Should always be set. + # kwargs are the args that are left in the test builder after + # removing configs. They are set from the test builder when + # the test node is created. + kwargs: Dict[str, Any] = field(default_factory=dict) + namespace: Optional[str] = None + + +@dataclass +class GenericTest(CompiledResource): + resource_type: Literal[NodeType.Test] + column_name: Optional[str] = None + file_key_name: Optional[str] = None + # Was not able to make mypy happy and keep the code working. We need to + # refactor the various configs. 
+ config: TestConfig = field(default_factory=TestConfig) # type: ignore + attached_node: Optional[str] = None + test_metadata: TestMetadata = field(default_factory=TestMetadata) diff --git a/core/dbt/artifacts/resources/v1/hook.py b/core/dbt/artifacts/resources/v1/hook.py new file mode 100644 index 00000000000..dcfb4684c68 --- /dev/null +++ b/core/dbt/artifacts/resources/v1/hook.py @@ -0,0 +1,10 @@ +from dataclasses import dataclass +from typing import Optional, Literal +from dbt.artifacts.resources.v1.components import CompiledResource +from dbt.artifacts.resources.types import NodeType + + +@dataclass +class HookNode(CompiledResource): + resource_type: Literal[NodeType.Operation] + index: Optional[int] = None diff --git a/core/dbt/artifacts/resources/v1/macro.py b/core/dbt/artifacts/resources/v1/macro.py index f52255933b2..be02d529ee1 100644 --- a/core/dbt/artifacts/resources/v1/macro.py +++ b/core/dbt/artifacts/resources/v1/macro.py @@ -3,9 +3,9 @@ from typing import Literal, List, Dict, Optional, Any from dbt_common.dataclass_schema import dbtClassMixin -from dbt.artifacts.resources.base import BaseResource +from dbt.artifacts.resources.base import BaseResource, Docs from dbt.artifacts.resources.types import NodeType, ModelLanguage -from dbt.artifacts.resources.v1.docs import Docs +from dbt.artifacts.resources.v1.components import MacroDependsOn @dataclass @@ -15,16 +15,6 @@ class MacroArgument(dbtClassMixin): description: str = "" -@dataclass -class MacroDependsOn(dbtClassMixin): - macros: List[str] = field(default_factory=list) - - # 'in' on lists is O(n) so this is O(n^2) for # of macros - def add_macro(self, value: str): - if value not in self.macros: - self.macros.append(value) - - @dataclass class Macro(BaseResource): macro_sql: str diff --git a/core/dbt/artifacts/resources/v1/model.py b/core/dbt/artifacts/resources/v1/model.py new file mode 100644 index 00000000000..afb5edaad54 --- /dev/null +++ b/core/dbt/artifacts/resources/v1/model.py @@ -0,0 +1,28 @@ +from dataclasses import dataclass, field +from typing import Literal, Optional, List +from datetime import datetime +from dbt_common.contracts.config.base import MergeBehavior +from dbt_common.contracts.constraints import ModelLevelConstraint +from dbt.artifacts.resources.v1.config import NodeConfig +from dbt.artifacts.resources.types import AccessType, NodeType +from dbt.artifacts.resources.v1.components import DeferRelation, NodeVersion, CompiledResource + + +@dataclass +class ModelConfig(NodeConfig): + access: AccessType = field( + default=AccessType.Protected, + metadata=MergeBehavior.Clobber.meta(), + ) + + +@dataclass +class Model(CompiledResource): + resource_type: Literal[NodeType.Model] + access: AccessType = AccessType.Protected + config: ModelConfig = field(default_factory=ModelConfig) + constraints: List[ModelLevelConstraint] = field(default_factory=list) + version: Optional[NodeVersion] = None + latest_version: Optional[NodeVersion] = None + deprecation_date: Optional[datetime] = None + defer_relation: Optional[DeferRelation] = None diff --git a/core/dbt/artifacts/resources/v1/saved_query.py b/core/dbt/artifacts/resources/v1/saved_query.py index cc24d8fddf4..5f0575d26a7 100644 --- a/core/dbt/artifacts/resources/v1/saved_query.py +++ b/core/dbt/artifacts/resources/v1/saved_query.py @@ -21,6 +21,7 @@ class ExportConfig(dbtClassMixin): export_as: ExportDestinationType schema_name: Optional[str] = None alias: Optional[str] = None + database: Optional[str] = None @dataclass diff --git 
a/core/dbt/artifacts/resources/v1/seed.py b/core/dbt/artifacts/resources/v1/seed.py new file mode 100644 index 00000000000..47a16352cf2 --- /dev/null +++ b/core/dbt/artifacts/resources/v1/seed.py @@ -0,0 +1,30 @@ +from dataclasses import dataclass, field +from typing import Optional, Literal +from dbt_common.dataclass_schema import ValidationError +from dbt.artifacts.resources.types import NodeType +from dbt.artifacts.resources.v1.components import MacroDependsOn, DeferRelation, ParsedResource +from dbt.artifacts.resources.v1.config import NodeConfig + + +@dataclass +class SeedConfig(NodeConfig): + materialized: str = "seed" + delimiter: str = "," + quote_columns: Optional[bool] = None + + @classmethod + def validate(cls, data): + super().validate(data) + if data.get("materialized") and data.get("materialized") != "seed": + raise ValidationError("A seed must have a materialized value of 'seed'") + + +@dataclass +class Seed(ParsedResource): # No SQLDefaults! + resource_type: Literal[NodeType.Seed] + config: SeedConfig = field(default_factory=SeedConfig) + # seeds need the root_path because the contents are not loaded initially + # and we need the root_path to load the seed later + root_path: Optional[str] = None + depends_on: MacroDependsOn = field(default_factory=MacroDependsOn) + defer_relation: Optional[DeferRelation] = None diff --git a/core/dbt/artifacts/resources/v1/semantic_model.py b/core/dbt/artifacts/resources/v1/semantic_model.py index b219b2bdcc8..8a02aa5fa61 100644 --- a/core/dbt/artifacts/resources/v1/semantic_model.py +++ b/core/dbt/artifacts/resources/v1/semantic_model.py @@ -42,7 +42,7 @@ class NodeRelation(dbtClassMixin): alias: str schema_name: str # TODO: Could this be called simply "schema" so we could reuse StateRelation? database: Optional[str] = None - relation_name: Optional[str] = None + relation_name: Optional[str] = "" # ==================================== diff --git a/core/dbt/artifacts/resources/v1/singular_test.py b/core/dbt/artifacts/resources/v1/singular_test.py new file mode 100644 index 00000000000..76b47183c51 --- /dev/null +++ b/core/dbt/artifacts/resources/v1/singular_test.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass, field +from typing import Literal +from dbt.artifacts.resources.types import NodeType +from dbt.artifacts.resources.v1.components import CompiledResource +from dbt.artifacts.resources.v1.config import TestConfig + + +@dataclass +class SingularTest(CompiledResource): + resource_type: Literal[NodeType.Test] + # Was not able to make mypy happy and keep the code working. We need to + # refactor the various configs. 
+ config: TestConfig = field(default_factory=TestConfig) # type: ignore diff --git a/core/dbt/artifacts/resources/v1/snapshot.py b/core/dbt/artifacts/resources/v1/snapshot.py new file mode 100644 index 00000000000..3eceb9bb1d2 --- /dev/null +++ b/core/dbt/artifacts/resources/v1/snapshot.py @@ -0,0 +1,66 @@ +from typing import Union, List, Optional, Literal +from dataclasses import dataclass +from dbt_common.dataclass_schema import ValidationError +from dbt.artifacts.resources.types import NodeType +from dbt.artifacts.resources.v1.components import CompiledResource, DeferRelation +from dbt.artifacts.resources.v1.config import NodeConfig + + +@dataclass +class SnapshotConfig(NodeConfig): + materialized: str = "snapshot" + strategy: Optional[str] = None + unique_key: Optional[str] = None + target_schema: Optional[str] = None + target_database: Optional[str] = None + updated_at: Optional[str] = None + # Not using Optional because of serialization issues with a Union of str and List[str] + check_cols: Union[str, List[str], None] = None + + @classmethod + def validate(cls, data): + super().validate(data) + # Note: currently you can't just set these keys in schema.yml because this validation + # will fail when parsing the snapshot node. + if not data.get("strategy") or not data.get("unique_key") or not data.get("target_schema"): + raise ValidationError( + "Snapshots must be configured with a 'strategy', 'unique_key', " + "and 'target_schema'." + ) + if data.get("strategy") == "check": + if not data.get("check_cols"): + raise ValidationError( + "A snapshot configured with the check strategy must " + "specify a check_cols configuration." + ) + if isinstance(data["check_cols"], str) and data["check_cols"] != "all": + raise ValidationError( + f"Invalid value for 'check_cols': {data['check_cols']}. " + "Expected 'all' or a list of strings." + ) + elif data.get("strategy") == "timestamp": + if not data.get("updated_at"): + raise ValidationError( + "A snapshot configured with the timestamp strategy " + "must specify an updated_at configuration." 
+ ) + if data.get("check_cols"): + raise ValidationError("A 'timestamp' snapshot should not have 'check_cols'") + # If the strategy is not 'check' or 'timestamp' it's a custom strategy, + # formerly supported with GenericSnapshotConfig + + if data.get("materialized") and data.get("materialized") != "snapshot": + raise ValidationError("A snapshot must have a materialized value of 'snapshot'") + + # Called by "calculate_node_config_dict" in ContextConfigGenerator + def finalize_and_validate(self): + data = self.to_dict(omit_none=True) + self.validate(data) + return self.from_dict(data) + + +@dataclass +class Snapshot(CompiledResource): + resource_type: Literal[NodeType.Snapshot] + config: SnapshotConfig + defer_relation: Optional[DeferRelation] = None diff --git a/core/dbt/artifacts/resources/v1/source_definition.py b/core/dbt/artifacts/resources/v1/source_definition.py index 9d3a87b0bd6..e5a9ab1d98e 100644 --- a/core/dbt/artifacts/resources/v1/source_definition.py +++ b/core/dbt/artifacts/resources/v1/source_definition.py @@ -9,13 +9,18 @@ HasRelationMetadata, Quoting, ) -from dbt_common.contracts.config.base import BaseConfig +from dbt.artifacts.resources.v1.config import BaseConfig from dbt_common.contracts.config.properties import AdditionalPropertiesAllowed from dbt_common.contracts.util import Mergeable from dbt_common.exceptions import CompilationError from typing import Any, Dict, List, Literal, Optional, Union +@dataclass +class SourceConfig(BaseConfig): + enabled: bool = True + + @dataclass class ExternalPartition(AdditionalPropertiesAllowed): name: str = "" @@ -40,11 +45,6 @@ def __bool__(self): return self.location is not None -@dataclass -class SourceConfig(BaseConfig): - enabled: bool = True - - @dataclass class ParsedSourceMandatory(GraphResource, HasRelationMetadata): source_name: str diff --git a/core/dbt/artifacts/resources/v1/sql_operation.py b/core/dbt/artifacts/resources/v1/sql_operation.py new file mode 100644 index 00000000000..fd8e79b21a1 --- /dev/null +++ b/core/dbt/artifacts/resources/v1/sql_operation.py @@ -0,0 +1,9 @@ +from dataclasses import dataclass +from typing import Literal +from dbt.artifacts.resources.types import NodeType +from dbt.artifacts.resources.v1.components import CompiledResource + + +@dataclass +class SqlOperation(CompiledResource): + resource_type: Literal[NodeType.SqlOperation] diff --git a/core/dbt/artifacts/resources/v1/unit_test_definition.py b/core/dbt/artifacts/resources/v1/unit_test_definition.py new file mode 100644 index 00000000000..fc265fa36b9 --- /dev/null +++ b/core/dbt/artifacts/resources/v1/unit_test_definition.py @@ -0,0 +1,81 @@ +from dataclasses import dataclass, field +import time +from typing import Optional, Sequence, Dict, Any, List, Union + +from dbt_common.contracts.config.base import ( + BaseConfig, + CompareBehavior, + MergeBehavior, +) +from dbt_common.contracts.config.metadata import ShowBehavior +from dbt_common.dataclass_schema import dbtClassMixin, StrEnum + +from dbt.artifacts.resources.v1.config import metas, list_str +from dbt.artifacts.resources.base import GraphResource +from dbt.artifacts.resources import NodeVersion, DependsOn + + +@dataclass +class UnitTestConfig(BaseConfig): + tags: Union[str, List[str]] = field( + default_factory=list_str, + metadata=metas(ShowBehavior.Hide, MergeBehavior.Append, CompareBehavior.Exclude), + ) + meta: Dict[str, Any] = field( + default_factory=dict, + metadata=MergeBehavior.Update.meta(), + ) + + +class UnitTestFormat(StrEnum): + CSV = "csv" + Dict = "dict" + SQL = "sql" + 
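The `SnapshotConfig.validate` hunk above encodes a small decision table: every snapshot needs `strategy`, `unique_key`, and `target_schema`; the `check` strategy additionally requires `check_cols` (the literal string `'all'` or a list of columns); the `timestamp` strategy requires `updated_at` and rejects `check_cols`; any other strategy name is treated as custom and passes through. A standalone sketch of those rules (the function name and the local `ValidationError` stand-in are illustrative, not dbt's API):

```python
from typing import Any, Dict, List, Union


class ValidationError(Exception):
    """Stand-in for dbt_common.dataclass_schema.ValidationError."""


def validate_snapshot_config(data: Dict[str, Any]) -> None:
    # Every snapshot needs a strategy, a unique_key, and a target_schema.
    if not data.get("strategy") or not data.get("unique_key") or not data.get("target_schema"):
        raise ValidationError(
            "Snapshots must be configured with a 'strategy', 'unique_key', and 'target_schema'."
        )
    if data["strategy"] == "check":
        check_cols: Union[str, List[str], None] = data.get("check_cols")
        if not check_cols:
            raise ValidationError("The 'check' strategy requires a check_cols configuration.")
        if isinstance(check_cols, str) and check_cols != "all":
            raise ValidationError("check_cols must be 'all' or a list of column names.")
    elif data["strategy"] == "timestamp":
        if not data.get("updated_at"):
            raise ValidationError("The 'timestamp' strategy requires an updated_at configuration.")
        if data.get("check_cols"):
            raise ValidationError("A 'timestamp' snapshot should not have 'check_cols'.")
    # Any other strategy name is a custom strategy and is accepted as-is.


# Passes: a well-formed timestamp snapshot config.
validate_snapshot_config(
    {"strategy": "timestamp", "unique_key": "id", "target_schema": "snapshots", "updated_at": "updated_at"}
)
```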
+ +@dataclass +class UnitTestInputFixture(dbtClassMixin): + input: str + rows: Optional[Union[str, List[Dict[str, Any]]]] = None + format: UnitTestFormat = UnitTestFormat.Dict + fixture: Optional[str] = None + + +@dataclass +class UnitTestOverrides(dbtClassMixin): + macros: Dict[str, Any] = field(default_factory=dict) + vars: Dict[str, Any] = field(default_factory=dict) + env_vars: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class UnitTestNodeVersions(dbtClassMixin): + include: Optional[List[NodeVersion]] = None + exclude: Optional[List[NodeVersion]] = None + + +@dataclass +class UnitTestOutputFixture(dbtClassMixin): + rows: Optional[Union[str, List[Dict[str, Any]]]] = None + format: UnitTestFormat = UnitTestFormat.Dict + fixture: Optional[str] = None + + +@dataclass +class UnitTestDefinitionMandatory: + model: str + given: Sequence[UnitTestInputFixture] + expect: UnitTestOutputFixture + + +@dataclass +class UnitTestDefinition(GraphResource, UnitTestDefinitionMandatory): + description: str = "" + overrides: Optional[UnitTestOverrides] = None + depends_on: DependsOn = field(default_factory=DependsOn) + config: UnitTestConfig = field(default_factory=UnitTestConfig) + checksum: Optional[str] = None + schema: Optional[str] = None + created_at: float = field(default_factory=lambda: time.time()) + versions: Optional[UnitTestNodeVersions] = None + version: Optional[NodeVersion] = None diff --git a/core/dbt/artifacts/schemas/base.py b/core/dbt/artifacts/schemas/base.py index ad94aa64e68..d7c206a218e 100644 --- a/core/dbt/artifacts/schemas/base.py +++ b/core/dbt/artifacts/schemas/base.py @@ -1,22 +1,18 @@ import dataclasses from datetime import datetime +import functools +from mashumaro.jsonschema import build_json_schema +from mashumaro.jsonschema.dialects import DRAFT_2020_12 from typing import ClassVar, Type, TypeVar, Dict, Any, Optional from dbt_common.clients.system import write_json, read_json -from dbt.exceptions import ( - DbtInternalError, - DbtRuntimeError, - IncompatibleSchemaError, -) -from dbt.version import __version__ - +from dbt_common.exceptions import DbtInternalError, DbtRuntimeError from dbt_common.events.functions import get_metadata_vars from dbt_common.invocation import get_invocation_id from dbt_common.dataclass_schema import dbtClassMixin -from mashumaro.jsonschema import build_json_schema -from mashumaro.jsonschema.dialects import DRAFT_2020_12 -import functools +from dbt.version import __version__ +from dbt.artifacts.exceptions import IncompatibleSchemaError BASE_SCHEMAS_URL = "https://schemas.getdbt.com/" diff --git a/core/dbt/artifacts/schemas/freshness/v3/freshness.py b/core/dbt/artifacts/schemas/freshness/v3/freshness.py index 5e8b4dabd30..a9b956d2863 100644 --- a/core/dbt/artifacts/schemas/freshness/v3/freshness.py +++ b/core/dbt/artifacts/schemas/freshness/v3/freshness.py @@ -107,7 +107,11 @@ class FreshnessExecutionResultArtifact( @classmethod def from_result(cls, base: FreshnessResult): - processed = [process_freshness_result(r) for r in base.results] + processed = [ + process_freshness_result(r) + for r in base.results + if isinstance(r, SourceFreshnessResult) + ] return cls( metadata=base.metadata, results=processed, diff --git a/core/dbt/artifacts/schemas/manifest/v12/manifest.py b/core/dbt/artifacts/schemas/manifest/v12/manifest.py index 35b24eeeb9e..088d2ff9023 100644 --- a/core/dbt/artifacts/schemas/manifest/v12/manifest.py +++ b/core/dbt/artifacts/schemas/manifest/v12/manifest.py @@ -1,5 +1,5 @@ from dataclasses import dataclass, field 
-from typing import Mapping, Iterable, Tuple, Optional, Dict, List, Any +from typing import Mapping, Iterable, Tuple, Optional, Dict, List, Any, Union from uuid import UUID from dbt.artifacts.schemas.base import ( @@ -17,21 +17,40 @@ Metric, SavedQuery, SemanticModel, -) - -# TODO: remove usage of dbt modules other than dbt.artifacts -from dbt import tracking -from dbt.flags import get_flags -from dbt.contracts.graph.nodes import ( - GraphMemberNode, - ManifestNode, SourceDefinition, UnitTestDefinition, + Seed, + Analysis, + SingularTest, + HookNode, + Model, + SqlOperation, + GenericTest, + Snapshot, ) NodeEdgeMap = Dict[str, List[str]] UniqueID = str +ManifestResource = Union[ + Seed, + Analysis, + SingularTest, + HookNode, + Model, + SqlOperation, + GenericTest, + Snapshot, +] +DisabledManifestResource = Union[ + ManifestResource, + SourceDefinition, + Exposure, + Metric, + SavedQuery, + SemanticModel, + UnitTestDefinition, +] @dataclass @@ -70,16 +89,6 @@ class ManifestMetadata(BaseArtifactMetadata): metadata=dict(description="The type name of the adapter"), ) - def __post_init__(self): - if tracking.active_user is None: - return - - if self.user_id is None: - self.user_id = tracking.active_user.id - - if self.send_anonymous_usage_stats is None: - self.send_anonymous_usage_stats = get_flags().SEND_ANONYMOUS_USAGE_STATS - @classmethod def default(cls): return cls( @@ -90,7 +99,7 @@ def default(cls): @dataclass @schema_version("manifest", 12) class WritableManifest(ArtifactMixin): - nodes: Mapping[UniqueID, ManifestNode] = field( + nodes: Mapping[UniqueID, ManifestResource] = field( metadata=dict(description=("The nodes defined in the dbt project and its dependencies")) ) sources: Mapping[UniqueID, SourceDefinition] = field( @@ -116,7 +125,7 @@ class WritableManifest(ArtifactMixin): selectors: Mapping[UniqueID, Any] = field( metadata=dict(description=("The selectors defined in selectors.yml")) ) - disabled: Optional[Mapping[UniqueID, List[GraphMemberNode]]] = field( + disabled: Optional[Mapping[UniqueID, List[DisabledManifestResource]]] = field( metadata=dict(description="A mapping of the disabled nodes in the target") ) parent_map: Optional[NodeEdgeMap] = field( diff --git a/core/dbt/artifacts/schemas/results.py b/core/dbt/artifacts/schemas/results.py index 2e452f44678..5b36d7fe6fb 100644 --- a/core/dbt/artifacts/schemas/results.py +++ b/core/dbt/artifacts/schemas/results.py @@ -1,9 +1,5 @@ from dbt.contracts.graph.nodes import ResultNode -from dbt_common.events.functions import fire_event -from dbt.events.types import TimingInfoCollected -from dbt_common.events.contextvars import get_node_info from dbt_common.events.helpers import datetime_to_json_string -from dbt.logger import TimingProcessor from dbt_common.utils import cast_to_str, cast_to_int from dbt_common.dataclass_schema import dbtClassMixin, StrEnum @@ -45,13 +41,6 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): self.timing_info.end() self.callback(self.timing_info) - # Note: when legacy logger is removed, we can remove the following line - with TimingProcessor(self.timing_info): - fire_event( - TimingInfoCollected( - timing_info=self.timing_info.to_msg_dict(), node_info=get_node_info() - ) - ) class RunningStatus(StrEnum): diff --git a/core/dbt/artifacts/schemas/run/v5/run.py b/core/dbt/artifacts/schemas/run/v5/run.py index eb731b71b5d..47cc0cb3b87 100644 --- a/core/dbt/artifacts/schemas/run/v5/run.py +++ b/core/dbt/artifacts/schemas/run/v5/run.py @@ -1,11 +1,12 @@ import threading -from typing 
import Any, Optional, Iterable, Tuple, Sequence, Dict -import agate +from typing import Any, Optional, Iterable, Tuple, Sequence, Dict, TYPE_CHECKING +import copy from dataclasses import dataclass, field from datetime import datetime -from dbt.contracts.graph.nodes import CompiledNode +from dbt.constants import SECRET_ENV_PREFIX +from dbt.artifacts.resources import CompiledResource from dbt.artifacts.schemas.base import ( BaseArtifactMetadata, ArtifactMixin, @@ -20,11 +21,16 @@ ExecutionResult, ) from dbt_common.clients.system import write_json +from dbt.exceptions import scrub_secrets + + +if TYPE_CHECKING: + import agate @dataclass class RunResult(NodeResult): - agate_table: Optional[agate.Table] = field( + agate_table: Optional["agate.Table"] = field( default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None} ) @@ -64,7 +70,7 @@ class RunResultOutput(BaseResult): def process_run_result(result: RunResult) -> RunResultOutput: - compiled = isinstance(result.node, CompiledNode) + compiled = isinstance(result.node, CompiledResource) return RunResultOutput( unique_id=result.node.unique_id, @@ -120,7 +126,26 @@ def from_execution_results( dbt_schema_version=str(cls.dbt_schema_version), generated_at=generated_at, ) - return cls(metadata=meta, results=processed_results, elapsed_time=elapsed_time, args=args) + + secret_vars = [ + v for k, v in args["vars"].items() if k.startswith(SECRET_ENV_PREFIX) and v.strip() + ] + + scrubbed_args = copy.deepcopy(args) + + # scrub secrets in invocation command + scrubbed_args["invocation_command"] = scrub_secrets( + scrubbed_args["invocation_command"], secret_vars + ) + + # scrub secrets in vars dict + scrubbed_args["vars"] = { + k: scrub_secrets(v, secret_vars) for k, v in scrubbed_args["vars"].items() + } + + return cls( + metadata=meta, results=processed_results, elapsed_time=elapsed_time, args=scrubbed_args + ) @classmethod def compatible_previous_versions(cls) -> Iterable[Tuple[str, int]]: diff --git a/core/dbt/contracts/graph/utils.py b/core/dbt/artifacts/utils/validation.py similarity index 100% rename from core/dbt/contracts/graph/utils.py rename to core/dbt/artifacts/utils/validation.py diff --git a/core/dbt/cli/flags.py b/core/dbt/cli/flags.py index 3cfafc9296f..9d49d339543 100644 --- a/core/dbt/cli/flags.py +++ b/core/dbt/cli/flags.py @@ -28,6 +28,7 @@ FLAGS_DEFAULTS = { "INDIRECT_SELECTION": "eager", "TARGET_PATH": None, + "DEFER_STATE": None, # necessary because of retry construction of flags "WARN_ERROR": None, # Cli args without project_flags or env var option. "FULL_REFRESH": False, @@ -351,6 +352,11 @@ def set_common_global_flags(self): if getattr(self, "MACRO_DEBUGGING", None) is not None: jinja.MACRO_DEBUGGING = getattr(self, "MACRO_DEBUGGING") + # This is here to prevent mypy from complaining about all of the + # attributes which we added dynamically. 
+ def __getattr__(self, name: str) -> Any: + return super().__getattribute__(name) # type: ignore + CommandParams = List[str] @@ -399,7 +405,10 @@ def add_fn(x): # MultiOption flags come back as lists, but we want to pass them as space separated strings if isinstance(v, list): - v = " ".join(v) + if len(v) > 0: + v = " ".join(v) + else: + continue if k == "macro" and command == CliCommand.RUN_OPERATION: add_fn(v) diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 924f0e397bb..07a9de861a7 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -20,24 +20,6 @@ from dbt.artifacts.schemas.catalog import CatalogArtifact from dbt.artifacts.schemas.run import RunExecutionResult from dbt_common.events.base_types import EventMsg -from dbt.task.build import BuildTask -from dbt.task.clean import CleanTask -from dbt.task.clone import CloneTask -from dbt.task.compile import CompileTask -from dbt.task.debug import DebugTask -from dbt.task.deps import DepsTask -from dbt.task.docs.generate import GenerateTask -from dbt.task.docs.serve import ServeTask -from dbt.task.freshness import FreshnessTask -from dbt.task.init import InitTask -from dbt.task.list import ListTask -from dbt.task.retry import RetryTask -from dbt.task.run import RunTask -from dbt.task.run_operation import RunOperationTask -from dbt.task.seed import SeedTask -from dbt.task.show import ShowTask -from dbt.task.snapshot import SnapshotTask -from dbt.task.test import TestTask @dataclass @@ -72,7 +54,7 @@ def __init__( def invoke(self, args: List[str], **kwargs) -> dbtRunnerResult: try: - dbt_ctx = cli.make_context(cli.name, args) + dbt_ctx = cli.make_context(cli.name, args.copy()) dbt_ctx.obj = { "manifest": self.manifest, "callbacks": self.callbacks, @@ -145,12 +127,14 @@ def global_flags(func): @p.populate_cache @p.print @p.printer_width + @p.profile @p.quiet @p.record_timing_info @p.send_anonymous_usage_stats @p.single_threaded @p.state @p.static_parser + @p.target @p.use_colors @p.use_colors_file @p.use_experimental_parser @@ -187,17 +171,17 @@ def cli(ctx, **kwargs): @click.pass_context @global_flags @p.exclude +@p.export_saved_queries @p.full_refresh -@p.include_saved_query -@p.profile +@p.deprecated_include_saved_query @p.profiles_dir @p.project_dir @p.resource_type +@p.exclude_resource_type @p.select @p.selector @p.show @p.store_failures -@p.target @p.target_path @p.threads @p.vars @@ -209,6 +193,8 @@ def cli(ctx, **kwargs): @requires.manifest def build(ctx, **kwargs): """Run all seeds, models, snapshots, and tests in DAG order""" + from dbt.task.build import BuildTask + task = BuildTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -225,10 +211,8 @@ def build(ctx, **kwargs): @click.pass_context @global_flags @p.clean_project_files_only -@p.profile @p.profiles_dir @p.project_dir -@p.target @p.target_path @p.vars @requires.postflight @@ -237,6 +221,8 @@ def build(ctx, **kwargs): @requires.project def clean(ctx, **kwargs): """Delete all folders in the clean-targets list (usually the dbt_packages and target directories.)""" + from dbt.task.clean import CleanTask + task = CleanTask(ctx.obj["flags"], ctx.obj["project"]) results = task.run() @@ -258,14 +244,12 @@ def docs(ctx, **kwargs): @global_flags @p.compile_docs @p.exclude -@p.profile @p.profiles_dir @p.project_dir @p.select @p.selector @p.empty_catalog @p.static -@p.target @p.target_path @p.threads @p.vars @@ -277,6 +261,8 @@ def docs(ctx, **kwargs): @requires.manifest(write=False) def docs_generate(ctx, **kwargs): """Generate the documentation website for your 
project""" + from dbt.task.docs.generate import GenerateTask + task = GenerateTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -294,10 +280,8 @@ def docs_generate(ctx, **kwargs): @global_flags @p.browser @p.port -@p.profile @p.profiles_dir @p.project_dir -@p.target @p.target_path @p.vars @requires.postflight @@ -307,6 +291,8 @@ def docs_generate(ctx, **kwargs): @requires.runtime_config def docs_serve(ctx, **kwargs): """Serve the documentation website for your project""" + from dbt.task.docs.serve import ServeTask + task = ServeTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -325,7 +311,6 @@ def docs_serve(ctx, **kwargs): @p.full_refresh @p.show_output_format @p.introspect -@p.profile @p.profiles_dir @p.project_dir @p.empty @@ -333,7 +318,6 @@ def docs_serve(ctx, **kwargs): @p.selector @p.inline @p.compile_inject_ephemeral_ctes -@p.target @p.target_path @p.threads @p.vars @@ -346,6 +330,8 @@ def docs_serve(ctx, **kwargs): def compile(ctx, **kwargs): """Generates executable SQL from source, model, test, and analysis files. Compiled SQL files are written to the target/ directory.""" + from dbt.task.compile import CompileTask + task = CompileTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -366,13 +352,11 @@ def compile(ctx, **kwargs): @p.show_output_format @p.show_limit @p.introspect -@p.profile @p.profiles_dir @p.project_dir @p.select @p.selector @p.inline -@p.target @p.target_path @p.threads @p.vars @@ -385,6 +369,8 @@ def compile(ctx, **kwargs): def show(ctx, **kwargs): """Generates executable SQL for a named resource or inline query, runs that SQL, and returns a preview of the results. Does not materialize anything to the warehouse.""" + from dbt.task.show import ShowTask + task = ShowTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -402,19 +388,17 @@ def show(ctx, **kwargs): @global_flags @p.debug_connection @p.config_dir -@p.profile @p.profiles_dir_exists_false @p.project_dir -@p.target @p.vars @requires.postflight @requires.preflight def debug(ctx, **kwargs): """Show information on the current dbt environment and check dependencies, then test the database connection. Not to be confused with the --debug option which increases verbosity.""" + from dbt.task.debug import DebugTask task = DebugTask( ctx.obj["flags"], - None, ) results = task.run() @@ -426,10 +410,8 @@ def debug(ctx, **kwargs): @cli.command("deps") @click.pass_context @global_flags -@p.profile @p.profiles_dir_exists_false @p.project_dir -@p.target @p.vars @p.source @p.lock @@ -450,6 +432,8 @@ def deps(ctx, **kwargs): There is a way to add new packages by providing an `--add-package` flag to deps command which will allow user to specify a package they want to add in the format of packagename@version. 
""" + from dbt.task.deps import DepsTask + flags = ctx.obj["flags"] if flags.ADD_PACKAGE: if not flags.ADD_PACKAGE["version"] and flags.SOURCE != "local": @@ -469,17 +453,17 @@ def deps(ctx, **kwargs): @global_flags # for backwards compatibility, accept 'project_name' as an optional positional argument @click.argument("project_name", required=False) -@p.profile @p.profiles_dir_exists_false @p.project_dir @p.skip_profile_setup -@p.target @p.vars @requires.postflight @requires.preflight def init(ctx, **kwargs): """Initialize a new dbt project.""" - task = InitTask(ctx.obj["flags"], None) + from dbt.task.init import InitTask + + task = InitTask(ctx.obj["flags"]) results = task.run() success = task.interpret_results(results) @@ -494,13 +478,12 @@ def init(ctx, **kwargs): @p.models @p.output @p.output_keys -@p.profile @p.profiles_dir @p.project_dir @p.resource_type +@p.exclude_resource_type @p.raw_select @p.selector -@p.target @p.target_path @p.vars @requires.postflight @@ -511,6 +494,8 @@ def init(ctx, **kwargs): @requires.manifest def list(ctx, **kwargs): """List the resources in your project""" + from dbt.task.list import ListTask + task = ListTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -532,10 +517,8 @@ def list(ctx, **kwargs): @cli.command("parse") @click.pass_context @global_flags -@p.profile @p.profiles_dir @p.project_dir -@p.target @p.target_path @p.threads @p.vars @@ -557,13 +540,11 @@ def parse(ctx, **kwargs): @global_flags @p.exclude @p.full_refresh -@p.profile @p.profiles_dir @p.project_dir @p.empty @p.select @p.selector -@p.target @p.target_path @p.threads @p.vars @@ -575,6 +556,8 @@ def parse(ctx, **kwargs): @requires.manifest def run(ctx, **kwargs): """Compile SQL and execute against the current target database.""" + from dbt.task.run import RunTask + task = RunTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -593,8 +576,7 @@ def run(ctx, **kwargs): @p.project_dir @p.profiles_dir @p.vars -@p.profile -@p.target +@p.target_path @p.threads @p.full_refresh @requires.postflight @@ -604,6 +586,8 @@ def run(ctx, **kwargs): @requires.runtime_config def retry(ctx, **kwargs): """Retry the nodes that failed in the previous run.""" + from dbt.task.retry import RetryTask + # Retry will parse manifest inside the task after we consolidate the flags task = RetryTask( ctx.obj["flags"], @@ -621,13 +605,12 @@ def retry(ctx, **kwargs): @global_flags @p.exclude @p.full_refresh -@p.profile @p.profiles_dir @p.project_dir @p.resource_type +@p.exclude_resource_type @p.select @p.selector -@p.target @p.target_path @p.threads @p.vars @@ -639,6 +622,8 @@ def retry(ctx, **kwargs): @requires.postflight def clone(ctx, **kwargs): """Create clones of selected nodes based on their location in the manifest provided to --state.""" + from dbt.task.clone import CloneTask + task = CloneTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -656,10 +641,8 @@ def clone(ctx, **kwargs): @global_flags @click.argument("macro") @p.args -@p.profile @p.profiles_dir @p.project_dir -@p.target @p.target_path @p.threads @p.vars @@ -671,6 +654,8 @@ def clone(ctx, **kwargs): @requires.manifest def run_operation(ctx, **kwargs): """Run the named macro with any supplied arguments.""" + from dbt.task.run_operation import RunOperationTask + task = RunOperationTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -688,13 +673,11 @@ def run_operation(ctx, **kwargs): @global_flags @p.exclude @p.full_refresh -@p.profile @p.profiles_dir @p.project_dir @p.select @p.selector @p.show -@p.target @p.target_path @p.threads @p.vars @@ 
-706,6 +689,8 @@ def run_operation(ctx, **kwargs): @requires.manifest def seed(ctx, **kwargs): """Load data from csv files into your data warehouse.""" + from dbt.task.seed import SeedTask + task = SeedTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -721,12 +706,10 @@ def seed(ctx, **kwargs): @click.pass_context @global_flags @p.exclude -@p.profile @p.profiles_dir @p.project_dir @p.select @p.selector -@p.target @p.target_path @p.threads @p.vars @@ -738,6 +721,8 @@ def seed(ctx, **kwargs): @requires.manifest def snapshot(ctx, **kwargs): """Execute snapshots defined in your project""" + from dbt.task.snapshot import SnapshotTask + task = SnapshotTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -763,12 +748,10 @@ def source(ctx, **kwargs): @global_flags @p.exclude @p.output_path # TODO: Is this ok to re-use? We have three different output params, how much can we consolidate? -@p.profile @p.profiles_dir @p.project_dir @p.select @p.selector -@p.target @p.target_path @p.threads @p.vars @@ -780,6 +763,8 @@ def source(ctx, **kwargs): @requires.manifest def freshness(ctx, **kwargs): """check the current freshness of the project's sources""" + from dbt.task.freshness import FreshnessTask + task = FreshnessTask( ctx.obj["flags"], ctx.obj["runtime_config"], @@ -802,13 +787,11 @@ def freshness(ctx, **kwargs): @click.pass_context @global_flags @p.exclude -@p.profile @p.profiles_dir @p.project_dir @p.select @p.selector @p.store_failures -@p.target @p.target_path @p.threads @p.vars @@ -820,6 +803,8 @@ def freshness(ctx, **kwargs): @requires.manifest def test(ctx, **kwargs): """Runs tests on data in deployed models. Run this after `dbt run`""" + from dbt.task.test import TestTask + task = TestTask( ctx.obj["flags"], ctx.obj["runtime_config"], diff --git a/core/dbt/cli/option_types.py b/core/dbt/cli/option_types.py index 7dc725f6b52..d55aa736e16 100644 --- a/core/dbt/cli/option_types.py +++ b/core/dbt/cli/option_types.py @@ -80,7 +80,10 @@ class ChoiceTuple(Choice): name = "CHOICE_TUPLE" def convert(self, value, param, ctx): - for value_item in value: - super().convert(value_item, param, ctx) + if not isinstance(value, str): + for value_item in value: + super().convert(value_item, param, ctx) + else: + super().convert(value, param, ctx) return value diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index 3e03376f890..b2716728ce6 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -105,6 +105,14 @@ help="Specify the nodes to exclude.", ) +export_saved_queries = click.option( + "--export-saved-queries/--no-export-saved-queries", + envvar="DBT_EXPORT_SAVED_QUERIES", + help="Export saved queries within the 'build' command, otherwise no-op", + is_flag=True, + hidden=True, +) + fail_fast = click.option( "--fail-fast/--no-fail-fast", "-x/ ", @@ -334,7 +342,7 @@ profile = click.option( "--profile", - envvar=None, + envvar="DBT_PROFILE", help="Which existing profile to load. 
Overrides setting in dbt_project.yml.", ) @@ -383,16 +391,18 @@ resource_type = click.option( "--resource-types", "--resource-type", - envvar=None, + envvar="DBT_RESOURCE_TYPES", help="Restricts the types of resources that dbt will include", type=ChoiceTuple( [ "metric", "semantic_model", + "saved_query", "source", "analysis", "model", "test", + "unit_test", "exposure", "snapshot", "seed", @@ -406,7 +416,35 @@ default=(), ) -include_saved_query = click.option( +exclude_resource_type = click.option( + "--exclude-resource-types", + "--exclude-resource-type", + envvar="DBT_EXCLUDE_RESOURCE_TYPES", + help="Specify the types of resources that dbt will exclude", + type=ChoiceTuple( + [ + "metric", + "semantic_model", + "saved_query", + "source", + "analysis", + "model", + "test", + "unit_test", + "exposure", + "snapshot", + "seed", + "default", + ], + case_sensitive=False, + ), + cls=MultiOption, + multiple=True, + default=(), +) + +# Renamed to --export-saved-queries +deprecated_include_saved_query = click.option( "--include-saved-query/--no-include-saved-query", envvar="DBT_INCLUDE_SAVED_QUERY", help="Include saved queries in the list of resources to be selected for build command", @@ -565,7 +603,7 @@ target = click.option( "--target", "-t", - envvar=None, + envvar="DBT_TARGET", help="Which target to load for the given profile", ) diff --git a/core/dbt/cli/requires.py b/core/dbt/cli/requires.py index df42580c734..487984afec2 100644 --- a/core/dbt/cli/requires.py +++ b/core/dbt/cli/requires.py @@ -15,7 +15,7 @@ from dbt.cli.flags import Flags from dbt.config import RuntimeConfig from dbt.config.runtime import load_project, load_profile, UnsetProfile -from dbt.context.manifest import generate_query_header_context +from dbt.context.query_header import generate_query_header_context from dbt_common.events.base_types import EventLevel from dbt_common.events.functions import ( diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 207d973acf6..09695fc59d7 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -52,7 +52,9 @@ def print_compile_stats(stats: Dict[NodeType, int]): dbt.tracking.track_resource_counts(resource_counts) # do not include resource types that are not actually defined in the project - stat_line = ", ".join([pluralize(ct, t) for t, ct in stats.items() if ct != 0]) + stat_line = ", ".join( + [pluralize(ct, t).replace("_", " ") for t, ct in stats.items() if ct != 0] + ) fire_event(FoundStats(stat_line=stat_line)) @@ -77,6 +79,7 @@ def _generate_stats(manifest: Manifest) -> Dict[NodeType, int]: stats[NodeType.Macro] += len(manifest.macros) stats[NodeType.Group] += len(manifest.groups) stats[NodeType.SemanticModel] += len(manifest.semantic_models) + stats[NodeType.SavedQuery] += len(manifest.saved_queries) stats[NodeType.Unit] += len(manifest.unit_tests) # TODO: should we be counting dimensions + entities? 
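Two small compilation.py changes land here: `_generate_stats` now counts saved queries, and `print_compile_stats` replaces underscores with spaces so multi-word resource types render cleanly in the `FoundStats` line. A minimal sketch of the formatting effect, assuming a simple add-an-s `pluralize` (the real helper comes from dbt_common and this stand-in only approximates it):

```python
def pluralize(count: int, item: str) -> str:
    # Simplified stand-in for dbt_common's pluralize helper.
    return f"{count} {item}" if count == 1 else f"{count} {item}s"


stats = {"model": 3, "unit_test": 2, "saved_query": 1, "seed": 0}

# Mirrors the updated stat_line construction: zero counts are dropped and
# underscores become spaces, so "unit_test" prints as "unit tests".
stat_line = ", ".join(
    pluralize(ct, t).replace("_", " ") for t, ct in stats.items() if ct != 0
)
print(stat_line)  # -> 3 models, 2 unit tests, 1 saved query
```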
@@ -271,7 +274,6 @@ def __init__(self, config) -> None: def initialize(self): make_directory(self.config.project_target_path) - make_directory(self.config.packages_install_path) # creates a ModelContext which is converted to # a dict for jinja rendering of SQL diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index 9fe62773a9a..e32005aa91f 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -15,6 +15,7 @@ Type, ) +from dbt import tracking from dbt.adapters.factory import get_include_paths, get_relation_class_by_name from dbt.adapters.contracts.connection import AdapterRequiredConfig, Credentials, HasCredentials from dbt.adapters.contracts.relation import ComponentName @@ -283,6 +284,10 @@ def get_metadata(self) -> ManifestMetadata: return ManifestMetadata( project_name=self.project_name, project_id=self.hashed_name(), + user_id=tracking.active_user.id if tracking.active_user else None, + send_anonymous_usage_stats=get_flags().SEND_ANONYMOUS_USAGE_STATS + if tracking.active_user + else None, adapter_type=self.credentials.type, ) diff --git a/core/dbt/context/manifest.py b/core/dbt/context/manifest.py index d55d3ad0f21..0d95fd3b95f 100644 --- a/core/dbt/context/manifest.py +++ b/core/dbt/context/manifest.py @@ -71,13 +71,3 @@ def to_dict(self): @contextproperty() def context_macro_stack(self): return self.macro_stack - - -class QueryHeaderContext(ManifestContext): - def __init__(self, config: AdapterRequiredConfig, manifest: Manifest) -> None: - super().__init__(config, manifest, config.project_name) - - -def generate_query_header_context(config: AdapterRequiredConfig, manifest: Manifest): - ctx = QueryHeaderContext(config, manifest) - return ctx.to_dict() diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 4c5d5e84391..77fadade09c 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -12,6 +12,8 @@ Type, Iterable, Mapping, + Tuple, + TYPE_CHECKING, ) from typing_extensions import Protocol @@ -21,7 +23,6 @@ from dbt_common.clients.jinja import MacroProtocol from dbt_common.context import get_invocation_context from dbt.adapters.factory import get_adapter, get_adapter_package_names, get_adapter_type_names -from dbt_common.clients import agate_helper from dbt.clients.jinja import get_rendered, MacroGenerator, MacroStack, UnitTestMacroGenerator from dbt.config import RuntimeConfig, Project from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER @@ -81,7 +82,8 @@ from dbt_common.utils import merge, AttrDict, cast_to_str from dbt import selected_resources -import agate +if TYPE_CHECKING: + import agate _MISSING = object() @@ -869,8 +871,10 @@ def load_result(self, name: str) -> Optional[AttrDict]: @contextmember() def store_result( - self, name: str, response: Any, agate_table: Optional[agate.Table] = None + self, name: str, response: Any, agate_table: Optional["agate.Table"] = None ) -> str: + from dbt_common.clients import agate_helper + if agate_table is None: agate_table = agate_helper.empty_table() @@ -890,7 +894,7 @@ def store_raw_result( message=Optional[str], code=Optional[str], rows_affected=Optional[str], - agate_table: Optional[agate.Table] = None, + agate_table: Optional["agate.Table"] = None, ) -> str: response = AdapterResponse(_message=message, code=code, rows_affected=rows_affected) return self.store_result(name, response, agate_table) @@ -939,7 +943,9 @@ def try_or_compiler_error( raise CompilationError(message_if_exception, self.model) @contextmember() - def 
load_agate_table(self) -> agate.Table: + def load_agate_table(self) -> "agate.Table": + from dbt_common.clients import agate_helper + if not isinstance(self.model, SeedNode): raise LoadAgateTableNotSeedError(self.model.resource_type, node=self.model) @@ -1639,12 +1645,47 @@ def generate_runtime_unit_test_context( ctx_dict = ctx.to_dict() if unit_test.overrides and unit_test.overrides.macros: + global_macro_overrides: Dict[str, Any] = {} + package_macro_overrides: Dict[Tuple[str, str], Any] = {} + + # split macro overrides into global and package-namespaced collections for macro_name, macro_value in unit_test.overrides.macros.items(): - context_value = ctx_dict.get(macro_name) - if isinstance(context_value, MacroGenerator): - ctx_dict[macro_name] = UnitTestMacroGenerator(context_value, macro_value) - else: + macro_name_split = macro_name.split(".") + macro_package = macro_name_split[0] if len(macro_name_split) == 2 else None + macro_name = macro_name_split[-1] + + # macro overrides of global macros + if macro_package is None and macro_name in ctx_dict: + original_context_value = ctx_dict[macro_name] + if isinstance(original_context_value, MacroGenerator): + macro_value = UnitTestMacroGenerator(original_context_value, macro_value) + global_macro_overrides[macro_name] = macro_value + + # macro overrides of package-namespaced macros + elif ( + macro_package + and macro_package in ctx_dict + and macro_name in ctx_dict[macro_package] + ): + original_context_value = ctx_dict[macro_package][macro_name] + if isinstance(original_context_value, MacroGenerator): + macro_value = UnitTestMacroGenerator(original_context_value, macro_value) + package_macro_overrides[(macro_package, macro_name)] = macro_value + + # macro overrides of package-namespaced macros + for (macro_package, macro_name), macro_override_value in package_macro_overrides.items(): + ctx_dict[macro_package][macro_name] = macro_override_value + # propagate override of namespaced dbt macro to global namespace + if macro_package == "dbt": + ctx_dict[macro_name] = macro_override_value + + # macro overrides of global macros, which should take precedence over equivalent package-namespaced overrides + for macro_name, macro_override_value in global_macro_overrides.items(): + ctx_dict[macro_name] = macro_override_value + # propagate override of global dbt macro to dbt namespace + if ctx_dict["dbt"].get(macro_name): + ctx_dict["dbt"][macro_name] = macro_override_value + return ctx_dict diff --git a/core/dbt/context/query_header.py b/core/dbt/context/query_header.py new file mode 100644 index 00000000000..95c5a0b7a8f --- /dev/null +++ b/core/dbt/context/query_header.py @@ -0,0 +1,13 @@ +from dbt.adapters.contracts.connection import AdapterRequiredConfig +from dbt.context.manifest import ManifestContext +from dbt.contracts.graph.manifest import Manifest + + +class QueryHeaderContext(ManifestContext): + def __init__(self, config: AdapterRequiredConfig, manifest: Manifest) -> None: + super().__init__(config, manifest, config.project_name) + + +def generate_query_header_context(config: AdapterRequiredConfig, manifest: Manifest): + ctx = QueryHeaderContext(config, manifest) + return ctx.to_dict() diff --git a/core/dbt/contracts/files.py b/core/dbt/contracts/files.py index fe5f91d265e..714782161cc 100644 --- a/core/dbt/contracts/files.py +++ b/core/dbt/contracts/files.py @@ -1,4 +1,3 @@ -import hashlib import os from dataclasses import dataclass, field @@ -7,6 +6,7 @@ from dbt.constants import MAXIMUM_SEED_SIZE from dbt_common.dataclass_schema import dbtClassMixin,
StrEnum +from dbt.artifacts.resources.base import FileHash from .util import SourceKey @@ -70,46 +70,6 @@ def seed_too_large(self) -> bool: return os.stat(self.full_path).st_size > MAXIMUM_SEED_SIZE -@dataclass -class FileHash(dbtClassMixin): - name: str # the hash type name - checksum: str # the hashlib.hash_type().hexdigest() of the file contents - - @classmethod - def empty(cls): - return FileHash(name="none", checksum="") - - @classmethod - def path(cls, path: str): - return FileHash(name="path", checksum=path) - - def __eq__(self, other): - if not isinstance(other, FileHash): - return NotImplemented - - if self.name == "none" or self.name != other.name: - return False - - return self.checksum == other.checksum - - def compare(self, contents: str) -> bool: - """Compare the file contents with the given hash""" - if self.name == "none": - return False - - return self.from_contents(contents, name=self.name) == self.checksum - - @classmethod - def from_contents(cls, contents: str, name="sha256") -> "FileHash": - """Create a file hash from the given file contents. The hash is always - the utf-8 encoding of the contents given, because dbt only reads files - as utf-8. - """ - data = contents.encode("utf-8") - checksum = hashlib.new(name, data).hexdigest() - return cls(name=name, checksum=checksum) - - @dataclass class RemoteFile(dbtClassMixin): def __init__(self, language) -> None: diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index 675cc136c81..ead3a23031e 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -22,6 +22,8 @@ ) from typing_extensions import Protocol +from dbt import deprecations +from dbt import tracking from dbt.contracts.graph.nodes import ( BaseNode, Documentation, @@ -33,7 +35,6 @@ ManifestNode, Metric, ModelNode, - DeferRelation, ResultNode, SavedQuery, SemanticModel, @@ -41,11 +42,17 @@ UnpatchedSourceDefinition, UnitTestDefinition, UnitTestFileFixture, + RESOURCE_CLASS_TO_NODE_CLASS, ) from dbt.contracts.graph.unparsed import SourcePatch, UnparsedVersion +from dbt.flags import get_flags # to preserve import paths -from dbt.artifacts.resources import NodeVersion +from dbt.artifacts.resources import ( + NodeVersion, + DeferRelation, + BaseResource, +) from dbt.artifacts.schemas.manifest import WritableManifest, ManifestMetadata, UniqueID from dbt.contracts.files import ( SourceFile, @@ -563,11 +570,29 @@ def __lt__(self, other: object) -> bool: class CandidateList(List[M]): - def last(self) -> Optional[Macro]: + def last_candidate( + self, valid_localities: Optional[List[Locality]] = None + ) -> Optional[MacroCandidate]: + """ + Obtain the last (highest precedence) MacroCandidate from the CandidateList of any locality in valid_localities. + If valid_localities is not specified, return the last MacroCandidate of any locality. 
+ """ if not self: return None self.sort() - return self[-1].macro + + if valid_localities is None: + return self[-1] + + for candidate in reversed(self): + if candidate.locality in valid_localities: + return candidate + + return None + + def last(self) -> Optional[Macro]: + last_candidate = self.last_candidate() + return last_candidate.macro if last_candidate is not None else None def _get_locality(macro: Macro, root_project_name: str, internal_packages: Set[str]) -> Locality: @@ -772,6 +797,7 @@ class ManifestStateCheck(dbtClassMixin): NodeClassT = TypeVar("NodeClassT", bound="BaseNode") +ResourceClassT = TypeVar("ResourceClassT", bound="BaseResource") @dataclass @@ -922,7 +948,33 @@ def find_materialization_macro_by_name( for specificity, atype in enumerate(self._get_parent_adapter_types(adapter_type)) ) ) - return candidates.last() + core_candidates = [ + candidate for candidate in candidates if candidate.locality == Locality.Core + ] + + materialization_candidate = candidates.last_candidate() + # If an imported materialization macro was found that also had a core candidate, fire a deprecation + if ( + materialization_candidate is not None + and materialization_candidate.locality == Locality.Imported + and core_candidates + ): + # preserve legacy behaviour - allow materialization override + if ( + get_flags().require_explicit_package_overrides_for_builtin_materializations + is False + ): + deprecations.warn( + "package-materialization-override", + package_name=materialization_candidate.macro.package_name, + materialization_name=materialization_name, + ) + else: + materialization_candidate = candidates.last_candidate( + valid_localities=[Locality.Core, Locality.Root] + ) + + return materialization_candidate.macro if materialization_candidate else None def get_resource_fqns(self) -> Mapping[str, PathSet]: resource_fqns: Dict[str, Set[Tuple[str, ...]]] = {} @@ -1022,29 +1074,85 @@ def build_group_map(self): group_map[node.group].append(node.unique_id) self.group_map = group_map + def fill_tracking_metadata(self): + self.metadata.user_id = tracking.active_user.id if tracking.active_user else None + self.metadata.send_anonymous_usage_stats = get_flags().SEND_ANONYMOUS_USAGE_STATS + @classmethod + def from_writable_manifest(cls, writable_manifest: WritableManifest) -> "Manifest": + manifest = Manifest( + nodes=cls._map_resources_to_map_nodes(writable_manifest.nodes), + disabled=cls._map_list_resources_to_map_list_nodes(writable_manifest.disabled), + unit_tests=cls._map_resources_to_map_nodes(writable_manifest.unit_tests), + sources=cls._map_resources_to_map_nodes(writable_manifest.sources), + macros=cls._map_resources_to_map_nodes(writable_manifest.macros), + docs=cls._map_resources_to_map_nodes(writable_manifest.docs), + exposures=cls._map_resources_to_map_nodes(writable_manifest.exposures), + metrics=cls._map_resources_to_map_nodes(writable_manifest.metrics), + groups=cls._map_resources_to_map_nodes(writable_manifest.groups), + semantic_models=cls._map_resources_to_map_nodes(writable_manifest.semantic_models), + selectors={ + selector_id: selector + for selector_id, selector in writable_manifest.selectors.items() + }, + ) + + return manifest + def _map_nodes_to_map_resources(cls, nodes_map: MutableMapping[str, NodeClassT]): return {node_id: node.to_resource() for node_id, node in nodes_map.items()} + def _map_list_nodes_to_map_list_resources( + cls, nodes_map: MutableMapping[str, List[NodeClassT]] + ): + return { + node_id: [node.to_resource() for node in node_list] + for node_id, 
node_list in nodes_map.items() + } + + @classmethod + def _map_resources_to_map_nodes(cls, resources_map: Mapping[str, ResourceClassT]): + return { + node_id: RESOURCE_CLASS_TO_NODE_CLASS[type(resource)].from_resource(resource) + for node_id, resource in resources_map.items() + } + + @classmethod + def _map_list_resources_to_map_list_nodes( + cls, resources_map: Optional[Mapping[str, List[ResourceClassT]]] + ): + if resources_map is None: + return {} + + return { + node_id: [ + RESOURCE_CLASS_TO_NODE_CLASS[type(resource)].from_resource(resource) + for resource in resource_list + ] + for node_id, resource_list in resources_map.items() + } + def writable_manifest(self) -> "WritableManifest": self.build_parent_and_child_maps() self.build_group_map() + self.fill_tracking_metadata() + return WritableManifest( - nodes=self.nodes, + nodes=self._map_nodes_to_map_resources(self.nodes), sources=self._map_nodes_to_map_resources(self.sources), - macros=self.macros, - docs=self.docs, + macros=self._map_nodes_to_map_resources(self.macros), + docs=self._map_nodes_to_map_resources(self.docs), exposures=self._map_nodes_to_map_resources(self.exposures), metrics=self._map_nodes_to_map_resources(self.metrics), groups=self._map_nodes_to_map_resources(self.groups), selectors=self.selectors, metadata=self.metadata, - disabled=self.disabled, + disabled=self._map_list_nodes_to_map_list_resources(self.disabled), child_map=self.child_map, parent_map=self.parent_map, group_map=self.group_map, semantic_models=self._map_nodes_to_map_resources(self.semantic_models), - unit_tests=self.unit_tests, + unit_tests=self._map_nodes_to_map_resources(self.unit_tests), saved_queries=self._map_nodes_to_map_resources(self.saved_queries), ) @@ -1358,7 +1466,7 @@ def is_invalid_protected_ref( ) # Called by requires.manifest after ManifestLoader.get_full_manifest - def merge_from_artifact(self, other: "WritableManifest") -> None: + def merge_from_artifact(self, other: "Manifest") -> None: """Update this manifest by adding the 'defer_relation' attribute to all nodes with a counterpart in the stateful manifest used for deferral. @@ -1553,7 +1661,12 @@ def __reduce_ex__(self, protocol): class MacroManifest(MacroMethods): def __init__(self, macros) -> None: self.macros = macros - self.metadata = ManifestMetadata() + self.metadata = ManifestMetadata( + user_id=tracking.active_user.id if tracking.active_user else None, + send_anonymous_usage_stats=get_flags().SEND_ANONYMOUS_USAGE_STATS + if tracking.active_user + else None, + ) # This is returned by the 'graph' context property # in the ProviderContext class. 
self.flat_graph: Dict[str, Any] = {} diff --git a/core/dbt/contracts/graph/model_config.py b/core/dbt/contracts/graph/model_config.py index 35c8bbca9ce..b45c313327c 100644 --- a/core/dbt/contracts/graph/model_config.py +++ b/core/dbt/contracts/graph/model_config.py @@ -1,27 +1,22 @@ from dataclasses import field, dataclass -from typing import Any, List, Optional, Dict, Union, Type -from typing_extensions import Annotated +from typing import Any, List, Optional, Dict, Type from dbt.artifacts.resources import ( ExposureConfig, MetricConfig, SavedQueryConfig, SemanticModelConfig, + NodeConfig, + SeedConfig, + TestConfig, + SnapshotConfig, SourceConfig, + ModelConfig, + UnitTestConfig, ) -from dbt_common.contracts.config.base import BaseConfig, MergeBehavior, CompareBehavior -from dbt_common.contracts.config.materialization import OnConfigurationChangeOption -from dbt_common.contracts.config.metadata import Metadata, ShowBehavior -from dbt_common.dataclass_schema import ( - dbtClassMixin, - ValidationError, -) -from dbt.contracts.graph.unparsed import Docs -from dbt.contracts.graph.utils import validate_color -from dbt.contracts.util import list_str -from dbt import hooks -from dbt.node_types import NodeType, AccessType -from mashumaro.jsonschema.annotations import Pattern +from dbt_common.contracts.config.base import BaseConfig +from dbt_common.contracts.config.metadata import Metadata +from dbt.node_types import NodeType def metas(*metas: Metadata) -> Dict[str, Any]: @@ -38,263 +33,10 @@ def insensitive_patterns(*patterns: str): return "^({})$".format("|".join(lowercased)) -class Severity(str): - pass - - -@dataclass -class ContractConfig(dbtClassMixin): - enforced: bool = False - alias_types: bool = True - - -@dataclass -class Hook(dbtClassMixin): - sql: str - transaction: bool = True - index: Optional[int] = None - - -@dataclass -class NodeAndTestConfig(BaseConfig): - enabled: bool = True - # these fields are included in serialized output, but are not part of - # config comparison (they are part of database_representation) - alias: Optional[str] = field( - default=None, - metadata=CompareBehavior.Exclude.meta(), - ) - schema: Optional[str] = field( - default=None, - metadata=CompareBehavior.Exclude.meta(), - ) - database: Optional[str] = field( - default=None, - metadata=CompareBehavior.Exclude.meta(), - ) - tags: Union[List[str], str] = field( - default_factory=list_str, - metadata=metas(ShowBehavior.Hide, MergeBehavior.Append, CompareBehavior.Exclude), - ) - meta: Dict[str, Any] = field( - default_factory=dict, - metadata=MergeBehavior.Update.meta(), - ) - group: Optional[str] = field( - default=None, - metadata=CompareBehavior.Exclude.meta(), - ) - - -@dataclass -class NodeConfig(NodeAndTestConfig): - # Note: if any new fields are added with MergeBehavior, also update the - # 'mergebehavior' dictionary - materialized: str = "view" - incremental_strategy: Optional[str] = None - persist_docs: Dict[str, Any] = field(default_factory=dict) - post_hook: List[Hook] = field( - default_factory=list, - metadata={"merge": MergeBehavior.Append, "alias": "post-hook"}, - ) - pre_hook: List[Hook] = field( - default_factory=list, - metadata={"merge": MergeBehavior.Append, "alias": "pre-hook"}, - ) - quoting: Dict[str, Any] = field( - default_factory=dict, - metadata=MergeBehavior.Update.meta(), - ) - # This is actually only used by seeds. Should it be available to others? - # That would be a breaking change! 
- column_types: Dict[str, Any] = field( - default_factory=dict, - metadata=MergeBehavior.Update.meta(), - ) - full_refresh: Optional[bool] = None - # 'unique_key' doesn't use 'Optional' because typing.get_type_hints was - # sometimes getting the Union order wrong, causing serialization failures. - unique_key: Union[str, List[str], None] = None - on_schema_change: Optional[str] = "ignore" - on_configuration_change: OnConfigurationChangeOption = field( - default_factory=OnConfigurationChangeOption.default - ) - grants: Dict[str, Any] = field( - default_factory=dict, metadata=MergeBehavior.DictKeyAppend.meta() - ) - packages: List[str] = field( - default_factory=list, - metadata=MergeBehavior.Append.meta(), - ) - docs: Docs = field( - default_factory=Docs, - metadata=MergeBehavior.Update.meta(), - ) - contract: ContractConfig = field( - default_factory=ContractConfig, - metadata=MergeBehavior.Update.meta(), - ) - - def __post_init__(self): - # we validate that node_color has a suitable value to prevent dbt-docs from crashing - if self.docs.node_color: - node_color = self.docs.node_color - if not validate_color(node_color): - raise ValidationError( - f"Invalid color name for docs.node_color: {node_color}. " - "It is neither a valid HTML color name nor a valid HEX code." - ) - - if ( - self.contract.enforced - and self.materialized == "incremental" - and self.on_schema_change not in ("append_new_columns", "fail") - ): - raise ValidationError( - f"Invalid value for on_schema_change: {self.on_schema_change}. Models " - "materialized as incremental with contracts enabled must set " - "on_schema_change to 'append_new_columns' or 'fail'" - ) - - @classmethod - def __pre_deserialize__(cls, data): - data = super().__pre_deserialize__(data) - for key in hooks.ModelHookType: - if key in data: - data[key] = [hooks.get_hook_dict(h) for h in data[key]] - return data - - # this is still used by jsonschema validation - @classmethod - def field_mapping(cls): - return {"post_hook": "post-hook", "pre_hook": "pre-hook"} - - -@dataclass -class ModelConfig(NodeConfig): - access: AccessType = field( - default=AccessType.Protected, - metadata=MergeBehavior.Update.meta(), - ) - - @dataclass class UnitTestNodeConfig(NodeConfig): expected_rows: List[Dict[str, Any]] = field(default_factory=list) - - -@dataclass -class SeedConfig(NodeConfig): - materialized: str = "seed" - delimiter: str = "," - quote_columns: Optional[bool] = None - - @classmethod - def validate(cls, data): - super().validate(data) - if data.get("materialized") and data.get("materialized") != "seed": - raise ValidationError("A seed must have a materialized value of 'seed'") - - -SEVERITY_PATTERN = r"^([Ww][Aa][Rr][Nn]|[Ee][Rr][Rr][Oo][Rr])$" - - -@dataclass -class TestConfig(NodeAndTestConfig): - __test__ = False - - # this is repeated because of a different default - schema: Optional[str] = field( - default="dbt_test__audit", - metadata=CompareBehavior.Exclude.meta(), - ) - materialized: str = "test" - # Annotated is used by mashumaro for jsonschema generation - severity: Annotated[Severity, Pattern(SEVERITY_PATTERN)] = Severity("ERROR") - store_failures: Optional[bool] = None - store_failures_as: Optional[str] = None - where: Optional[str] = None - limit: Optional[int] = None - fail_calc: str = "count(*)" - warn_if: str = "!= 0" - error_if: str = "!= 0" - - def __post_init__(self): - """ - The presence of a setting for `store_failures_as` overrides any existing setting for `store_failures`, - regardless of level of granularity. 
If `store_failures_as` is not set, then `store_failures` takes effect. - At the time of implementation, `store_failures = True` would always create a table; the user could not - configure this. Hence, if `store_failures = True` and `store_failures_as` is not specified, then it - should be set to "table" to mimic the existing functionality. - - A side effect of this overriding functionality is that `store_failures_as="view"` at the project - level cannot be turned off at the model level without setting both `store_failures_as` and - `store_failures`. The former would cascade down and override `store_failures=False`. The proposal - is to include "ephemeral" as a value for `store_failures_as`, which effectively sets - `store_failures=False`. - - The exception handling for this is tricky. If we raise an exception here, the entire run fails at - parse time. We would rather well-formed models run successfully, leaving only exceptions to be rerun - if necessary. Hence, the exception needs to be raised in the test materialization. In order to do so, - we need to make sure that we go down the `store_failures = True` route with the invalid setting for - `store_failures_as`. This results in the `.get()` defaulted to `True` below, instead of a normal - dictionary lookup as is done in the `if` block. Refer to the test materialization for the - exception that is raise as a result of an invalid value. - - The intention of this block is to behave as if `store_failures_as` is the only setting, - but still allow for backwards compatibility for `store_failures`. - See https://github.com/dbt-labs/dbt-core/issues/6914 for more information. - """ - - # if `store_failures_as` is not set, it gets set by `store_failures` - # the settings below mimic existing behavior prior to `store_failures_as` - get_store_failures_as_map = { - True: "table", - False: "ephemeral", - None: None, - } - - # if `store_failures_as` is set, it dictates what `store_failures` gets set to - # the settings below overrides whatever `store_failures` is set to by the user - get_store_failures_map = { - "ephemeral": False, - "table": True, - "view": True, - } - - if self.store_failures_as is None: - self.store_failures_as = get_store_failures_as_map[self.store_failures] - else: - self.store_failures = get_store_failures_map.get(self.store_failures_as, True) - - @classmethod - def same_contents(cls, unrendered: Dict[str, Any], other: Dict[str, Any]) -> bool: - """This is like __eq__, except it explicitly checks certain fields.""" - modifiers = [ - "severity", - "where", - "limit", - "fail_calc", - "warn_if", - "error_if", - "store_failures", - "store_failures_as", - ] - - seen = set() - for _, target_name in cls._get_fields(): - key = target_name - seen.add(key) - if key in modifiers: - if not cls.compare_key(unrendered, other, key): - return False - return True - - @classmethod - def validate(cls, data): - super().validate(data) - if data.get("materialized") and data.get("materialized") != "test": - raise ValidationError("A test must have a materialized value of 'test'") + expected_sql: Optional[str] = None @dataclass @@ -303,70 +45,6 @@ class EmptySnapshotConfig(NodeConfig): unique_key: Optional[str] = None # override NodeConfig unique_key definition -@dataclass -class SnapshotConfig(EmptySnapshotConfig): - strategy: Optional[str] = None - unique_key: Optional[str] = None - target_schema: Optional[str] = None - target_database: Optional[str] = None - updated_at: Optional[str] = None - # Not using Optional because of serialization issues with 
a Union of str and List[str] - check_cols: Union[str, List[str], None] = None - - @classmethod - def validate(cls, data): - super().validate(data) - # Note: currently you can't just set these keys in schema.yml because this validation - # will fail when parsing the snapshot node. - if not data.get("strategy") or not data.get("unique_key") or not data.get("target_schema"): - raise ValidationError( - "Snapshots must be configured with a 'strategy', 'unique_key', " - "and 'target_schema'." - ) - if data.get("strategy") == "check": - if not data.get("check_cols"): - raise ValidationError( - "A snapshot configured with the check strategy must " - "specify a check_cols configuration." - ) - if isinstance(data["check_cols"], str) and data["check_cols"] != "all": - raise ValidationError( - f"Invalid value for 'check_cols': {data['check_cols']}. " - "Expected 'all' or a list of strings." - ) - elif data.get("strategy") == "timestamp": - if not data.get("updated_at"): - raise ValidationError( - "A snapshot configured with the timestamp strategy " - "must specify an updated_at configuration." - ) - if data.get("check_cols"): - raise ValidationError("A 'timestamp' snapshot should not have 'check_cols'") - # If the strategy is not 'check' or 'timestamp' it's a custom strategy, - # formerly supported with GenericSnapshotConfig - - if data.get("materialized") and data.get("materialized") != "snapshot": - raise ValidationError("A snapshot must have a materialized value of 'snapshot'") - - # Called by "calculate_node_config_dict" in ContextConfigGenerator - def finalize_and_validate(self): - data = self.to_dict(omit_none=True) - self.validate(data) - return self.from_dict(data) - - -@dataclass -class UnitTestConfig(BaseConfig): - tags: Union[str, List[str]] = field( - default_factory=list_str, - metadata=metas(ShowBehavior.Hide, MergeBehavior.Append, CompareBehavior.Exclude), - ) - meta: Dict[str, Any] = field( - default_factory=dict, - metadata=MergeBehavior.Update.meta(), - ) - - RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = { NodeType.Metric: MetricConfig, NodeType.SemanticModel: SemanticModelConfig, @@ -375,7 +53,7 @@ class UnitTestConfig(BaseConfig): NodeType.Source: SourceConfig, NodeType.Seed: SeedConfig, NodeType.Test: TestConfig, - NodeType.Model: NodeConfig, + NodeType.Model: ModelConfig, NodeType.Snapshot: SnapshotConfig, NodeType.Unit: UnitTestConfig, } diff --git a/core/dbt/contracts/graph/nodes.py b/core/dbt/contracts/graph/nodes.py index 303fcfdc323..e1f409ff1de 100644 --- a/core/dbt/contracts/graph/nodes.py +++ b/core/dbt/contracts/graph/nodes.py @@ -1,6 +1,5 @@ import os from datetime import datetime -import time from dataclasses import dataclass, field import hashlib @@ -16,17 +15,13 @@ Type, Iterator, Literal, + get_args, ) from dbt import deprecations -from dbt_common.contracts.constraints import ( - ConstraintType, - ModelLevelConstraint, -) -from dbt_common.dataclass_schema import dbtClassMixin +from dbt_common.contracts.constraints import ConstraintType from dbt_common.clients.system import write_file -from dbt.contracts.files import FileHash from dbt.contracts.graph.unparsed import ( HasYamlMetadata, TestDef, @@ -34,9 +29,10 @@ UnparsedSourceTableDefinition, UnparsedColumn, UnitTestOverrides, - UnitTestInputFixture, - UnitTestOutputFixture, - UnitTestNodeVersions, +) +from dbt.contracts.graph.model_config import ( + UnitTestNodeConfig, + EmptySnapshotConfig, ) from dbt.contracts.graph.node_args import ModelNodeArgs from dbt_common.events.functions import warn_or_error @@ -57,25 
+53,12 @@ VERSIONED_NODE_TYPES, ) -from .model_config import ( - NodeConfig, - ModelConfig, - SeedConfig, - TestConfig, - EmptySnapshotConfig, - SnapshotConfig, - UnitTestConfig, - UnitTestNodeConfig, -) from dbt.artifacts.resources import ( BaseResource, - ColumnInfo as ColumnInfoResource, DependsOn, Docs, Exposure as ExposureResource, - HasRelationMetadata as HasRelationMetadataResource, - MacroDependsOn, MacroArgument, Documentation as DocumentationResource, Macro as MacroResource, @@ -83,11 +66,29 @@ NodeVersion, Group as GroupResource, GraphResource, - Quoting as QuotingResource, - RefArgs as RefArgsResource, SavedQuery as SavedQueryResource, SemanticModel as SemanticModelResource, + ParsedResourceMandatory, + ParsedResource, + CompiledResource, + HasRelationMetadata as HasRelationMetadataResource, + FileHash, + NodeConfig, + ColumnInfo, + InjectedCTE, + Analysis as AnalysisResource, + HookNode as HookNodeResource, + Model as ModelResource, + ModelConfig, + SqlOperation as SqlOperationResource, + Seed as SeedResource, + SingularTest as SingularTestResource, + GenericTest as GenericTestResource, + Snapshot as SnapshotResource, + Quoting as QuotingResource, SourceDefinition as SourceDefinitionResource, + MetricInputMeasure, + UnitTestDefinition as UnitTestDefinitionResource, ) # ===================================================================== @@ -180,35 +181,30 @@ def same_fqn(self, other) -> bool: @dataclass -class Contract(dbtClassMixin): - enforced: bool = False - alias_types: bool = True - checksum: Optional[str] = None - - -@dataclass -class DeferRelation(HasRelationMetadataResource): - alias: str - relation_name: Optional[str] +class HasRelationMetadata(HasRelationMetadataResource): + @classmethod + def __pre_deserialize__(cls, data): + data = super().__pre_deserialize__(data) + if "database" not in data: + data["database"] = None + return data @property - def identifier(self): - return self.alias + def quoting_dict(self) -> Dict[str, bool]: + if hasattr(self, "quoting"): + return self.quoting.to_dict(omit_none=True) + else: + return {} @dataclass -class ParsedNodeMandatory(GraphNode, HasRelationMetadataResource): - alias: str - checksum: FileHash - config: NodeConfig = field(default_factory=NodeConfig) - - @property - def identifier(self): - return self.alias +class ParsedNodeMandatory(ParsedResourceMandatory, GraphNode, HasRelationMetadata): + pass # This needs to be in all ManifestNodes and also in SourceDefinition, -# because of "source freshness" +# because of "source freshness". Should not be in artifacts, because we +# don't write out _event_status. 
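[Editor's note: the `store_failures` / `store_failures_as` precedence removed from `TestConfig` earlier in this diff is easy to get wrong, so here is a minimal, self-contained sketch of that resolution logic. The helper name `resolve_store_failures` is illustrative only and is not part of dbt-core.]

from typing import Optional, Tuple

# Hypothetical helper mirroring the two mapping dicts in TestConfig above;
# not dbt-core API.
def resolve_store_failures(
    store_failures: Optional[bool], store_failures_as: Optional[str]
) -> Tuple[Optional[bool], Optional[str]]:
    if store_failures_as is None:
        # `store_failures` drives the result; True keeps the legacy "table"
        store_failures_as = {True: "table", False: "ephemeral", None: None}[store_failures]
    else:
        # `store_failures_as` wins; an unrecognized value maps to True so the
        # test materialization, not the parser, raises the error
        store_failures = {"ephemeral": False, "table": True, "view": True}.get(
            store_failures_as, True
        )
    return store_failures, store_failures_as

assert resolve_store_failures(True, None) == (True, "table")
assert resolve_store_failures(False, "view") == (True, "view")   # the override wins
assert resolve_store_failures(None, "bogus") == (True, "bogus")  # error deferred to materialization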
@dataclass class NodeInfoMixin: _event_status: Dict[str, Any] = field(default_factory=dict) @@ -244,21 +240,7 @@ def clear_event_status(self): @dataclass -class ParsedNode(NodeInfoMixin, ParsedNodeMandatory, SerializableType): - tags: List[str] = field(default_factory=list) - description: str = field(default="") - columns: Dict[str, ColumnInfoResource] = field(default_factory=dict) - meta: Dict[str, Any] = field(default_factory=dict) - group: Optional[str] = None - docs: Docs = field(default_factory=Docs) - patch_path: Optional[str] = None - build_path: Optional[str] = None - unrendered_config: Dict[str, Any] = field(default_factory=dict) - created_at: float = field(default_factory=lambda: time.time()) - config_call_dict: Dict[str, Any] = field(default_factory=dict) - relation_name: Optional[str] = None - raw_code: str = "" - +class ParsedNode(ParsedResource, NodeInfoMixin, ParsedNodeMandatory, SerializableType): def get_target_write_path(self, target_path: str, subdirectory: str): # This is called for both the "compiled" subdirectory of "target" and the "run" subdirectory if os.path.basename(self.path) == os.path.basename(self.original_file_path): @@ -298,8 +280,6 @@ def _deserialize(cls, dct: Dict[str, int]): return AnalysisNode.from_dict(dct) elif resource_type == "seed": return SeedNode.from_dict(dct) - elif resource_type == "rpc": - return RPCNode.from_dict(dct) elif resource_type == "sql": return SqlNode.from_dict(dct) elif resource_type == "test": @@ -397,31 +377,10 @@ def is_external_node(self): @dataclass -class InjectedCTE(dbtClassMixin): - """Used in CompiledNodes as part of ephemeral model processing""" - - id: str - sql: str - - -@dataclass -class CompiledNode(ParsedNode): +class CompiledNode(CompiledResource, ParsedNode): """Contains attributes necessary for SQL files and nodes with refs, sources, etc, so all ManifestNodes except SeedNode.""" - language: str = "sql" - refs: List[RefArgsResource] = field(default_factory=list) - sources: List[List[str]] = field(default_factory=list) - metrics: List[List[str]] = field(default_factory=list) - depends_on: DependsOn = field(default_factory=DependsOn) - compiled_path: Optional[str] = None - compiled: bool = False - compiled_code: Optional[str] = None - extra_ctes_injected: bool = False - extra_ctes: List[InjectedCTE] = field(default_factory=list) - _pre_injected_sql: Optional[str] = None - contract: Contract = field(default_factory=Contract) - @property def empty(self): return not self.raw_code.strip() @@ -439,20 +398,6 @@ def set_cte(self, cte_id: str, sql: str): else: self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) - def __post_serialize__(self, dct): - dct = super().__post_serialize__(dct) - if "_pre_injected_sql" in dct: - del dct["_pre_injected_sql"] - # Remove compiled attributes - if "compiled" in dct and dct["compiled"] is False: - del dct["compiled"] - del dct["extra_ctes_injected"] - del dct["extra_ctes"] - # "omit_none" means these might not be in the dictionary - if "compiled_code" in dct: - del dct["compiled_code"] - return dct - @property def depends_on_nodes(self): return self.depends_on.nodes @@ -468,26 +413,24 @@ def depends_on_macros(self): @dataclass -class AnalysisNode(CompiledNode): - resource_type: Literal[NodeType.Analysis] +class AnalysisNode(AnalysisResource, CompiledNode): + @classmethod + def resource_class(cls) -> Type[AnalysisResource]: + return AnalysisResource @dataclass -class HookNode(CompiledNode): - resource_type: Literal[NodeType.Operation] - index: Optional[int] = None +class 
HookNode(HookNodeResource, CompiledNode): + @classmethod + def resource_class(cls) -> Type[HookNodeResource]: + return HookNodeResource @dataclass -class ModelNode(CompiledNode): - resource_type: Literal[NodeType.Model] - access: AccessType = AccessType.Protected - config: ModelConfig = field(default_factory=ModelConfig) - constraints: List[ModelLevelConstraint] = field(default_factory=list) - version: Optional[NodeVersion] = None - latest_version: Optional[NodeVersion] = None - deprecation_date: Optional[datetime] = None - defer_relation: Optional[DeferRelation] = None +class ModelNode(ModelResource, CompiledNode): + @classmethod + def resource_class(cls) -> Type[ModelResource]: + return ModelResource @classmethod def from_args(cls, args: ModelNodeArgs) -> "ModelNode": @@ -541,6 +484,60 @@ def search_name(self): def materialization_enforces_constraints(self) -> bool: return self.config.materialized in ["table", "incremental"] + def infer_primary_key(self, data_tests: List["GenericTestNode"]) -> List[str]: + """ + Infers the columns that can be used as primary key of a model in the following order: + 1. Columns with primary key constraints + 2. Columns with unique and not_null data tests + 3. Columns with enabled unique or dbt_utils.unique_combination_of_columns data tests + 4. Columns with disabled unique or dbt_utils.unique_combination_of_columns data tests + """ + for constraint in self.constraints: + if constraint.type == ConstraintType.primary_key: + return constraint.columns + + for column, column_info in self.columns.items(): + for column_constraint in column_info.constraints: + if column_constraint.type == ConstraintType.primary_key: + return [column] + + columns_with_enabled_unique_tests = set() + columns_with_disabled_unique_tests = set() + columns_with_not_null_tests = set() + for test in data_tests: + columns = [] + if "column_name" in test.test_metadata.kwargs: + columns = [test.test_metadata.kwargs["column_name"]] + elif "combination_of_columns" in test.test_metadata.kwargs: + columns = test.test_metadata.kwargs["combination_of_columns"] + + for column in columns: + if test.test_metadata.name in ["unique", "unique_combination_of_columns"]: + if test.config.enabled: + columns_with_enabled_unique_tests.add(column) + else: + columns_with_disabled_unique_tests.add(column) + elif test.test_metadata.name == "not_null": + columns_with_not_null_tests.add(column) + + columns_with_unique_and_not_null_tests = [] + for column in columns_with_not_null_tests: + if ( + column in columns_with_enabled_unique_tests + or column in columns_with_disabled_unique_tests + ): + columns_with_unique_and_not_null_tests.append(column) + if columns_with_unique_and_not_null_tests: + return columns_with_unique_and_not_null_tests + + if columns_with_enabled_unique_tests: + return list(columns_with_enabled_unique_tests) + + if columns_with_disabled_unique_tests: + return list(columns_with_disabled_unique_tests) + + return [] + def same_contents(self, old, adapter_type) -> bool: return super().same_contents(old, adapter_type) and self.same_ref_representation(old) @@ -765,15 +762,11 @@ def same_contract(self, old, adapter_type=None) -> bool: return False -# TODO: rm? 
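[Editor's note: the inference order documented in `infer_primary_key` above is worth seeing end to end. The following is an illustrative, self-contained distillation using plain dicts in place of real `ModelNode`/`GenericTestNode` objects; the shapes and the `infer_pk` name are stand-ins, not dbt-core API.]

from typing import Dict, List

def infer_pk(constraint_cols: List[str], tests: List[Dict]) -> List[str]:
    # 1. primary-key constraints win outright
    if constraint_cols:
        return constraint_cols
    unique_enabled: set = set()
    unique_disabled: set = set()
    not_null: set = set()
    for t in tests:
        cols = t.get("columns", [])
        if t["name"] in ("unique", "unique_combination_of_columns"):
            (unique_enabled if t.get("enabled", True) else unique_disabled).update(cols)
        elif t["name"] == "not_null":
            not_null.update(cols)
    # 2. columns that are both unique and not_null beat unique alone
    both = [c for c in not_null if c in unique_enabled | unique_disabled]
    if both:
        return both
    # 3. enabled unique tests beat 4. disabled ones
    return list(unique_enabled) or list(unique_disabled)

assert infer_pk([], [
    {"name": "unique", "columns": ["id"]},
    {"name": "not_null", "columns": ["id"]},
]) == ["id"]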
@dataclass -class RPCNode(CompiledNode): - resource_type: Literal[NodeType.RPCCall] - - -@dataclass -class SqlNode(CompiledNode): - resource_type: Literal[NodeType.SqlOperation] +class SqlNode(SqlOperationResource, CompiledNode): + @classmethod + def resource_class(cls) -> Type[SqlOperationResource]: + return SqlOperationResource # ==================================== @@ -782,14 +775,10 @@ class SqlNode(CompiledNode): @dataclass -class SeedNode(ParsedNode): # No SQLDefaults! - resource_type: Literal[NodeType.Seed] - config: SeedConfig = field(default_factory=SeedConfig) - # seeds need the root_path because the contents are not loaded initially - # and we need the root_path to load the seed later - root_path: Optional[str] = None - depends_on: MacroDependsOn = field(default_factory=MacroDependsOn) - defer_relation: Optional[DeferRelation] = None +class SeedNode(SeedResource, ParsedNode): # No SQLDefaults! + @classmethod + def resource_class(cls) -> Type[SeedResource]: + return SeedResource def same_seeds(self, other: "SeedNode") -> bool: # for seeds, we check the hashes. If the hashes are different types, @@ -908,11 +897,10 @@ def is_relational(self): @dataclass -class SingularTestNode(TestShouldStoreFailures, CompiledNode): - resource_type: Literal[NodeType.Test] - # Was not able to make mypy happy and keep the code working. We need to - # refactor the various configs. - config: TestConfig = field(default_factory=TestConfig) # type: ignore +class SingularTestNode(SingularTestResource, TestShouldStoreFailures, CompiledNode): + @classmethod + def resource_class(cls) -> Type[SingularTestResource]: + return SingularTestResource @property def test_node_type(self): @@ -925,33 +913,10 @@ def test_node_type(self): @dataclass -class TestMetadata(dbtClassMixin): - __test__ = False - - name: str - # kwargs are the args that are left in the test builder after - # removing configs. They are set from the test builder when - # the test node is created. - kwargs: Dict[str, Any] = field(default_factory=dict) - namespace: Optional[str] = None - - -# This has to be separated out because it has no default and so -# has to be included as a superclass, not an attribute -@dataclass -class HasTestMetadata(dbtClassMixin): - test_metadata: TestMetadata - - -@dataclass -class GenericTestNode(TestShouldStoreFailures, CompiledNode, HasTestMetadata): - resource_type: Literal[NodeType.Test] - column_name: Optional[str] = None - file_key_name: Optional[str] = None - # Was not able to make mypy happy and keep the code working. We need to - # refactor the various configs. 
- config: TestConfig = field(default_factory=TestConfig) # type: ignore - attached_node: Optional[str] = None +class GenericTestNode(GenericTestResource, TestShouldStoreFailures, CompiledNode): + @classmethod + def resource_class(cls) -> Type[GenericTestResource]: + return GenericTestResource def same_contents(self, other, adapter_type: Optional[str]) -> bool: if other is None: @@ -984,33 +949,10 @@ class UnitTestNode(CompiledNode): @dataclass -class UnitTestDefinitionMandatory: - model: str - given: Sequence[UnitTestInputFixture] - expect: UnitTestOutputFixture - - -@dataclass -class UnitTestDefinition(NodeInfoMixin, GraphNode, UnitTestDefinitionMandatory): - description: str = "" - overrides: Optional[UnitTestOverrides] = None - depends_on: DependsOn = field(default_factory=DependsOn) - config: UnitTestConfig = field(default_factory=UnitTestConfig) - checksum: Optional[str] = None - schema: Optional[str] = None - created_at: float = field(default_factory=lambda: time.time()) - versions: Optional[UnitTestNodeVersions] = None - version: Optional[NodeVersion] = None - - @property - def build_path(self): - # TODO: is this actually necessary? - return self.original_file_path - - @property - def compiled_path(self): - # TODO: is this actually necessary? - return self.original_file_path +class UnitTestDefinition(NodeInfoMixin, GraphNode, UnitTestDefinitionResource): + @classmethod + def resource_class(cls) -> Type[UnitTestDefinitionResource]: + return UnitTestDefinitionResource @property def depends_on_nodes(self): @@ -1049,7 +991,7 @@ def same_contents(self, other: Optional["UnitTestDefinition"]) -> bool: @dataclass class UnitTestFileFixture(BaseNode): resource_type: Literal[NodeType.Fixture] - rows: Optional[List[Dict[str, Any]]] = None + rows: Optional[Union[List[Dict[str, Any]], str]] = None # ==================================== @@ -1071,10 +1013,10 @@ class IntermediateSnapshotNode(CompiledNode): @dataclass -class SnapshotNode(CompiledNode): - resource_type: Literal[NodeType.Snapshot] - config: SnapshotConfig - defer_relation: Optional[DeferRelation] = None +class SnapshotNode(SnapshotResource, CompiledNode): + @classmethod + def resource_class(cls) -> Type[SnapshotResource]: + return SnapshotResource # ==================================== @@ -1084,6 +1026,10 @@ class SnapshotNode(CompiledNode): @dataclass class Macro(MacroResource, BaseNode): + @classmethod + def resource_class(cls) -> Type[MacroResource]: + return MacroResource + def same_contents(self, other: Optional["Macro"]) -> bool: if other is None: return False @@ -1103,6 +1049,10 @@ def depends_on_macros(self): @dataclass class Documentation(DocumentationResource, BaseNode): + @classmethod + def resource_class(cls) -> Type[DocumentationResource]: + return DocumentationResource + @property def search_name(self): return self.name @@ -1219,7 +1169,7 @@ class SourceDefinition( NodeInfoMixin, GraphNode, SourceDefinitionResource, - HasRelationMetadataResource, + HasRelationMetadata, ): @classmethod def resource_class(cls) -> Type[SourceDefinitionResource]: @@ -1452,6 +1402,12 @@ def same_contents(self, old: Optional["Metric"]) -> bool: and True ) + def add_input_measure(self, input_measure: MetricInputMeasure) -> None: + for existing_input_measure in self.type_params.input_measures: + if input_measure == existing_input_measure: + return + self.type_params.input_measures.append(input_measure) + # ==================================== # Group node @@ -1617,7 +1573,7 @@ class ParsedPatch(HasYamlMetadata): # may be empty. 
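[Editor's note: a recurring pattern above is that each parsed node class now declares its artifact counterpart via a `resource_class()` classmethod, which lets the `RESOURCE_CLASS_TO_NODE_CLASS` mapping added below be derived mechanically. A minimal sketch of the pattern, with stand-in class names rather than dbt-core's:]

from dataclasses import dataclass
from typing import Dict, Type

@dataclass
class ExampleResource:  # stand-in for an artifact resource such as ModelResource
    name: str

@dataclass
class ExampleNode(ExampleResource):  # stand-in for a node class such as ModelNode
    @classmethod
    def resource_class(cls) -> Type[ExampleResource]:
        return ExampleResource

# Derive the resource -> node registry from the classmethods, much as the
# RESOURCE_CLASS_TO_NODE_CLASS comprehension below does over get_args(Resource).
RESOURCE_TO_NODE: Dict[Type, Type] = {
    node_cls.resource_class(): node_cls for node_cls in [ExampleNode]
}
assert RESOURCE_TO_NODE[ExampleResource] is ExampleNode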
@dataclass class ParsedNodePatch(ParsedPatch): - columns: Dict[str, ColumnInfoResource] + columns: Dict[str, ColumnInfo] access: Optional[str] version: Optional[NodeVersion] latest_version: Optional[NodeVersion] @@ -1642,7 +1598,6 @@ class ParsedMacroPatch(ParsedPatch): SingularTestNode, HookNode, ModelNode, - RPCNode, SqlNode, GenericTestNode, SnapshotNode, @@ -1679,3 +1634,10 @@ class ParsedMacroPatch(ParsedPatch): ] TestNode = Union[SingularTestNode, GenericTestNode] + + +RESOURCE_CLASS_TO_NODE_CLASS: Dict[Type[BaseResource], Type[BaseNode]] = { + node_class.resource_class(): node_class + for node_class in get_args(Resource) + if node_class is not UnitTestNode +} diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py index b26bc4743a3..caeaa5cee85 100644 --- a/core/dbt/contracts/graph/unparsed.py +++ b/core/dbt/contracts/graph/unparsed.py @@ -21,6 +21,10 @@ FreshnessThreshold, MaturityType, MeasureAggregationParameters, + UnitTestInputFixture, + UnitTestOutputFixture, + UnitTestNodeVersions, + UnitTestOverrides, ) # trigger the PathEncoder @@ -482,7 +486,8 @@ def __bool__(self): @dataclass class UnparsedMetricInputMeasure(dbtClassMixin): name: str - filter: Optional[Union[str, List[str]]] = None + # Note: `Union` must be the outermost part of the type annotation for serialization to work properly. + filter: Union[str, List[str], None] = None alias: Optional[str] = None join_to_timespine: bool = False fill_nulls_with: Optional[int] = None @@ -491,7 +496,8 @@ class UnparsedMetricInputMeasure(dbtClassMixin): @dataclass class UnparsedMetricInput(dbtClassMixin): name: str - filter: Optional[Union[str, List[str]]] = None + # Note: `Union` must be the outermost part of the type annotation for serialization to work properly. + filter: Union[str, List[str], None] = None alias: Optional[str] = None offset_window: Optional[str] = None offset_to_grain: Optional[str] = None # str is really a TimeGranularity Enum @@ -528,7 +534,8 @@ class UnparsedMetric(dbtClassMixin): type: str type_params: UnparsedMetricTypeParams description: str = "" - filter: Optional[Union[str, List[str]]] = None + # Note: `Union` must be the outermost part of the type annotation for serialization to work properly. + filter: Union[str, List[str], None] = None # metadata: Optional[Unparsedetadata] = None # TODO meta: Dict[str, Any] = field(default_factory=dict) tags: List[str] = field(default_factory=list) @@ -638,7 +645,8 @@ class UnparsedSemanticModel(dbtClassMixin): class UnparsedQueryParams(dbtClassMixin): metrics: List[str] = field(default_factory=list) group_by: List[str] = field(default_factory=list) - where: Optional[Union[str, List[str]]] = None + # Note: `Union` must be the outermost part of the type annotation for serialization to work properly. 
+ where: Union[str, List[str], None] = None @dataclass @@ -674,39 +682,6 @@ def normalize_date(d: Optional[datetime.date]) -> Optional[datetime.datetime]: return dt -class UnitTestFormat(StrEnum): - CSV = "csv" - Dict = "dict" - - -@dataclass -class UnitTestInputFixture(dbtClassMixin): - input: str - rows: Optional[Union[str, List[Dict[str, Any]]]] = None - format: UnitTestFormat = UnitTestFormat.Dict - fixture: Optional[str] = None - - -@dataclass -class UnitTestOutputFixture(dbtClassMixin): - rows: Optional[Union[str, List[Dict[str, Any]]]] = None - format: UnitTestFormat = UnitTestFormat.Dict - fixture: Optional[str] = None - - -@dataclass -class UnitTestOverrides(dbtClassMixin): - macros: Dict[str, Any] = field(default_factory=dict) - vars: Dict[str, Any] = field(default_factory=dict) - env_vars: Dict[str, Any] = field(default_factory=dict) - - -@dataclass -class UnitTestNodeVersions(dbtClassMixin): - include: Optional[List[NodeVersion]] = None - exclude: Optional[List[NodeVersion]] = None - - @dataclass class UnparsedUnitTest(dbtClassMixin): name: str diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index d51310aef8a..a74b883005d 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -296,6 +296,7 @@ def validate(cls, data): @dataclass class ProjectFlags(ExtensibleDbtClassMixin): + allow_spaces_in_model_names: Optional[bool] = True cache_selected_only: Optional[bool] = None debug: Optional[bool] = None fail_fast: Optional[bool] = None @@ -307,6 +308,7 @@ class ProjectFlags(ExtensibleDbtClassMixin): partial_parse: Optional[bool] = None populate_cache: Optional[bool] = None printer_width: Optional[int] = None + require_explicit_package_overrides_for_builtin_materializations: bool = False send_anonymous_usage_stats: bool = DEFAULT_SEND_ANONYMOUS_USAGE_STATS source_freshness_run_project_hooks: bool = False static_parser: Optional[bool] = None @@ -320,7 +322,11 @@ class ProjectFlags(ExtensibleDbtClassMixin): @property def project_only_flags(self) -> Dict[str, Any]: - return {"source_freshness_run_project_hooks": self.source_freshness_run_project_hooks} + return { + "source_freshness_run_project_hooks": self.source_freshness_run_project_hooks, + "allow_spaces_in_model_names": self.allow_spaces_in_model_names, + "require_explicit_package_overrides_for_builtin_materializations": self.require_explicit_package_overrides_for_builtin_materializations, + } @dataclass diff --git a/core/dbt/contracts/state.py b/core/dbt/contracts/state.py index 9111e2dfb46..01a2eba958b 100644 --- a/core/dbt/contracts/state.py +++ b/core/dbt/contracts/state.py @@ -1,12 +1,13 @@ from pathlib import Path from typing import Optional -from dbt.contracts.graph.manifest import WritableManifest +from dbt.contracts.graph.manifest import Manifest +from dbt.artifacts.exceptions import IncompatibleSchemaError +from dbt.artifacts.schemas.manifest import WritableManifest from dbt.artifacts.schemas.freshness import FreshnessExecutionResultArtifact from dbt.artifacts.schemas.run import RunResultsArtifact from dbt_common.events.functions import fire_event from dbt.events.types import WarnStateTargetEqual -from dbt.exceptions import IncompatibleSchemaError def load_result_state(results_path) -> Optional[RunResultsArtifact]: @@ -24,7 +25,7 @@ def __init__(self, state_path: Path, target_path: Path, project_root: Path) -> N self.state_path: Path = state_path self.target_path: Path = target_path self.project_root: Path = project_root - self.manifest: Optional[WritableManifest] = None + 
self.manifest: Optional[Manifest] = None self.results: Optional[RunResultsArtifact] = None self.sources: Optional[FreshnessExecutionResultArtifact] = None self.sources_current: Optional[FreshnessExecutionResultArtifact] = None @@ -36,7 +37,8 @@ def __init__(self, state_path: Path, target_path: Path, project_root: Path) -> N manifest_path = self.project_root / self.state_path / "manifest.json" if manifest_path.exists() and manifest_path.is_file(): try: - self.manifest = WritableManifest.read_and_check_versions(str(manifest_path)) + writable_manifest = WritableManifest.read_and_check_versions(str(manifest_path)) + self.manifest = Manifest.from_writable_manifest(writable_manifest) except IncompatibleSchemaError as exc: exc.add_filename(str(manifest_path)) raise diff --git a/core/dbt/deprecations.py b/core/dbt/deprecations.py index 1b011128fb8..2bd50d7943a 100644 --- a/core/dbt/deprecations.py +++ b/core/dbt/deprecations.py @@ -118,6 +118,11 @@ def show(self, *args, **kwargs) -> None: active_deprecations.add(self.name) +class PackageMaterializationOverrideDeprecation(DBTDeprecation): + _name = "package-materialization-override" + _event = "PackageMaterializationOverrideDeprecation" + + def renamed_env_var(old_name: str, new_name: str): class EnvironmentVariableRenamed(DBTDeprecation): _name = f"environment-variable-renamed:{old_name}" @@ -157,6 +162,7 @@ def warn(name, *args, **kwargs): CollectFreshnessReturnSignature(), TestsConfigDeprecation(), ProjectFlagsMovedDeprecation(), + PackageMaterializationOverrideDeprecation(), ] deprecations: Dict[str, DBTDeprecation] = {d.name: d for d in deprecations_list} diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py index 3d74bac4980..5f890109b0e 100644 --- a/core/dbt/deps/resolver.py +++ b/core/dbt/deps/resolver.py @@ -1,5 +1,5 @@ from dataclasses import dataclass, field -from typing import Dict, List, NoReturn, Union, Type, Iterator, Set, Any +from typing import Dict, List, NoReturn, Type, Iterator, Set, Any from dbt.exceptions import ( DuplicateDependencyToRootError, @@ -17,14 +17,13 @@ from dbt.deps.registry import RegistryUnpinnedPackage from dbt.contracts.project import ( + PackageSpec, LocalPackage, TarballPackage, GitPackage, RegistryPackage, ) -PackageContract = Union[LocalPackage, TarballPackage, GitPackage, RegistryPackage] - @dataclass class PackageListing: @@ -68,7 +67,7 @@ def incorporate(self, package: UnpinnedPackage): else: self.packages[key] = package - def update_from(self, src: List[PackageContract]) -> None: + def update_from(self, src: List[PackageSpec]) -> None: pkg: UnpinnedPackage for contract in src: if isinstance(contract, LocalPackage): @@ -84,9 +83,7 @@ def update_from(self, src: List[PackageContract]) -> None: self.incorporate(pkg) @classmethod - def from_contracts( - cls: Type["PackageListing"], src: List[PackageContract] - ) -> "PackageListing": + def from_contracts(cls: Type["PackageListing"], src: List[PackageSpec]) -> "PackageListing": self = cls({}) self.update_from(src) return self @@ -114,7 +111,7 @@ def _check_for_duplicate_project_names( def resolve_packages( - packages: List[PackageContract], + packages: List[PackageSpec], project: Project, cli_vars: Dict[str, Any], ) -> List[PinnedPackage]: @@ -137,7 +134,7 @@ def resolve_packages( return resolved -def resolve_lock_packages(packages: List[PackageContract]) -> List[PinnedPackage]: +def resolve_lock_packages(packages: List[PackageSpec]) -> List[PinnedPackage]: lock_packages = PackageListing.from_contracts(packages) final = PackageListing() diff 
--git a/core/dbt/events/core_types.proto b/core/dbt/events/core_types.proto index 556c01b32b4..cfb537ec75e 100644 --- a/core/dbt/events/core_types.proto +++ b/core/dbt/events/core_types.proto @@ -403,6 +403,41 @@ message ProjectFlagsMovedDeprecationMsg { ProjectFlagsMovedDeprecation data = 2; } +// D014 +message SpacesInModelNameDeprecation { + string model_name = 1; + string model_version = 2; + string level = 3; +} + +message SpacesInModelNameDeprecationMsg { + CoreEventInfo info = 1; + SpacesInModelNameDeprecation data = 2; +} + +// D015 +message TotalModelNamesWithSpacesDeprecation { + int32 count_invalid_names = 1; + bool show_debug_hint = 2; + string level = 3; +} + +// D016 +message PackageMaterializationOverrideDeprecation { + string package_name = 1; + string materialization_name = 2; +} + +message PackageMaterializationOverrideDeprecationMsg { + CoreEventInfo info = 1; + PackageMaterializationOverrideDeprecation data = 2; +} + +message TotalModelNamesWithSpacesDeprecationMsg { + CoreEventInfo info = 1; + TotalModelNamesWithSpacesDeprecation data = 2; +} + // I065 message DeprecatedModel { string model_name = 1; @@ -1331,8 +1366,23 @@ message LogFreshnessResultMsg { LogFreshnessResult data = 2; } +// Q018 +message LogNodeNoOpResult { + NodeInfo node_info = 1; + string description = 2; + string status = 3; + int32 index = 4; + int32 total = 5; + float execution_time = 6; +} + +message LogNodeNoOpResultMsg { + CoreEventInfo info = 1; + LogNodeNoOpResult data = 2; +} + -// Skipped Q019, Q020, Q021 +// Skipped Q020, Q021 // Q022 @@ -1757,6 +1807,7 @@ message RunResultWarning { string resource_type = 1; string node_name = 2; string path = 3; + NodeInfo node_info = 4; } message RunResultWarningMsg { @@ -1769,6 +1820,7 @@ message RunResultFailure { string resource_type = 1; string node_name = 2; string path = 3; + NodeInfo node_info = 4; } message RunResultFailureMsg { @@ -1789,6 +1841,7 @@ message StatsLineMsg { // Z024 message RunResultError { string msg = 1; + NodeInfo node_info = 2; } message RunResultErrorMsg { @@ -1799,6 +1852,7 @@ message RunResultErrorMsg { // Z025 message RunResultErrorNoMessage { string status = 1; + NodeInfo node_info = 2; } message RunResultErrorNoMessageMsg { @@ -1809,6 +1863,7 @@ message RunResultErrorNoMessageMsg { // Z026 message SQLCompiledPath { string path = 1; + NodeInfo node_info = 2; } message SQLCompiledPathMsg { @@ -1819,6 +1874,7 @@ message SQLCompiledPathMsg { // Z027 message CheckNodeTestFailure { string relation_name = 1; + NodeInfo node_info = 2; } message CheckNodeTestFailureMsg { @@ -1943,6 +1999,7 @@ message TrackingInitializeFailureMsg { // Z046 message RunResultWarningMessage { string msg = 1; + NodeInfo node_info = 2; } message RunResultWarningMessageMsg { diff --git a/core/dbt/events/core_types_pb2.py b/core/dbt/events/core_types_pb2.py index dabe67a9082..6f270b3b7ca 100644 --- a/core/dbt/events/core_types_pb2.py +++ b/core/dbt/events/core_types_pb2.py @@ -1,11 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: core_types.proto +# Protobuf Python Version: 4.25.3 """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -15,727 +16,743 @@ from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x63ore_types.proto\x12\x0bproto_types\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x99\x02\n\rCoreEventInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\x12\x0b\n\x03msg\x18\x03 \x01(\t\x12\r\n\x05level\x18\x04 \x01(\t\x12\x15\n\rinvocation_id\x18\x05 \x01(\t\x12\x0b\n\x03pid\x18\x06 \x01(\x05\x12\x0e\n\x06thread\x18\x07 \x01(\t\x12&\n\x02ts\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x05\x65xtra\x18\t \x03(\x0b\x32%.proto_types.CoreEventInfo.ExtraEntry\x12\x10\n\x08\x63\x61tegory\x18\n \x01(\t\x1a,\n\nExtraEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"V\n\x0cNodeRelation\x12\x10\n\x08\x64\x61tabase\x18\n \x01(\t\x12\x0e\n\x06schema\x18\x0b \x01(\t\x12\r\n\x05\x61lias\x18\x0c \x01(\t\x12\x15\n\rrelation_name\x18\r \x01(\t\"\x91\x02\n\x08NodeInfo\x12\x11\n\tnode_path\x18\x01 \x01(\t\x12\x11\n\tnode_name\x18\x02 \x01(\t\x12\x11\n\tunique_id\x18\x03 \x01(\t\x12\x15\n\rresource_type\x18\x04 \x01(\t\x12\x14\n\x0cmaterialized\x18\x05 \x01(\t\x12\x13\n\x0bnode_status\x18\x06 \x01(\t\x12\x17\n\x0fnode_started_at\x18\x07 \x01(\t\x12\x18\n\x10node_finished_at\x18\x08 \x01(\t\x12%\n\x04meta\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x30\n\rnode_relation\x18\n \x01(\x0b\x32\x19.proto_types.NodeRelation\"\x7f\n\rTimingInfoMsg\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\nstarted_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x30\n\x0c\x63ompleted_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd1\x01\n\x0cRunResultMsg\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12/\n\x0btiming_info\x18\x03 \x03(\x0b\x32\x1a.proto_types.TimingInfoMsg\x12\x0e\n\x06thread\x18\x04 \x01(\t\x12\x16\n\x0e\x65xecution_time\x18\x05 \x01(\x02\x12\x31\n\x10\x61\x64\x61pter_response\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x14\n\x0cnum_failures\x18\x07 \x01(\x05\"\\\n\nColumnType\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x12\x1c\n\x14previous_column_type\x18\x02 \x01(\t\x12\x1b\n\x13\x63urrent_column_type\x18\x03 \x01(\t\"Y\n\x10\x43olumnConstraint\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x12\x17\n\x0f\x63onstraint_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63onstraint_type\x18\x03 \x01(\t\"T\n\x0fModelConstraint\x12\x17\n\x0f\x63onstraint_name\x18\x01 \x01(\t\x12\x17\n\x0f\x63onstraint_type\x18\x02 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x03 \x03(\t\"9\n\x11MainReportVersion\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x13\n\x0blog_version\x18\x02 \x01(\x05\"n\n\x14MainReportVersionMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.MainReportVersion\"r\n\x0eMainReportArgs\x12\x33\n\x04\x61rgs\x18\x01 \x03(\x0b\x32%.proto_types.MainReportArgs.ArgsEntry\x1a+\n\tArgsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"h\n\x11MainReportArgsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.MainReportArgs\"+\n\x15MainTrackingUserState\x12\x12\n\nuser_state\x18\x01 \x01(\t\"v\n\x18MainTrackingUserStateMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.MainTrackingUserState\"5\n\x0fMergedFromState\x12\x12\n\nnum_merged\x18\x01 \x01(\x05\x12\x0e\n\x06sample\x18\x02 \x03(\t\"j\n\x12MergedFromStateMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.MergedFromState\"A\n\x14MissingProfileTarget\x12\x14\n\x0cprofile_name\x18\x01 \x01(\t\x12\x13\n\x0btarget_name\x18\x02 \x01(\t\"t\n\x17MissingProfileTargetMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.MissingProfileTarget\"(\n\x11InvalidOptionYAML\x12\x13\n\x0boption_name\x18\x01 \x01(\t\"n\n\x14InvalidOptionYAMLMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.InvalidOptionYAML\"!\n\x12LogDbtProjectError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\"p\n\x15LogDbtProjectErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.LogDbtProjectError\"3\n\x12LogDbtProfileError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\x12\x10\n\x08profiles\x18\x02 \x03(\t\"p\n\x15LogDbtProfileErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.LogDbtProfileError\"!\n\x12StarterProjectPath\x12\x0b\n\x03\x64ir\x18\x01 \x01(\t\"p\n\x15StarterProjectPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.StarterProjectPath\"$\n\x15\x43onfigFolderDirectory\x12\x0b\n\x03\x64ir\x18\x01 \x01(\t\"v\n\x18\x43onfigFolderDirectoryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.ConfigFolderDirectory\"\'\n\x14NoSampleProfileFound\x12\x0f\n\x07\x61\x64\x61pter\x18\x01 \x01(\t\"t\n\x17NoSampleProfileFoundMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.NoSampleProfileFound\"6\n\x18ProfileWrittenWithSample\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\"|\n\x1bProfileWrittenWithSampleMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.ProfileWrittenWithSample\"B\n$ProfileWrittenWithTargetTemplateYAML\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\"\x94\x01\n\'ProfileWrittenWithTargetTemplateYAMLMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12?\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x31.proto_types.ProfileWrittenWithTargetTemplateYAML\"C\n%ProfileWrittenWithProjectTemplateYAML\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\"\x96\x01\n(ProfileWrittenWithProjectTemplateYAMLMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12@\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x32.proto_types.ProfileWrittenWithProjectTemplateYAML\"\x12\n\x10SettingUpProfile\"l\n\x13SettingUpProfileMsg\x12(\n\x04info\x18\x01 
\x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.SettingUpProfile\"\x1c\n\x1aInvalidProfileTemplateYAML\"\x80\x01\n\x1dInvalidProfileTemplateYAMLMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.InvalidProfileTemplateYAML\"(\n\x18ProjectNameAlreadyExists\x12\x0c\n\x04name\x18\x01 \x01(\t\"|\n\x1bProjectNameAlreadyExistsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.ProjectNameAlreadyExists\"K\n\x0eProjectCreated\x12\x14\n\x0cproject_name\x18\x01 \x01(\t\x12\x10\n\x08\x64ocs_url\x18\x02 \x01(\t\x12\x11\n\tslack_url\x18\x03 \x01(\t\"h\n\x11ProjectCreatedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.ProjectCreated\"@\n\x1aPackageRedirectDeprecation\x12\x10\n\x08old_name\x18\x01 \x01(\t\x12\x10\n\x08new_name\x18\x02 \x01(\t\"\x80\x01\n\x1dPackageRedirectDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.PackageRedirectDeprecation\"\x1f\n\x1dPackageInstallPathDeprecation\"\x86\x01\n PackageInstallPathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x38\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32*.proto_types.PackageInstallPathDeprecation\"H\n\x1b\x43onfigSourcePathDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\x12\x10\n\x08\x65xp_path\x18\x02 \x01(\t\"\x82\x01\n\x1e\x43onfigSourcePathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.ConfigSourcePathDeprecation\"F\n\x19\x43onfigDataPathDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\x12\x10\n\x08\x65xp_path\x18\x02 \x01(\t\"~\n\x1c\x43onfigDataPathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.ConfigDataPathDeprecation\".\n\x17MetricAttributesRenamed\x12\x13\n\x0bmetric_name\x18\x01 \x01(\t\"z\n\x1aMetricAttributesRenamedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.MetricAttributesRenamed\"+\n\x17\x45xposureNameDeprecation\x12\x10\n\x08\x65xposure\x18\x01 \x01(\t\"z\n\x1a\x45xposureNameDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.ExposureNameDeprecation\"^\n\x13InternalDeprecation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\x12\x18\n\x10suggested_action\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\"r\n\x16InternalDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.InternalDeprecation\"@\n\x1a\x45nvironmentVariableRenamed\x12\x10\n\x08old_name\x18\x01 \x01(\t\x12\x10\n\x08new_name\x18\x02 \x01(\t\"\x80\x01\n\x1d\x45nvironmentVariableRenamedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.EnvironmentVariableRenamed\"3\n\x18\x43onfigLogPathDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\"|\n\x1b\x43onfigLogPathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 
\x01(\x0b\x32%.proto_types.ConfigLogPathDeprecation\"6\n\x1b\x43onfigTargetPathDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\"\x82\x01\n\x1e\x43onfigTargetPathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.ConfigTargetPathDeprecation\"C\n\x16TestsConfigDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\x12\x10\n\x08\x65xp_path\x18\x02 \x01(\t\"x\n\x19TestsConfigDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x31\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32#.proto_types.TestsConfigDeprecation\"\x1e\n\x1cProjectFlagsMovedDeprecation\"\x84\x01\n\x1fProjectFlagsMovedDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x37\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32).proto_types.ProjectFlagsMovedDeprecation\"V\n\x0f\x44\x65precatedModel\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x15\n\rmodel_version\x18\x02 \x01(\t\x12\x18\n\x10\x64\x65precation_date\x18\x03 \x01(\t\"j\n\x12\x44\x65precatedModelMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.DeprecatedModel\"7\n\x12InputFileDiffError\x12\x10\n\x08\x63\x61tegory\x18\x01 \x01(\t\x12\x0f\n\x07\x66ile_id\x18\x02 \x01(\t\"p\n\x15InputFileDiffErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.InputFileDiffError\"?\n\x14InvalidValueForField\x12\x12\n\nfield_name\x18\x01 \x01(\t\x12\x13\n\x0b\x66ield_value\x18\x02 \x01(\t\"t\n\x17InvalidValueForFieldMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.InvalidValueForField\"Q\n\x11ValidationWarning\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x12\n\nfield_name\x18\x02 \x01(\t\x12\x11\n\tnode_name\x18\x03 \x01(\t\"n\n\x14ValidationWarningMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.ValidationWarning\"!\n\x11ParsePerfInfoPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"n\n\x14ParsePerfInfoPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.ParsePerfInfoPath\"1\n!PartialParsingErrorProcessingFile\x12\x0c\n\x04\x66ile\x18\x01 \x01(\t\"\x8e\x01\n$PartialParsingErrorProcessingFileMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12<\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32..proto_types.PartialParsingErrorProcessingFile\"\x86\x01\n\x13PartialParsingError\x12?\n\x08\x65xc_info\x18\x01 \x03(\x0b\x32-.proto_types.PartialParsingError.ExcInfoEntry\x1a.\n\x0c\x45xcInfoEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"r\n\x16PartialParsingErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.PartialParsingError\"\x1b\n\x19PartialParsingSkipParsing\"~\n\x1cPartialParsingSkipParsingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.PartialParsingSkipParsing\"&\n\x14UnableToPartialParse\x12\x0e\n\x06reason\x18\x01 \x01(\t\"t\n\x17UnableToPartialParseMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 
\x01(\x0b\x32!.proto_types.UnableToPartialParse\"f\n\x12StateCheckVarsHash\x12\x10\n\x08\x63hecksum\x18\x01 \x01(\t\x12\x0c\n\x04vars\x18\x02 \x01(\t\x12\x0f\n\x07profile\x18\x03 \x01(\t\x12\x0e\n\x06target\x18\x04 \x01(\t\x12\x0f\n\x07version\x18\x05 \x01(\t\"p\n\x15StateCheckVarsHashMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.StateCheckVarsHash\"\x1a\n\x18PartialParsingNotEnabled\"|\n\x1bPartialParsingNotEnabledMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.PartialParsingNotEnabled\"C\n\x14ParsedFileLoadFailed\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\x12\x10\n\x08\x65xc_info\x18\x03 \x01(\t\"t\n\x17ParsedFileLoadFailedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.ParsedFileLoadFailed\"H\n\x15PartialParsingEnabled\x12\x0f\n\x07\x64\x65leted\x18\x01 \x01(\x05\x12\r\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x05\x12\x0f\n\x07\x63hanged\x18\x03 \x01(\x05\"v\n\x18PartialParsingEnabledMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.PartialParsingEnabled\"8\n\x12PartialParsingFile\x12\x0f\n\x07\x66ile_id\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\"p\n\x15PartialParsingFileMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.PartialParsingFile\"\xaf\x01\n\x1fInvalidDisabledTargetInTestNode\x12\x1b\n\x13resource_type_title\x18\x01 \x01(\t\x12\x11\n\tunique_id\x18\x02 \x01(\t\x12\x1a\n\x12original_file_path\x18\x03 \x01(\t\x12\x13\n\x0btarget_kind\x18\x04 \x01(\t\x12\x13\n\x0btarget_name\x18\x05 \x01(\t\x12\x16\n\x0etarget_package\x18\x06 \x01(\t\"\x8a\x01\n\"InvalidDisabledTargetInTestNodeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.InvalidDisabledTargetInTestNode\"7\n\x18UnusedResourceConfigPath\x12\x1b\n\x13unused_config_paths\x18\x01 \x03(\t\"|\n\x1bUnusedResourceConfigPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.UnusedResourceConfigPath\"3\n\rSeedIncreased\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"f\n\x10SeedIncreasedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.SeedIncreased\">\n\x18SeedExceedsLimitSamePath\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"|\n\x1bSeedExceedsLimitSamePathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.SeedExceedsLimitSamePath\"D\n\x1eSeedExceedsLimitAndPathChanged\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x88\x01\n!SeedExceedsLimitAndPathChangedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x39\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32+.proto_types.SeedExceedsLimitAndPathChanged\"\\\n\x1fSeedExceedsLimitChecksumChanged\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x15\n\rchecksum_name\x18\x03 \x01(\t\"\x8a\x01\n\"SeedExceedsLimitChecksumChangedMsg\x12(\n\x04info\x18\x01 
\x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.SeedExceedsLimitChecksumChanged\"%\n\x0cUnusedTables\x12\x15\n\runused_tables\x18\x01 \x03(\t\"d\n\x0fUnusedTablesMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.UnusedTables\"\x87\x01\n\x17WrongResourceSchemaFile\x12\x12\n\npatch_name\x18\x01 \x01(\t\x12\x15\n\rresource_type\x18\x02 \x01(\t\x12\x1c\n\x14plural_resource_type\x18\x03 \x01(\t\x12\x10\n\x08yaml_key\x18\x04 \x01(\t\x12\x11\n\tfile_path\x18\x05 \x01(\t\"z\n\x1aWrongResourceSchemaFileMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.WrongResourceSchemaFile\"K\n\x10NoNodeForYamlKey\x12\x12\n\npatch_name\x18\x01 \x01(\t\x12\x10\n\x08yaml_key\x18\x02 \x01(\t\x12\x11\n\tfile_path\x18\x03 \x01(\t\"l\n\x13NoNodeForYamlKeyMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.NoNodeForYamlKey\"+\n\x15MacroNotFoundForPatch\x12\x12\n\npatch_name\x18\x01 \x01(\t\"v\n\x18MacroNotFoundForPatchMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.MacroNotFoundForPatch\"\xb8\x01\n\x16NodeNotFoundOrDisabled\x12\x1a\n\x12original_file_path\x18\x01 \x01(\t\x12\x11\n\tunique_id\x18\x02 \x01(\t\x12\x1b\n\x13resource_type_title\x18\x03 \x01(\t\x12\x13\n\x0btarget_name\x18\x04 \x01(\t\x12\x13\n\x0btarget_kind\x18\x05 \x01(\t\x12\x16\n\x0etarget_package\x18\x06 \x01(\t\x12\x10\n\x08\x64isabled\x18\x07 \x01(\t\"x\n\x19NodeNotFoundOrDisabledMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x31\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32#.proto_types.NodeNotFoundOrDisabled\"H\n\x0fJinjaLogWarning\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03msg\x18\x02 \x01(\t\"j\n\x12JinjaLogWarningMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.JinjaLogWarning\"E\n\x0cJinjaLogInfo\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03msg\x18\x02 \x01(\t\"d\n\x0fJinjaLogInfoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.JinjaLogInfo\"F\n\rJinjaLogDebug\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03msg\x18\x02 \x01(\t\"f\n\x10JinjaLogDebugMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.JinjaLogDebug\"\xae\x01\n\x1eUnpinnedRefNewVersionAvailable\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x15\n\rref_node_name\x18\x02 \x01(\t\x12\x18\n\x10ref_node_package\x18\x03 \x01(\t\x12\x18\n\x10ref_node_version\x18\x04 \x01(\t\x12\x17\n\x0fref_max_version\x18\x05 \x01(\t\"\x88\x01\n!UnpinnedRefNewVersionAvailableMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x39\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32+.proto_types.UnpinnedRefNewVersionAvailable\"\xc6\x01\n\x1cUpcomingReferenceDeprecation\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x19\n\x11ref_model_package\x18\x02 \x01(\t\x12\x16\n\x0eref_model_name\x18\x03 \x01(\t\x12\x19\n\x11ref_model_version\x18\x04 \x01(\t\x12 \n\x18ref_model_latest_version\x18\x05 \x01(\t\x12\"\n\x1aref_model_deprecation_date\x18\x06 
\x01(\t\"\x84\x01\n\x1fUpcomingReferenceDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x37\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32).proto_types.UpcomingReferenceDeprecation\"\xbd\x01\n\x13\x44\x65precatedReference\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x19\n\x11ref_model_package\x18\x02 \x01(\t\x12\x16\n\x0eref_model_name\x18\x03 \x01(\t\x12\x19\n\x11ref_model_version\x18\x04 \x01(\t\x12 \n\x18ref_model_latest_version\x18\x05 \x01(\t\x12\"\n\x1aref_model_deprecation_date\x18\x06 \x01(\t\"r\n\x16\x44\x65precatedReferenceMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.DeprecatedReference\"<\n$UnsupportedConstraintMaterialization\x12\x14\n\x0cmaterialized\x18\x01 \x01(\t\"\x94\x01\n\'UnsupportedConstraintMaterializationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12?\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x31.proto_types.UnsupportedConstraintMaterialization\"M\n\x14ParseInlineNodeError\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\"t\n\x17ParseInlineNodeErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.ParseInlineNodeError\"(\n\x19SemanticValidationFailure\x12\x0b\n\x03msg\x18\x02 \x01(\t\"~\n\x1cSemanticValidationFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.SemanticValidationFailure\"\x8a\x03\n\x19UnversionedBreakingChange\x12\x18\n\x10\x62reaking_changes\x18\x01 \x03(\t\x12\x12\n\nmodel_name\x18\x02 \x01(\t\x12\x17\n\x0fmodel_file_path\x18\x03 \x01(\t\x12\"\n\x1a\x63ontract_enforced_disabled\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumns_removed\x18\x05 \x03(\t\x12\x34\n\x13\x63olumn_type_changes\x18\x06 \x03(\x0b\x32\x17.proto_types.ColumnType\x12I\n\"enforced_column_constraint_removed\x18\x07 \x03(\x0b\x32\x1d.proto_types.ColumnConstraint\x12G\n!enforced_model_constraint_removed\x18\x08 \x03(\x0b\x32\x1c.proto_types.ModelConstraint\x12\x1f\n\x17materialization_changed\x18\t \x03(\t\"~\n\x1cUnversionedBreakingChangeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.UnversionedBreakingChange\"*\n\x14WarnStateTargetEqual\x12\x12\n\nstate_path\x18\x01 \x01(\t\"t\n\x17WarnStateTargetEqualMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.WarnStateTargetEqual\"%\n\x16\x46reshnessConfigProblem\x12\x0b\n\x03msg\x18\x01 \x01(\t\"x\n\x19\x46reshnessConfigProblemMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x31\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32#.proto_types.FreshnessConfigProblem\"/\n\x1dGitSparseCheckoutSubdirectory\x12\x0e\n\x06subdir\x18\x01 \x01(\t\"\x86\x01\n GitSparseCheckoutSubdirectoryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x38\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32*.proto_types.GitSparseCheckoutSubdirectory\"/\n\x1bGitProgressCheckoutRevision\x12\x10\n\x08revision\x18\x01 \x01(\t\"\x82\x01\n\x1eGitProgressCheckoutRevisionMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.GitProgressCheckoutRevision\"4\n%GitProgressUpdatingExistingDependency\x12\x0b\n\x03\x64ir\x18\x01 
\x01(\t\"\x96\x01\n(GitProgressUpdatingExistingDependencyMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12@\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x32.proto_types.GitProgressUpdatingExistingDependency\".\n\x1fGitProgressPullingNewDependency\x12\x0b\n\x03\x64ir\x18\x01 \x01(\t\"\x8a\x01\n\"GitProgressPullingNewDependencyMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.GitProgressPullingNewDependency\"\x1d\n\x0eGitNothingToDo\x12\x0b\n\x03sha\x18\x01 \x01(\t\"h\n\x11GitNothingToDoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.GitNothingToDo\"E\n\x1fGitProgressUpdatedCheckoutRange\x12\x11\n\tstart_sha\x18\x01 \x01(\t\x12\x0f\n\x07\x65nd_sha\x18\x02 \x01(\t\"\x8a\x01\n\"GitProgressUpdatedCheckoutRangeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.GitProgressUpdatedCheckoutRange\"*\n\x17GitProgressCheckedOutAt\x12\x0f\n\x07\x65nd_sha\x18\x01 \x01(\t\"z\n\x1aGitProgressCheckedOutAtMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.GitProgressCheckedOutAt\")\n\x1aRegistryProgressGETRequest\x12\x0b\n\x03url\x18\x01 \x01(\t\"\x80\x01\n\x1dRegistryProgressGETRequestMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.RegistryProgressGETRequest\"=\n\x1bRegistryProgressGETResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x11\n\tresp_code\x18\x02 \x01(\x05\"\x82\x01\n\x1eRegistryProgressGETResponseMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.RegistryProgressGETResponse\"_\n\x1dSelectorReportInvalidSelector\x12\x17\n\x0fvalid_selectors\x18\x01 \x01(\t\x12\x13\n\x0bspec_method\x18\x02 \x01(\t\x12\x10\n\x08raw_spec\x18\x03 \x01(\t\"\x86\x01\n SelectorReportInvalidSelectorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x38\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32*.proto_types.SelectorReportInvalidSelector\"\x15\n\x13\x44\x65psNoPackagesFound\"r\n\x16\x44\x65psNoPackagesFoundMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.DepsNoPackagesFound\"/\n\x17\x44\x65psStartPackageInstall\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\"z\n\x1a\x44\x65psStartPackageInstallMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.DepsStartPackageInstall\"\'\n\x0f\x44\x65psInstallInfo\x12\x14\n\x0cversion_name\x18\x01 \x01(\t\"j\n\x12\x44\x65psInstallInfoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.DepsInstallInfo\"-\n\x13\x44\x65psUpdateAvailable\x12\x16\n\x0eversion_latest\x18\x01 \x01(\t\"r\n\x16\x44\x65psUpdateAvailableMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.DepsUpdateAvailable\"\x0e\n\x0c\x44\x65psUpToDate\"d\n\x0f\x44\x65psUpToDateMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.DepsUpToDate\",\n\x14\x44\x65psListSubdirectory\x12\x14\n\x0csubdirectory\x18\x01 
\x01(\t\"t\n\x17\x44\x65psListSubdirectoryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.DepsListSubdirectory\".\n\x1a\x44\x65psNotifyUpdatesAvailable\x12\x10\n\x08packages\x18\x01 \x03(\t\"\x80\x01\n\x1d\x44\x65psNotifyUpdatesAvailableMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.DepsNotifyUpdatesAvailable\".\n\x1fRegistryIndexProgressGETRequest\x12\x0b\n\x03url\x18\x01 \x01(\t\"\x8a\x01\n\"RegistryIndexProgressGETRequestMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.RegistryIndexProgressGETRequest\"B\n RegistryIndexProgressGETResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x11\n\tresp_code\x18\x02 \x01(\x05\"\x8c\x01\n#RegistryIndexProgressGETResponseMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12;\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32-.proto_types.RegistryIndexProgressGETResponse\"2\n\x1eRegistryResponseUnexpectedType\x12\x10\n\x08response\x18\x01 \x01(\t\"\x88\x01\n!RegistryResponseUnexpectedTypeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x39\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32+.proto_types.RegistryResponseUnexpectedType\"2\n\x1eRegistryResponseMissingTopKeys\x12\x10\n\x08response\x18\x01 \x01(\t\"\x88\x01\n!RegistryResponseMissingTopKeysMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x39\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32+.proto_types.RegistryResponseMissingTopKeys\"5\n!RegistryResponseMissingNestedKeys\x12\x10\n\x08response\x18\x01 \x01(\t\"\x8e\x01\n$RegistryResponseMissingNestedKeysMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12<\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32..proto_types.RegistryResponseMissingNestedKeys\"3\n\x1fRegistryResponseExtraNestedKeys\x12\x10\n\x08response\x18\x01 \x01(\t\"\x8a\x01\n\"RegistryResponseExtraNestedKeysMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.RegistryResponseExtraNestedKeys\"(\n\x18\x44\x65psSetDownloadDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\"|\n\x1b\x44\x65psSetDownloadDirectoryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.DepsSetDownloadDirectory\"-\n\x0c\x44\x65psUnpinned\x12\x10\n\x08revision\x18\x01 \x01(\t\x12\x0b\n\x03git\x18\x02 \x01(\t\"d\n\x0f\x44\x65psUnpinnedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.DepsUnpinned\"/\n\x1bNoNodesForSelectionCriteria\x12\x10\n\x08spec_raw\x18\x01 \x01(\t\"\x82\x01\n\x1eNoNodesForSelectionCriteriaMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.NoNodesForSelectionCriteria\")\n\x10\x44\x65psLockUpdating\x12\x15\n\rlock_filepath\x18\x01 \x01(\t\"l\n\x13\x44\x65psLockUpdatingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.DepsLockUpdating\"R\n\x0e\x44\x65psAddPackage\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x19\n\x11packages_filepath\x18\x03 \x01(\t\"h\n\x11\x44\x65psAddPackageMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 
\x01(\x0b\x32\x1b.proto_types.DepsAddPackage\"\xa7\x01\n\x19\x44\x65psFoundDuplicatePackage\x12S\n\x0fremoved_package\x18\x01 \x03(\x0b\x32:.proto_types.DepsFoundDuplicatePackage.RemovedPackageEntry\x1a\x35\n\x13RemovedPackageEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"~\n\x1c\x44\x65psFoundDuplicatePackageMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.DepsFoundDuplicatePackage\"$\n\x12\x44\x65psVersionMissing\x12\x0e\n\x06source\x18\x01 \x01(\t\"p\n\x15\x44\x65psVersionMissingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.DepsVersionMissing\"/\n\x17\x44\x65psScrubbedPackageName\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\"z\n\x1a\x44\x65psScrubbedPackageNameMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.DepsScrubbedPackageName\"*\n\x1bRunningOperationCaughtError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\"\x82\x01\n\x1eRunningOperationCaughtErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.RunningOperationCaughtError\"\x11\n\x0f\x43ompileComplete\"j\n\x12\x43ompileCompleteMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.CompileComplete\"\x18\n\x16\x46reshnessCheckComplete\"x\n\x19\x46reshnessCheckCompleteMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x31\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32#.proto_types.FreshnessCheckComplete\"\x1c\n\nSeedHeader\x12\x0e\n\x06header\x18\x01 \x01(\t\"`\n\rSeedHeaderMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.proto_types.SeedHeader\"]\n\x12SQLRunnerException\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\x12\x10\n\x08\x65xc_info\x18\x02 \x01(\t\x12(\n\tnode_info\x18\x03 \x01(\x0b\x32\x15.proto_types.NodeInfo\"p\n\x15SQLRunnerExceptionMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.SQLRunnerException\"\xa8\x01\n\rLogTestResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\x12\n\nnum_models\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\x12\x14\n\x0cnum_failures\x18\x07 \x01(\x05\"f\n\x10LogTestResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.LogTestResult\"k\n\x0cLogStartLine\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\r\n\x05index\x18\x03 \x01(\x05\x12\r\n\x05total\x18\x04 \x01(\x05\"d\n\x0fLogStartLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.LogStartLine\"\x95\x01\n\x0eLogModelResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\"h\n\x11LogModelResultMsg\x12(\n\x04info\x18\x01 
\x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.LogModelResult\"\x92\x02\n\x11LogSnapshotResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\x12\x34\n\x03\x63\x66g\x18\x07 \x03(\x0b\x32\'.proto_types.LogSnapshotResult.CfgEntry\x12\x16\n\x0eresult_message\x18\x08 \x01(\t\x1a*\n\x08\x43\x66gEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"n\n\x14LogSnapshotResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.LogSnapshotResult\"\xb9\x01\n\rLogSeedResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x16\n\x0eresult_message\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\x12\x0e\n\x06schema\x18\x07 \x01(\t\x12\x10\n\x08relation\x18\x08 \x01(\t\"f\n\x10LogSeedResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.LogSeedResult\"\xad\x01\n\x12LogFreshnessResult\x12\x0e\n\x06status\x18\x01 \x01(\t\x12(\n\tnode_info\x18\x02 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\r\n\x05index\x18\x03 \x01(\x05\x12\r\n\x05total\x18\x04 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x05 \x01(\x02\x12\x13\n\x0bsource_name\x18\x06 \x01(\t\x12\x12\n\ntable_name\x18\x07 \x01(\t\"p\n\x15LogFreshnessResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.LogFreshnessResult\"\"\n\rLogCancelLine\x12\x11\n\tconn_name\x18\x01 \x01(\t\"f\n\x10LogCancelLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.LogCancelLine\"\x1f\n\x0f\x44\x65\x66\x61ultSelector\x12\x0c\n\x04name\x18\x01 \x01(\t\"j\n\x12\x44\x65\x66\x61ultSelectorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.DefaultSelector\"5\n\tNodeStart\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\"^\n\x0cNodeStartMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12$\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x16.proto_types.NodeStart\"g\n\x0cNodeFinished\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12-\n\nrun_result\x18\x02 \x01(\x0b\x32\x19.proto_types.RunResultMsg\"d\n\x0fNodeFinishedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.NodeFinished\"+\n\x1bQueryCancelationUnsupported\x12\x0c\n\x04type\x18\x01 \x01(\t\"\x82\x01\n\x1eQueryCancelationUnsupportedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.QueryCancelationUnsupported\"O\n\x0f\x43oncurrencyLine\x12\x13\n\x0bnum_threads\x18\x01 \x01(\x05\x12\x13\n\x0btarget_name\x18\x02 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x05\"j\n\x12\x43oncurrencyLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.ConcurrencyLine\"E\n\x19WritingInjectedSQLForNode\x12(\n\tnode_info\x18\x01 
\x01(\x0b\x32\x15.proto_types.NodeInfo\"~\n\x1cWritingInjectedSQLForNodeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.WritingInjectedSQLForNode\"9\n\rNodeCompiling\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\"f\n\x10NodeCompilingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.NodeCompiling\"9\n\rNodeExecuting\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\"f\n\x10NodeExecutingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.NodeExecuting\"m\n\x10LogHookStartLine\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x11\n\tstatement\x18\x02 \x01(\t\x12\r\n\x05index\x18\x03 \x01(\x05\x12\r\n\x05total\x18\x04 \x01(\x05\"l\n\x13LogHookStartLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.LogHookStartLine\"\x93\x01\n\x0eLogHookEndLine\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x11\n\tstatement\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\"h\n\x11LogHookEndLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.LogHookEndLine\"\x93\x01\n\x0fSkippingDetails\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x15\n\rresource_type\x18\x02 \x01(\t\x12\x0e\n\x06schema\x18\x03 \x01(\t\x12\x11\n\tnode_name\x18\x04 \x01(\t\x12\r\n\x05index\x18\x05 \x01(\x05\x12\r\n\x05total\x18\x06 \x01(\x05\"j\n\x12SkippingDetailsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.SkippingDetails\"\r\n\x0bNothingToDo\"b\n\x0eNothingToDoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12&\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.proto_types.NothingToDo\",\n\x1dRunningOperationUncaughtError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\"\x86\x01\n RunningOperationUncaughtErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x38\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32*.proto_types.RunningOperationUncaughtError\"\x93\x01\n\x0c\x45ndRunResult\x12*\n\x07results\x18\x01 \x03(\x0b\x32\x19.proto_types.RunResultMsg\x12\x14\n\x0c\x65lapsed_time\x18\x02 \x01(\x02\x12\x30\n\x0cgenerated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07success\x18\x04 \x01(\x08\"d\n\x0f\x45ndRunResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.EndRunResult\"\x11\n\x0fNoNodesSelected\"j\n\x12NoNodesSelectedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.NoNodesSelected\"w\n\x10\x43ommandCompleted\x12\x0f\n\x07\x63ommand\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x30\n\x0c\x63ompleted_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07\x65lapsed\x18\x04 \x01(\x02\"l\n\x13\x43ommandCompletedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.CommandCompleted\"k\n\x08ShowNode\x12\x11\n\tnode_name\x18\x01 
\x01(\t\x12\x0f\n\x07preview\x18\x02 \x01(\t\x12\x11\n\tis_inline\x18\x03 \x01(\x08\x12\x15\n\routput_format\x18\x04 \x01(\t\x12\x11\n\tunique_id\x18\x05 \x01(\t\"\\\n\x0bShowNodeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12#\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x15.proto_types.ShowNode\"p\n\x0c\x43ompiledNode\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x10\n\x08\x63ompiled\x18\x02 \x01(\t\x12\x11\n\tis_inline\x18\x03 \x01(\x08\x12\x15\n\routput_format\x18\x04 \x01(\t\x12\x11\n\tunique_id\x18\x05 \x01(\t\"d\n\x0f\x43ompiledNodeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.CompiledNode\"b\n\x17\x43\x61tchableExceptionOnRun\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\x12\x10\n\x08\x65xc_info\x18\x03 \x01(\t\"z\n\x1a\x43\x61tchableExceptionOnRunMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.CatchableExceptionOnRun\"_\n\x12InternalErrorOnRun\x12\x12\n\nbuild_path\x18\x01 \x01(\t\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\x12(\n\tnode_info\x18\x03 \x01(\x0b\x32\x15.proto_types.NodeInfo\"p\n\x15InternalErrorOnRunMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.InternalErrorOnRun\"u\n\x15GenericExceptionOnRun\x12\x12\n\nbuild_path\x18\x01 \x01(\t\x12\x11\n\tunique_id\x18\x02 \x01(\t\x12\x0b\n\x03\x65xc\x18\x03 \x01(\t\x12(\n\tnode_info\x18\x04 \x01(\x0b\x32\x15.proto_types.NodeInfo\"v\n\x18GenericExceptionOnRunMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.GenericExceptionOnRun\"N\n\x1aNodeConnectionReleaseError\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\x12\x10\n\x08\x65xc_info\x18\x03 \x01(\t\"\x80\x01\n\x1dNodeConnectionReleaseErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.NodeConnectionReleaseError\"\x1f\n\nFoundStats\x12\x11\n\tstat_line\x18\x01 \x01(\t\"`\n\rFoundStatsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.proto_types.FoundStats\"\x17\n\x15MainKeyboardInterrupt\"v\n\x18MainKeyboardInterruptMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.MainKeyboardInterrupt\"#\n\x14MainEncounteredError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\"t\n\x17MainEncounteredErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.MainEncounteredError\"%\n\x0eMainStackTrace\x12\x13\n\x0bstack_trace\x18\x01 \x01(\t\"h\n\x11MainStackTraceMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.MainStackTrace\"p\n\x13TimingInfoCollected\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12/\n\x0btiming_info\x18\x02 \x01(\x0b\x32\x1a.proto_types.TimingInfoMsg\"r\n\x16TimingInfoCollectedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.TimingInfoCollected\"&\n\x12LogDebugStackTrace\x12\x10\n\x08\x65xc_info\x18\x01 \x01(\t\"p\n\x15LogDebugStackTraceMsg\x12(\n\x04info\x18\x01 
\x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.LogDebugStackTrace\"\x1e\n\x0e\x43heckCleanPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"h\n\x11\x43heckCleanPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.CheckCleanPath\" \n\x10\x43onfirmCleanPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"l\n\x13\x43onfirmCleanPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.ConfirmCleanPath\"\"\n\x12ProtectedCleanPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"p\n\x15ProtectedCleanPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.ProtectedCleanPath\"\x14\n\x12\x46inishedCleanPaths\"p\n\x15\x46inishedCleanPathsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.FinishedCleanPaths\"5\n\x0bOpenCommand\x12\x10\n\x08open_cmd\x18\x01 \x01(\t\x12\x14\n\x0cprofiles_dir\x18\x02 \x01(\t\"b\n\x0eOpenCommandMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12&\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.proto_types.OpenCommand\"0\n\x0fServingDocsPort\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\"j\n\x12ServingDocsPortMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.ServingDocsPort\"%\n\x15ServingDocsAccessInfo\x12\x0c\n\x04port\x18\x01 \x01(\t\"v\n\x18ServingDocsAccessInfoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.ServingDocsAccessInfo\"\x15\n\x13ServingDocsExitInfo\"r\n\x16ServingDocsExitInfoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.ServingDocsExitInfo\"J\n\x10RunResultWarning\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x11\n\tnode_name\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t\"l\n\x13RunResultWarningMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.RunResultWarning\"J\n\x10RunResultFailure\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x11\n\tnode_name\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t\"l\n\x13RunResultFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.RunResultFailure\"k\n\tStatsLine\x12\x30\n\x05stats\x18\x01 \x03(\x0b\x32!.proto_types.StatsLine.StatsEntry\x1a,\n\nStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"^\n\x0cStatsLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12$\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x16.proto_types.StatsLine\"\x1d\n\x0eRunResultError\x12\x0b\n\x03msg\x18\x01 \x01(\t\"h\n\x11RunResultErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.RunResultError\")\n\x17RunResultErrorNoMessage\x12\x0e\n\x06status\x18\x01 \x01(\t\"z\n\x1aRunResultErrorNoMessageMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.RunResultErrorNoMessage\"\x1f\n\x0fSQLCompiledPath\x12\x0c\n\x04path\x18\x01 
\x01(\t\"j\n\x12SQLCompiledPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.SQLCompiledPath\"-\n\x14\x43heckNodeTestFailure\x12\x15\n\rrelation_name\x18\x01 \x01(\t\"t\n\x17\x43heckNodeTestFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.CheckNodeTestFailure\"W\n\x0f\x45ndOfRunSummary\x12\x12\n\nnum_errors\x18\x01 \x01(\x05\x12\x14\n\x0cnum_warnings\x18\x02 \x01(\x05\x12\x1a\n\x12keyboard_interrupt\x18\x03 \x01(\x08\"j\n\x12\x45ndOfRunSummaryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.EndOfRunSummary\"U\n\x13LogSkipBecauseError\x12\x0e\n\x06schema\x18\x01 \x01(\t\x12\x10\n\x08relation\x18\x02 \x01(\t\x12\r\n\x05index\x18\x03 \x01(\x05\x12\r\n\x05total\x18\x04 \x01(\x05\"r\n\x16LogSkipBecauseErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.LogSkipBecauseError\"\x14\n\x12\x45nsureGitInstalled\"p\n\x15\x45nsureGitInstalledMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.EnsureGitInstalled\"\x1a\n\x18\x44\x65psCreatingLocalSymlink\"|\n\x1b\x44\x65psCreatingLocalSymlinkMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.DepsCreatingLocalSymlink\"\x19\n\x17\x44\x65psSymlinkNotAvailable\"z\n\x1a\x44\x65psSymlinkNotAvailableMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.DepsSymlinkNotAvailable\"\x11\n\x0f\x44isableTracking\"j\n\x12\x44isableTrackingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.DisableTracking\"\x1e\n\x0cSendingEvent\x12\x0e\n\x06kwargs\x18\x01 \x01(\t\"d\n\x0fSendingEventMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.SendingEvent\"\x12\n\x10SendEventFailure\"l\n\x13SendEventFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.SendEventFailure\"\r\n\x0b\x46lushEvents\"b\n\x0e\x46lushEventsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12&\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.proto_types.FlushEvents\"\x14\n\x12\x46lushEventsFailure\"p\n\x15\x46lushEventsFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.FlushEventsFailure\"-\n\x19TrackingInitializeFailure\x12\x10\n\x08\x65xc_info\x18\x01 \x01(\t\"~\n\x1cTrackingInitializeFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.TrackingInitializeFailure\"&\n\x17RunResultWarningMessage\x12\x0b\n\x03msg\x18\x01 \x01(\t\"z\n\x1aRunResultWarningMessageMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.RunResultWarningMessage\"\x1a\n\x0b\x44\x65\x62ugCmdOut\x12\x0b\n\x03msg\x18\x01 \x01(\t\"b\n\x0e\x44\x65\x62ugCmdOutMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12&\n\x04\x64\x61ta\x18\x02 
\x01(\x0b\x32\x18.proto_types.DebugCmdOut\"\x1d\n\x0e\x44\x65\x62ugCmdResult\x12\x0b\n\x03msg\x18\x01 \x01(\t\"h\n\x11\x44\x65\x62ugCmdResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.DebugCmdResult\"\x19\n\nListCmdOut\x12\x0b\n\x03msg\x18\x01 \x01(\t\"`\n\rListCmdOutMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.proto_types.ListCmdOut\"\xec\x01\n\x0eResourceReport\x12\x14\n\x0c\x63ommand_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ommand_success\x18\x03 \x01(\x08\x12\x1f\n\x17\x63ommand_wall_clock_time\x18\x04 \x01(\x02\x12\x19\n\x11process_user_time\x18\x05 \x01(\x02\x12\x1b\n\x13process_kernel_time\x18\x06 \x01(\x02\x12\x1b\n\x13process_mem_max_rss\x18\x07 \x01(\x03\x12\x19\n\x11process_in_blocks\x18\x08 \x01(\x03\x12\x1a\n\x12process_out_blocks\x18\t \x01(\x03\"h\n\x11ResourceReportMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.ResourceReportb\x06proto3')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x63ore_types.proto\x12\x0bproto_types\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x99\x02\n\rCoreEventInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\x12\x0b\n\x03msg\x18\x03 \x01(\t\x12\r\n\x05level\x18\x04 \x01(\t\x12\x15\n\rinvocation_id\x18\x05 \x01(\t\x12\x0b\n\x03pid\x18\x06 \x01(\x05\x12\x0e\n\x06thread\x18\x07 \x01(\t\x12&\n\x02ts\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x05\x65xtra\x18\t \x03(\x0b\x32%.proto_types.CoreEventInfo.ExtraEntry\x12\x10\n\x08\x63\x61tegory\x18\n \x01(\t\x1a,\n\nExtraEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"V\n\x0cNodeRelation\x12\x10\n\x08\x64\x61tabase\x18\n \x01(\t\x12\x0e\n\x06schema\x18\x0b \x01(\t\x12\r\n\x05\x61lias\x18\x0c \x01(\t\x12\x15\n\rrelation_name\x18\r \x01(\t\"\x91\x02\n\x08NodeInfo\x12\x11\n\tnode_path\x18\x01 \x01(\t\x12\x11\n\tnode_name\x18\x02 \x01(\t\x12\x11\n\tunique_id\x18\x03 \x01(\t\x12\x15\n\rresource_type\x18\x04 \x01(\t\x12\x14\n\x0cmaterialized\x18\x05 \x01(\t\x12\x13\n\x0bnode_status\x18\x06 \x01(\t\x12\x17\n\x0fnode_started_at\x18\x07 \x01(\t\x12\x18\n\x10node_finished_at\x18\x08 \x01(\t\x12%\n\x04meta\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x30\n\rnode_relation\x18\n \x01(\x0b\x32\x19.proto_types.NodeRelation\"\x7f\n\rTimingInfoMsg\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\nstarted_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x30\n\x0c\x63ompleted_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd1\x01\n\x0cRunResultMsg\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12/\n\x0btiming_info\x18\x03 \x03(\x0b\x32\x1a.proto_types.TimingInfoMsg\x12\x0e\n\x06thread\x18\x04 \x01(\t\x12\x16\n\x0e\x65xecution_time\x18\x05 \x01(\x02\x12\x31\n\x10\x61\x64\x61pter_response\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x14\n\x0cnum_failures\x18\x07 \x01(\x05\"\\\n\nColumnType\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x12\x1c\n\x14previous_column_type\x18\x02 \x01(\t\x12\x1b\n\x13\x63urrent_column_type\x18\x03 \x01(\t\"Y\n\x10\x43olumnConstraint\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x12\x17\n\x0f\x63onstraint_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63onstraint_type\x18\x03 \x01(\t\"T\n\x0fModelConstraint\x12\x17\n\x0f\x63onstraint_name\x18\x01 \x01(\t\x12\x17\n\x0f\x63onstraint_type\x18\x02
\x01(\t\x12\x0f\n\x07\x63olumns\x18\x03 \x03(\t\"9\n\x11MainReportVersion\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x13\n\x0blog_version\x18\x02 \x01(\x05\"n\n\x14MainReportVersionMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.MainReportVersion\"r\n\x0eMainReportArgs\x12\x33\n\x04\x61rgs\x18\x01 \x03(\x0b\x32%.proto_types.MainReportArgs.ArgsEntry\x1a+\n\tArgsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"h\n\x11MainReportArgsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.MainReportArgs\"+\n\x15MainTrackingUserState\x12\x12\n\nuser_state\x18\x01 \x01(\t\"v\n\x18MainTrackingUserStateMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.MainTrackingUserState\"5\n\x0fMergedFromState\x12\x12\n\nnum_merged\x18\x01 \x01(\x05\x12\x0e\n\x06sample\x18\x02 \x03(\t\"j\n\x12MergedFromStateMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.MergedFromState\"A\n\x14MissingProfileTarget\x12\x14\n\x0cprofile_name\x18\x01 \x01(\t\x12\x13\n\x0btarget_name\x18\x02 \x01(\t\"t\n\x17MissingProfileTargetMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.MissingProfileTarget\"(\n\x11InvalidOptionYAML\x12\x13\n\x0boption_name\x18\x01 \x01(\t\"n\n\x14InvalidOptionYAMLMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.InvalidOptionYAML\"!\n\x12LogDbtProjectError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\"p\n\x15LogDbtProjectErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.LogDbtProjectError\"3\n\x12LogDbtProfileError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\x12\x10\n\x08profiles\x18\x02 \x03(\t\"p\n\x15LogDbtProfileErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.LogDbtProfileError\"!\n\x12StarterProjectPath\x12\x0b\n\x03\x64ir\x18\x01 \x01(\t\"p\n\x15StarterProjectPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.StarterProjectPath\"$\n\x15\x43onfigFolderDirectory\x12\x0b\n\x03\x64ir\x18\x01 \x01(\t\"v\n\x18\x43onfigFolderDirectoryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.ConfigFolderDirectory\"\'\n\x14NoSampleProfileFound\x12\x0f\n\x07\x61\x64\x61pter\x18\x01 \x01(\t\"t\n\x17NoSampleProfileFoundMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.NoSampleProfileFound\"6\n\x18ProfileWrittenWithSample\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\"|\n\x1bProfileWrittenWithSampleMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.ProfileWrittenWithSample\"B\n$ProfileWrittenWithTargetTemplateYAML\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\"\x94\x01\n\'ProfileWrittenWithTargetTemplateYAMLMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12?\n\x04\x64\x61ta\x18\x02 
\x01(\x0b\x32\x31.proto_types.ProfileWrittenWithTargetTemplateYAML\"C\n%ProfileWrittenWithProjectTemplateYAML\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\"\x96\x01\n(ProfileWrittenWithProjectTemplateYAMLMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12@\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x32.proto_types.ProfileWrittenWithProjectTemplateYAML\"\x12\n\x10SettingUpProfile\"l\n\x13SettingUpProfileMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.SettingUpProfile\"\x1c\n\x1aInvalidProfileTemplateYAML\"\x80\x01\n\x1dInvalidProfileTemplateYAMLMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.InvalidProfileTemplateYAML\"(\n\x18ProjectNameAlreadyExists\x12\x0c\n\x04name\x18\x01 \x01(\t\"|\n\x1bProjectNameAlreadyExistsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.ProjectNameAlreadyExists\"K\n\x0eProjectCreated\x12\x14\n\x0cproject_name\x18\x01 \x01(\t\x12\x10\n\x08\x64ocs_url\x18\x02 \x01(\t\x12\x11\n\tslack_url\x18\x03 \x01(\t\"h\n\x11ProjectCreatedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.ProjectCreated\"@\n\x1aPackageRedirectDeprecation\x12\x10\n\x08old_name\x18\x01 \x01(\t\x12\x10\n\x08new_name\x18\x02 \x01(\t\"\x80\x01\n\x1dPackageRedirectDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.PackageRedirectDeprecation\"\x1f\n\x1dPackageInstallPathDeprecation\"\x86\x01\n PackageInstallPathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x38\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32*.proto_types.PackageInstallPathDeprecation\"H\n\x1b\x43onfigSourcePathDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\x12\x10\n\x08\x65xp_path\x18\x02 \x01(\t\"\x82\x01\n\x1e\x43onfigSourcePathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.ConfigSourcePathDeprecation\"F\n\x19\x43onfigDataPathDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\x12\x10\n\x08\x65xp_path\x18\x02 \x01(\t\"~\n\x1c\x43onfigDataPathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.ConfigDataPathDeprecation\".\n\x17MetricAttributesRenamed\x12\x13\n\x0bmetric_name\x18\x01 \x01(\t\"z\n\x1aMetricAttributesRenamedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.MetricAttributesRenamed\"+\n\x17\x45xposureNameDeprecation\x12\x10\n\x08\x65xposure\x18\x01 \x01(\t\"z\n\x1a\x45xposureNameDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.ExposureNameDeprecation\"^\n\x13InternalDeprecation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\x12\x18\n\x10suggested_action\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\"r\n\x16InternalDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.InternalDeprecation\"@\n\x1a\x45nvironmentVariableRenamed\x12\x10\n\x08old_name\x18\x01 
\x01(\t\x12\x10\n\x08new_name\x18\x02 \x01(\t\"\x80\x01\n\x1d\x45nvironmentVariableRenamedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.EnvironmentVariableRenamed\"3\n\x18\x43onfigLogPathDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\"|\n\x1b\x43onfigLogPathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.ConfigLogPathDeprecation\"6\n\x1b\x43onfigTargetPathDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\"\x82\x01\n\x1e\x43onfigTargetPathDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.ConfigTargetPathDeprecation\"C\n\x16TestsConfigDeprecation\x12\x17\n\x0f\x64\x65precated_path\x18\x01 \x01(\t\x12\x10\n\x08\x65xp_path\x18\x02 \x01(\t\"x\n\x19TestsConfigDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x31\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32#.proto_types.TestsConfigDeprecation\"\x1e\n\x1cProjectFlagsMovedDeprecation\"\x84\x01\n\x1fProjectFlagsMovedDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x37\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32).proto_types.ProjectFlagsMovedDeprecation\"X\n\x1cSpacesInModelNameDeprecation\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x15\n\rmodel_version\x18\x02 \x01(\t\x12\r\n\x05level\x18\x03 \x01(\t\"\x84\x01\n\x1fSpacesInModelNameDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x37\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32).proto_types.SpacesInModelNameDeprecation\"k\n$TotalModelNamesWithSpacesDeprecation\x12\x1b\n\x13\x63ount_invalid_names\x18\x01 \x01(\x05\x12\x17\n\x0fshow_debug_hint\x18\x02 \x01(\x08\x12\r\n\x05level\x18\x03 \x01(\t\"_\n)PackageMaterializationOverrideDeprecation\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x1c\n\x14materialization_name\x18\x02 \x01(\t\"\x9e\x01\n,PackageMaterializationOverrideDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x44\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x36.proto_types.PackageMaterializationOverrideDeprecation\"\x94\x01\n\'TotalModelNamesWithSpacesDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12?\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x31.proto_types.TotalModelNamesWithSpacesDeprecation\"V\n\x0f\x44\x65precatedModel\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x15\n\rmodel_version\x18\x02 \x01(\t\x12\x18\n\x10\x64\x65precation_date\x18\x03 \x01(\t\"j\n\x12\x44\x65precatedModelMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.DeprecatedModel\"7\n\x12InputFileDiffError\x12\x10\n\x08\x63\x61tegory\x18\x01 \x01(\t\x12\x0f\n\x07\x66ile_id\x18\x02 \x01(\t\"p\n\x15InputFileDiffErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.InputFileDiffError\"?\n\x14InvalidValueForField\x12\x12\n\nfield_name\x18\x01 \x01(\t\x12\x13\n\x0b\x66ield_value\x18\x02 \x01(\t\"t\n\x17InvalidValueForFieldMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.InvalidValueForField\"Q\n\x11ValidationWarning\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x12\n\nfield_name\x18\x02 \x01(\t\x12\x11\n\tnode_name\x18\x03 
\x01(\t\"n\n\x14ValidationWarningMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.ValidationWarning\"!\n\x11ParsePerfInfoPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"n\n\x14ParsePerfInfoPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.ParsePerfInfoPath\"1\n!PartialParsingErrorProcessingFile\x12\x0c\n\x04\x66ile\x18\x01 \x01(\t\"\x8e\x01\n$PartialParsingErrorProcessingFileMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12<\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32..proto_types.PartialParsingErrorProcessingFile\"\x86\x01\n\x13PartialParsingError\x12?\n\x08\x65xc_info\x18\x01 \x03(\x0b\x32-.proto_types.PartialParsingError.ExcInfoEntry\x1a.\n\x0c\x45xcInfoEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"r\n\x16PartialParsingErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.PartialParsingError\"\x1b\n\x19PartialParsingSkipParsing\"~\n\x1cPartialParsingSkipParsingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.PartialParsingSkipParsing\"&\n\x14UnableToPartialParse\x12\x0e\n\x06reason\x18\x01 \x01(\t\"t\n\x17UnableToPartialParseMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.UnableToPartialParse\"f\n\x12StateCheckVarsHash\x12\x10\n\x08\x63hecksum\x18\x01 \x01(\t\x12\x0c\n\x04vars\x18\x02 \x01(\t\x12\x0f\n\x07profile\x18\x03 \x01(\t\x12\x0e\n\x06target\x18\x04 \x01(\t\x12\x0f\n\x07version\x18\x05 \x01(\t\"p\n\x15StateCheckVarsHashMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.StateCheckVarsHash\"\x1a\n\x18PartialParsingNotEnabled\"|\n\x1bPartialParsingNotEnabledMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.PartialParsingNotEnabled\"C\n\x14ParsedFileLoadFailed\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\x12\x10\n\x08\x65xc_info\x18\x03 \x01(\t\"t\n\x17ParsedFileLoadFailedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.ParsedFileLoadFailed\"H\n\x15PartialParsingEnabled\x12\x0f\n\x07\x64\x65leted\x18\x01 \x01(\x05\x12\r\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x05\x12\x0f\n\x07\x63hanged\x18\x03 \x01(\x05\"v\n\x18PartialParsingEnabledMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.PartialParsingEnabled\"8\n\x12PartialParsingFile\x12\x0f\n\x07\x66ile_id\x18\x01 \x01(\t\x12\x11\n\toperation\x18\x02 \x01(\t\"p\n\x15PartialParsingFileMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.PartialParsingFile\"\xaf\x01\n\x1fInvalidDisabledTargetInTestNode\x12\x1b\n\x13resource_type_title\x18\x01 \x01(\t\x12\x11\n\tunique_id\x18\x02 \x01(\t\x12\x1a\n\x12original_file_path\x18\x03 \x01(\t\x12\x13\n\x0btarget_kind\x18\x04 \x01(\t\x12\x13\n\x0btarget_name\x18\x05 \x01(\t\x12\x16\n\x0etarget_package\x18\x06 \x01(\t\"\x8a\x01\n\"InvalidDisabledTargetInTestNodeMsg\x12(\n\x04info\x18\x01 
\x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.InvalidDisabledTargetInTestNode\"7\n\x18UnusedResourceConfigPath\x12\x1b\n\x13unused_config_paths\x18\x01 \x03(\t\"|\n\x1bUnusedResourceConfigPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.UnusedResourceConfigPath\"3\n\rSeedIncreased\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"f\n\x10SeedIncreasedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.SeedIncreased\">\n\x18SeedExceedsLimitSamePath\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"|\n\x1bSeedExceedsLimitSamePathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.SeedExceedsLimitSamePath\"D\n\x1eSeedExceedsLimitAndPathChanged\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x88\x01\n!SeedExceedsLimitAndPathChangedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x39\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32+.proto_types.SeedExceedsLimitAndPathChanged\"\\\n\x1fSeedExceedsLimitChecksumChanged\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x15\n\rchecksum_name\x18\x03 \x01(\t\"\x8a\x01\n\"SeedExceedsLimitChecksumChangedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.SeedExceedsLimitChecksumChanged\"%\n\x0cUnusedTables\x12\x15\n\runused_tables\x18\x01 \x03(\t\"d\n\x0fUnusedTablesMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.UnusedTables\"\x87\x01\n\x17WrongResourceSchemaFile\x12\x12\n\npatch_name\x18\x01 \x01(\t\x12\x15\n\rresource_type\x18\x02 \x01(\t\x12\x1c\n\x14plural_resource_type\x18\x03 \x01(\t\x12\x10\n\x08yaml_key\x18\x04 \x01(\t\x12\x11\n\tfile_path\x18\x05 \x01(\t\"z\n\x1aWrongResourceSchemaFileMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.WrongResourceSchemaFile\"K\n\x10NoNodeForYamlKey\x12\x12\n\npatch_name\x18\x01 \x01(\t\x12\x10\n\x08yaml_key\x18\x02 \x01(\t\x12\x11\n\tfile_path\x18\x03 \x01(\t\"l\n\x13NoNodeForYamlKeyMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.NoNodeForYamlKey\"+\n\x15MacroNotFoundForPatch\x12\x12\n\npatch_name\x18\x01 \x01(\t\"v\n\x18MacroNotFoundForPatchMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.MacroNotFoundForPatch\"\xb8\x01\n\x16NodeNotFoundOrDisabled\x12\x1a\n\x12original_file_path\x18\x01 \x01(\t\x12\x11\n\tunique_id\x18\x02 \x01(\t\x12\x1b\n\x13resource_type_title\x18\x03 \x01(\t\x12\x13\n\x0btarget_name\x18\x04 \x01(\t\x12\x13\n\x0btarget_kind\x18\x05 \x01(\t\x12\x16\n\x0etarget_package\x18\x06 \x01(\t\x12\x10\n\x08\x64isabled\x18\x07 \x01(\t\"x\n\x19NodeNotFoundOrDisabledMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x31\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32#.proto_types.NodeNotFoundOrDisabled\"H\n\x0fJinjaLogWarning\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03msg\x18\x02 \x01(\t\"j\n\x12JinjaLogWarningMsg\x12(\n\x04info\x18\x01 
\x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.JinjaLogWarning\"E\n\x0cJinjaLogInfo\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03msg\x18\x02 \x01(\t\"d\n\x0fJinjaLogInfoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.JinjaLogInfo\"F\n\rJinjaLogDebug\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03msg\x18\x02 \x01(\t\"f\n\x10JinjaLogDebugMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.JinjaLogDebug\"\xae\x01\n\x1eUnpinnedRefNewVersionAvailable\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x15\n\rref_node_name\x18\x02 \x01(\t\x12\x18\n\x10ref_node_package\x18\x03 \x01(\t\x12\x18\n\x10ref_node_version\x18\x04 \x01(\t\x12\x17\n\x0fref_max_version\x18\x05 \x01(\t\"\x88\x01\n!UnpinnedRefNewVersionAvailableMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x39\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32+.proto_types.UnpinnedRefNewVersionAvailable\"\xc6\x01\n\x1cUpcomingReferenceDeprecation\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x19\n\x11ref_model_package\x18\x02 \x01(\t\x12\x16\n\x0eref_model_name\x18\x03 \x01(\t\x12\x19\n\x11ref_model_version\x18\x04 \x01(\t\x12 \n\x18ref_model_latest_version\x18\x05 \x01(\t\x12\"\n\x1aref_model_deprecation_date\x18\x06 \x01(\t\"\x84\x01\n\x1fUpcomingReferenceDeprecationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x37\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32).proto_types.UpcomingReferenceDeprecation\"\xbd\x01\n\x13\x44\x65precatedReference\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x19\n\x11ref_model_package\x18\x02 \x01(\t\x12\x16\n\x0eref_model_name\x18\x03 \x01(\t\x12\x19\n\x11ref_model_version\x18\x04 \x01(\t\x12 \n\x18ref_model_latest_version\x18\x05 \x01(\t\x12\"\n\x1aref_model_deprecation_date\x18\x06 \x01(\t\"r\n\x16\x44\x65precatedReferenceMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.DeprecatedReference\"<\n$UnsupportedConstraintMaterialization\x12\x14\n\x0cmaterialized\x18\x01 \x01(\t\"\x94\x01\n\'UnsupportedConstraintMaterializationMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12?\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x31.proto_types.UnsupportedConstraintMaterialization\"M\n\x14ParseInlineNodeError\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\"t\n\x17ParseInlineNodeErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.ParseInlineNodeError\"(\n\x19SemanticValidationFailure\x12\x0b\n\x03msg\x18\x02 \x01(\t\"~\n\x1cSemanticValidationFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.SemanticValidationFailure\"\x8a\x03\n\x19UnversionedBreakingChange\x12\x18\n\x10\x62reaking_changes\x18\x01 \x03(\t\x12\x12\n\nmodel_name\x18\x02 \x01(\t\x12\x17\n\x0fmodel_file_path\x18\x03 \x01(\t\x12\"\n\x1a\x63ontract_enforced_disabled\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumns_removed\x18\x05 \x03(\t\x12\x34\n\x13\x63olumn_type_changes\x18\x06 \x03(\x0b\x32\x17.proto_types.ColumnType\x12I\n\"enforced_column_constraint_removed\x18\x07 
\x03(\x0b\x32\x1d.proto_types.ColumnConstraint\x12G\n!enforced_model_constraint_removed\x18\x08 \x03(\x0b\x32\x1c.proto_types.ModelConstraint\x12\x1f\n\x17materialization_changed\x18\t \x03(\t\"~\n\x1cUnversionedBreakingChangeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.UnversionedBreakingChange\"*\n\x14WarnStateTargetEqual\x12\x12\n\nstate_path\x18\x01 \x01(\t\"t\n\x17WarnStateTargetEqualMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.WarnStateTargetEqual\"%\n\x16\x46reshnessConfigProblem\x12\x0b\n\x03msg\x18\x01 \x01(\t\"x\n\x19\x46reshnessConfigProblemMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x31\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32#.proto_types.FreshnessConfigProblem\"/\n\x1dGitSparseCheckoutSubdirectory\x12\x0e\n\x06subdir\x18\x01 \x01(\t\"\x86\x01\n GitSparseCheckoutSubdirectoryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x38\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32*.proto_types.GitSparseCheckoutSubdirectory\"/\n\x1bGitProgressCheckoutRevision\x12\x10\n\x08revision\x18\x01 \x01(\t\"\x82\x01\n\x1eGitProgressCheckoutRevisionMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.GitProgressCheckoutRevision\"4\n%GitProgressUpdatingExistingDependency\x12\x0b\n\x03\x64ir\x18\x01 \x01(\t\"\x96\x01\n(GitProgressUpdatingExistingDependencyMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12@\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x32.proto_types.GitProgressUpdatingExistingDependency\".\n\x1fGitProgressPullingNewDependency\x12\x0b\n\x03\x64ir\x18\x01 \x01(\t\"\x8a\x01\n\"GitProgressPullingNewDependencyMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.GitProgressPullingNewDependency\"\x1d\n\x0eGitNothingToDo\x12\x0b\n\x03sha\x18\x01 \x01(\t\"h\n\x11GitNothingToDoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.GitNothingToDo\"E\n\x1fGitProgressUpdatedCheckoutRange\x12\x11\n\tstart_sha\x18\x01 \x01(\t\x12\x0f\n\x07\x65nd_sha\x18\x02 \x01(\t\"\x8a\x01\n\"GitProgressUpdatedCheckoutRangeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.GitProgressUpdatedCheckoutRange\"*\n\x17GitProgressCheckedOutAt\x12\x0f\n\x07\x65nd_sha\x18\x01 \x01(\t\"z\n\x1aGitProgressCheckedOutAtMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.GitProgressCheckedOutAt\")\n\x1aRegistryProgressGETRequest\x12\x0b\n\x03url\x18\x01 \x01(\t\"\x80\x01\n\x1dRegistryProgressGETRequestMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.RegistryProgressGETRequest\"=\n\x1bRegistryProgressGETResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x11\n\tresp_code\x18\x02 \x01(\x05\"\x82\x01\n\x1eRegistryProgressGETResponseMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.RegistryProgressGETResponse\"_\n\x1dSelectorReportInvalidSelector\x12\x17\n\x0fvalid_selectors\x18\x01 \x01(\t\x12\x13\n\x0bspec_method\x18\x02 \x01(\t\x12\x10\n\x08raw_spec\x18\x03 \x01(\t\"\x86\x01\n 
SelectorReportInvalidSelectorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x38\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32*.proto_types.SelectorReportInvalidSelector\"\x15\n\x13\x44\x65psNoPackagesFound\"r\n\x16\x44\x65psNoPackagesFoundMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.DepsNoPackagesFound\"/\n\x17\x44\x65psStartPackageInstall\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\"z\n\x1a\x44\x65psStartPackageInstallMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.DepsStartPackageInstall\"\'\n\x0f\x44\x65psInstallInfo\x12\x14\n\x0cversion_name\x18\x01 \x01(\t\"j\n\x12\x44\x65psInstallInfoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.DepsInstallInfo\"-\n\x13\x44\x65psUpdateAvailable\x12\x16\n\x0eversion_latest\x18\x01 \x01(\t\"r\n\x16\x44\x65psUpdateAvailableMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.DepsUpdateAvailable\"\x0e\n\x0c\x44\x65psUpToDate\"d\n\x0f\x44\x65psUpToDateMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.DepsUpToDate\",\n\x14\x44\x65psListSubdirectory\x12\x14\n\x0csubdirectory\x18\x01 \x01(\t\"t\n\x17\x44\x65psListSubdirectoryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.DepsListSubdirectory\".\n\x1a\x44\x65psNotifyUpdatesAvailable\x12\x10\n\x08packages\x18\x01 \x03(\t\"\x80\x01\n\x1d\x44\x65psNotifyUpdatesAvailableMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.DepsNotifyUpdatesAvailable\".\n\x1fRegistryIndexProgressGETRequest\x12\x0b\n\x03url\x18\x01 \x01(\t\"\x8a\x01\n\"RegistryIndexProgressGETRequestMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.RegistryIndexProgressGETRequest\"B\n RegistryIndexProgressGETResponse\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x11\n\tresp_code\x18\x02 \x01(\x05\"\x8c\x01\n#RegistryIndexProgressGETResponseMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12;\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32-.proto_types.RegistryIndexProgressGETResponse\"2\n\x1eRegistryResponseUnexpectedType\x12\x10\n\x08response\x18\x01 \x01(\t\"\x88\x01\n!RegistryResponseUnexpectedTypeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x39\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32+.proto_types.RegistryResponseUnexpectedType\"2\n\x1eRegistryResponseMissingTopKeys\x12\x10\n\x08response\x18\x01 \x01(\t\"\x88\x01\n!RegistryResponseMissingTopKeysMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x39\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32+.proto_types.RegistryResponseMissingTopKeys\"5\n!RegistryResponseMissingNestedKeys\x12\x10\n\x08response\x18\x01 \x01(\t\"\x8e\x01\n$RegistryResponseMissingNestedKeysMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12<\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32..proto_types.RegistryResponseMissingNestedKeys\"3\n\x1fRegistryResponseExtraNestedKeys\x12\x10\n\x08response\x18\x01 \x01(\t\"\x8a\x01\n\"RegistryResponseExtraNestedKeysMsg\x12(\n\x04info\x18\x01 
\x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12:\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32,.proto_types.RegistryResponseExtraNestedKeys\"(\n\x18\x44\x65psSetDownloadDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\"|\n\x1b\x44\x65psSetDownloadDirectoryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.DepsSetDownloadDirectory\"-\n\x0c\x44\x65psUnpinned\x12\x10\n\x08revision\x18\x01 \x01(\t\x12\x0b\n\x03git\x18\x02 \x01(\t\"d\n\x0f\x44\x65psUnpinnedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.DepsUnpinned\"/\n\x1bNoNodesForSelectionCriteria\x12\x10\n\x08spec_raw\x18\x01 \x01(\t\"\x82\x01\n\x1eNoNodesForSelectionCriteriaMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.NoNodesForSelectionCriteria\")\n\x10\x44\x65psLockUpdating\x12\x15\n\rlock_filepath\x18\x01 \x01(\t\"l\n\x13\x44\x65psLockUpdatingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.DepsLockUpdating\"R\n\x0e\x44\x65psAddPackage\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x19\n\x11packages_filepath\x18\x03 \x01(\t\"h\n\x11\x44\x65psAddPackageMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.DepsAddPackage\"\xa7\x01\n\x19\x44\x65psFoundDuplicatePackage\x12S\n\x0fremoved_package\x18\x01 \x03(\x0b\x32:.proto_types.DepsFoundDuplicatePackage.RemovedPackageEntry\x1a\x35\n\x13RemovedPackageEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"~\n\x1c\x44\x65psFoundDuplicatePackageMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.DepsFoundDuplicatePackage\"$\n\x12\x44\x65psVersionMissing\x12\x0e\n\x06source\x18\x01 \x01(\t\"p\n\x15\x44\x65psVersionMissingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.DepsVersionMissing\"/\n\x17\x44\x65psScrubbedPackageName\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\"z\n\x1a\x44\x65psScrubbedPackageNameMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.DepsScrubbedPackageName\"*\n\x1bRunningOperationCaughtError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\"\x82\x01\n\x1eRunningOperationCaughtErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.RunningOperationCaughtError\"\x11\n\x0f\x43ompileComplete\"j\n\x12\x43ompileCompleteMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.CompileComplete\"\x18\n\x16\x46reshnessCheckComplete\"x\n\x19\x46reshnessCheckCompleteMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x31\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32#.proto_types.FreshnessCheckComplete\"\x1c\n\nSeedHeader\x12\x0e\n\x06header\x18\x01 \x01(\t\"`\n\rSeedHeaderMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.proto_types.SeedHeader\"]\n\x12SQLRunnerException\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\x12\x10\n\x08\x65xc_info\x18\x02 \x01(\t\x12(\n\tnode_info\x18\x03 
\x01(\x0b\x32\x15.proto_types.NodeInfo\"p\n\x15SQLRunnerExceptionMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.SQLRunnerException\"\xa8\x01\n\rLogTestResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\x12\n\nnum_models\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\x12\x14\n\x0cnum_failures\x18\x07 \x01(\x05\"f\n\x10LogTestResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.LogTestResult\"k\n\x0cLogStartLine\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\r\n\x05index\x18\x03 \x01(\x05\x12\r\n\x05total\x18\x04 \x01(\x05\"d\n\x0fLogStartLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.LogStartLine\"\x95\x01\n\x0eLogModelResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\"h\n\x11LogModelResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.LogModelResult\"\x92\x02\n\x11LogSnapshotResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\x12\x34\n\x03\x63\x66g\x18\x07 \x03(\x0b\x32\'.proto_types.LogSnapshotResult.CfgEntry\x12\x16\n\x0eresult_message\x18\x08 \x01(\t\x1a*\n\x08\x43\x66gEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"n\n\x14LogSnapshotResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.LogSnapshotResult\"\xb9\x01\n\rLogSeedResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x16\n\x0eresult_message\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\x12\x0e\n\x06schema\x18\x07 \x01(\t\x12\x10\n\x08relation\x18\x08 \x01(\t\"f\n\x10LogSeedResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.LogSeedResult\"\xad\x01\n\x12LogFreshnessResult\x12\x0e\n\x06status\x18\x01 \x01(\t\x12(\n\tnode_info\x18\x02 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\r\n\x05index\x18\x03 \x01(\x05\x12\r\n\x05total\x18\x04 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x05 \x01(\x02\x12\x13\n\x0bsource_name\x18\x06 \x01(\t\x12\x12\n\ntable_name\x18\x07 \x01(\t\"p\n\x15LogFreshnessResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.LogFreshnessResult\"\x98\x01\n\x11LogNodeNoOpResult\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 
\x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\"n\n\x14LogNodeNoOpResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1e.proto_types.LogNodeNoOpResult\"\"\n\rLogCancelLine\x12\x11\n\tconn_name\x18\x01 \x01(\t\"f\n\x10LogCancelLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.LogCancelLine\"\x1f\n\x0f\x44\x65\x66\x61ultSelector\x12\x0c\n\x04name\x18\x01 \x01(\t\"j\n\x12\x44\x65\x66\x61ultSelectorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.DefaultSelector\"5\n\tNodeStart\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\"^\n\x0cNodeStartMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12$\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x16.proto_types.NodeStart\"g\n\x0cNodeFinished\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12-\n\nrun_result\x18\x02 \x01(\x0b\x32\x19.proto_types.RunResultMsg\"d\n\x0fNodeFinishedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.NodeFinished\"+\n\x1bQueryCancelationUnsupported\x12\x0c\n\x04type\x18\x01 \x01(\t\"\x82\x01\n\x1eQueryCancelationUnsupportedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x36\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32(.proto_types.QueryCancelationUnsupported\"O\n\x0f\x43oncurrencyLine\x12\x13\n\x0bnum_threads\x18\x01 \x01(\x05\x12\x13\n\x0btarget_name\x18\x02 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x05\"j\n\x12\x43oncurrencyLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.ConcurrencyLine\"E\n\x19WritingInjectedSQLForNode\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\"~\n\x1cWritingInjectedSQLForNodeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.WritingInjectedSQLForNode\"9\n\rNodeCompiling\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\"f\n\x10NodeCompilingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.NodeCompiling\"9\n\rNodeExecuting\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\"f\n\x10NodeExecutingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12(\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1a.proto_types.NodeExecuting\"m\n\x10LogHookStartLine\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x11\n\tstatement\x18\x02 \x01(\t\x12\r\n\x05index\x18\x03 \x01(\x05\x12\r\n\x05total\x18\x04 \x01(\x05\"l\n\x13LogHookStartLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.LogHookStartLine\"\x93\x01\n\x0eLogHookEndLine\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x11\n\tstatement\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05index\x18\x04 \x01(\x05\x12\r\n\x05total\x18\x05 \x01(\x05\x12\x16\n\x0e\x65xecution_time\x18\x06 \x01(\x02\"h\n\x11LogHookEndLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.LogHookEndLine\"\x93\x01\n\x0fSkippingDetails\x12(\n\tnode_info\x18\x01 
\x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x15\n\rresource_type\x18\x02 \x01(\t\x12\x0e\n\x06schema\x18\x03 \x01(\t\x12\x11\n\tnode_name\x18\x04 \x01(\t\x12\r\n\x05index\x18\x05 \x01(\x05\x12\r\n\x05total\x18\x06 \x01(\x05\"j\n\x12SkippingDetailsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.SkippingDetails\"\r\n\x0bNothingToDo\"b\n\x0eNothingToDoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12&\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.proto_types.NothingToDo\",\n\x1dRunningOperationUncaughtError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\"\x86\x01\n RunningOperationUncaughtErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x38\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32*.proto_types.RunningOperationUncaughtError\"\x93\x01\n\x0c\x45ndRunResult\x12*\n\x07results\x18\x01 \x03(\x0b\x32\x19.proto_types.RunResultMsg\x12\x14\n\x0c\x65lapsed_time\x18\x02 \x01(\x02\x12\x30\n\x0cgenerated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07success\x18\x04 \x01(\x08\"d\n\x0f\x45ndRunResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.EndRunResult\"\x11\n\x0fNoNodesSelected\"j\n\x12NoNodesSelectedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.NoNodesSelected\"w\n\x10\x43ommandCompleted\x12\x0f\n\x07\x63ommand\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x30\n\x0c\x63ompleted_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07\x65lapsed\x18\x04 \x01(\x02\"l\n\x13\x43ommandCompletedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.CommandCompleted\"k\n\x08ShowNode\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x0f\n\x07preview\x18\x02 \x01(\t\x12\x11\n\tis_inline\x18\x03 \x01(\x08\x12\x15\n\routput_format\x18\x04 \x01(\t\x12\x11\n\tunique_id\x18\x05 \x01(\t\"\\\n\x0bShowNodeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12#\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x15.proto_types.ShowNode\"p\n\x0c\x43ompiledNode\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x10\n\x08\x63ompiled\x18\x02 \x01(\t\x12\x11\n\tis_inline\x18\x03 \x01(\x08\x12\x15\n\routput_format\x18\x04 \x01(\t\x12\x11\n\tunique_id\x18\x05 \x01(\t\"d\n\x0f\x43ompiledNodeMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.CompiledNode\"b\n\x17\x43\x61tchableExceptionOnRun\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\x12\x10\n\x08\x65xc_info\x18\x03 \x01(\t\"z\n\x1a\x43\x61tchableExceptionOnRunMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.CatchableExceptionOnRun\"_\n\x12InternalErrorOnRun\x12\x12\n\nbuild_path\x18\x01 \x01(\t\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\x12(\n\tnode_info\x18\x03 \x01(\x0b\x32\x15.proto_types.NodeInfo\"p\n\x15InternalErrorOnRunMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.InternalErrorOnRun\"u\n\x15GenericExceptionOnRun\x12\x12\n\nbuild_path\x18\x01 \x01(\t\x12\x11\n\tunique_id\x18\x02 \x01(\t\x12\x0b\n\x03\x65xc\x18\x03 \x01(\t\x12(\n\tnode_info\x18\x04 
\x01(\x0b\x32\x15.proto_types.NodeInfo\"v\n\x18GenericExceptionOnRunMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.GenericExceptionOnRun\"N\n\x1aNodeConnectionReleaseError\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x0b\n\x03\x65xc\x18\x02 \x01(\t\x12\x10\n\x08\x65xc_info\x18\x03 \x01(\t\"\x80\x01\n\x1dNodeConnectionReleaseErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x35\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\'.proto_types.NodeConnectionReleaseError\"\x1f\n\nFoundStats\x12\x11\n\tstat_line\x18\x01 \x01(\t\"`\n\rFoundStatsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.proto_types.FoundStats\"\x17\n\x15MainKeyboardInterrupt\"v\n\x18MainKeyboardInterruptMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.MainKeyboardInterrupt\"#\n\x14MainEncounteredError\x12\x0b\n\x03\x65xc\x18\x01 \x01(\t\"t\n\x17MainEncounteredErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.MainEncounteredError\"%\n\x0eMainStackTrace\x12\x13\n\x0bstack_trace\x18\x01 \x01(\t\"h\n\x11MainStackTraceMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.MainStackTrace\"p\n\x13TimingInfoCollected\x12(\n\tnode_info\x18\x01 \x01(\x0b\x32\x15.proto_types.NodeInfo\x12/\n\x0btiming_info\x18\x02 \x01(\x0b\x32\x1a.proto_types.TimingInfoMsg\"r\n\x16TimingInfoCollectedMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.TimingInfoCollected\"&\n\x12LogDebugStackTrace\x12\x10\n\x08\x65xc_info\x18\x01 \x01(\t\"p\n\x15LogDebugStackTraceMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.LogDebugStackTrace\"\x1e\n\x0e\x43heckCleanPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"h\n\x11\x43heckCleanPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.CheckCleanPath\" \n\x10\x43onfirmCleanPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"l\n\x13\x43onfirmCleanPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.ConfirmCleanPath\"\"\n\x12ProtectedCleanPath\x12\x0c\n\x04path\x18\x01 \x01(\t\"p\n\x15ProtectedCleanPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.ProtectedCleanPath\"\x14\n\x12\x46inishedCleanPaths\"p\n\x15\x46inishedCleanPathsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.FinishedCleanPaths\"5\n\x0bOpenCommand\x12\x10\n\x08open_cmd\x18\x01 \x01(\t\x12\x14\n\x0cprofiles_dir\x18\x02 \x01(\t\"b\n\x0eOpenCommandMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12&\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.proto_types.OpenCommand\"0\n\x0fServingDocsPort\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\"j\n\x12ServingDocsPortMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 
\x01(\x0b\x32\x1c.proto_types.ServingDocsPort\"%\n\x15ServingDocsAccessInfo\x12\x0c\n\x04port\x18\x01 \x01(\t\"v\n\x18ServingDocsAccessInfoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".proto_types.ServingDocsAccessInfo\"\x15\n\x13ServingDocsExitInfo\"r\n\x16ServingDocsExitInfoMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 .proto_types.ServingDocsExitInfo\"t\n\x10RunResultWarning\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x11\n\tnode_name\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t\x12(\n\tnode_info\x18\x04 \x01(\x0b\x32\x15.proto_types.NodeInfo\"l\n\x13RunResultWarningMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.RunResultWarning\"t\n\x10RunResultFailure\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x11\n\tnode_name\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t\x12(\n\tnode_info\x18\x04 \x01(\x0b\x32\x15.proto_types.NodeInfo\"l\n\x13RunResultFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.RunResultFailure\"k\n\tStatsLine\x12\x30\n\x05stats\x18\x01 \x03(\x0b\x32!.proto_types.StatsLine.StatsEntry\x1a,\n\nStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"^\n\x0cStatsLineMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12$\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x16.proto_types.StatsLine\"G\n\x0eRunResultError\x12\x0b\n\x03msg\x18\x01 \x01(\t\x12(\n\tnode_info\x18\x02 \x01(\x0b\x32\x15.proto_types.NodeInfo\"h\n\x11RunResultErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.RunResultError\"S\n\x17RunResultErrorNoMessage\x12\x0e\n\x06status\x18\x01 \x01(\t\x12(\n\tnode_info\x18\x02 \x01(\x0b\x32\x15.proto_types.NodeInfo\"z\n\x1aRunResultErrorNoMessageMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.RunResultErrorNoMessage\"I\n\x0fSQLCompiledPath\x12\x0c\n\x04path\x18\x01 \x01(\t\x12(\n\tnode_info\x18\x02 \x01(\x0b\x32\x15.proto_types.NodeInfo\"j\n\x12SQLCompiledPathMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.SQLCompiledPath\"W\n\x14\x43heckNodeTestFailure\x12\x15\n\rrelation_name\x18\x01 \x01(\t\x12(\n\tnode_info\x18\x02 \x01(\x0b\x32\x15.proto_types.NodeInfo\"t\n\x17\x43heckNodeTestFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12/\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32!.proto_types.CheckNodeTestFailure\"W\n\x0f\x45ndOfRunSummary\x12\x12\n\nnum_errors\x18\x01 \x01(\x05\x12\x14\n\x0cnum_warnings\x18\x02 \x01(\x05\x12\x1a\n\x12keyboard_interrupt\x18\x03 \x01(\x08\"j\n\x12\x45ndOfRunSummaryMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.EndOfRunSummary\"U\n\x13LogSkipBecauseError\x12\x0e\n\x06schema\x18\x01 \x01(\t\x12\x10\n\x08relation\x18\x02 \x01(\t\x12\r\n\x05index\x18\x03 \x01(\x05\x12\r\n\x05total\x18\x04 \x01(\x05\"r\n\x16LogSkipBecauseErrorMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12.\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32 
.proto_types.LogSkipBecauseError\"\x14\n\x12\x45nsureGitInstalled\"p\n\x15\x45nsureGitInstalledMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.EnsureGitInstalled\"\x1a\n\x18\x44\x65psCreatingLocalSymlink\"|\n\x1b\x44\x65psCreatingLocalSymlinkMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x33\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32%.proto_types.DepsCreatingLocalSymlink\"\x19\n\x17\x44\x65psSymlinkNotAvailable\"z\n\x1a\x44\x65psSymlinkNotAvailableMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.DepsSymlinkNotAvailable\"\x11\n\x0f\x44isableTracking\"j\n\x12\x44isableTrackingMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12*\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1c.proto_types.DisableTracking\"\x1e\n\x0cSendingEvent\x12\x0e\n\x06kwargs\x18\x01 \x01(\t\"d\n\x0fSendingEventMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\'\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x19.proto_types.SendingEvent\"\x12\n\x10SendEventFailure\"l\n\x13SendEventFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1d.proto_types.SendEventFailure\"\r\n\x0b\x46lushEvents\"b\n\x0e\x46lushEventsMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12&\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.proto_types.FlushEvents\"\x14\n\x12\x46lushEventsFailure\"p\n\x15\x46lushEventsFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12-\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1f.proto_types.FlushEventsFailure\"-\n\x19TrackingInitializeFailure\x12\x10\n\x08\x65xc_info\x18\x01 \x01(\t\"~\n\x1cTrackingInitializeFailureMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x34\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32&.proto_types.TrackingInitializeFailure\"P\n\x17RunResultWarningMessage\x12\x0b\n\x03msg\x18\x01 \x01(\t\x12(\n\tnode_info\x18\x02 \x01(\x0b\x32\x15.proto_types.NodeInfo\"z\n\x1aRunResultWarningMessageMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12\x32\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32$.proto_types.RunResultWarningMessage\"\x1a\n\x0b\x44\x65\x62ugCmdOut\x12\x0b\n\x03msg\x18\x01 \x01(\t\"b\n\x0e\x44\x65\x62ugCmdOutMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12&\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.proto_types.DebugCmdOut\"\x1d\n\x0e\x44\x65\x62ugCmdResult\x12\x0b\n\x03msg\x18\x01 \x01(\t\"h\n\x11\x44\x65\x62ugCmdResultMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.DebugCmdResult\"\x19\n\nListCmdOut\x12\x0b\n\x03msg\x18\x01 \x01(\t\"`\n\rListCmdOutMsg\x12(\n\x04info\x18\x01 \x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.proto_types.ListCmdOut\"\xec\x01\n\x0eResourceReport\x12\x14\n\x0c\x63ommand_name\x18\x02 \x01(\t\x12\x17\n\x0f\x63ommand_success\x18\x03 \x01(\x08\x12\x1f\n\x17\x63ommand_wall_clock_time\x18\x04 \x01(\x02\x12\x19\n\x11process_user_time\x18\x05 \x01(\x02\x12\x1b\n\x13process_kernel_time\x18\x06 \x01(\x02\x12\x1b\n\x13process_mem_max_rss\x18\x07 \x01(\x03\x12\x19\n\x11process_in_blocks\x18\x08 \x01(\x03\x12\x1a\n\x12process_out_blocks\x18\t \x01(\x03\"h\n\x11ResourceReportMsg\x12(\n\x04info\x18\x01 
\x01(\x0b\x32\x1a.proto_types.CoreEventInfo\x12)\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.proto_types.ResourceReportb\x06proto3')
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'core_types_pb2', globals())
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'core_types_pb2', _globals)
 if _descriptor._USE_C_DESCRIPTORS == False:
-  DESCRIPTOR._options = None
-  _COREEVENTINFO_EXTRAENTRY._options = None
-  _COREEVENTINFO_EXTRAENTRY._serialized_options = b'8\001'
-  _MAINREPORTARGS_ARGSENTRY._options = None
-  _MAINREPORTARGS_ARGSENTRY._serialized_options = b'8\001'
-  _PARTIALPARSINGERROR_EXCINFOENTRY._options = None
-  _PARTIALPARSINGERROR_EXCINFOENTRY._serialized_options = b'8\001'
-  _DEPSFOUNDDUPLICATEPACKAGE_REMOVEDPACKAGEENTRY._options = None
-  _DEPSFOUNDDUPLICATEPACKAGE_REMOVEDPACKAGEENTRY._serialized_options = b'8\001'
-  _LOGSNAPSHOTRESULT_CFGENTRY._options = None
-  _LOGSNAPSHOTRESULT_CFGENTRY._serialized_options = b'8\001'
-  _STATSLINE_STATSENTRY._options = None
-  _STATSLINE_STATSENTRY._serialized_options = b'8\001'
-  _COREEVENTINFO._serialized_start=97
-  _COREEVENTINFO._serialized_end=378
-  _COREEVENTINFO_EXTRAENTRY._serialized_start=334
-  _COREEVENTINFO_EXTRAENTRY._serialized_end=378
-  _NODERELATION._serialized_start=380
-  _NODERELATION._serialized_end=466
-  _NODEINFO._serialized_start=469
-  _NODEINFO._serialized_end=742
-  _TIMINGINFOMSG._serialized_start=744
-  _TIMINGINFOMSG._serialized_end=871
-  _RUNRESULTMSG._serialized_start=874
-  _RUNRESULTMSG._serialized_end=1083
-  _COLUMNTYPE._serialized_start=1085
-  _COLUMNTYPE._serialized_end=1177
-  _COLUMNCONSTRAINT._serialized_start=1179
-  _COLUMNCONSTRAINT._serialized_end=1268
-  _MODELCONSTRAINT._serialized_start=1270
-  _MODELCONSTRAINT._serialized_end=1354
-  _MAINREPORTVERSION._serialized_start=1356
-  _MAINREPORTVERSION._serialized_end=1413
-  _MAINREPORTVERSIONMSG._serialized_start=1415
-  _MAINREPORTVERSIONMSG._serialized_end=1525
-  _MAINREPORTARGS._serialized_start=1527
-  _MAINREPORTARGS._serialized_end=1641
-  _MAINREPORTARGS_ARGSENTRY._serialized_start=1598
-  _MAINREPORTARGS_ARGSENTRY._serialized_end=1641
-  _MAINREPORTARGSMSG._serialized_start=1643
-  _MAINREPORTARGSMSG._serialized_end=1747
-  _MAINTRACKINGUSERSTATE._serialized_start=1749
-  _MAINTRACKINGUSERSTATE._serialized_end=1792
-  _MAINTRACKINGUSERSTATEMSG._serialized_start=1794
-  _MAINTRACKINGUSERSTATEMSG._serialized_end=1912
-  _MERGEDFROMSTATE._serialized_start=1914
-  _MERGEDFROMSTATE._serialized_end=1967
-  _MERGEDFROMSTATEMSG._serialized_start=1969
-  _MERGEDFROMSTATEMSG._serialized_end=2075
-  _MISSINGPROFILETARGET._serialized_start=2077
-  _MISSINGPROFILETARGET._serialized_end=2142
-  _MISSINGPROFILETARGETMSG._serialized_start=2144
-  _MISSINGPROFILETARGETMSG._serialized_end=2260
-  _INVALIDOPTIONYAML._serialized_start=2262
-  _INVALIDOPTIONYAML._serialized_end=2302
-  _INVALIDOPTIONYAMLMSG._serialized_start=2304
-  _INVALIDOPTIONYAMLMSG._serialized_end=2414
-  _LOGDBTPROJECTERROR._serialized_start=2416
-  _LOGDBTPROJECTERROR._serialized_end=2449
-  _LOGDBTPROJECTERRORMSG._serialized_start=2451
-  _LOGDBTPROJECTERRORMSG._serialized_end=2563
-  _LOGDBTPROFILEERROR._serialized_start=2565
-  _LOGDBTPROFILEERROR._serialized_end=2616
-  _LOGDBTPROFILEERRORMSG._serialized_start=2618
-  _LOGDBTPROFILEERRORMSG._serialized_end=2730
-  _STARTERPROJECTPATH._serialized_start=2732
-  _STARTERPROJECTPATH._serialized_end=2765
-  _STARTERPROJECTPATHMSG._serialized_start=2767
-  _STARTERPROJECTPATHMSG._serialized_end=2879
-  _CONFIGFOLDERDIRECTORY._serialized_start=2881
-  _CONFIGFOLDERDIRECTORY._serialized_end=2917
-  _CONFIGFOLDERDIRECTORYMSG._serialized_start=2919
-  _CONFIGFOLDERDIRECTORYMSG._serialized_end=3037
-  _NOSAMPLEPROFILEFOUND._serialized_start=3039
-  _NOSAMPLEPROFILEFOUND._serialized_end=3078
-  _NOSAMPLEPROFILEFOUNDMSG._serialized_start=3080
-  _NOSAMPLEPROFILEFOUNDMSG._serialized_end=3196
-  _PROFILEWRITTENWITHSAMPLE._serialized_start=3198
-  _PROFILEWRITTENWITHSAMPLE._serialized_end=3252
-  _PROFILEWRITTENWITHSAMPLEMSG._serialized_start=3254
-  _PROFILEWRITTENWITHSAMPLEMSG._serialized_end=3378
-  _PROFILEWRITTENWITHTARGETTEMPLATEYAML._serialized_start=3380
-  _PROFILEWRITTENWITHTARGETTEMPLATEYAML._serialized_end=3446
-  _PROFILEWRITTENWITHTARGETTEMPLATEYAMLMSG._serialized_start=3449
-  _PROFILEWRITTENWITHTARGETTEMPLATEYAMLMSG._serialized_end=3597
-  _PROFILEWRITTENWITHPROJECTTEMPLATEYAML._serialized_start=3599
-  _PROFILEWRITTENWITHPROJECTTEMPLATEYAML._serialized_end=3666
-  _PROFILEWRITTENWITHPROJECTTEMPLATEYAMLMSG._serialized_start=3669
-  _PROFILEWRITTENWITHPROJECTTEMPLATEYAMLMSG._serialized_end=3819
-  _SETTINGUPPROFILE._serialized_start=3821
-  _SETTINGUPPROFILE._serialized_end=3839
-  _SETTINGUPPROFILEMSG._serialized_start=3841
-  _SETTINGUPPROFILEMSG._serialized_end=3949
-  _INVALIDPROFILETEMPLATEYAML._serialized_start=3951
-  _INVALIDPROFILETEMPLATEYAML._serialized_end=3979
-  _INVALIDPROFILETEMPLATEYAMLMSG._serialized_start=3982
-  _INVALIDPROFILETEMPLATEYAMLMSG._serialized_end=4110
-  _PROJECTNAMEALREADYEXISTS._serialized_start=4112
-  _PROJECTNAMEALREADYEXISTS._serialized_end=4152
-  _PROJECTNAMEALREADYEXISTSMSG._serialized_start=4154
-  _PROJECTNAMEALREADYEXISTSMSG._serialized_end=4278
-  _PROJECTCREATED._serialized_start=4280
-  _PROJECTCREATED._serialized_end=4355
-  _PROJECTCREATEDMSG._serialized_start=4357
-  _PROJECTCREATEDMSG._serialized_end=4461
-  _PACKAGEREDIRECTDEPRECATION._serialized_start=4463
-  _PACKAGEREDIRECTDEPRECATION._serialized_end=4527
-  _PACKAGEREDIRECTDEPRECATIONMSG._serialized_start=4530
-  _PACKAGEREDIRECTDEPRECATIONMSG._serialized_end=4658
-  _PACKAGEINSTALLPATHDEPRECATION._serialized_start=4660
-  _PACKAGEINSTALLPATHDEPRECATION._serialized_end=4691
-  _PACKAGEINSTALLPATHDEPRECATIONMSG._serialized_start=4694
-  _PACKAGEINSTALLPATHDEPRECATIONMSG._serialized_end=4828
-  _CONFIGSOURCEPATHDEPRECATION._serialized_start=4830
-  _CONFIGSOURCEPATHDEPRECATION._serialized_end=4902
-  _CONFIGSOURCEPATHDEPRECATIONMSG._serialized_start=4905
-  _CONFIGSOURCEPATHDEPRECATIONMSG._serialized_end=5035
-  _CONFIGDATAPATHDEPRECATION._serialized_start=5037
-  _CONFIGDATAPATHDEPRECATION._serialized_end=5107
-  _CONFIGDATAPATHDEPRECATIONMSG._serialized_start=5109
-  _CONFIGDATAPATHDEPRECATIONMSG._serialized_end=5235
-  _METRICATTRIBUTESRENAMED._serialized_start=5237
-  _METRICATTRIBUTESRENAMED._serialized_end=5283
-  _METRICATTRIBUTESRENAMEDMSG._serialized_start=5285
-  _METRICATTRIBUTESRENAMEDMSG._serialized_end=5407
-  _EXPOSURENAMEDEPRECATION._serialized_start=5409
-  _EXPOSURENAMEDEPRECATION._serialized_end=5452
-  _EXPOSURENAMEDEPRECATIONMSG._serialized_start=5454
-  _EXPOSURENAMEDEPRECATIONMSG._serialized_end=5576
-  _INTERNALDEPRECATION._serialized_start=5578
-  _INTERNALDEPRECATION._serialized_end=5672
-  _INTERNALDEPRECATIONMSG._serialized_start=5674
-  _INTERNALDEPRECATIONMSG._serialized_end=5788
-  _ENVIRONMENTVARIABLERENAMED._serialized_start=5790
-  _ENVIRONMENTVARIABLERENAMED._serialized_end=5854
-  _ENVIRONMENTVARIABLERENAMEDMSG._serialized_start=5857
-  _ENVIRONMENTVARIABLERENAMEDMSG._serialized_end=5985
-  _CONFIGLOGPATHDEPRECATION._serialized_start=5987
-  _CONFIGLOGPATHDEPRECATION._serialized_end=6038
-  _CONFIGLOGPATHDEPRECATIONMSG._serialized_start=6040
-  _CONFIGLOGPATHDEPRECATIONMSG._serialized_end=6164
-  _CONFIGTARGETPATHDEPRECATION._serialized_start=6166
-  _CONFIGTARGETPATHDEPRECATION._serialized_end=6220
-  _CONFIGTARGETPATHDEPRECATIONMSG._serialized_start=6223
-  _CONFIGTARGETPATHDEPRECATIONMSG._serialized_end=6353
-  _TESTSCONFIGDEPRECATION._serialized_start=6355
-  _TESTSCONFIGDEPRECATION._serialized_end=6422
-  _TESTSCONFIGDEPRECATIONMSG._serialized_start=6424
-  _TESTSCONFIGDEPRECATIONMSG._serialized_end=6544
-  _PROJECTFLAGSMOVEDDEPRECATION._serialized_start=6546
-  _PROJECTFLAGSMOVEDDEPRECATION._serialized_end=6576
-  _PROJECTFLAGSMOVEDDEPRECATIONMSG._serialized_start=6579
-  _PROJECTFLAGSMOVEDDEPRECATIONMSG._serialized_end=6711
-  _DEPRECATEDMODEL._serialized_start=6713
-  _DEPRECATEDMODEL._serialized_end=6799
-  _DEPRECATEDMODELMSG._serialized_start=6801
-  _DEPRECATEDMODELMSG._serialized_end=6907
-  _INPUTFILEDIFFERROR._serialized_start=6909
-  _INPUTFILEDIFFERROR._serialized_end=6964
-  _INPUTFILEDIFFERRORMSG._serialized_start=6966
-  _INPUTFILEDIFFERRORMSG._serialized_end=7078
-  _INVALIDVALUEFORFIELD._serialized_start=7080
-  _INVALIDVALUEFORFIELD._serialized_end=7143
-  _INVALIDVALUEFORFIELDMSG._serialized_start=7145
-  _INVALIDVALUEFORFIELDMSG._serialized_end=7261
-  _VALIDATIONWARNING._serialized_start=7263
-  _VALIDATIONWARNING._serialized_end=7344
-  _VALIDATIONWARNINGMSG._serialized_start=7346
-  _VALIDATIONWARNINGMSG._serialized_end=7456
-  _PARSEPERFINFOPATH._serialized_start=7458
-  _PARSEPERFINFOPATH._serialized_end=7491
-  _PARSEPERFINFOPATHMSG._serialized_start=7493
-  _PARSEPERFINFOPATHMSG._serialized_end=7603
-  _PARTIALPARSINGERRORPROCESSINGFILE._serialized_start=7605
-  _PARTIALPARSINGERRORPROCESSINGFILE._serialized_end=7654
-  _PARTIALPARSINGERRORPROCESSINGFILEMSG._serialized_start=7657
-  _PARTIALPARSINGERRORPROCESSINGFILEMSG._serialized_end=7799
-  _PARTIALPARSINGERROR._serialized_start=7802
-  _PARTIALPARSINGERROR._serialized_end=7936
-  _PARTIALPARSINGERROR_EXCINFOENTRY._serialized_start=7890
-  _PARTIALPARSINGERROR_EXCINFOENTRY._serialized_end=7936
-  _PARTIALPARSINGERRORMSG._serialized_start=7938
-  _PARTIALPARSINGERRORMSG._serialized_end=8052
-  _PARTIALPARSINGSKIPPARSING._serialized_start=8054
-  _PARTIALPARSINGSKIPPARSING._serialized_end=8081
-  _PARTIALPARSINGSKIPPARSINGMSG._serialized_start=8083
-  _PARTIALPARSINGSKIPPARSINGMSG._serialized_end=8209
-  _UNABLETOPARTIALPARSE._serialized_start=8211
-  _UNABLETOPARTIALPARSE._serialized_end=8249
-  _UNABLETOPARTIALPARSEMSG._serialized_start=8251
-  _UNABLETOPARTIALPARSEMSG._serialized_end=8367
-  _STATECHECKVARSHASH._serialized_start=8369
-  _STATECHECKVARSHASH._serialized_end=8471
-  _STATECHECKVARSHASHMSG._serialized_start=8473
-  _STATECHECKVARSHASHMSG._serialized_end=8585
-  _PARTIALPARSINGNOTENABLED._serialized_start=8587
-  _PARTIALPARSINGNOTENABLED._serialized_end=8613
-  _PARTIALPARSINGNOTENABLEDMSG._serialized_start=8615
-  _PARTIALPARSINGNOTENABLEDMSG._serialized_end=8739
-  _PARSEDFILELOADFAILED._serialized_start=8741
-  _PARSEDFILELOADFAILED._serialized_end=8808
-  _PARSEDFILELOADFAILEDMSG._serialized_start=8810
-  _PARSEDFILELOADFAILEDMSG._serialized_end=8926
-  _PARTIALPARSINGENABLED._serialized_start=8928
-  _PARTIALPARSINGENABLED._serialized_end=9000
-  _PARTIALPARSINGENABLEDMSG._serialized_start=9002
-  _PARTIALPARSINGENABLEDMSG._serialized_end=9120
-  _PARTIALPARSINGFILE._serialized_start=9122
-  _PARTIALPARSINGFILE._serialized_end=9178
-  _PARTIALPARSINGFILEMSG._serialized_start=9180
-  _PARTIALPARSINGFILEMSG._serialized_end=9292
-  _INVALIDDISABLEDTARGETINTESTNODE._serialized_start=9295
-  _INVALIDDISABLEDTARGETINTESTNODE._serialized_end=9470
-  _INVALIDDISABLEDTARGETINTESTNODEMSG._serialized_start=9473
-  _INVALIDDISABLEDTARGETINTESTNODEMSG._serialized_end=9611
-  _UNUSEDRESOURCECONFIGPATH._serialized_start=9613
-  _UNUSEDRESOURCECONFIGPATH._serialized_end=9668
-  _UNUSEDRESOURCECONFIGPATHMSG._serialized_start=9670
-  _UNUSEDRESOURCECONFIGPATHMSG._serialized_end=9794
-  _SEEDINCREASED._serialized_start=9796
-  _SEEDINCREASED._serialized_end=9847
-  _SEEDINCREASEDMSG._serialized_start=9849
-  _SEEDINCREASEDMSG._serialized_end=9951
-  _SEEDEXCEEDSLIMITSAMEPATH._serialized_start=9953
-  _SEEDEXCEEDSLIMITSAMEPATH._serialized_end=10015
-  _SEEDEXCEEDSLIMITSAMEPATHMSG._serialized_start=10017
-  _SEEDEXCEEDSLIMITSAMEPATHMSG._serialized_end=10141
-  _SEEDEXCEEDSLIMITANDPATHCHANGED._serialized_start=10143
-  _SEEDEXCEEDSLIMITANDPATHCHANGED._serialized_end=10211
-  _SEEDEXCEEDSLIMITANDPATHCHANGEDMSG._serialized_start=10214
-  _SEEDEXCEEDSLIMITANDPATHCHANGEDMSG._serialized_end=10350
-  _SEEDEXCEEDSLIMITCHECKSUMCHANGED._serialized_start=10352
-  _SEEDEXCEEDSLIMITCHECKSUMCHANGED._serialized_end=10444
-  _SEEDEXCEEDSLIMITCHECKSUMCHANGEDMSG._serialized_start=10447
-  _SEEDEXCEEDSLIMITCHECKSUMCHANGEDMSG._serialized_end=10585
-  _UNUSEDTABLES._serialized_start=10587
-  _UNUSEDTABLES._serialized_end=10624
-  _UNUSEDTABLESMSG._serialized_start=10626
-  _UNUSEDTABLESMSG._serialized_end=10726
-  _WRONGRESOURCESCHEMAFILE._serialized_start=10729
-  _WRONGRESOURCESCHEMAFILE._serialized_end=10864
-  _WRONGRESOURCESCHEMAFILEMSG._serialized_start=10866
-  _WRONGRESOURCESCHEMAFILEMSG._serialized_end=10988
-  _NONODEFORYAMLKEY._serialized_start=10990
-  _NONODEFORYAMLKEY._serialized_end=11065
-  _NONODEFORYAMLKEYMSG._serialized_start=11067
-  _NONODEFORYAMLKEYMSG._serialized_end=11175
-  _MACRONOTFOUNDFORPATCH._serialized_start=11177
-  _MACRONOTFOUNDFORPATCH._serialized_end=11220
-  _MACRONOTFOUNDFORPATCHMSG._serialized_start=11222
-  _MACRONOTFOUNDFORPATCHMSG._serialized_end=11340
-  _NODENOTFOUNDORDISABLED._serialized_start=11343
-  _NODENOTFOUNDORDISABLED._serialized_end=11527
-  _NODENOTFOUNDORDISABLEDMSG._serialized_start=11529
-  _NODENOTFOUNDORDISABLEDMSG._serialized_end=11649
-  _JINJALOGWARNING._serialized_start=11651
-  _JINJALOGWARNING._serialized_end=11723
-  _JINJALOGWARNINGMSG._serialized_start=11725
-  _JINJALOGWARNINGMSG._serialized_end=11831
-  _JINJALOGINFO._serialized_start=11833
-  _JINJALOGINFO._serialized_end=11902
-  _JINJALOGINFOMSG._serialized_start=11904
-  _JINJALOGINFOMSG._serialized_end=12004
-  _JINJALOGDEBUG._serialized_start=12006
-  _JINJALOGDEBUG._serialized_end=12076
-  _JINJALOGDEBUGMSG._serialized_start=12078
-  _JINJALOGDEBUGMSG._serialized_end=12180
-  _UNPINNEDREFNEWVERSIONAVAILABLE._serialized_start=12183
-  _UNPINNEDREFNEWVERSIONAVAILABLE._serialized_end=12357
-  _UNPINNEDREFNEWVERSIONAVAILABLEMSG._serialized_start=12360
-  _UNPINNEDREFNEWVERSIONAVAILABLEMSG._serialized_end=12496
-  _UPCOMINGREFERENCEDEPRECATION._serialized_start=12499
-  _UPCOMINGREFERENCEDEPRECATION._serialized_end=12697
-  _UPCOMINGREFERENCEDEPRECATIONMSG._serialized_start=12700
-  _UPCOMINGREFERENCEDEPRECATIONMSG._serialized_end=12832
-  _DEPRECATEDREFERENCE._serialized_start=12835
-  _DEPRECATEDREFERENCE._serialized_end=13024
-  _DEPRECATEDREFERENCEMSG._serialized_start=13026
-  _DEPRECATEDREFERENCEMSG._serialized_end=13140
-  _UNSUPPORTEDCONSTRAINTMATERIALIZATION._serialized_start=13142
-  _UNSUPPORTEDCONSTRAINTMATERIALIZATION._serialized_end=13202
-  _UNSUPPORTEDCONSTRAINTMATERIALIZATIONMSG._serialized_start=13205
-  _UNSUPPORTEDCONSTRAINTMATERIALIZATIONMSG._serialized_end=13353
-  _PARSEINLINENODEERROR._serialized_start=13355
-  _PARSEINLINENODEERROR._serialized_end=13432
-  _PARSEINLINENODEERRORMSG._serialized_start=13434
-  _PARSEINLINENODEERRORMSG._serialized_end=13550
-  _SEMANTICVALIDATIONFAILURE._serialized_start=13552
-  _SEMANTICVALIDATIONFAILURE._serialized_end=13592
-  _SEMANTICVALIDATIONFAILUREMSG._serialized_start=13594
-  _SEMANTICVALIDATIONFAILUREMSG._serialized_end=13720
-  _UNVERSIONEDBREAKINGCHANGE._serialized_start=13723
-  _UNVERSIONEDBREAKINGCHANGE._serialized_end=14117
-  _UNVERSIONEDBREAKINGCHANGEMSG._serialized_start=14119
-  _UNVERSIONEDBREAKINGCHANGEMSG._serialized_end=14245
-  _WARNSTATETARGETEQUAL._serialized_start=14247
-  _WARNSTATETARGETEQUAL._serialized_end=14289
-  _WARNSTATETARGETEQUALMSG._serialized_start=14291
-  _WARNSTATETARGETEQUALMSG._serialized_end=14407
-  _FRESHNESSCONFIGPROBLEM._serialized_start=14409
-  _FRESHNESSCONFIGPROBLEM._serialized_end=14446
-  _FRESHNESSCONFIGPROBLEMMSG._serialized_start=14448
-  _FRESHNESSCONFIGPROBLEMMSG._serialized_end=14568
-  _GITSPARSECHECKOUTSUBDIRECTORY._serialized_start=14570
-  _GITSPARSECHECKOUTSUBDIRECTORY._serialized_end=14617
-  _GITSPARSECHECKOUTSUBDIRECTORYMSG._serialized_start=14620
-  _GITSPARSECHECKOUTSUBDIRECTORYMSG._serialized_end=14754
-  _GITPROGRESSCHECKOUTREVISION._serialized_start=14756
-  _GITPROGRESSCHECKOUTREVISION._serialized_end=14803
-  _GITPROGRESSCHECKOUTREVISIONMSG._serialized_start=14806
-  _GITPROGRESSCHECKOUTREVISIONMSG._serialized_end=14936
-  _GITPROGRESSUPDATINGEXISTINGDEPENDENCY._serialized_start=14938
-  _GITPROGRESSUPDATINGEXISTINGDEPENDENCY._serialized_end=14990
-  _GITPROGRESSUPDATINGEXISTINGDEPENDENCYMSG._serialized_start=14993
-  _GITPROGRESSUPDATINGEXISTINGDEPENDENCYMSG._serialized_end=15143
-  _GITPROGRESSPULLINGNEWDEPENDENCY._serialized_start=15145
-  _GITPROGRESSPULLINGNEWDEPENDENCY._serialized_end=15191
-  _GITPROGRESSPULLINGNEWDEPENDENCYMSG._serialized_start=15194
-  _GITPROGRESSPULLINGNEWDEPENDENCYMSG._serialized_end=15332
-  _GITNOTHINGTODO._serialized_start=15334
-  _GITNOTHINGTODO._serialized_end=15363
-  _GITNOTHINGTODOMSG._serialized_start=15365
-  _GITNOTHINGTODOMSG._serialized_end=15469
-  _GITPROGRESSUPDATEDCHECKOUTRANGE._serialized_start=15471
-  _GITPROGRESSUPDATEDCHECKOUTRANGE._serialized_end=15540
-  _GITPROGRESSUPDATEDCHECKOUTRANGEMSG._serialized_start=15543
-  _GITPROGRESSUPDATEDCHECKOUTRANGEMSG._serialized_end=15681
-  _GITPROGRESSCHECKEDOUTAT._serialized_start=15683
-  _GITPROGRESSCHECKEDOUTAT._serialized_end=15725
-  _GITPROGRESSCHECKEDOUTATMSG._serialized_start=15727
-  _GITPROGRESSCHECKEDOUTATMSG._serialized_end=15849
-  _REGISTRYPROGRESSGETREQUEST._serialized_start=15851
-  _REGISTRYPROGRESSGETREQUEST._serialized_end=15892
-  _REGISTRYPROGRESSGETREQUESTMSG._serialized_start=15895
-  _REGISTRYPROGRESSGETREQUESTMSG._serialized_end=16023
-  _REGISTRYPROGRESSGETRESPONSE._serialized_start=16025
-  _REGISTRYPROGRESSGETRESPONSE._serialized_end=16086
-  _REGISTRYPROGRESSGETRESPONSEMSG._serialized_start=16089
-  _REGISTRYPROGRESSGETRESPONSEMSG._serialized_end=16219
-  _SELECTORREPORTINVALIDSELECTOR._serialized_start=16221
-  _SELECTORREPORTINVALIDSELECTOR._serialized_end=16316
-  _SELECTORREPORTINVALIDSELECTORMSG._serialized_start=16319
-  _SELECTORREPORTINVALIDSELECTORMSG._serialized_end=16453
-  _DEPSNOPACKAGESFOUND._serialized_start=16455
-  _DEPSNOPACKAGESFOUND._serialized_end=16476
-  _DEPSNOPACKAGESFOUNDMSG._serialized_start=16478
-  _DEPSNOPACKAGESFOUNDMSG._serialized_end=16592
-  _DEPSSTARTPACKAGEINSTALL._serialized_start=16594
-  _DEPSSTARTPACKAGEINSTALL._serialized_end=16641
-  _DEPSSTARTPACKAGEINSTALLMSG._serialized_start=16643
-  _DEPSSTARTPACKAGEINSTALLMSG._serialized_end=16765
-  _DEPSINSTALLINFO._serialized_start=16767
-  _DEPSINSTALLINFO._serialized_end=16806
-  _DEPSINSTALLINFOMSG._serialized_start=16808
-  _DEPSINSTALLINFOMSG._serialized_end=16914
-  _DEPSUPDATEAVAILABLE._serialized_start=16916
-  _DEPSUPDATEAVAILABLE._serialized_end=16961
-  _DEPSUPDATEAVAILABLEMSG._serialized_start=16963
-  _DEPSUPDATEAVAILABLEMSG._serialized_end=17077
-  _DEPSUPTODATE._serialized_start=17079
-  _DEPSUPTODATE._serialized_end=17093
-  _DEPSUPTODATEMSG._serialized_start=17095
-  _DEPSUPTODATEMSG._serialized_end=17195
-  _DEPSLISTSUBDIRECTORY._serialized_start=17197
-  _DEPSLISTSUBDIRECTORY._serialized_end=17241
-  _DEPSLISTSUBDIRECTORYMSG._serialized_start=17243
-  _DEPSLISTSUBDIRECTORYMSG._serialized_end=17359
-  _DEPSNOTIFYUPDATESAVAILABLE._serialized_start=17361
-  _DEPSNOTIFYUPDATESAVAILABLE._serialized_end=17407
-  _DEPSNOTIFYUPDATESAVAILABLEMSG._serialized_start=17410
-  _DEPSNOTIFYUPDATESAVAILABLEMSG._serialized_end=17538
-  _REGISTRYINDEXPROGRESSGETREQUEST._serialized_start=17540
-  _REGISTRYINDEXPROGRESSGETREQUEST._serialized_end=17586
-  _REGISTRYINDEXPROGRESSGETREQUESTMSG._serialized_start=17589
-  _REGISTRYINDEXPROGRESSGETREQUESTMSG._serialized_end=17727
-  _REGISTRYINDEXPROGRESSGETRESPONSE._serialized_start=17729
-  _REGISTRYINDEXPROGRESSGETRESPONSE._serialized_end=17795
-  _REGISTRYINDEXPROGRESSGETRESPONSEMSG._serialized_start=17798
-  _REGISTRYINDEXPROGRESSGETRESPONSEMSG._serialized_end=17938
-  _REGISTRYRESPONSEUNEXPECTEDTYPE._serialized_start=17940
-  _REGISTRYRESPONSEUNEXPECTEDTYPE._serialized_end=17990
-  _REGISTRYRESPONSEUNEXPECTEDTYPEMSG._serialized_start=17993
-  _REGISTRYRESPONSEUNEXPECTEDTYPEMSG._serialized_end=18129
-  _REGISTRYRESPONSEMISSINGTOPKEYS._serialized_start=18131
-  _REGISTRYRESPONSEMISSINGTOPKEYS._serialized_end=18181
-  _REGISTRYRESPONSEMISSINGTOPKEYSMSG._serialized_start=18184
-  _REGISTRYRESPONSEMISSINGTOPKEYSMSG._serialized_end=18320
-  _REGISTRYRESPONSEMISSINGNESTEDKEYS._serialized_start=18322
-  _REGISTRYRESPONSEMISSINGNESTEDKEYS._serialized_end=18375
-  _REGISTRYRESPONSEMISSINGNESTEDKEYSMSG._serialized_start=18378
-  _REGISTRYRESPONSEMISSINGNESTEDKEYSMSG._serialized_end=18520
-  _REGISTRYRESPONSEEXTRANESTEDKEYS._serialized_start=18522
-  _REGISTRYRESPONSEEXTRANESTEDKEYS._serialized_end=18573
-  _REGISTRYRESPONSEEXTRANESTEDKEYSMSG._serialized_start=18576
-  _REGISTRYRESPONSEEXTRANESTEDKEYSMSG._serialized_end=18714
-  _DEPSSETDOWNLOADDIRECTORY._serialized_start=18716
-  _DEPSSETDOWNLOADDIRECTORY._serialized_end=18756
-  _DEPSSETDOWNLOADDIRECTORYMSG._serialized_start=18758
-  _DEPSSETDOWNLOADDIRECTORYMSG._serialized_end=18882
-  _DEPSUNPINNED._serialized_start=18884
-  _DEPSUNPINNED._serialized_end=18929
-  _DEPSUNPINNEDMSG._serialized_start=18931
-  _DEPSUNPINNEDMSG._serialized_end=19031
-  _NONODESFORSELECTIONCRITERIA._serialized_start=19033
-  _NONODESFORSELECTIONCRITERIA._serialized_end=19080
-  _NONODESFORSELECTIONCRITERIAMSG._serialized_start=19083
-  _NONODESFORSELECTIONCRITERIAMSG._serialized_end=19213
-  _DEPSLOCKUPDATING._serialized_start=19215
-  _DEPSLOCKUPDATING._serialized_end=19256
-  _DEPSLOCKUPDATINGMSG._serialized_start=19258
-  _DEPSLOCKUPDATINGMSG._serialized_end=19366
-  _DEPSADDPACKAGE._serialized_start=19368
-  _DEPSADDPACKAGE._serialized_end=19450
-  _DEPSADDPACKAGEMSG._serialized_start=19452
-  _DEPSADDPACKAGEMSG._serialized_end=19556
-  _DEPSFOUNDDUPLICATEPACKAGE._serialized_start=19559
-  _DEPSFOUNDDUPLICATEPACKAGE._serialized_end=19726
-  _DEPSFOUNDDUPLICATEPACKAGE_REMOVEDPACKAGEENTRY._serialized_start=19673
-  _DEPSFOUNDDUPLICATEPACKAGE_REMOVEDPACKAGEENTRY._serialized_end=19726
-  _DEPSFOUNDDUPLICATEPACKAGEMSG._serialized_start=19728
-  _DEPSFOUNDDUPLICATEPACKAGEMSG._serialized_end=19854
-  _DEPSVERSIONMISSING._serialized_start=19856
-  _DEPSVERSIONMISSING._serialized_end=19892
-  _DEPSVERSIONMISSINGMSG._serialized_start=19894
-  _DEPSVERSIONMISSINGMSG._serialized_end=20006
-  _DEPSSCRUBBEDPACKAGENAME._serialized_start=20008
-  _DEPSSCRUBBEDPACKAGENAME._serialized_end=20055
-  _DEPSSCRUBBEDPACKAGENAMEMSG._serialized_start=20057
-  _DEPSSCRUBBEDPACKAGENAMEMSG._serialized_end=20179
-  _RUNNINGOPERATIONCAUGHTERROR._serialized_start=20181
-  _RUNNINGOPERATIONCAUGHTERROR._serialized_end=20223
-  _RUNNINGOPERATIONCAUGHTERRORMSG._serialized_start=20226
-  _RUNNINGOPERATIONCAUGHTERRORMSG._serialized_end=20356
-  _COMPILECOMPLETE._serialized_start=20358
-  _COMPILECOMPLETE._serialized_end=20375
-  _COMPILECOMPLETEMSG._serialized_start=20377
-  _COMPILECOMPLETEMSG._serialized_end=20483
-  _FRESHNESSCHECKCOMPLETE._serialized_start=20485
-  _FRESHNESSCHECKCOMPLETE._serialized_end=20509
-  _FRESHNESSCHECKCOMPLETEMSG._serialized_start=20511
-  _FRESHNESSCHECKCOMPLETEMSG._serialized_end=20631
-  _SEEDHEADER._serialized_start=20633
-  _SEEDHEADER._serialized_end=20661
-  _SEEDHEADERMSG._serialized_start=20663
-  _SEEDHEADERMSG._serialized_end=20759
-  _SQLRUNNEREXCEPTION._serialized_start=20761
-  _SQLRUNNEREXCEPTION._serialized_end=20854
-  _SQLRUNNEREXCEPTIONMSG._serialized_start=20856
-  _SQLRUNNEREXCEPTIONMSG._serialized_end=20968
-  _LOGTESTRESULT._serialized_start=20971
-  _LOGTESTRESULT._serialized_end=21139
-  _LOGTESTRESULTMSG._serialized_start=21141
-  _LOGTESTRESULTMSG._serialized_end=21243
-  _LOGSTARTLINE._serialized_start=21245
-  _LOGSTARTLINE._serialized_end=21352
-  _LOGSTARTLINEMSG._serialized_start=21354
-  _LOGSTARTLINEMSG._serialized_end=21454
-  _LOGMODELRESULT._serialized_start=21457
-  _LOGMODELRESULT._serialized_end=21606
-  _LOGMODELRESULTMSG._serialized_start=21608
-  _LOGMODELRESULTMSG._serialized_end=21712
-  _LOGSNAPSHOTRESULT._serialized_start=21715
-  _LOGSNAPSHOTRESULT._serialized_end=21989
-  _LOGSNAPSHOTRESULT_CFGENTRY._serialized_start=21947
-  _LOGSNAPSHOTRESULT_CFGENTRY._serialized_end=21989
-  _LOGSNAPSHOTRESULTMSG._serialized_start=21991
-  _LOGSNAPSHOTRESULTMSG._serialized_end=22101
-  _LOGSEEDRESULT._serialized_start=22104
-  _LOGSEEDRESULT._serialized_end=22289
-  _LOGSEEDRESULTMSG._serialized_start=22291
-  _LOGSEEDRESULTMSG._serialized_end=22393
-  _LOGFRESHNESSRESULT._serialized_start=22396
-  _LOGFRESHNESSRESULT._serialized_end=22569
-  _LOGFRESHNESSRESULTMSG._serialized_start=22571
-  _LOGFRESHNESSRESULTMSG._serialized_end=22683
-  _LOGCANCELLINE._serialized_start=22685
-  _LOGCANCELLINE._serialized_end=22719
-  _LOGCANCELLINEMSG._serialized_start=22721
-  _LOGCANCELLINEMSG._serialized_end=22823
-  _DEFAULTSELECTOR._serialized_start=22825
-  _DEFAULTSELECTOR._serialized_end=22856
-  _DEFAULTSELECTORMSG._serialized_start=22858
-  _DEFAULTSELECTORMSG._serialized_end=22964
-  _NODESTART._serialized_start=22966
-  _NODESTART._serialized_end=23019
-  _NODESTARTMSG._serialized_start=23021
-  _NODESTARTMSG._serialized_end=23115
-  _NODEFINISHED._serialized_start=23117
-  _NODEFINISHED._serialized_end=23220
-  _NODEFINISHEDMSG._serialized_start=23222
-  _NODEFINISHEDMSG._serialized_end=23322
-  _QUERYCANCELATIONUNSUPPORTED._serialized_start=23324
-  _QUERYCANCELATIONUNSUPPORTED._serialized_end=23367
-  _QUERYCANCELATIONUNSUPPORTEDMSG._serialized_start=23370
-  _QUERYCANCELATIONUNSUPPORTEDMSG._serialized_end=23500
-  _CONCURRENCYLINE._serialized_start=23502
-  _CONCURRENCYLINE._serialized_end=23581
-  _CONCURRENCYLINEMSG._serialized_start=23583
-  _CONCURRENCYLINEMSG._serialized_end=23689
-  _WRITINGINJECTEDSQLFORNODE._serialized_start=23691
-  _WRITINGINJECTEDSQLFORNODE._serialized_end=23760
-  _WRITINGINJECTEDSQLFORNODEMSG._serialized_start=23762
-  _WRITINGINJECTEDSQLFORNODEMSG._serialized_end=23888
-  _NODECOMPILING._serialized_start=23890
-  _NODECOMPILING._serialized_end=23947
-  _NODECOMPILINGMSG._serialized_start=23949
-  _NODECOMPILINGMSG._serialized_end=24051
-  _NODEEXECUTING._serialized_start=24053
-  _NODEEXECUTING._serialized_end=24110
-  _NODEEXECUTINGMSG._serialized_start=24112
-  _NODEEXECUTINGMSG._serialized_end=24214
-  _LOGHOOKSTARTLINE._serialized_start=24216
-  _LOGHOOKSTARTLINE._serialized_end=24325
-  _LOGHOOKSTARTLINEMSG._serialized_start=24327
-  _LOGHOOKSTARTLINEMSG._serialized_end=24435
-  _LOGHOOKENDLINE._serialized_start=24438
-  _LOGHOOKENDLINE._serialized_end=24585
-  _LOGHOOKENDLINEMSG._serialized_start=24587
-  _LOGHOOKENDLINEMSG._serialized_end=24691
-  _SKIPPINGDETAILS._serialized_start=24694
-  _SKIPPINGDETAILS._serialized_end=24841
-  _SKIPPINGDETAILSMSG._serialized_start=24843
-  _SKIPPINGDETAILSMSG._serialized_end=24949
-  _NOTHINGTODO._serialized_start=24951
-  _NOTHINGTODO._serialized_end=24964
-  _NOTHINGTODOMSG._serialized_start=24966
-  _NOTHINGTODOMSG._serialized_end=25064
-  _RUNNINGOPERATIONUNCAUGHTERROR._serialized_start=25066
-  _RUNNINGOPERATIONUNCAUGHTERROR._serialized_end=25110
-  _RUNNINGOPERATIONUNCAUGHTERRORMSG._serialized_start=25113
-  _RUNNINGOPERATIONUNCAUGHTERRORMSG._serialized_end=25247
-  _ENDRUNRESULT._serialized_start=25250
-  _ENDRUNRESULT._serialized_end=25397
-  _ENDRUNRESULTMSG._serialized_start=25399
-  _ENDRUNRESULTMSG._serialized_end=25499
-  _NONODESSELECTED._serialized_start=25501
-  _NONODESSELECTED._serialized_end=25518
-  _NONODESSELECTEDMSG._serialized_start=25520
-  _NONODESSELECTEDMSG._serialized_end=25626
-  _COMMANDCOMPLETED._serialized_start=25628
-  _COMMANDCOMPLETED._serialized_end=25747
-  _COMMANDCOMPLETEDMSG._serialized_start=25749
-  _COMMANDCOMPLETEDMSG._serialized_end=25857
-  _SHOWNODE._serialized_start=25859
-  _SHOWNODE._serialized_end=25966
-  _SHOWNODEMSG._serialized_start=25968
-  _SHOWNODEMSG._serialized_end=26060
-  _COMPILEDNODE._serialized_start=26062
-  _COMPILEDNODE._serialized_end=26174
-  _COMPILEDNODEMSG._serialized_start=26176
-  _COMPILEDNODEMSG._serialized_end=26276
-  _CATCHABLEEXCEPTIONONRUN._serialized_start=26278
-  _CATCHABLEEXCEPTIONONRUN._serialized_end=26376
-  _CATCHABLEEXCEPTIONONRUNMSG._serialized_start=26378
-  _CATCHABLEEXCEPTIONONRUNMSG._serialized_end=26500
-  _INTERNALERRORONRUN._serialized_start=26502
-  _INTERNALERRORONRUN._serialized_end=26597
-  _INTERNALERRORONRUNMSG._serialized_start=26599
-  _INTERNALERRORONRUNMSG._serialized_end=26711
-  _GENERICEXCEPTIONONRUN._serialized_start=26713
-  _GENERICEXCEPTIONONRUN._serialized_end=26830
-  _GENERICEXCEPTIONONRUNMSG._serialized_start=26832
-  _GENERICEXCEPTIONONRUNMSG._serialized_end=26950
-  _NODECONNECTIONRELEASEERROR._serialized_start=26952
-  _NODECONNECTIONRELEASEERROR._serialized_end=27030
-  _NODECONNECTIONRELEASEERRORMSG._serialized_start=27033
-  _NODECONNECTIONRELEASEERRORMSG._serialized_end=27161
-  _FOUNDSTATS._serialized_start=27163
-  _FOUNDSTATS._serialized_end=27194
-  _FOUNDSTATSMSG._serialized_start=27196
-  _FOUNDSTATSMSG._serialized_end=27292
-  _MAINKEYBOARDINTERRUPT._serialized_start=27294
-  _MAINKEYBOARDINTERRUPT._serialized_end=27317
-  _MAINKEYBOARDINTERRUPTMSG._serialized_start=27319
-  _MAINKEYBOARDINTERRUPTMSG._serialized_end=27437
-  _MAINENCOUNTEREDERROR._serialized_start=27439
-  _MAINENCOUNTEREDERROR._serialized_end=27474
-  _MAINENCOUNTEREDERRORMSG._serialized_start=27476
-  _MAINENCOUNTEREDERRORMSG._serialized_end=27592
-  _MAINSTACKTRACE._serialized_start=27594
-  _MAINSTACKTRACE._serialized_end=27631
-  _MAINSTACKTRACEMSG._serialized_start=27633
-  _MAINSTACKTRACEMSG._serialized_end=27737
-  _TIMINGINFOCOLLECTED._serialized_start=27739
-  _TIMINGINFOCOLLECTED._serialized_end=27851
-  _TIMINGINFOCOLLECTEDMSG._serialized_start=27853
-  _TIMINGINFOCOLLECTEDMSG._serialized_end=27967
-  _LOGDEBUGSTACKTRACE._serialized_start=27969
-  _LOGDEBUGSTACKTRACE._serialized_end=28007
-  _LOGDEBUGSTACKTRACEMSG._serialized_start=28009
-  _LOGDEBUGSTACKTRACEMSG._serialized_end=28121
-  _CHECKCLEANPATH._serialized_start=28123
-  _CHECKCLEANPATH._serialized_end=28153
-  _CHECKCLEANPATHMSG._serialized_start=28155
-  _CHECKCLEANPATHMSG._serialized_end=28259
-  _CONFIRMCLEANPATH._serialized_start=28261
-  _CONFIRMCLEANPATH._serialized_end=28293
-  _CONFIRMCLEANPATHMSG._serialized_start=28295
-  _CONFIRMCLEANPATHMSG._serialized_end=28403
-  _PROTECTEDCLEANPATH._serialized_start=28405
-  _PROTECTEDCLEANPATH._serialized_end=28439
-  _PROTECTEDCLEANPATHMSG._serialized_start=28441
-  _PROTECTEDCLEANPATHMSG._serialized_end=28553
-  _FINISHEDCLEANPATHS._serialized_start=28555
-  _FINISHEDCLEANPATHS._serialized_end=28575
-  _FINISHEDCLEANPATHSMSG._serialized_start=28577
-  _FINISHEDCLEANPATHSMSG._serialized_end=28689
-  _OPENCOMMAND._serialized_start=28691
-  _OPENCOMMAND._serialized_end=28744
-  _OPENCOMMANDMSG._serialized_start=28746
-  _OPENCOMMANDMSG._serialized_end=28844
-  _SERVINGDOCSPORT._serialized_start=28846
-  _SERVINGDOCSPORT._serialized_end=28894
-  _SERVINGDOCSPORTMSG._serialized_start=28896
-  _SERVINGDOCSPORTMSG._serialized_end=29002
-  _SERVINGDOCSACCESSINFO._serialized_start=29004
-  _SERVINGDOCSACCESSINFO._serialized_end=29041
-  _SERVINGDOCSACCESSINFOMSG._serialized_start=29043
-  _SERVINGDOCSACCESSINFOMSG._serialized_end=29161
-  _SERVINGDOCSEXITINFO._serialized_start=29163
-  _SERVINGDOCSEXITINFO._serialized_end=29184
-  _SERVINGDOCSEXITINFOMSG._serialized_start=29186
-  _SERVINGDOCSEXITINFOMSG._serialized_end=29300
-  _RUNRESULTWARNING._serialized_start=29302
-  _RUNRESULTWARNING._serialized_end=29376
-  _RUNRESULTWARNINGMSG._serialized_start=29378
-  _RUNRESULTWARNINGMSG._serialized_end=29486
-  _RUNRESULTFAILURE._serialized_start=29488
-  _RUNRESULTFAILURE._serialized_end=29562
-  _RUNRESULTFAILUREMSG._serialized_start=29564
-  _RUNRESULTFAILUREMSG._serialized_end=29672
-  _STATSLINE._serialized_start=29674
-  _STATSLINE._serialized_end=29781
-  _STATSLINE_STATSENTRY._serialized_start=29737
-  _STATSLINE_STATSENTRY._serialized_end=29781
-  _STATSLINEMSG._serialized_start=29783
-  _STATSLINEMSG._serialized_end=29877
-  _RUNRESULTERROR._serialized_start=29879
-  _RUNRESULTERROR._serialized_end=29908
-  _RUNRESULTERRORMSG._serialized_start=29910
-  _RUNRESULTERRORMSG._serialized_end=30014
-  _RUNRESULTERRORNOMESSAGE._serialized_start=30016
-  _RUNRESULTERRORNOMESSAGE._serialized_end=30057
-  _RUNRESULTERRORNOMESSAGEMSG._serialized_start=30059
-  _RUNRESULTERRORNOMESSAGEMSG._serialized_end=30181
-  _SQLCOMPILEDPATH._serialized_start=30183
-  _SQLCOMPILEDPATH._serialized_end=30214
-  _SQLCOMPILEDPATHMSG._serialized_start=30216
-  _SQLCOMPILEDPATHMSG._serialized_end=30322
-  _CHECKNODETESTFAILURE._serialized_start=30324
-  _CHECKNODETESTFAILURE._serialized_end=30369
-  _CHECKNODETESTFAILUREMSG._serialized_start=30371
-  _CHECKNODETESTFAILUREMSG._serialized_end=30487
-  _ENDOFRUNSUMMARY._serialized_start=30489
-  _ENDOFRUNSUMMARY._serialized_end=30576
-  _ENDOFRUNSUMMARYMSG._serialized_start=30578
-  _ENDOFRUNSUMMARYMSG._serialized_end=30684
-  _LOGSKIPBECAUSEERROR._serialized_start=30686
-  _LOGSKIPBECAUSEERROR._serialized_end=30771
-  _LOGSKIPBECAUSEERRORMSG._serialized_start=30773
-  _LOGSKIPBECAUSEERRORMSG._serialized_end=30887
-  _ENSUREGITINSTALLED._serialized_start=30889
-  _ENSUREGITINSTALLED._serialized_end=30909
-  _ENSUREGITINSTALLEDMSG._serialized_start=30911
-  _ENSUREGITINSTALLEDMSG._serialized_end=31023
-  _DEPSCREATINGLOCALSYMLINK._serialized_start=31025
-  _DEPSCREATINGLOCALSYMLINK._serialized_end=31051
-  _DEPSCREATINGLOCALSYMLINKMSG._serialized_start=31053
-  _DEPSCREATINGLOCALSYMLINKMSG._serialized_end=31177
-  _DEPSSYMLINKNOTAVAILABLE._serialized_start=31179
-  _DEPSSYMLINKNOTAVAILABLE._serialized_end=31204
-  _DEPSSYMLINKNOTAVAILABLEMSG._serialized_start=31206
-  _DEPSSYMLINKNOTAVAILABLEMSG._serialized_end=31328
-  _DISABLETRACKING._serialized_start=31330
-  _DISABLETRACKING._serialized_end=31347
-  _DISABLETRACKINGMSG._serialized_start=31349
-  _DISABLETRACKINGMSG._serialized_end=31455
-  _SENDINGEVENT._serialized_start=31457
-  _SENDINGEVENT._serialized_end=31487
-  _SENDINGEVENTMSG._serialized_start=31489
-  _SENDINGEVENTMSG._serialized_end=31589
-  _SENDEVENTFAILURE._serialized_start=31591
-  _SENDEVENTFAILURE._serialized_end=31609
-  _SENDEVENTFAILUREMSG._serialized_start=31611
-  _SENDEVENTFAILUREMSG._serialized_end=31719
-  _FLUSHEVENTS._serialized_start=31721
-  _FLUSHEVENTS._serialized_end=31734
-  _FLUSHEVENTSMSG._serialized_start=31736
-  _FLUSHEVENTSMSG._serialized_end=31834
-  _FLUSHEVENTSFAILURE._serialized_start=31836
-  _FLUSHEVENTSFAILURE._serialized_end=31856
-  _FLUSHEVENTSFAILUREMSG._serialized_start=31858
-  _FLUSHEVENTSFAILUREMSG._serialized_end=31970
-  _TRACKINGINITIALIZEFAILURE._serialized_start=31972
-  _TRACKINGINITIALIZEFAILURE._serialized_end=32017
-  _TRACKINGINITIALIZEFAILUREMSG._serialized_start=32019
-  _TRACKINGINITIALIZEFAILUREMSG._serialized_end=32145
-  _RUNRESULTWARNINGMESSAGE._serialized_start=32147
-  _RUNRESULTWARNINGMESSAGE._serialized_end=32185
-  _RUNRESULTWARNINGMESSAGEMSG._serialized_start=32187
-  _RUNRESULTWARNINGMESSAGEMSG._serialized_end=32309
-  _DEBUGCMDOUT._serialized_start=32311
-  _DEBUGCMDOUT._serialized_end=32337
-  _DEBUGCMDOUTMSG._serialized_start=32339
-  _DEBUGCMDOUTMSG._serialized_end=32437
-  _DEBUGCMDRESULT._serialized_start=32439
-  _DEBUGCMDRESULT._serialized_end=32468
-  _DEBUGCMDRESULTMSG._serialized_start=32470
-  _DEBUGCMDRESULTMSG._serialized_end=32574
-  _LISTCMDOUT._serialized_start=32576
-  _LISTCMDOUT._serialized_end=32601
-  _LISTCMDOUTMSG._serialized_start=32603
-  _LISTCMDOUTMSG._serialized_end=32699
-  _RESOURCEREPORT._serialized_start=32702
-  _RESOURCEREPORT._serialized_end=32938
-  _RESOURCEREPORTMSG._serialized_start=32940
-  _RESOURCEREPORTMSG._serialized_end=33044
+  _globals['_COREEVENTINFO_EXTRAENTRY']._options = None
+  _globals['_COREEVENTINFO_EXTRAENTRY']._serialized_options = b'8\001'
+  _globals['_MAINREPORTARGS_ARGSENTRY']._options = None
+  _globals['_MAINREPORTARGS_ARGSENTRY']._serialized_options = b'8\001'
+  _globals['_PARTIALPARSINGERROR_EXCINFOENTRY']._options = None
+  _globals['_PARTIALPARSINGERROR_EXCINFOENTRY']._serialized_options = b'8\001'
+  _globals['_DEPSFOUNDDUPLICATEPACKAGE_REMOVEDPACKAGEENTRY']._options = None
+  _globals['_DEPSFOUNDDUPLICATEPACKAGE_REMOVEDPACKAGEENTRY']._serialized_options = b'8\001'
+  _globals['_LOGSNAPSHOTRESULT_CFGENTRY']._options = None
+  _globals['_LOGSNAPSHOTRESULT_CFGENTRY']._serialized_options = b'8\001'
+  _globals['_STATSLINE_STATSENTRY']._options = None
+  _globals['_STATSLINE_STATSENTRY']._serialized_options = b'8\001'
+  _globals['_COREEVENTINFO']._serialized_start=97
+  _globals['_COREEVENTINFO']._serialized_end=378
+  _globals['_COREEVENTINFO_EXTRAENTRY']._serialized_start=334
+  _globals['_COREEVENTINFO_EXTRAENTRY']._serialized_end=378
+  _globals['_NODERELATION']._serialized_start=380
+  _globals['_NODERELATION']._serialized_end=466
+  _globals['_NODEINFO']._serialized_start=469
+  _globals['_NODEINFO']._serialized_end=742
+  _globals['_TIMINGINFOMSG']._serialized_start=744
+  _globals['_TIMINGINFOMSG']._serialized_end=871
+  _globals['_RUNRESULTMSG']._serialized_start=874
+  _globals['_RUNRESULTMSG']._serialized_end=1083
+  _globals['_COLUMNTYPE']._serialized_start=1085
+  _globals['_COLUMNTYPE']._serialized_end=1177
+  _globals['_COLUMNCONSTRAINT']._serialized_start=1179
+  _globals['_COLUMNCONSTRAINT']._serialized_end=1268
+  _globals['_MODELCONSTRAINT']._serialized_start=1270
+  _globals['_MODELCONSTRAINT']._serialized_end=1354
+  _globals['_MAINREPORTVERSION']._serialized_start=1356
+  _globals['_MAINREPORTVERSION']._serialized_end=1413
+  _globals['_MAINREPORTVERSIONMSG']._serialized_start=1415
+  _globals['_MAINREPORTVERSIONMSG']._serialized_end=1525
+  _globals['_MAINREPORTARGS']._serialized_start=1527
+  _globals['_MAINREPORTARGS']._serialized_end=1641
+  _globals['_MAINREPORTARGS_ARGSENTRY']._serialized_start=1598
+  _globals['_MAINREPORTARGS_ARGSENTRY']._serialized_end=1641
+  _globals['_MAINREPORTARGSMSG']._serialized_start=1643
+  _globals['_MAINREPORTARGSMSG']._serialized_end=1747
+  _globals['_MAINTRACKINGUSERSTATE']._serialized_start=1749
+  _globals['_MAINTRACKINGUSERSTATE']._serialized_end=1792
+  _globals['_MAINTRACKINGUSERSTATEMSG']._serialized_start=1794
+  _globals['_MAINTRACKINGUSERSTATEMSG']._serialized_end=1912
+  _globals['_MERGEDFROMSTATE']._serialized_start=1914
+  _globals['_MERGEDFROMSTATE']._serialized_end=1967
+  _globals['_MERGEDFROMSTATEMSG']._serialized_start=1969
+  _globals['_MERGEDFROMSTATEMSG']._serialized_end=2075
+  _globals['_MISSINGPROFILETARGET']._serialized_start=2077
+  _globals['_MISSINGPROFILETARGET']._serialized_end=2142
+  _globals['_MISSINGPROFILETARGETMSG']._serialized_start=2144
+  _globals['_MISSINGPROFILETARGETMSG']._serialized_end=2260
+  _globals['_INVALIDOPTIONYAML']._serialized_start=2262
+  _globals['_INVALIDOPTIONYAML']._serialized_end=2302
+  _globals['_INVALIDOPTIONYAMLMSG']._serialized_start=2304
+  _globals['_INVALIDOPTIONYAMLMSG']._serialized_end=2414
+  _globals['_LOGDBTPROJECTERROR']._serialized_start=2416
+  _globals['_LOGDBTPROJECTERROR']._serialized_end=2449
+  _globals['_LOGDBTPROJECTERRORMSG']._serialized_start=2451
+  _globals['_LOGDBTPROJECTERRORMSG']._serialized_end=2563
+  _globals['_LOGDBTPROFILEERROR']._serialized_start=2565
+  _globals['_LOGDBTPROFILEERROR']._serialized_end=2616
+  _globals['_LOGDBTPROFILEERRORMSG']._serialized_start=2618
+  _globals['_LOGDBTPROFILEERRORMSG']._serialized_end=2730
+  _globals['_STARTERPROJECTPATH']._serialized_start=2732
+  _globals['_STARTERPROJECTPATH']._serialized_end=2765
+  _globals['_STARTERPROJECTPATHMSG']._serialized_start=2767
+  _globals['_STARTERPROJECTPATHMSG']._serialized_end=2879
+  _globals['_CONFIGFOLDERDIRECTORY']._serialized_start=2881
+  _globals['_CONFIGFOLDERDIRECTORY']._serialized_end=2917
+  _globals['_CONFIGFOLDERDIRECTORYMSG']._serialized_start=2919
+  _globals['_CONFIGFOLDERDIRECTORYMSG']._serialized_end=3037
+  _globals['_NOSAMPLEPROFILEFOUND']._serialized_start=3039
+  _globals['_NOSAMPLEPROFILEFOUND']._serialized_end=3078
+  _globals['_NOSAMPLEPROFILEFOUNDMSG']._serialized_start=3080
+  _globals['_NOSAMPLEPROFILEFOUNDMSG']._serialized_end=3196
+  _globals['_PROFILEWRITTENWITHSAMPLE']._serialized_start=3198
+  _globals['_PROFILEWRITTENWITHSAMPLE']._serialized_end=3252
+  _globals['_PROFILEWRITTENWITHSAMPLEMSG']._serialized_start=3254
+  _globals['_PROFILEWRITTENWITHSAMPLEMSG']._serialized_end=3378
+  _globals['_PROFILEWRITTENWITHTARGETTEMPLATEYAML']._serialized_start=3380
+  _globals['_PROFILEWRITTENWITHTARGETTEMPLATEYAML']._serialized_end=3446
+  _globals['_PROFILEWRITTENWITHTARGETTEMPLATEYAMLMSG']._serialized_start=3449
+  _globals['_PROFILEWRITTENWITHTARGETTEMPLATEYAMLMSG']._serialized_end=3597
+  _globals['_PROFILEWRITTENWITHPROJECTTEMPLATEYAML']._serialized_start=3599
+  _globals['_PROFILEWRITTENWITHPROJECTTEMPLATEYAML']._serialized_end=3666
+  _globals['_PROFILEWRITTENWITHPROJECTTEMPLATEYAMLMSG']._serialized_start=3669
+  _globals['_PROFILEWRITTENWITHPROJECTTEMPLATEYAMLMSG']._serialized_end=3819
+  _globals['_SETTINGUPPROFILE']._serialized_start=3821
+  _globals['_SETTINGUPPROFILE']._serialized_end=3839
+  _globals['_SETTINGUPPROFILEMSG']._serialized_start=3841
+  _globals['_SETTINGUPPROFILEMSG']._serialized_end=3949
+  _globals['_INVALIDPROFILETEMPLATEYAML']._serialized_start=3951
+  _globals['_INVALIDPROFILETEMPLATEYAML']._serialized_end=3979
+  _globals['_INVALIDPROFILETEMPLATEYAMLMSG']._serialized_start=3982
+  _globals['_INVALIDPROFILETEMPLATEYAMLMSG']._serialized_end=4110
+  _globals['_PROJECTNAMEALREADYEXISTS']._serialized_start=4112
+  _globals['_PROJECTNAMEALREADYEXISTS']._serialized_end=4152
+  _globals['_PROJECTNAMEALREADYEXISTSMSG']._serialized_start=4154
+  _globals['_PROJECTNAMEALREADYEXISTSMSG']._serialized_end=4278
+  _globals['_PROJECTCREATED']._serialized_start=4280
+  _globals['_PROJECTCREATED']._serialized_end=4355
+  _globals['_PROJECTCREATEDMSG']._serialized_start=4357
+  _globals['_PROJECTCREATEDMSG']._serialized_end=4461
+  _globals['_PACKAGEREDIRECTDEPRECATION']._serialized_start=4463
+  _globals['_PACKAGEREDIRECTDEPRECATION']._serialized_end=4527
+  _globals['_PACKAGEREDIRECTDEPRECATIONMSG']._serialized_start=4530
+  _globals['_PACKAGEREDIRECTDEPRECATIONMSG']._serialized_end=4658
+  _globals['_PACKAGEINSTALLPATHDEPRECATION']._serialized_start=4660
+  _globals['_PACKAGEINSTALLPATHDEPRECATION']._serialized_end=4691
+  _globals['_PACKAGEINSTALLPATHDEPRECATIONMSG']._serialized_start=4694
+  _globals['_PACKAGEINSTALLPATHDEPRECATIONMSG']._serialized_end=4828
+  _globals['_CONFIGSOURCEPATHDEPRECATION']._serialized_start=4830
+  _globals['_CONFIGSOURCEPATHDEPRECATION']._serialized_end=4902
+  _globals['_CONFIGSOURCEPATHDEPRECATIONMSG']._serialized_start=4905
+  _globals['_CONFIGSOURCEPATHDEPRECATIONMSG']._serialized_end=5035
+  _globals['_CONFIGDATAPATHDEPRECATION']._serialized_start=5037
+  _globals['_CONFIGDATAPATHDEPRECATION']._serialized_end=5107
+  _globals['_CONFIGDATAPATHDEPRECATIONMSG']._serialized_start=5109
+  _globals['_CONFIGDATAPATHDEPRECATIONMSG']._serialized_end=5235
+  _globals['_METRICATTRIBUTESRENAMED']._serialized_start=5237
+  _globals['_METRICATTRIBUTESRENAMED']._serialized_end=5283
+  _globals['_METRICATTRIBUTESRENAMEDMSG']._serialized_start=5285
+  _globals['_METRICATTRIBUTESRENAMEDMSG']._serialized_end=5407
+  _globals['_EXPOSURENAMEDEPRECATION']._serialized_start=5409
+  _globals['_EXPOSURENAMEDEPRECATION']._serialized_end=5452
+  _globals['_EXPOSURENAMEDEPRECATIONMSG']._serialized_start=5454
+  _globals['_EXPOSURENAMEDEPRECATIONMSG']._serialized_end=5576
+  _globals['_INTERNALDEPRECATION']._serialized_start=5578
+  _globals['_INTERNALDEPRECATION']._serialized_end=5672
+  _globals['_INTERNALDEPRECATIONMSG']._serialized_start=5674
+  _globals['_INTERNALDEPRECATIONMSG']._serialized_end=5788
+  _globals['_ENVIRONMENTVARIABLERENAMED']._serialized_start=5790
+  _globals['_ENVIRONMENTVARIABLERENAMED']._serialized_end=5854
+  _globals['_ENVIRONMENTVARIABLERENAMEDMSG']._serialized_start=5857
+  _globals['_ENVIRONMENTVARIABLERENAMEDMSG']._serialized_end=5985
+  _globals['_CONFIGLOGPATHDEPRECATION']._serialized_start=5987
+  _globals['_CONFIGLOGPATHDEPRECATION']._serialized_end=6038
+  _globals['_CONFIGLOGPATHDEPRECATIONMSG']._serialized_start=6040
+  _globals['_CONFIGLOGPATHDEPRECATIONMSG']._serialized_end=6164
+  _globals['_CONFIGTARGETPATHDEPRECATION']._serialized_start=6166
+  _globals['_CONFIGTARGETPATHDEPRECATION']._serialized_end=6220
+  _globals['_CONFIGTARGETPATHDEPRECATIONMSG']._serialized_start=6223
+  _globals['_CONFIGTARGETPATHDEPRECATIONMSG']._serialized_end=6353
+  _globals['_TESTSCONFIGDEPRECATION']._serialized_start=6355
+  _globals['_TESTSCONFIGDEPRECATION']._serialized_end=6422
+  _globals['_TESTSCONFIGDEPRECATIONMSG']._serialized_start=6424
+  _globals['_TESTSCONFIGDEPRECATIONMSG']._serialized_end=6544
+  _globals['_PROJECTFLAGSMOVEDDEPRECATION']._serialized_start=6546
+  _globals['_PROJECTFLAGSMOVEDDEPRECATION']._serialized_end=6576
+  _globals['_PROJECTFLAGSMOVEDDEPRECATIONMSG']._serialized_start=6579
+  _globals['_PROJECTFLAGSMOVEDDEPRECATIONMSG']._serialized_end=6711
+  _globals['_SPACESINMODELNAMEDEPRECATION']._serialized_start=6713
+  _globals['_SPACESINMODELNAMEDEPRECATION']._serialized_end=6801
+  _globals['_SPACESINMODELNAMEDEPRECATIONMSG']._serialized_start=6804
+  _globals['_SPACESINMODELNAMEDEPRECATIONMSG']._serialized_end=6936
+  _globals['_TOTALMODELNAMESWITHSPACESDEPRECATION']._serialized_start=6938
+  _globals['_TOTALMODELNAMESWITHSPACESDEPRECATION']._serialized_end=7045
+  _globals['_PACKAGEMATERIALIZATIONOVERRIDEDEPRECATION']._serialized_start=7047
+  _globals['_PACKAGEMATERIALIZATIONOVERRIDEDEPRECATION']._serialized_end=7142
+  _globals['_PACKAGEMATERIALIZATIONOVERRIDEDEPRECATIONMSG']._serialized_start=7145
+  _globals['_PACKAGEMATERIALIZATIONOVERRIDEDEPRECATIONMSG']._serialized_end=7303
+  _globals['_TOTALMODELNAMESWITHSPACESDEPRECATIONMSG']._serialized_start=7306
+  _globals['_TOTALMODELNAMESWITHSPACESDEPRECATIONMSG']._serialized_end=7454
+  _globals['_DEPRECATEDMODEL']._serialized_start=7456
+  _globals['_DEPRECATEDMODEL']._serialized_end=7542
+  _globals['_DEPRECATEDMODELMSG']._serialized_start=7544
+  _globals['_DEPRECATEDMODELMSG']._serialized_end=7650
+  _globals['_INPUTFILEDIFFERROR']._serialized_start=7652
+  _globals['_INPUTFILEDIFFERROR']._serialized_end=7707
+  _globals['_INPUTFILEDIFFERRORMSG']._serialized_start=7709
+  _globals['_INPUTFILEDIFFERRORMSG']._serialized_end=7821
+  _globals['_INVALIDVALUEFORFIELD']._serialized_start=7823
+  _globals['_INVALIDVALUEFORFIELD']._serialized_end=7886
+  _globals['_INVALIDVALUEFORFIELDMSG']._serialized_start=7888
_globals['_INVALIDVALUEFORFIELDMSG']._serialized_end=8004 + _globals['_VALIDATIONWARNING']._serialized_start=8006 + _globals['_VALIDATIONWARNING']._serialized_end=8087 + _globals['_VALIDATIONWARNINGMSG']._serialized_start=8089 + _globals['_VALIDATIONWARNINGMSG']._serialized_end=8199 + _globals['_PARSEPERFINFOPATH']._serialized_start=8201 + _globals['_PARSEPERFINFOPATH']._serialized_end=8234 + _globals['_PARSEPERFINFOPATHMSG']._serialized_start=8236 + _globals['_PARSEPERFINFOPATHMSG']._serialized_end=8346 + _globals['_PARTIALPARSINGERRORPROCESSINGFILE']._serialized_start=8348 + _globals['_PARTIALPARSINGERRORPROCESSINGFILE']._serialized_end=8397 + _globals['_PARTIALPARSINGERRORPROCESSINGFILEMSG']._serialized_start=8400 + _globals['_PARTIALPARSINGERRORPROCESSINGFILEMSG']._serialized_end=8542 + _globals['_PARTIALPARSINGERROR']._serialized_start=8545 + _globals['_PARTIALPARSINGERROR']._serialized_end=8679 + _globals['_PARTIALPARSINGERROR_EXCINFOENTRY']._serialized_start=8633 + _globals['_PARTIALPARSINGERROR_EXCINFOENTRY']._serialized_end=8679 + _globals['_PARTIALPARSINGERRORMSG']._serialized_start=8681 + _globals['_PARTIALPARSINGERRORMSG']._serialized_end=8795 + _globals['_PARTIALPARSINGSKIPPARSING']._serialized_start=8797 + _globals['_PARTIALPARSINGSKIPPARSING']._serialized_end=8824 + _globals['_PARTIALPARSINGSKIPPARSINGMSG']._serialized_start=8826 + _globals['_PARTIALPARSINGSKIPPARSINGMSG']._serialized_end=8952 + _globals['_UNABLETOPARTIALPARSE']._serialized_start=8954 + _globals['_UNABLETOPARTIALPARSE']._serialized_end=8992 + _globals['_UNABLETOPARTIALPARSEMSG']._serialized_start=8994 + _globals['_UNABLETOPARTIALPARSEMSG']._serialized_end=9110 + _globals['_STATECHECKVARSHASH']._serialized_start=9112 + _globals['_STATECHECKVARSHASH']._serialized_end=9214 + _globals['_STATECHECKVARSHASHMSG']._serialized_start=9216 + _globals['_STATECHECKVARSHASHMSG']._serialized_end=9328 + _globals['_PARTIALPARSINGNOTENABLED']._serialized_start=9330 + _globals['_PARTIALPARSINGNOTENABLED']._serialized_end=9356 + _globals['_PARTIALPARSINGNOTENABLEDMSG']._serialized_start=9358 + _globals['_PARTIALPARSINGNOTENABLEDMSG']._serialized_end=9482 + _globals['_PARSEDFILELOADFAILED']._serialized_start=9484 + _globals['_PARSEDFILELOADFAILED']._serialized_end=9551 + _globals['_PARSEDFILELOADFAILEDMSG']._serialized_start=9553 + _globals['_PARSEDFILELOADFAILEDMSG']._serialized_end=9669 + _globals['_PARTIALPARSINGENABLED']._serialized_start=9671 + _globals['_PARTIALPARSINGENABLED']._serialized_end=9743 + _globals['_PARTIALPARSINGENABLEDMSG']._serialized_start=9745 + _globals['_PARTIALPARSINGENABLEDMSG']._serialized_end=9863 + _globals['_PARTIALPARSINGFILE']._serialized_start=9865 + _globals['_PARTIALPARSINGFILE']._serialized_end=9921 + _globals['_PARTIALPARSINGFILEMSG']._serialized_start=9923 + _globals['_PARTIALPARSINGFILEMSG']._serialized_end=10035 + _globals['_INVALIDDISABLEDTARGETINTESTNODE']._serialized_start=10038 + _globals['_INVALIDDISABLEDTARGETINTESTNODE']._serialized_end=10213 + _globals['_INVALIDDISABLEDTARGETINTESTNODEMSG']._serialized_start=10216 + _globals['_INVALIDDISABLEDTARGETINTESTNODEMSG']._serialized_end=10354 + _globals['_UNUSEDRESOURCECONFIGPATH']._serialized_start=10356 + _globals['_UNUSEDRESOURCECONFIGPATH']._serialized_end=10411 + _globals['_UNUSEDRESOURCECONFIGPATHMSG']._serialized_start=10413 + _globals['_UNUSEDRESOURCECONFIGPATHMSG']._serialized_end=10537 + _globals['_SEEDINCREASED']._serialized_start=10539 + _globals['_SEEDINCREASED']._serialized_end=10590 + 
_globals['_SEEDINCREASEDMSG']._serialized_start=10592 + _globals['_SEEDINCREASEDMSG']._serialized_end=10694 + _globals['_SEEDEXCEEDSLIMITSAMEPATH']._serialized_start=10696 + _globals['_SEEDEXCEEDSLIMITSAMEPATH']._serialized_end=10758 + _globals['_SEEDEXCEEDSLIMITSAMEPATHMSG']._serialized_start=10760 + _globals['_SEEDEXCEEDSLIMITSAMEPATHMSG']._serialized_end=10884 + _globals['_SEEDEXCEEDSLIMITANDPATHCHANGED']._serialized_start=10886 + _globals['_SEEDEXCEEDSLIMITANDPATHCHANGED']._serialized_end=10954 + _globals['_SEEDEXCEEDSLIMITANDPATHCHANGEDMSG']._serialized_start=10957 + _globals['_SEEDEXCEEDSLIMITANDPATHCHANGEDMSG']._serialized_end=11093 + _globals['_SEEDEXCEEDSLIMITCHECKSUMCHANGED']._serialized_start=11095 + _globals['_SEEDEXCEEDSLIMITCHECKSUMCHANGED']._serialized_end=11187 + _globals['_SEEDEXCEEDSLIMITCHECKSUMCHANGEDMSG']._serialized_start=11190 + _globals['_SEEDEXCEEDSLIMITCHECKSUMCHANGEDMSG']._serialized_end=11328 + _globals['_UNUSEDTABLES']._serialized_start=11330 + _globals['_UNUSEDTABLES']._serialized_end=11367 + _globals['_UNUSEDTABLESMSG']._serialized_start=11369 + _globals['_UNUSEDTABLESMSG']._serialized_end=11469 + _globals['_WRONGRESOURCESCHEMAFILE']._serialized_start=11472 + _globals['_WRONGRESOURCESCHEMAFILE']._serialized_end=11607 + _globals['_WRONGRESOURCESCHEMAFILEMSG']._serialized_start=11609 + _globals['_WRONGRESOURCESCHEMAFILEMSG']._serialized_end=11731 + _globals['_NONODEFORYAMLKEY']._serialized_start=11733 + _globals['_NONODEFORYAMLKEY']._serialized_end=11808 + _globals['_NONODEFORYAMLKEYMSG']._serialized_start=11810 + _globals['_NONODEFORYAMLKEYMSG']._serialized_end=11918 + _globals['_MACRONOTFOUNDFORPATCH']._serialized_start=11920 + _globals['_MACRONOTFOUNDFORPATCH']._serialized_end=11963 + _globals['_MACRONOTFOUNDFORPATCHMSG']._serialized_start=11965 + _globals['_MACRONOTFOUNDFORPATCHMSG']._serialized_end=12083 + _globals['_NODENOTFOUNDORDISABLED']._serialized_start=12086 + _globals['_NODENOTFOUNDORDISABLED']._serialized_end=12270 + _globals['_NODENOTFOUNDORDISABLEDMSG']._serialized_start=12272 + _globals['_NODENOTFOUNDORDISABLEDMSG']._serialized_end=12392 + _globals['_JINJALOGWARNING']._serialized_start=12394 + _globals['_JINJALOGWARNING']._serialized_end=12466 + _globals['_JINJALOGWARNINGMSG']._serialized_start=12468 + _globals['_JINJALOGWARNINGMSG']._serialized_end=12574 + _globals['_JINJALOGINFO']._serialized_start=12576 + _globals['_JINJALOGINFO']._serialized_end=12645 + _globals['_JINJALOGINFOMSG']._serialized_start=12647 + _globals['_JINJALOGINFOMSG']._serialized_end=12747 + _globals['_JINJALOGDEBUG']._serialized_start=12749 + _globals['_JINJALOGDEBUG']._serialized_end=12819 + _globals['_JINJALOGDEBUGMSG']._serialized_start=12821 + _globals['_JINJALOGDEBUGMSG']._serialized_end=12923 + _globals['_UNPINNEDREFNEWVERSIONAVAILABLE']._serialized_start=12926 + _globals['_UNPINNEDREFNEWVERSIONAVAILABLE']._serialized_end=13100 + _globals['_UNPINNEDREFNEWVERSIONAVAILABLEMSG']._serialized_start=13103 + _globals['_UNPINNEDREFNEWVERSIONAVAILABLEMSG']._serialized_end=13239 + _globals['_UPCOMINGREFERENCEDEPRECATION']._serialized_start=13242 + _globals['_UPCOMINGREFERENCEDEPRECATION']._serialized_end=13440 + _globals['_UPCOMINGREFERENCEDEPRECATIONMSG']._serialized_start=13443 + _globals['_UPCOMINGREFERENCEDEPRECATIONMSG']._serialized_end=13575 + _globals['_DEPRECATEDREFERENCE']._serialized_start=13578 + _globals['_DEPRECATEDREFERENCE']._serialized_end=13767 + _globals['_DEPRECATEDREFERENCEMSG']._serialized_start=13769 + 
_globals['_DEPRECATEDREFERENCEMSG']._serialized_end=13883 + _globals['_UNSUPPORTEDCONSTRAINTMATERIALIZATION']._serialized_start=13885 + _globals['_UNSUPPORTEDCONSTRAINTMATERIALIZATION']._serialized_end=13945 + _globals['_UNSUPPORTEDCONSTRAINTMATERIALIZATIONMSG']._serialized_start=13948 + _globals['_UNSUPPORTEDCONSTRAINTMATERIALIZATIONMSG']._serialized_end=14096 + _globals['_PARSEINLINENODEERROR']._serialized_start=14098 + _globals['_PARSEINLINENODEERROR']._serialized_end=14175 + _globals['_PARSEINLINENODEERRORMSG']._serialized_start=14177 + _globals['_PARSEINLINENODEERRORMSG']._serialized_end=14293 + _globals['_SEMANTICVALIDATIONFAILURE']._serialized_start=14295 + _globals['_SEMANTICVALIDATIONFAILURE']._serialized_end=14335 + _globals['_SEMANTICVALIDATIONFAILUREMSG']._serialized_start=14337 + _globals['_SEMANTICVALIDATIONFAILUREMSG']._serialized_end=14463 + _globals['_UNVERSIONEDBREAKINGCHANGE']._serialized_start=14466 + _globals['_UNVERSIONEDBREAKINGCHANGE']._serialized_end=14860 + _globals['_UNVERSIONEDBREAKINGCHANGEMSG']._serialized_start=14862 + _globals['_UNVERSIONEDBREAKINGCHANGEMSG']._serialized_end=14988 + _globals['_WARNSTATETARGETEQUAL']._serialized_start=14990 + _globals['_WARNSTATETARGETEQUAL']._serialized_end=15032 + _globals['_WARNSTATETARGETEQUALMSG']._serialized_start=15034 + _globals['_WARNSTATETARGETEQUALMSG']._serialized_end=15150 + _globals['_FRESHNESSCONFIGPROBLEM']._serialized_start=15152 + _globals['_FRESHNESSCONFIGPROBLEM']._serialized_end=15189 + _globals['_FRESHNESSCONFIGPROBLEMMSG']._serialized_start=15191 + _globals['_FRESHNESSCONFIGPROBLEMMSG']._serialized_end=15311 + _globals['_GITSPARSECHECKOUTSUBDIRECTORY']._serialized_start=15313 + _globals['_GITSPARSECHECKOUTSUBDIRECTORY']._serialized_end=15360 + _globals['_GITSPARSECHECKOUTSUBDIRECTORYMSG']._serialized_start=15363 + _globals['_GITSPARSECHECKOUTSUBDIRECTORYMSG']._serialized_end=15497 + _globals['_GITPROGRESSCHECKOUTREVISION']._serialized_start=15499 + _globals['_GITPROGRESSCHECKOUTREVISION']._serialized_end=15546 + _globals['_GITPROGRESSCHECKOUTREVISIONMSG']._serialized_start=15549 + _globals['_GITPROGRESSCHECKOUTREVISIONMSG']._serialized_end=15679 + _globals['_GITPROGRESSUPDATINGEXISTINGDEPENDENCY']._serialized_start=15681 + _globals['_GITPROGRESSUPDATINGEXISTINGDEPENDENCY']._serialized_end=15733 + _globals['_GITPROGRESSUPDATINGEXISTINGDEPENDENCYMSG']._serialized_start=15736 + _globals['_GITPROGRESSUPDATINGEXISTINGDEPENDENCYMSG']._serialized_end=15886 + _globals['_GITPROGRESSPULLINGNEWDEPENDENCY']._serialized_start=15888 + _globals['_GITPROGRESSPULLINGNEWDEPENDENCY']._serialized_end=15934 + _globals['_GITPROGRESSPULLINGNEWDEPENDENCYMSG']._serialized_start=15937 + _globals['_GITPROGRESSPULLINGNEWDEPENDENCYMSG']._serialized_end=16075 + _globals['_GITNOTHINGTODO']._serialized_start=16077 + _globals['_GITNOTHINGTODO']._serialized_end=16106 + _globals['_GITNOTHINGTODOMSG']._serialized_start=16108 + _globals['_GITNOTHINGTODOMSG']._serialized_end=16212 + _globals['_GITPROGRESSUPDATEDCHECKOUTRANGE']._serialized_start=16214 + _globals['_GITPROGRESSUPDATEDCHECKOUTRANGE']._serialized_end=16283 + _globals['_GITPROGRESSUPDATEDCHECKOUTRANGEMSG']._serialized_start=16286 + _globals['_GITPROGRESSUPDATEDCHECKOUTRANGEMSG']._serialized_end=16424 + _globals['_GITPROGRESSCHECKEDOUTAT']._serialized_start=16426 + _globals['_GITPROGRESSCHECKEDOUTAT']._serialized_end=16468 + _globals['_GITPROGRESSCHECKEDOUTATMSG']._serialized_start=16470 + _globals['_GITPROGRESSCHECKEDOUTATMSG']._serialized_end=16592 + 
_globals['_REGISTRYPROGRESSGETREQUEST']._serialized_start=16594 + _globals['_REGISTRYPROGRESSGETREQUEST']._serialized_end=16635 + _globals['_REGISTRYPROGRESSGETREQUESTMSG']._serialized_start=16638 + _globals['_REGISTRYPROGRESSGETREQUESTMSG']._serialized_end=16766 + _globals['_REGISTRYPROGRESSGETRESPONSE']._serialized_start=16768 + _globals['_REGISTRYPROGRESSGETRESPONSE']._serialized_end=16829 + _globals['_REGISTRYPROGRESSGETRESPONSEMSG']._serialized_start=16832 + _globals['_REGISTRYPROGRESSGETRESPONSEMSG']._serialized_end=16962 + _globals['_SELECTORREPORTINVALIDSELECTOR']._serialized_start=16964 + _globals['_SELECTORREPORTINVALIDSELECTOR']._serialized_end=17059 + _globals['_SELECTORREPORTINVALIDSELECTORMSG']._serialized_start=17062 + _globals['_SELECTORREPORTINVALIDSELECTORMSG']._serialized_end=17196 + _globals['_DEPSNOPACKAGESFOUND']._serialized_start=17198 + _globals['_DEPSNOPACKAGESFOUND']._serialized_end=17219 + _globals['_DEPSNOPACKAGESFOUNDMSG']._serialized_start=17221 + _globals['_DEPSNOPACKAGESFOUNDMSG']._serialized_end=17335 + _globals['_DEPSSTARTPACKAGEINSTALL']._serialized_start=17337 + _globals['_DEPSSTARTPACKAGEINSTALL']._serialized_end=17384 + _globals['_DEPSSTARTPACKAGEINSTALLMSG']._serialized_start=17386 + _globals['_DEPSSTARTPACKAGEINSTALLMSG']._serialized_end=17508 + _globals['_DEPSINSTALLINFO']._serialized_start=17510 + _globals['_DEPSINSTALLINFO']._serialized_end=17549 + _globals['_DEPSINSTALLINFOMSG']._serialized_start=17551 + _globals['_DEPSINSTALLINFOMSG']._serialized_end=17657 + _globals['_DEPSUPDATEAVAILABLE']._serialized_start=17659 + _globals['_DEPSUPDATEAVAILABLE']._serialized_end=17704 + _globals['_DEPSUPDATEAVAILABLEMSG']._serialized_start=17706 + _globals['_DEPSUPDATEAVAILABLEMSG']._serialized_end=17820 + _globals['_DEPSUPTODATE']._serialized_start=17822 + _globals['_DEPSUPTODATE']._serialized_end=17836 + _globals['_DEPSUPTODATEMSG']._serialized_start=17838 + _globals['_DEPSUPTODATEMSG']._serialized_end=17938 + _globals['_DEPSLISTSUBDIRECTORY']._serialized_start=17940 + _globals['_DEPSLISTSUBDIRECTORY']._serialized_end=17984 + _globals['_DEPSLISTSUBDIRECTORYMSG']._serialized_start=17986 + _globals['_DEPSLISTSUBDIRECTORYMSG']._serialized_end=18102 + _globals['_DEPSNOTIFYUPDATESAVAILABLE']._serialized_start=18104 + _globals['_DEPSNOTIFYUPDATESAVAILABLE']._serialized_end=18150 + _globals['_DEPSNOTIFYUPDATESAVAILABLEMSG']._serialized_start=18153 + _globals['_DEPSNOTIFYUPDATESAVAILABLEMSG']._serialized_end=18281 + _globals['_REGISTRYINDEXPROGRESSGETREQUEST']._serialized_start=18283 + _globals['_REGISTRYINDEXPROGRESSGETREQUEST']._serialized_end=18329 + _globals['_REGISTRYINDEXPROGRESSGETREQUESTMSG']._serialized_start=18332 + _globals['_REGISTRYINDEXPROGRESSGETREQUESTMSG']._serialized_end=18470 + _globals['_REGISTRYINDEXPROGRESSGETRESPONSE']._serialized_start=18472 + _globals['_REGISTRYINDEXPROGRESSGETRESPONSE']._serialized_end=18538 + _globals['_REGISTRYINDEXPROGRESSGETRESPONSEMSG']._serialized_start=18541 + _globals['_REGISTRYINDEXPROGRESSGETRESPONSEMSG']._serialized_end=18681 + _globals['_REGISTRYRESPONSEUNEXPECTEDTYPE']._serialized_start=18683 + _globals['_REGISTRYRESPONSEUNEXPECTEDTYPE']._serialized_end=18733 + _globals['_REGISTRYRESPONSEUNEXPECTEDTYPEMSG']._serialized_start=18736 + _globals['_REGISTRYRESPONSEUNEXPECTEDTYPEMSG']._serialized_end=18872 + _globals['_REGISTRYRESPONSEMISSINGTOPKEYS']._serialized_start=18874 + _globals['_REGISTRYRESPONSEMISSINGTOPKEYS']._serialized_end=18924 + _globals['_REGISTRYRESPONSEMISSINGTOPKEYSMSG']._serialized_start=18927 + 
_globals['_REGISTRYRESPONSEMISSINGTOPKEYSMSG']._serialized_end=19063 + _globals['_REGISTRYRESPONSEMISSINGNESTEDKEYS']._serialized_start=19065 + _globals['_REGISTRYRESPONSEMISSINGNESTEDKEYS']._serialized_end=19118 + _globals['_REGISTRYRESPONSEMISSINGNESTEDKEYSMSG']._serialized_start=19121 + _globals['_REGISTRYRESPONSEMISSINGNESTEDKEYSMSG']._serialized_end=19263 + _globals['_REGISTRYRESPONSEEXTRANESTEDKEYS']._serialized_start=19265 + _globals['_REGISTRYRESPONSEEXTRANESTEDKEYS']._serialized_end=19316 + _globals['_REGISTRYRESPONSEEXTRANESTEDKEYSMSG']._serialized_start=19319 + _globals['_REGISTRYRESPONSEEXTRANESTEDKEYSMSG']._serialized_end=19457 + _globals['_DEPSSETDOWNLOADDIRECTORY']._serialized_start=19459 + _globals['_DEPSSETDOWNLOADDIRECTORY']._serialized_end=19499 + _globals['_DEPSSETDOWNLOADDIRECTORYMSG']._serialized_start=19501 + _globals['_DEPSSETDOWNLOADDIRECTORYMSG']._serialized_end=19625 + _globals['_DEPSUNPINNED']._serialized_start=19627 + _globals['_DEPSUNPINNED']._serialized_end=19672 + _globals['_DEPSUNPINNEDMSG']._serialized_start=19674 + _globals['_DEPSUNPINNEDMSG']._serialized_end=19774 + _globals['_NONODESFORSELECTIONCRITERIA']._serialized_start=19776 + _globals['_NONODESFORSELECTIONCRITERIA']._serialized_end=19823 + _globals['_NONODESFORSELECTIONCRITERIAMSG']._serialized_start=19826 + _globals['_NONODESFORSELECTIONCRITERIAMSG']._serialized_end=19956 + _globals['_DEPSLOCKUPDATING']._serialized_start=19958 + _globals['_DEPSLOCKUPDATING']._serialized_end=19999 + _globals['_DEPSLOCKUPDATINGMSG']._serialized_start=20001 + _globals['_DEPSLOCKUPDATINGMSG']._serialized_end=20109 + _globals['_DEPSADDPACKAGE']._serialized_start=20111 + _globals['_DEPSADDPACKAGE']._serialized_end=20193 + _globals['_DEPSADDPACKAGEMSG']._serialized_start=20195 + _globals['_DEPSADDPACKAGEMSG']._serialized_end=20299 + _globals['_DEPSFOUNDDUPLICATEPACKAGE']._serialized_start=20302 + _globals['_DEPSFOUNDDUPLICATEPACKAGE']._serialized_end=20469 + _globals['_DEPSFOUNDDUPLICATEPACKAGE_REMOVEDPACKAGEENTRY']._serialized_start=20416 + _globals['_DEPSFOUNDDUPLICATEPACKAGE_REMOVEDPACKAGEENTRY']._serialized_end=20469 + _globals['_DEPSFOUNDDUPLICATEPACKAGEMSG']._serialized_start=20471 + _globals['_DEPSFOUNDDUPLICATEPACKAGEMSG']._serialized_end=20597 + _globals['_DEPSVERSIONMISSING']._serialized_start=20599 + _globals['_DEPSVERSIONMISSING']._serialized_end=20635 + _globals['_DEPSVERSIONMISSINGMSG']._serialized_start=20637 + _globals['_DEPSVERSIONMISSINGMSG']._serialized_end=20749 + _globals['_DEPSSCRUBBEDPACKAGENAME']._serialized_start=20751 + _globals['_DEPSSCRUBBEDPACKAGENAME']._serialized_end=20798 + _globals['_DEPSSCRUBBEDPACKAGENAMEMSG']._serialized_start=20800 + _globals['_DEPSSCRUBBEDPACKAGENAMEMSG']._serialized_end=20922 + _globals['_RUNNINGOPERATIONCAUGHTERROR']._serialized_start=20924 + _globals['_RUNNINGOPERATIONCAUGHTERROR']._serialized_end=20966 + _globals['_RUNNINGOPERATIONCAUGHTERRORMSG']._serialized_start=20969 + _globals['_RUNNINGOPERATIONCAUGHTERRORMSG']._serialized_end=21099 + _globals['_COMPILECOMPLETE']._serialized_start=21101 + _globals['_COMPILECOMPLETE']._serialized_end=21118 + _globals['_COMPILECOMPLETEMSG']._serialized_start=21120 + _globals['_COMPILECOMPLETEMSG']._serialized_end=21226 + _globals['_FRESHNESSCHECKCOMPLETE']._serialized_start=21228 + _globals['_FRESHNESSCHECKCOMPLETE']._serialized_end=21252 + _globals['_FRESHNESSCHECKCOMPLETEMSG']._serialized_start=21254 + _globals['_FRESHNESSCHECKCOMPLETEMSG']._serialized_end=21374 + _globals['_SEEDHEADER']._serialized_start=21376 + 
_globals['_SEEDHEADER']._serialized_end=21404 + _globals['_SEEDHEADERMSG']._serialized_start=21406 + _globals['_SEEDHEADERMSG']._serialized_end=21502 + _globals['_SQLRUNNEREXCEPTION']._serialized_start=21504 + _globals['_SQLRUNNEREXCEPTION']._serialized_end=21597 + _globals['_SQLRUNNEREXCEPTIONMSG']._serialized_start=21599 + _globals['_SQLRUNNEREXCEPTIONMSG']._serialized_end=21711 + _globals['_LOGTESTRESULT']._serialized_start=21714 + _globals['_LOGTESTRESULT']._serialized_end=21882 + _globals['_LOGTESTRESULTMSG']._serialized_start=21884 + _globals['_LOGTESTRESULTMSG']._serialized_end=21986 + _globals['_LOGSTARTLINE']._serialized_start=21988 + _globals['_LOGSTARTLINE']._serialized_end=22095 + _globals['_LOGSTARTLINEMSG']._serialized_start=22097 + _globals['_LOGSTARTLINEMSG']._serialized_end=22197 + _globals['_LOGMODELRESULT']._serialized_start=22200 + _globals['_LOGMODELRESULT']._serialized_end=22349 + _globals['_LOGMODELRESULTMSG']._serialized_start=22351 + _globals['_LOGMODELRESULTMSG']._serialized_end=22455 + _globals['_LOGSNAPSHOTRESULT']._serialized_start=22458 + _globals['_LOGSNAPSHOTRESULT']._serialized_end=22732 + _globals['_LOGSNAPSHOTRESULT_CFGENTRY']._serialized_start=22690 + _globals['_LOGSNAPSHOTRESULT_CFGENTRY']._serialized_end=22732 + _globals['_LOGSNAPSHOTRESULTMSG']._serialized_start=22734 + _globals['_LOGSNAPSHOTRESULTMSG']._serialized_end=22844 + _globals['_LOGSEEDRESULT']._serialized_start=22847 + _globals['_LOGSEEDRESULT']._serialized_end=23032 + _globals['_LOGSEEDRESULTMSG']._serialized_start=23034 + _globals['_LOGSEEDRESULTMSG']._serialized_end=23136 + _globals['_LOGFRESHNESSRESULT']._serialized_start=23139 + _globals['_LOGFRESHNESSRESULT']._serialized_end=23312 + _globals['_LOGFRESHNESSRESULTMSG']._serialized_start=23314 + _globals['_LOGFRESHNESSRESULTMSG']._serialized_end=23426 + _globals['_LOGNODENOOPRESULT']._serialized_start=23429 + _globals['_LOGNODENOOPRESULT']._serialized_end=23581 + _globals['_LOGNODENOOPRESULTMSG']._serialized_start=23583 + _globals['_LOGNODENOOPRESULTMSG']._serialized_end=23693 + _globals['_LOGCANCELLINE']._serialized_start=23695 + _globals['_LOGCANCELLINE']._serialized_end=23729 + _globals['_LOGCANCELLINEMSG']._serialized_start=23731 + _globals['_LOGCANCELLINEMSG']._serialized_end=23833 + _globals['_DEFAULTSELECTOR']._serialized_start=23835 + _globals['_DEFAULTSELECTOR']._serialized_end=23866 + _globals['_DEFAULTSELECTORMSG']._serialized_start=23868 + _globals['_DEFAULTSELECTORMSG']._serialized_end=23974 + _globals['_NODESTART']._serialized_start=23976 + _globals['_NODESTART']._serialized_end=24029 + _globals['_NODESTARTMSG']._serialized_start=24031 + _globals['_NODESTARTMSG']._serialized_end=24125 + _globals['_NODEFINISHED']._serialized_start=24127 + _globals['_NODEFINISHED']._serialized_end=24230 + _globals['_NODEFINISHEDMSG']._serialized_start=24232 + _globals['_NODEFINISHEDMSG']._serialized_end=24332 + _globals['_QUERYCANCELATIONUNSUPPORTED']._serialized_start=24334 + _globals['_QUERYCANCELATIONUNSUPPORTED']._serialized_end=24377 + _globals['_QUERYCANCELATIONUNSUPPORTEDMSG']._serialized_start=24380 + _globals['_QUERYCANCELATIONUNSUPPORTEDMSG']._serialized_end=24510 + _globals['_CONCURRENCYLINE']._serialized_start=24512 + _globals['_CONCURRENCYLINE']._serialized_end=24591 + _globals['_CONCURRENCYLINEMSG']._serialized_start=24593 + _globals['_CONCURRENCYLINEMSG']._serialized_end=24699 + _globals['_WRITINGINJECTEDSQLFORNODE']._serialized_start=24701 + _globals['_WRITINGINJECTEDSQLFORNODE']._serialized_end=24770 + 
_globals['_WRITINGINJECTEDSQLFORNODEMSG']._serialized_start=24772 + _globals['_WRITINGINJECTEDSQLFORNODEMSG']._serialized_end=24898 + _globals['_NODECOMPILING']._serialized_start=24900 + _globals['_NODECOMPILING']._serialized_end=24957 + _globals['_NODECOMPILINGMSG']._serialized_start=24959 + _globals['_NODECOMPILINGMSG']._serialized_end=25061 + _globals['_NODEEXECUTING']._serialized_start=25063 + _globals['_NODEEXECUTING']._serialized_end=25120 + _globals['_NODEEXECUTINGMSG']._serialized_start=25122 + _globals['_NODEEXECUTINGMSG']._serialized_end=25224 + _globals['_LOGHOOKSTARTLINE']._serialized_start=25226 + _globals['_LOGHOOKSTARTLINE']._serialized_end=25335 + _globals['_LOGHOOKSTARTLINEMSG']._serialized_start=25337 + _globals['_LOGHOOKSTARTLINEMSG']._serialized_end=25445 + _globals['_LOGHOOKENDLINE']._serialized_start=25448 + _globals['_LOGHOOKENDLINE']._serialized_end=25595 + _globals['_LOGHOOKENDLINEMSG']._serialized_start=25597 + _globals['_LOGHOOKENDLINEMSG']._serialized_end=25701 + _globals['_SKIPPINGDETAILS']._serialized_start=25704 + _globals['_SKIPPINGDETAILS']._serialized_end=25851 + _globals['_SKIPPINGDETAILSMSG']._serialized_start=25853 + _globals['_SKIPPINGDETAILSMSG']._serialized_end=25959 + _globals['_NOTHINGTODO']._serialized_start=25961 + _globals['_NOTHINGTODO']._serialized_end=25974 + _globals['_NOTHINGTODOMSG']._serialized_start=25976 + _globals['_NOTHINGTODOMSG']._serialized_end=26074 + _globals['_RUNNINGOPERATIONUNCAUGHTERROR']._serialized_start=26076 + _globals['_RUNNINGOPERATIONUNCAUGHTERROR']._serialized_end=26120 + _globals['_RUNNINGOPERATIONUNCAUGHTERRORMSG']._serialized_start=26123 + _globals['_RUNNINGOPERATIONUNCAUGHTERRORMSG']._serialized_end=26257 + _globals['_ENDRUNRESULT']._serialized_start=26260 + _globals['_ENDRUNRESULT']._serialized_end=26407 + _globals['_ENDRUNRESULTMSG']._serialized_start=26409 + _globals['_ENDRUNRESULTMSG']._serialized_end=26509 + _globals['_NONODESSELECTED']._serialized_start=26511 + _globals['_NONODESSELECTED']._serialized_end=26528 + _globals['_NONODESSELECTEDMSG']._serialized_start=26530 + _globals['_NONODESSELECTEDMSG']._serialized_end=26636 + _globals['_COMMANDCOMPLETED']._serialized_start=26638 + _globals['_COMMANDCOMPLETED']._serialized_end=26757 + _globals['_COMMANDCOMPLETEDMSG']._serialized_start=26759 + _globals['_COMMANDCOMPLETEDMSG']._serialized_end=26867 + _globals['_SHOWNODE']._serialized_start=26869 + _globals['_SHOWNODE']._serialized_end=26976 + _globals['_SHOWNODEMSG']._serialized_start=26978 + _globals['_SHOWNODEMSG']._serialized_end=27070 + _globals['_COMPILEDNODE']._serialized_start=27072 + _globals['_COMPILEDNODE']._serialized_end=27184 + _globals['_COMPILEDNODEMSG']._serialized_start=27186 + _globals['_COMPILEDNODEMSG']._serialized_end=27286 + _globals['_CATCHABLEEXCEPTIONONRUN']._serialized_start=27288 + _globals['_CATCHABLEEXCEPTIONONRUN']._serialized_end=27386 + _globals['_CATCHABLEEXCEPTIONONRUNMSG']._serialized_start=27388 + _globals['_CATCHABLEEXCEPTIONONRUNMSG']._serialized_end=27510 + _globals['_INTERNALERRORONRUN']._serialized_start=27512 + _globals['_INTERNALERRORONRUN']._serialized_end=27607 + _globals['_INTERNALERRORONRUNMSG']._serialized_start=27609 + _globals['_INTERNALERRORONRUNMSG']._serialized_end=27721 + _globals['_GENERICEXCEPTIONONRUN']._serialized_start=27723 + _globals['_GENERICEXCEPTIONONRUN']._serialized_end=27840 + _globals['_GENERICEXCEPTIONONRUNMSG']._serialized_start=27842 + _globals['_GENERICEXCEPTIONONRUNMSG']._serialized_end=27960 + 
_globals['_NODECONNECTIONRELEASEERROR']._serialized_start=27962 + _globals['_NODECONNECTIONRELEASEERROR']._serialized_end=28040 + _globals['_NODECONNECTIONRELEASEERRORMSG']._serialized_start=28043 + _globals['_NODECONNECTIONRELEASEERRORMSG']._serialized_end=28171 + _globals['_FOUNDSTATS']._serialized_start=28173 + _globals['_FOUNDSTATS']._serialized_end=28204 + _globals['_FOUNDSTATSMSG']._serialized_start=28206 + _globals['_FOUNDSTATSMSG']._serialized_end=28302 + _globals['_MAINKEYBOARDINTERRUPT']._serialized_start=28304 + _globals['_MAINKEYBOARDINTERRUPT']._serialized_end=28327 + _globals['_MAINKEYBOARDINTERRUPTMSG']._serialized_start=28329 + _globals['_MAINKEYBOARDINTERRUPTMSG']._serialized_end=28447 + _globals['_MAINENCOUNTEREDERROR']._serialized_start=28449 + _globals['_MAINENCOUNTEREDERROR']._serialized_end=28484 + _globals['_MAINENCOUNTEREDERRORMSG']._serialized_start=28486 + _globals['_MAINENCOUNTEREDERRORMSG']._serialized_end=28602 + _globals['_MAINSTACKTRACE']._serialized_start=28604 + _globals['_MAINSTACKTRACE']._serialized_end=28641 + _globals['_MAINSTACKTRACEMSG']._serialized_start=28643 + _globals['_MAINSTACKTRACEMSG']._serialized_end=28747 + _globals['_TIMINGINFOCOLLECTED']._serialized_start=28749 + _globals['_TIMINGINFOCOLLECTED']._serialized_end=28861 + _globals['_TIMINGINFOCOLLECTEDMSG']._serialized_start=28863 + _globals['_TIMINGINFOCOLLECTEDMSG']._serialized_end=28977 + _globals['_LOGDEBUGSTACKTRACE']._serialized_start=28979 + _globals['_LOGDEBUGSTACKTRACE']._serialized_end=29017 + _globals['_LOGDEBUGSTACKTRACEMSG']._serialized_start=29019 + _globals['_LOGDEBUGSTACKTRACEMSG']._serialized_end=29131 + _globals['_CHECKCLEANPATH']._serialized_start=29133 + _globals['_CHECKCLEANPATH']._serialized_end=29163 + _globals['_CHECKCLEANPATHMSG']._serialized_start=29165 + _globals['_CHECKCLEANPATHMSG']._serialized_end=29269 + _globals['_CONFIRMCLEANPATH']._serialized_start=29271 + _globals['_CONFIRMCLEANPATH']._serialized_end=29303 + _globals['_CONFIRMCLEANPATHMSG']._serialized_start=29305 + _globals['_CONFIRMCLEANPATHMSG']._serialized_end=29413 + _globals['_PROTECTEDCLEANPATH']._serialized_start=29415 + _globals['_PROTECTEDCLEANPATH']._serialized_end=29449 + _globals['_PROTECTEDCLEANPATHMSG']._serialized_start=29451 + _globals['_PROTECTEDCLEANPATHMSG']._serialized_end=29563 + _globals['_FINISHEDCLEANPATHS']._serialized_start=29565 + _globals['_FINISHEDCLEANPATHS']._serialized_end=29585 + _globals['_FINISHEDCLEANPATHSMSG']._serialized_start=29587 + _globals['_FINISHEDCLEANPATHSMSG']._serialized_end=29699 + _globals['_OPENCOMMAND']._serialized_start=29701 + _globals['_OPENCOMMAND']._serialized_end=29754 + _globals['_OPENCOMMANDMSG']._serialized_start=29756 + _globals['_OPENCOMMANDMSG']._serialized_end=29854 + _globals['_SERVINGDOCSPORT']._serialized_start=29856 + _globals['_SERVINGDOCSPORT']._serialized_end=29904 + _globals['_SERVINGDOCSPORTMSG']._serialized_start=29906 + _globals['_SERVINGDOCSPORTMSG']._serialized_end=30012 + _globals['_SERVINGDOCSACCESSINFO']._serialized_start=30014 + _globals['_SERVINGDOCSACCESSINFO']._serialized_end=30051 + _globals['_SERVINGDOCSACCESSINFOMSG']._serialized_start=30053 + _globals['_SERVINGDOCSACCESSINFOMSG']._serialized_end=30171 + _globals['_SERVINGDOCSEXITINFO']._serialized_start=30173 + _globals['_SERVINGDOCSEXITINFO']._serialized_end=30194 + _globals['_SERVINGDOCSEXITINFOMSG']._serialized_start=30196 + _globals['_SERVINGDOCSEXITINFOMSG']._serialized_end=30310 + _globals['_RUNRESULTWARNING']._serialized_start=30312 + 
_globals['_RUNRESULTWARNING']._serialized_end=30428 + _globals['_RUNRESULTWARNINGMSG']._serialized_start=30430 + _globals['_RUNRESULTWARNINGMSG']._serialized_end=30538 + _globals['_RUNRESULTFAILURE']._serialized_start=30540 + _globals['_RUNRESULTFAILURE']._serialized_end=30656 + _globals['_RUNRESULTFAILUREMSG']._serialized_start=30658 + _globals['_RUNRESULTFAILUREMSG']._serialized_end=30766 + _globals['_STATSLINE']._serialized_start=30768 + _globals['_STATSLINE']._serialized_end=30875 + _globals['_STATSLINE_STATSENTRY']._serialized_start=30831 + _globals['_STATSLINE_STATSENTRY']._serialized_end=30875 + _globals['_STATSLINEMSG']._serialized_start=30877 + _globals['_STATSLINEMSG']._serialized_end=30971 + _globals['_RUNRESULTERROR']._serialized_start=30973 + _globals['_RUNRESULTERROR']._serialized_end=31044 + _globals['_RUNRESULTERRORMSG']._serialized_start=31046 + _globals['_RUNRESULTERRORMSG']._serialized_end=31150 + _globals['_RUNRESULTERRORNOMESSAGE']._serialized_start=31152 + _globals['_RUNRESULTERRORNOMESSAGE']._serialized_end=31235 + _globals['_RUNRESULTERRORNOMESSAGEMSG']._serialized_start=31237 + _globals['_RUNRESULTERRORNOMESSAGEMSG']._serialized_end=31359 + _globals['_SQLCOMPILEDPATH']._serialized_start=31361 + _globals['_SQLCOMPILEDPATH']._serialized_end=31434 + _globals['_SQLCOMPILEDPATHMSG']._serialized_start=31436 + _globals['_SQLCOMPILEDPATHMSG']._serialized_end=31542 + _globals['_CHECKNODETESTFAILURE']._serialized_start=31544 + _globals['_CHECKNODETESTFAILURE']._serialized_end=31631 + _globals['_CHECKNODETESTFAILUREMSG']._serialized_start=31633 + _globals['_CHECKNODETESTFAILUREMSG']._serialized_end=31749 + _globals['_ENDOFRUNSUMMARY']._serialized_start=31751 + _globals['_ENDOFRUNSUMMARY']._serialized_end=31838 + _globals['_ENDOFRUNSUMMARYMSG']._serialized_start=31840 + _globals['_ENDOFRUNSUMMARYMSG']._serialized_end=31946 + _globals['_LOGSKIPBECAUSEERROR']._serialized_start=31948 + _globals['_LOGSKIPBECAUSEERROR']._serialized_end=32033 + _globals['_LOGSKIPBECAUSEERRORMSG']._serialized_start=32035 + _globals['_LOGSKIPBECAUSEERRORMSG']._serialized_end=32149 + _globals['_ENSUREGITINSTALLED']._serialized_start=32151 + _globals['_ENSUREGITINSTALLED']._serialized_end=32171 + _globals['_ENSUREGITINSTALLEDMSG']._serialized_start=32173 + _globals['_ENSUREGITINSTALLEDMSG']._serialized_end=32285 + _globals['_DEPSCREATINGLOCALSYMLINK']._serialized_start=32287 + _globals['_DEPSCREATINGLOCALSYMLINK']._serialized_end=32313 + _globals['_DEPSCREATINGLOCALSYMLINKMSG']._serialized_start=32315 + _globals['_DEPSCREATINGLOCALSYMLINKMSG']._serialized_end=32439 + _globals['_DEPSSYMLINKNOTAVAILABLE']._serialized_start=32441 + _globals['_DEPSSYMLINKNOTAVAILABLE']._serialized_end=32466 + _globals['_DEPSSYMLINKNOTAVAILABLEMSG']._serialized_start=32468 + _globals['_DEPSSYMLINKNOTAVAILABLEMSG']._serialized_end=32590 + _globals['_DISABLETRACKING']._serialized_start=32592 + _globals['_DISABLETRACKING']._serialized_end=32609 + _globals['_DISABLETRACKINGMSG']._serialized_start=32611 + _globals['_DISABLETRACKINGMSG']._serialized_end=32717 + _globals['_SENDINGEVENT']._serialized_start=32719 + _globals['_SENDINGEVENT']._serialized_end=32749 + _globals['_SENDINGEVENTMSG']._serialized_start=32751 + _globals['_SENDINGEVENTMSG']._serialized_end=32851 + _globals['_SENDEVENTFAILURE']._serialized_start=32853 + _globals['_SENDEVENTFAILURE']._serialized_end=32871 + _globals['_SENDEVENTFAILUREMSG']._serialized_start=32873 + _globals['_SENDEVENTFAILUREMSG']._serialized_end=32981 + 
 # @@protoc_insertion_point(module_scope)
diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py
index cab3baf630c..68b4f70920d 100644
--- a/core/dbt/events/types.py
+++ b/core/dbt/events/types.py
@@ -1,7 +1,7 @@
 import json
 
 from dbt.constants import MAXIMUM_SEED_SIZE_NAME, PIN_PACKAGE_URL
-from dbt_common.ui import warning_tag, line_wrap_message, green, yellow, red
+from dbt_common.ui import error_tag, warning_tag, line_wrap_message, green, yellow, red
 from dbt_common.events.base_types import EventLevel
 from dbt_common.events.format import (
     format_fancy_output_line,
@@ -390,7 +390,7 @@ def code(self) -> str:
     def message(self) -> str:
         description = (
             f"The `{self.deprecated_path}` config has been renamed to `{self.exp_path}`. "
-            "Please update your `dbt_project.yml` configuration to reflect this change."
+            "Please see https://docs.getdbt.com/docs/build/data-tests#new-data_tests-syntax for more information."
         )
         return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}"))
@@ -408,6 +408,53 @@ def message(self) -> str:
         return warning_tag(f"Deprecated functionality\n\n{description}")
 
 
+class SpacesInModelNameDeprecation(DynamicLevel):
+    def code(self) -> str:
+        return "D014"
+
+    def message(self) -> str:
+        version = ".v" + self.model_version if self.model_version else ""
+        description = (
+            f"Model `{self.model_name}{version}` has spaces in its name. This is deprecated and "
+            "may cause errors when using dbt."
+        )
+
+        if self.level == EventLevel.ERROR.value:
+            description = error_tag(description)
+        elif self.level == EventLevel.WARN.value:
+            description = warning_tag(description)
+
+        return line_wrap_message(description)
+
+
+class TotalModelNamesWithSpacesDeprecation(DynamicLevel):
+    def code(self) -> str:
+        return "D015"
+
+    def message(self) -> str:
+        description = f"Spaces in model names found in {self.count_invalid_names} model(s), which is deprecated."
+
+        if self.show_debug_hint:
+            description += " Run again with `--debug` to see them all."
+
+        if self.level == EventLevel.ERROR.value:
+            description = error_tag(description)
+        elif self.level == EventLevel.WARN.value:
+            description = warning_tag(description)
+
+        return line_wrap_message(description)
+
+
+class PackageMaterializationOverrideDeprecation(WarnLevel):
+    def code(self) -> str:
+        return "D016"
+
+    def message(self) -> str:
+        description = f"Installed package '{self.package_name}' is overriding the built-in materialization '{self.materialization_name}'. Overrides of built-in materializations from installed packages will be deprecated in future versions of dbt. Please refer to https://docs.getdbt.com/reference/global-configs/legacy-behaviors#require_explicit_package_overrides_for_builtin_materializations for detailed documentation and suggested workarounds."
+
+        return line_wrap_message(warning_tag(description))
+
+
 # =======================================================
 # I - Project parsing
 # =======================================================
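Editor's note: the new D014/D015 events subclass DynamicLevel, so a single event class renders as a warning or a hard error depending on the level it is fired at. Below is a minimal, self-contained sketch of that dispatch; EventLevel and the tag helpers are simplified stand-ins for dbt_common, not the real implementations.

from enum import Enum


class EventLevel(str, Enum):  # stand-in for dbt_common.events.base_types.EventLevel
    WARN = "warn"
    ERROR = "error"


def error_tag(msg: str) -> str:  # stand-ins for the dbt_common.ui helpers
    return f"[ERROR]: {msg}"


def warning_tag(msg: str) -> str:
    return f"[WARNING]: {msg}"


def render(description: str, level: str) -> str:
    # Mirrors the branch in SpacesInModelNameDeprecation.message(): the same
    # deprecation text is tagged as an error or a warning based on the level.
    if level == EventLevel.ERROR.value:
        return error_tag(description)
    elif level == EventLevel.WARN.value:
        return warning_tag(description)
    return description


print(render("Model `my model` has spaces in its name.", "error"))
print(render("Model `my model` has spaces in its name.", "warn"))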
@@ -1094,7 +1141,7 @@ def code(self) -> str:
         return "M030"
 
     def message(self) -> str:
-        return f"The selection criterion '{self.spec_raw}' does not match any nodes"
+        return f"The selection criterion '{self.spec_raw}' does not match any enabled nodes"
 
 
 class DepsLockUpdating(InfoLevel):
@@ -1343,7 +1390,22 @@ def status_to_level(cls, status):
             return EventLevel.INFO
 
 
-# Skipped Q019, Q020, Q021
+class LogNodeNoOpResult(InfoLevel):
+    def code(self) -> str:
+        return "Q019"
+
+    def message(self) -> str:
+        msg = f"NO-OP {self.description}"
+        return format_fancy_output_line(
+            msg=msg,
+            status=yellow("NO-OP"),
+            index=self.index,
+            total=self.total,
+            execution_time=self.execution_time,
+        )
+
+
+# Skipped Q020, Q021
 
 
 class LogCancelLine(ErrorLevel):
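Editor's note: LogNodeNoOpResult reuses dbt's fancy status line for nodes that are skipped as no-ops. The sketch below is a simplified mock of what such a line looks like; format_fancy_output_line here is an illustrative stand-in, not the real dbt_common helper, which also pads and colors the output.

def format_fancy_output_line(msg, status, index, total, execution_time):
    # Simplified mock: render "<i> of <n> <msg> [<status> in <t>s]".
    progress = f"{index} of {total}" if index and total else ""
    return f"{progress} {msg} [{status} in {execution_time:.2f}s]"


print(format_fancy_output_line("NO-OP model my_project.orders", "NO-OP", 3, 5, 0.01))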
' - f"{self.filename}, but found {found_str}. Are you running with a " - f"different version of dbt?" - ) - return msg - - CODE = 10014 - MESSAGE = "Incompatible Schema" - - class AliasError(DbtValidationError): pass @@ -358,7 +335,10 @@ def get_message(self) -> str: pretty_vars = json.dumps(dct, sort_keys=True, indent=4) msg = f"Required var '{self.var_name}' not found in config:\nVars supplied to {node_name} = {pretty_vars}" - return msg + return scrub_secrets(msg, self.var_secrets()) + + def var_secrets(self) -> List[str]: + return [v for k, v in self.merged.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()] class PackageNotFoundForMacroError(CompilationError): @@ -1349,7 +1329,7 @@ def __init__(self, yaml_columns, sql_columns): self.sql_columns = sql_columns super().__init__(msg=self.get_message()) - def get_mismatches(self) -> agate.Table: + def get_mismatches(self) -> "agate.Table": # avoid a circular import from dbt_common.clients.agate_helper import table_from_data_flat @@ -1400,7 +1380,7 @@ def get_message(self) -> str: "This model has an enforced contract, and its 'columns' specification is missing" ) - table: agate.Table = self.get_mismatches() + table: "agate.Table" = self.get_mismatches() # Hack to get Agate table output as string output = io.StringIO() table.print_table(output=output, max_rows=None, max_column_width=50) # type: ignore diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py index 10403ba04fc..25d3af0d493 100644 --- a/core/dbt/graph/selector_methods.py +++ b/core/dbt/graph/selector_methods.py @@ -8,7 +8,7 @@ from .graph import UniqueId -from dbt.contracts.graph.manifest import Manifest, WritableManifest +from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.nodes import ( SingularTestNode, Exposure, @@ -54,7 +54,6 @@ class MethodName(StrEnum): Metric = "metric" Result = "result" SourceStatus = "source_status" - Wildcard = "wildcard" Version = "version" SemanticModel = "semantic_model" SavedQuery = "saved_query" @@ -258,37 +257,36 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu :param str selector: The selector or node name """ non_source_nodes = list(self.non_source_nodes(included_nodes)) - for node, real_node in non_source_nodes: - if self.node_is_match(selector, real_node.fqn, real_node.is_versioned): - yield node + for unique_id, node in non_source_nodes: + if self.node_is_match(selector, node.fqn, node.is_versioned): + yield unique_id class TagSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: """yields nodes from included that have the specified tag""" - for node, real_node in self.all_nodes(included_nodes): - if hasattr(real_node, "tags") and any( - fnmatch(tag, selector) for tag in real_node.tags - ): - yield node + for unique_id, node in self.all_nodes(included_nodes): + if hasattr(node, "tags") and any(fnmatch(tag, selector) for tag in node.tags): + yield unique_id class GroupSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: """yields nodes from included in the specified group""" - for node, real_node in self.groupable_nodes(included_nodes): - if selector == real_node.config.get("group"): - yield node + for unique_id, node in self.groupable_nodes(included_nodes): + node_group = node.config.get("group") + if node_group and fnmatch(node_group, selector): + yield unique_id class AccessSelectorMethod(SelectorMethod): def 
@@ -358,7 +335,10 @@ def get_message(self) -> str:
         pretty_vars = json.dumps(dct, sort_keys=True, indent=4)
         msg = f"Required var '{self.var_name}' not found in config:\nVars supplied to {node_name} = {pretty_vars}"
-        return msg
+        return scrub_secrets(msg, self.var_secrets())
+
+    def var_secrets(self) -> List[str]:
+        return [v for k, v in self.merged.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()]
 
 
 class PackageNotFoundForMacroError(CompilationError):
@@ -1349,7 +1329,7 @@ def __init__(self, yaml_columns, sql_columns):
         self.sql_columns = sql_columns
         super().__init__(msg=self.get_message())
 
-    def get_mismatches(self) -> agate.Table:
+    def get_mismatches(self) -> "agate.Table":
         # avoid a circular import
         from dbt_common.clients.agate_helper import table_from_data_flat
@@ -1400,7 +1380,7 @@ def get_message(self) -> str:
             "This model has an enforced contract, and its 'columns' specification is missing"
         )
 
-        table: agate.Table = self.get_mismatches()
+        table: "agate.Table" = self.get_mismatches()
         # Hack to get Agate table output as string
         output = io.StringIO()
         table.print_table(output=output, max_rows=None, max_column_width=50)  # type: ignore
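Editor's note: var_secrets() collects the values of every secret-prefixed variable so scrub_secrets() can mask them before the error message is displayed. A sketch of the idea with a hand-rolled scrub_secrets; the real helper lives in dbt's exceptions module, and the DBT_ENV_SECRET_ prefix value is an assumption here.

from typing import Dict, List

SECRET_ENV_PREFIX = "DBT_ENV_SECRET_"  # assumed value of dbt's secret prefix


def var_secrets(merged: Dict[str, str]) -> List[str]:
    # Collect the *values* of secret-prefixed vars so they can be masked.
    return [v for k, v in merged.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()]


def scrub_secrets(msg: str, secrets: List[str]) -> str:
    # Simple masking pass, analogous in spirit to dbt's scrub_secrets helper.
    for secret in secrets:
        msg = msg.replace(secret, "*****")
    return msg


vars_in_scope = {"DBT_ENV_SECRET_TOKEN": "hunter2", "schema": "analytics"}
print(scrub_secrets("token is hunter2", var_secrets(vars_in_scope)))  # token is *****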
diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py
index 10403ba04fc..25d3af0d493 100644
--- a/core/dbt/graph/selector_methods.py
+++ b/core/dbt/graph/selector_methods.py
@@ -8,7 +8,7 @@
 
 from .graph import UniqueId
 
-from dbt.contracts.graph.manifest import Manifest, WritableManifest
+from dbt.contracts.graph.manifest import Manifest
 from dbt.contracts.graph.nodes import (
     SingularTestNode,
     Exposure,
@@ -54,7 +54,6 @@ class MethodName(StrEnum):
     Metric = "metric"
     Result = "result"
     SourceStatus = "source_status"
-    Wildcard = "wildcard"
     Version = "version"
     SemanticModel = "semantic_model"
     SavedQuery = "saved_query"
@@ -258,37 +257,36 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
         :param str selector: The selector or node name
         """
         non_source_nodes = list(self.non_source_nodes(included_nodes))
-        for node, real_node in non_source_nodes:
-            if self.node_is_match(selector, real_node.fqn, real_node.is_versioned):
-                yield node
+        for unique_id, node in non_source_nodes:
+            if self.node_is_match(selector, node.fqn, node.is_versioned):
+                yield unique_id
 
 
 class TagSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
         """yields nodes from included that have the specified tag"""
-        for node, real_node in self.all_nodes(included_nodes):
-            if hasattr(real_node, "tags") and any(
-                fnmatch(tag, selector) for tag in real_node.tags
-            ):
-                yield node
+        for unique_id, node in self.all_nodes(included_nodes):
+            if hasattr(node, "tags") and any(fnmatch(tag, selector) for tag in node.tags):
+                yield unique_id
 
 
 class GroupSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
         """yields nodes from included in the specified group"""
-        for node, real_node in self.groupable_nodes(included_nodes):
-            if selector == real_node.config.get("group"):
-                yield node
+        for unique_id, node in self.groupable_nodes(included_nodes):
+            node_group = node.config.get("group")
+            if node_group and fnmatch(node_group, selector):
+                yield unique_id
 
 
 class AccessSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
         """yields model nodes matching the specified access level"""
-        for node, real_node in self.parsed_nodes(included_nodes):
-            if not isinstance(real_node, ModelNode):
+        for unique_id, node in self.parsed_nodes(included_nodes):
+            if not isinstance(node, ModelNode):
                 continue
-            if selector == real_node.access:
-                yield node
+            if selector == node.access:
+                yield unique_id
 
 
 class SourceSelectorMethod(SelectorMethod):
@@ -311,14 +309,14 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
             ).format(selector)
             raise DbtRuntimeError(msg)
 
-        for node, real_node in self.source_nodes(included_nodes):
-            if not fnmatch(real_node.package_name, target_package):
+        for unique_id, node in self.source_nodes(included_nodes):
+            if not fnmatch(node.package_name, target_package):
                 continue
-            if not fnmatch(real_node.source_name, target_source):
+            if not fnmatch(node.source_name, target_source):
                 continue
-            if not fnmatch(real_node.name, target_table):
+            if not fnmatch(node.name, target_table):
                 continue
-            yield node
+            yield unique_id
 
 
 class ExposureSelectorMethod(SelectorMethod):
@@ -337,13 +335,13 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
             ).format(selector)
             raise DbtRuntimeError(msg)
 
-        for node, real_node in self.exposure_nodes(included_nodes):
-            if not fnmatch(real_node.package_name, target_package):
+        for unique_id, node in self.exposure_nodes(included_nodes):
+            if not fnmatch(node.package_name, target_package):
                 continue
-            if not fnmatch(real_node.name, target_name):
+            if not fnmatch(node.name, target_name):
                 continue
-            yield node
+            yield unique_id
 
 
 class MetricSelectorMethod(SelectorMethod):
@@ -362,13 +360,13 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
             ).format(selector)
             raise DbtRuntimeError(msg)
 
-        for node, real_node in self.metric_nodes(included_nodes):
-            if not fnmatch(real_node.package_name, target_package):
+        for unique_id, node in self.metric_nodes(included_nodes):
+            if not fnmatch(node.package_name, target_package):
                 continue
-            if not fnmatch(real_node.name, target_name):
+            if not fnmatch(node.name, target_name):
                 continue
-            yield node
+            yield unique_id
 
 
 class SemanticModelSelectorMethod(SelectorMethod):
@@ -387,13 +385,13 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
             ).format(selector)
             raise DbtRuntimeError(msg)
 
-        for node, real_node in self.semantic_model_nodes(included_nodes):
-            if not fnmatch(real_node.package_name, target_package):
+        for unique_id, node in self.semantic_model_nodes(included_nodes):
+            if not fnmatch(node.package_name, target_package):
                 continue
-            if not fnmatch(real_node.name, target_name):
+            if not fnmatch(node.name, target_name):
                 continue
-            yield node
+            yield unique_id
 
 
 class SavedQuerySelectorMethod(SelectorMethod):
@@ -412,13 +410,13 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
             ).format(selector)
             raise DbtRuntimeError(msg)
 
-        for node, real_node in self.saved_query_nodes(included_nodes):
-            if not fnmatch(real_node.package_name, target_package):
+        for unique_id, node in self.saved_query_nodes(included_nodes):
+            if not fnmatch(node.package_name, target_package):
                 continue
-            if not fnmatch(real_node.name, target_name):
+            if not fnmatch(node.name, target_name):
                 continue
-            yield node
+            yield unique_id
 
 
 class PathSelectorMethod(SelectorMethod):
@@ -431,35 +429,35 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
         else:
             root = Path.cwd()
         paths = set(p.relative_to(root) for p in root.glob(selector))
-        for node, real_node in self.all_nodes(included_nodes):
-            ofp = Path(real_node.original_file_path)
+        for unique_id, node in self.all_nodes(included_nodes):
+            ofp = Path(node.original_file_path)
             if ofp in paths:
-                yield node
-            if hasattr(real_node, "patch_path") and real_node.patch_path:  # type: ignore
-                pfp = real_node.patch_path.split("://")[1]  # type: ignore
+                yield unique_id
+            if hasattr(node, "patch_path") and node.patch_path:  # type: ignore
+                pfp = node.patch_path.split("://")[1]  # type: ignore
                 ymlfp = Path(pfp)
                 if ymlfp in paths:
-                    yield node
+                    yield unique_id
             if any(parent in paths for parent in ofp.parents):
-                yield node
+                yield unique_id
 
 
 class FileSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
         """Yields nodes from included that match the given file name."""
-        for node, real_node in self.all_nodes(included_nodes):
-            if fnmatch(Path(real_node.original_file_path).name, selector):
-                yield node
-            elif fnmatch(Path(real_node.original_file_path).stem, selector):
-                yield node
+        for unique_id, node in self.all_nodes(included_nodes):
+            if fnmatch(Path(node.original_file_path).name, selector):
+                yield unique_id
+            elif fnmatch(Path(node.original_file_path).stem, selector):
+                yield unique_id
 
 
 class PackageSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
         """Yields nodes from included that have the specified package"""
-        for node, real_node in self.all_nodes(included_nodes):
-            if fnmatch(real_node.package_name, selector):
-                yield node
+        for unique_id, node in self.all_nodes(included_nodes):
+            if fnmatch(node.package_name, selector):
+                yield unique_id
 
 
 def _getattr_descend(obj: Any, attrs: List[str]) -> Any:
@@ -501,9 +499,9 @@ def search(
         # search sources is kind of useless now source configs only have
         # 'enabled', which you can't really filter on anyway, but maybe we'll
         # add more someday, so search them anyway.
-        for node, real_node in self.configurable_nodes(included_nodes):
+        for unique_id, node in self.configurable_nodes(included_nodes):
             try:
-                value = _getattr_descend(real_node.config, parts)
+                value = _getattr_descend(node.config, parts)
             except AttributeError:
                 continue
             else:
@@ -513,7 +511,7 @@ def search(
                     or (CaseInsensitive(selector) == "true" and True in value)
                     or (CaseInsensitive(selector) == "false" and False in value)
                 ):
-                    yield node
+                    yield unique_id
             else:
                 if (
                     (selector == value)
@@ -521,7 +519,7 @@ def search(
                     or (CaseInsensitive(selector) == "false")
                     and value is False
                 ):
-                    yield node
+                    yield unique_id
 
 
 class ResourceTypeSelectorMethod(SelectorMethod):
@@ -530,9 +528,9 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
             resource_type = NodeType(selector)
         except ValueError as exc:
             raise DbtRuntimeError(f'Invalid resource_type selector "{selector}"') from exc
-        for node, real_node in self.all_nodes(included_nodes):
-            if real_node.resource_type == resource_type:
-                yield node
+        for unique_id, node in self.all_nodes(included_nodes):
+            if node.resource_type == resource_type:
+                yield unique_id
 
 
 class TestNameSelectorMethod(SelectorMethod):
@@ -725,7 +723,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
                 f'Got an invalid selector "{selector}", expected one of '
                 f'"{list(state_checks)}"'
             )
-        manifest: WritableManifest = self.previous_state.manifest
+        manifest: Manifest = self.previous_state.manifest
 
         for unique_id, node in self.all_nodes(included_nodes):
             previous_node: Optional[SelectorTarget] = None
@@ -733,7 +731,7 @@
             if unique_id in manifest.nodes:
                 previous_node = manifest.nodes[unique_id]
             elif unique_id in manifest.sources:
-                previous_node = manifest.sources[unique_id]
+                previous_node = SourceDefinition.from_resource(manifest.sources[unique_id])
             elif unique_id in manifest.exposures:
                 previous_node = Exposure.from_resource(manifest.exposures[unique_id])
             elif unique_id in manifest.metrics:
@@ -741,7 +739,7 @@
             elif unique_id in manifest.semantic_models:
                 previous_node = SemanticModel.from_resource(manifest.semantic_models[unique_id])
             elif unique_id in manifest.unit_tests:
-                previous_node = manifest.unit_tests[unique_id]
+                previous_node = UnitTestDefinition.from_resource(manifest.unit_tests[unique_id])
 
             keyword_args = {}
             if checker.__name__ in [
@@ -762,9 +760,9 @@
         matches = set(
            result.unique_id for result in self.previous_state.results if result.status == selector
         )
-        for node, real_node in self.all_nodes(included_nodes):
-            if node in matches:
-                yield node
+        for unique_id, node in self.all_nodes(included_nodes):
+            if unique_id in matches:
+                yield unique_id
 
 
 class SourceStatusSelectorMethod(SelectorMethod):
@@ -816,37 +814,37 @@
         ):
            matches.remove(unique_id)
 
-        for node, real_node in self.all_nodes(included_nodes):
-            if node in matches:
-                yield node
+        for unique_id, node in self.all_nodes(included_nodes):
+            if unique_id in matches:
+                yield unique_id
 
 
 class VersionSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
-        for node, real_node in self.parsed_nodes(included_nodes):
-            if isinstance(real_node, ModelNode):
+        for unique_id, node in self.parsed_nodes(included_nodes):
+            if isinstance(node, ModelNode):
                 if selector == "latest":
-                    if real_node.is_latest_version:
-                        yield node
+                    if node.is_latest_version:
+                        yield unique_id
                 elif selector == "prerelease":
                     if (
-                        real_node.version
-                        and real_node.latest_version
-                        and UnparsedVersion(v=real_node.version)
-                        > UnparsedVersion(v=real_node.latest_version)
+                        node.version
+                        and node.latest_version
+                        and UnparsedVersion(v=node.version)
+                        > UnparsedVersion(v=node.latest_version)
                     ):
-                        yield node
+                        yield unique_id
                 elif selector == "old":
                     if (
-                        real_node.version
-                        and real_node.latest_version
-                        and UnparsedVersion(v=real_node.version)
-                        < UnparsedVersion(v=real_node.latest_version)
+                        node.version
+                        and node.latest_version
+                        and UnparsedVersion(v=node.version)
+                        < UnparsedVersion(v=node.latest_version)
                    ):
-                        yield node
+                        yield unique_id
                 elif selector == "none":
-                    if real_node.version is None:
-                        yield node
+                    if node.version is None:
+                        yield unique_id
             else:
                 raise DbtRuntimeError(
                     f'Invalid version type selector {selector}: expected one of: "latest", "prerelease", "old", or "none"'
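Editor's note: beyond the mechanical (node, real_node) → (unique_id, node) rename, GroupSelectorMethod now matches groups with fnmatch instead of string equality, so group: selectors accept wildcards. A toy reproduction of the new search loop over stand-in manifest data (the node configs below are illustrative, not real dbt objects):

from fnmatch import fnmatch

# Stand-in node configs keyed by unique_id; in dbt these come from the manifest.
nodes = {
    "model.jaffle_shop.orders": {"group": "finance"},
    "model.jaffle_shop.stg_payments": {"group": "finance_staging"},
    "model.jaffle_shop.customers": {"group": None},
}


def search(selector: str):
    # Mirrors the updated GroupSelectorMethod.search(): "finance*" now matches
    # via fnmatch where the old code required exact equality.
    for unique_id, config in nodes.items():
        node_group = config.get("group")
        if node_group and fnmatch(node_group, selector):
            yield unique_id


print(list(search("finance*")))  # matches both finance and finance_staging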
diff --git a/core/dbt/include/__init__.py b/core/dbt/include/__init__.py
new file mode 100644
index 00000000000..b36383a6102
--- /dev/null
+++ b/core/dbt/include/__init__.py
@@ -0,0 +1,3 @@
+from pkgutil import extend_path
+
+__path__ = extend_path(__path__, __name__)
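Editor's note: this three-line __init__.py turns dbt.include into a pkgutil-style namespace package, so every installed distribution that ships a dbt/include directory contributes to one merged __path__. A quick way to observe the effect; this assumes dbt-core is installed, and extra entries appear only if other packages extend the namespace:

# Requires dbt-core installed in the current environment.
import dbt.include

for entry in dbt.include.__path__:
    # e.g. dbt-core's starter project directory, plus any adapter-provided dirs
    print(entry)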
self.parsed_nodes(included_nodes): + if isinstance(node, ModelNode): if selector == "latest": - if real_node.is_latest_version: - yield node + if node.is_latest_version: + yield unique_id elif selector == "prerelease": if ( - real_node.version - and real_node.latest_version - and UnparsedVersion(v=real_node.version) - > UnparsedVersion(v=real_node.latest_version) + node.version + and node.latest_version + and UnparsedVersion(v=node.version) + > UnparsedVersion(v=node.latest_version) ): - yield node + yield unique_id elif selector == "old": if ( - real_node.version - and real_node.latest_version - and UnparsedVersion(v=real_node.version) - < UnparsedVersion(v=real_node.latest_version) + node.version + and node.latest_version + and UnparsedVersion(v=node.version) + < UnparsedVersion(v=node.latest_version) ): - yield node + yield unique_id elif selector == "none": - if real_node.version is None: - yield node + if node.version is None: + yield unique_id else: raise DbtRuntimeError( f'Invalid version type selector {selector}: expected one of: "latest", "prerelease", "old", or "none"' diff --git a/core/dbt/include/__init__.py b/core/dbt/include/__init__.py new file mode 100644 index 00000000000..b36383a6102 --- /dev/null +++ b/core/dbt/include/__init__.py @@ -0,0 +1,3 @@ +from pkgutil import extend_path + +__path__ = extend_path(__path__, __name__) diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 8dbfc4cb3a4..e345a74183c 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -12,11 +12,12 @@ generate_generate_name_macro_context, ) from dbt.adapters.factory import get_adapter # noqa: F401 +from dbt.artifacts.resources import Contract from dbt.clients.jinja import get_rendered from dbt.config import Project, RuntimeConfig from dbt.context.context_config import ContextConfig from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.nodes import Contract, BaseNode, ManifestNode +from dbt.contracts.graph.nodes import BaseNode, ManifestNode from dbt.contracts.graph.unparsed import Docs, UnparsedNode from dbt.exceptions import ( DbtInternalError, @@ -108,7 +109,7 @@ def __init__(self, config: RuntimeConfig, manifest: Manifest, component: str) -> self.component = component def __call__(self, parsed_node: Any, override: Optional[str]) -> None: - if parsed_node.package_name in self.package_updaters: + if getattr(parsed_node, "package_name", None) in self.package_updaters: new_value = self.package_updaters[parsed_node.package_name](override, parsed_node) else: new_value = self.default_updater(override, parsed_node) @@ -292,7 +293,7 @@ def update_parsed_node_relation_names( self._update_node_alias(parsed_node, config_dict.get("alias")) # Snapshot nodes use special "target_database" and "target_schema" fields for some reason - if parsed_node.resource_type == NodeType.Snapshot: + if getattr(parsed_node, "resource_type", None) == NodeType.Snapshot: if "target_database" in config_dict and config_dict["target_database"]: parsed_node.database = config_dict["target_database"] if "target_schema" in config_dict and config_dict["target_schema"]: @@ -451,7 +452,7 @@ def _update_node_relation_name(self, node: ManifestNode): # and TestNodes that store_failures. # TestNodes do not get a relation_name without store failures # because no schema is created. 
- if node.is_relational and not node.is_ephemeral_model: + if getattr(node, "is_relational", None) and not getattr(node, "is_ephemeral_model", None): adapter = get_adapter(self.root_project) relation_cls = adapter.Relation node.relation_name = str(relation_cls.create_from(self.root_project, node)) diff --git a/core/dbt/parser/fixtures.py b/core/dbt/parser/fixtures.py index f12cc6f272a..b3002725674 100644 --- a/core/dbt/parser/fixtures.py +++ b/core/dbt/parser/fixtures.py @@ -26,6 +26,11 @@ def parse_file(self, file_block: FileBlock): assert isinstance(file_block.file, FixtureSourceFile) unique_id = self.generate_unique_id(file_block.name) + if file_block.file.path.relative_path.endswith(".sql"): + rows = file_block.file.contents # type: ignore + else: # endswith('.csv') + rows = self.get_rows(file_block.file.contents) # type: ignore + fixture = UnitTestFileFixture( name=file_block.name, path=file_block.file.path.relative_path, @@ -33,7 +38,7 @@ def parse_file(self, file_block: FileBlock): package_name=self.project.project_name, unique_id=unique_id, resource_type=NodeType.Fixture, - rows=self.get_rows(file_block.file.contents), + rows=rows, ) self.manifest.add_fixture(file_block.file, fixture) diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 1c0071e3d08..843e2221df3 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -20,10 +20,11 @@ from itertools import chain import time -from dbt.context.manifest import generate_query_header_context +from dbt.context.query_header import generate_query_header_context from dbt.contracts.graph.semantic_manifest import SemanticManifest from dbt.contracts.state import PreviousState from dbt_common.events.base_types import EventLevel +from dbt_common.exceptions.base import DbtValidationError import dbt_common.utils import json import pprint @@ -45,6 +46,7 @@ MANIFEST_FILE_NAME, PARTIAL_PARSE_FILE_NAME, SEMANTIC_MANIFEST_FILE_NAME, + SECRET_ENV_PREFIX, ) from dbt_common.helper_types import PathSet from dbt_common.events.functions import fire_event, get_invocation_id, warn_or_error @@ -64,6 +66,8 @@ StateCheckVarsHash, DeprecatedModel, DeprecatedReference, + SpacesInModelNameDeprecation, + TotalModelNamesWithSpacesDeprecation, UpcomingReferenceDeprecation, ) from dbt.logger import DbtProcessState @@ -81,7 +85,7 @@ from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace from dbt.context.configured import generate_macro_context from dbt.context.providers import ParseProvider, generate_runtime_macro_context -from dbt.contracts.files import FileHash, ParseFileType, SchemaSourceFile +from dbt.contracts.files import ParseFileType, SchemaSourceFile from dbt.parser.read_files import ( ReadFilesFromFileSystem, load_source_file, @@ -109,13 +113,14 @@ ResultNode, ModelNode, ) -from dbt.artifacts.resources import NodeRelation, NodeVersion +from dbt.artifacts.resources import NodeRelation, NodeVersion, FileHash from dbt.artifacts.schemas.base import Writable from dbt.exceptions import ( TargetNotFoundError, AmbiguousAliasError, InvalidAccessTypeError, DbtRuntimeError, + scrub_secrets, ) from dbt.parser.base import Parser from dbt.parser.analysis import AnalysisParser @@ -273,6 +278,7 @@ def __init__( # have been enabled, but not happening because of some issue. 
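On the parser/fixtures.py hunk a little above: fixture parsing now branches on file extension, keeping SQL fixtures verbatim and parsing CSV fixtures into rows. A rough standalone sketch of that dispatch, using `csv.DictReader` as a stand-in for dbt's actual `get_rows` helper:

```python
import csv
from io import StringIO
from typing import Any, Dict, List, Union

def fixture_rows(relative_path: str, contents: str) -> Union[str, List[Dict[str, Any]]]:
    if relative_path.endswith(".sql"):
        # SQL fixtures are kept verbatim as the fixture body
        return contents
    # otherwise the file ends with .csv: parse it into a list of row dicts
    return [dict(row) for row in csv.DictReader(StringIO(contents))]

assert fixture_rows("orders.csv", "id,name\n1,a") == [{"id": "1", "name": "a"}]
assert fixture_rows("orders.sql", "select 1 as id") == "select 1 as id"
```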
self.partially_parsing = False self.partial_parser: Optional[PartialParsing] = None + self.skip_parsing = False # This is a saved manifest from a previous run that's used for partial parsing self.saved_manifest: Optional[Manifest] = self.read_manifest_for_partial_parse() @@ -377,71 +383,15 @@ def load(self) -> Manifest: self._perf_info.path_count = len(self.manifest.files) self._perf_info.read_files_elapsed = time.perf_counter() - start_read_files - skip_parsing = False - if self.saved_manifest is not None: - self.partial_parser = PartialParsing(self.saved_manifest, self.manifest.files) - skip_parsing = self.partial_parser.skip_parsing() - if skip_parsing: - # nothing changed, so we don't need to generate project_parser_files - self.manifest = self.saved_manifest - else: - # create child_map and parent_map - self.saved_manifest.build_parent_and_child_maps() - # create group_map - self.saved_manifest.build_group_map() - # files are different, we need to create a new set of - # project_parser_files. - try: - project_parser_files = self.partial_parser.get_parsing_files() - self.partially_parsing = True - self.manifest = self.saved_manifest - except Exception as exc: - # pp_files should still be the full set and manifest is new manifest, - # since get_parsing_files failed - fire_event( - UnableToPartialParse( - reason="an error occurred. Switching to full reparse." - ) - ) - - # Get traceback info - tb_info = traceback.format_exc() - formatted_lines = tb_info.splitlines() - (_, line, method) = formatted_lines[-3].split(", ") - exc_info = { - "traceback": tb_info, - "exception": formatted_lines[-1], - "code": formatted_lines[-2], - "location": f"{line} {method}", - } - - # get file info for local logs - parse_file_type: str = "" - file_id = self.partial_parser.processing_file - if file_id: - source_file = None - if file_id in self.saved_manifest.files: - source_file = self.saved_manifest.files[file_id] - elif file_id in self.manifest.files: - source_file = self.manifest.files[file_id] - if source_file: - parse_file_type = source_file.parse_file_type - fire_event(PartialParsingErrorProcessingFile(file=file_id)) - exc_info["parse_file_type"] = parse_file_type - fire_event(PartialParsingError(exc_info=exc_info)) - - # Send event - if dbt.tracking.active_user is not None: - exc_info["full_reparse_reason"] = ReparseReason.exception - dbt.tracking.track_partial_parser(exc_info) - - if os.environ.get("DBT_PP_TEST"): - raise exc + self.skip_parsing = False + project_parser_files = self.safe_update_project_parser_files_partially( + project_parser_files + ) if self.manifest._parsing_info is None: self.manifest._parsing_info = ParsingInfo() - if skip_parsing: + if self.skip_parsing: fire_event(PartialParsingSkipParsing()) else: # Load Macros and tests @@ -559,7 +509,7 @@ def load(self) -> Manifest: # Inject any available external nodes, reprocess refs if changes to the manifest were made. external_nodes_modified = False - if skip_parsing: + if self.skip_parsing: # If we didn't skip parsing, this will have already run because it must run # before process_refs. If we did skip parsing, then it's possible that only # external nodes have changed and we need to run this to capture that. 
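The relocated partial-parse error handler in the next hunk also swaps string-splitting of `traceback.format_exc()` (the deleted `formatted_lines[-3].split(", ")` above) for structured frame access via `traceback.extract_tb`, which keeps working even when the source line is unavailable. A sketch of the technique:

```python
import traceback

def last_frame_info(exc: BaseException) -> dict:
    # FrameSummary gives structured access to the failing frame
    frame = traceback.extract_tb(exc.__traceback__)[-1]
    return {
        "code": frame.line,  # None if the source file is unavailable
        "location": f"line {frame.lineno} in {frame.name}",
    }

def boom():
    raise ValueError("bad parse")

try:
    boom()
except ValueError as exc:
    info = last_frame_info(exc)
    assert info["location"].endswith("in boom")
```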
@@ -573,14 +523,77 @@ def load(self) -> Manifest: ) # parent and child maps will be rebuilt by write_manifest - if not skip_parsing or external_nodes_modified: + if not self.skip_parsing or external_nodes_modified: # write out the fully parsed manifest self.write_manifest_for_partial_parse() self.check_for_model_deprecations() + self.check_for_spaces_in_model_names() return self.manifest + def safe_update_project_parser_files_partially(self, project_parser_files: Dict) -> Dict: + if self.saved_manifest is None: + return project_parser_files + + self.partial_parser = PartialParsing(self.saved_manifest, self.manifest.files) # type: ignore[arg-type] + self.skip_parsing = self.partial_parser.skip_parsing() + if self.skip_parsing: + # nothing changed, so we don't need to generate project_parser_files + self.manifest = self.saved_manifest # type: ignore[assignment] + else: + # create child_map and parent_map + self.saved_manifest.build_parent_and_child_maps() # type: ignore[union-attr] + # create group_map + self.saved_manifest.build_group_map() # type: ignore[union-attr] + # files are different, we need to create a new set of + # project_parser_files. + try: + project_parser_files = self.partial_parser.get_parsing_files() + self.partially_parsing = True + self.manifest = self.saved_manifest # type: ignore[assignment] + except Exception as exc: + # pp_files should still be the full set and manifest is new manifest, + # since get_parsing_files failed + fire_event( + UnableToPartialParse(reason="an error occurred. Switching to full reparse.") + ) + + # Get traceback info + tb_info = traceback.format_exc() + # index last stack frame in traceback (i.e. lastest exception and its context) + tb_last_frame = traceback.extract_tb(exc.__traceback__)[-1] + exc_info = { + "traceback": tb_info, + "exception": tb_info.splitlines()[-1], + "code": tb_last_frame.line, # if the source is not available, it is None + "location": f"line {tb_last_frame.lineno} in {tb_last_frame.name}", + } + + # get file info for local logs + parse_file_type: str = "" + file_id = self.partial_parser.processing_file + if file_id: + source_file = None + if file_id in self.saved_manifest.files: + source_file = self.saved_manifest.files[file_id] + elif file_id in self.manifest.files: + source_file = self.manifest.files[file_id] + if source_file: + parse_file_type = source_file.parse_file_type + fire_event(PartialParsingErrorProcessingFile(file=file_id)) + exc_info["parse_file_type"] = parse_file_type + fire_event(PartialParsingError(exc_info=exc_info)) + # Send event + if dbt.tracking.active_user is not None: + exc_info["full_reparse_reason"] = ReparseReason.exception + dbt.tracking.track_partial_parser(exc_info) + + if os.environ.get("DBT_PP_TEST"): + raise exc + + return project_parser_files + def check_for_model_deprecations(self): for node in self.manifest.nodes.values(): if isinstance(node, ModelNode): @@ -617,6 +630,47 @@ def check_for_model_deprecations(self): ) ) + def check_for_spaces_in_model_names(self): + """Validates that model names do not contain spaces + + If `DEBUG` flag is `False`, logs only first bad model name + If `DEBUG` flag is `True`, logs every bad model name + If `ALLOW_SPACES_IN_MODEL_NAMES` is `False`, logs are `ERROR` level and an exception is raised if any names are bad + If `ALLOW_SPACES_IN_MODEL_NAMES` is `True`, logs are `WARN` level + """ + improper_model_names = 0 + level = ( + EventLevel.WARN + if self.root_project.args.ALLOW_SPACES_IN_MODEL_NAMES + else EventLevel.ERROR + ) + + for node in 
self.manifest.nodes.values(): + if isinstance(node, ModelNode) and " " in node.name: + if improper_model_names == 0 or self.root_project.args.DEBUG: + fire_event( + SpacesInModelNameDeprecation( + model_name=node.name, + model_version=version_to_str(node.version), + level=level.value, + ), + level=level, + ) + improper_model_names += 1 + + if improper_model_names > 0: + fire_event( + TotalModelNamesWithSpacesDeprecation( + count_invalid_names=improper_model_names, + show_debug_hint=(not self.root_project.args.DEBUG), + level=level.value, + ), + level=level, + ) + + if level == EventLevel.ERROR: + raise DbtValidationError("Model names cannot contain spaces") + def load_and_parse_macros(self, project_parser_files): for project in self.all_projects.values(): if project.project_name not in project_parser_files: @@ -831,13 +885,6 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]: ) valid = False reparse_reason = ReparseReason.proj_env_vars_changed - if ( - self.manifest.state_check.profile_env_vars_hash - != manifest.state_check.profile_env_vars_hash - ): - fire_event(UnableToPartialParse(reason="env vars used in profiles.yml have changed")) - valid = False - reparse_reason = ReparseReason.prof_env_vars_changed missing_keys = { k @@ -947,6 +994,9 @@ def build_manifest_state_check(self): # of env_vars, that would need to change. # We are using the parsed cli_vars instead of config.args.vars, in order # to sort them and avoid reparsing because of ordering issues. + secret_vars = [ + v for k, v in config.cli_vars.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip() + ] stringified_cli_vars = pprint.pformat(config.cli_vars) vars_hash = FileHash.from_contents( "\x00".join( @@ -961,7 +1011,7 @@ def build_manifest_state_check(self): fire_event( StateCheckVarsHash( checksum=vars_hash.checksum, - vars=stringified_cli_vars, + vars=scrub_secrets(stringified_cli_vars, secret_vars), profile=config.args.profile, target=config.args.target, version=__version__, @@ -976,18 +1026,18 @@ def build_manifest_state_check(self): env_var_str += f"{key}:{config.project_env_vars[key]}|" project_env_vars_hash = FileHash.from_contents(env_var_str) - # Create a FileHash of the env_vars in the project - key_list = list(config.profile_env_vars.keys()) - key_list.sort() - env_var_str = "" - for key in key_list: - env_var_str += f"{key}:{config.profile_env_vars[key]}|" - profile_env_vars_hash = FileHash.from_contents(env_var_str) + # Create a hash of the connection_info, which user has access to in + # jinja context. Thus attributes here may affect the parsing result. + # Ideally we should not expose all of the connection info to the jinja. - # Create a FileHash of the profile file - profile_path = os.path.join(get_flags().PROFILES_DIR, "profiles.yml") - with open(profile_path) as fp: - profile_hash = FileHash.from_contents(fp.read()) + # Renaming this variable mean that we will have to do a whole lot more + # change to make sure the previous manifest can be loaded correctly. + # This is an example of naming should be chosen based on the functionality + # rather than the implementation details. 
+ connection_keys = list(config.credentials.connection_info()) + # avoid reparsing because of ordering issues + connection_keys.sort() + profile_hash = FileHash.from_contents(pprint.pformat(connection_keys)) # Create a FileHashes for dbt_project for all dependencies project_hashes = {} @@ -999,7 +1049,6 @@ def build_manifest_state_check(self): # Create the ManifestStateCheck object state_check = ManifestStateCheck( project_env_vars_hash=project_env_vars_hash, - profile_env_vars_hash=profile_env_vars_hash, vars_hash=vars_hash, profile_hash=profile_hash, project_hashes=project_hashes, @@ -1621,7 +1670,7 @@ def _process_metric_node( assert ( metric.type_params.measure is not None ), f"{metric} should have a measure defined, but it does not." - metric.type_params.input_measures.append(metric.type_params.measure) + metric.add_input_measure(metric.type_params.measure) _process_metric_depends_on( manifest=manifest, current_project=current_project, metric=metric ) @@ -1630,8 +1679,8 @@ def _process_metric_node( assert ( conversion_type_params ), f"{metric.name} is a conversion metric and must have conversion_type_params defined." - metric.type_params.input_measures.append(conversion_type_params.base_measure) - metric.type_params.input_measures.append(conversion_type_params.conversion_measure) + metric.add_input_measure(conversion_type_params.base_measure) + metric.add_input_measure(conversion_type_params.conversion_measure) _process_metric_depends_on( manifest=manifest, current_project=current_project, metric=metric ) @@ -1667,7 +1716,8 @@ def _process_metric_node( _process_metric_node( manifest=manifest, current_project=current_project, metric=target_metric ) - metric.type_params.input_measures.extend(target_metric.type_params.input_measures) + for input_measure in target_metric.type_params.input_measures: + metric.add_input_measure(input_measure) metric.depends_on.add_node(target_metric.unique_id) else: assert_values_exhausted(metric.type) diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py index 32b4760f5a8..f9c558be6ba 100644 --- a/core/dbt/parser/partial.py +++ b/core/dbt/parser/partial.py @@ -211,6 +211,7 @@ def add_to_pp_files(self, source_file): if ( file_id not in self.project_parser_files[project_name][parser_name] and file_id not in self.file_diff["deleted"] + and file_id not in self.file_diff["deleted_schema_files"] ): self.project_parser_files[project_name][parser_name].append(file_id) @@ -467,7 +468,11 @@ def schedule_nodes_for_parsing(self, unique_ids): def _schedule_for_parsing(self, dict_key: str, element, name, delete: Callable) -> None: file_id = element.file_id - if file_id in self.saved_files and file_id not in self.file_diff["deleted"]: + if ( + file_id in self.saved_files + and file_id not in self.file_diff["deleted"] + and file_id not in self.file_diff["deleted_schema_files"] + ): schema_file = self.saved_files[file_id] elements = [] assert isinstance(schema_file, SchemaSourceFile) diff --git a/core/dbt/parser/read_files.py b/core/dbt/parser/read_files.py index b539ffd7920..314a2a0fdd1 100644 --- a/core/dbt/parser/read_files.py +++ b/core/dbt/parser/read_files.py @@ -81,7 +81,7 @@ def load_source_file( # the checksum to match the stored file contents file_contents = load_file_contents(path.absolute_path, strip=True) source_file.contents = file_contents - source_file.checksum = FileHash.from_contents(source_file.contents) + source_file.checksum = FileHash.from_contents(file_contents) if parse_file_type == ParseFileType.Schema and source_file.contents: dfy = 
yaml_from_file(source_file) @@ -145,11 +145,11 @@ def get_source_files(project, paths, extension, parse_file_type, saved_files, ig if parse_file_type == ParseFileType.Seed: fb_list.append(load_seed_source_file(fp, project.project_name)) # singular tests live in /tests but only generic tests live - # in /tests/generic so we want to skip those + # in /tests/generic and fixtures in /tests/fixture so we want to skip those else: if parse_file_type == ParseFileType.SingularTest: path = pathlib.Path(fp.relative_path) - if path.parts[0] == "generic": + if path.parts[0] in ["generic", "fixtures"]: continue file = load_source_file(fp, parse_file_type, project.project_name, saved_files) # only append the list if it has contents. added to fix #3568 @@ -431,7 +431,7 @@ def get_file_types_for_project(project): }, ParseFileType.Fixture: { "paths": project.fixture_paths, - "extensions": [".csv"], + "extensions": [".csv", ".sql"], "parser": "FixtureParser", }, } diff --git a/core/dbt/parser/schema_renderer.py b/core/dbt/parser/schema_renderer.py index ffa1c45c121..005f54f390e 100644 --- a/core/dbt/parser/schema_renderer.py +++ b/core/dbt/parser/schema_renderer.py @@ -74,12 +74,13 @@ def should_render_keypath(self, keypath: Keypath) -> bool: elif self._is_norender_key(keypath[0:]): return False elif self.key == "metrics": - # This ensures all key paths that end in 'filter' for a metric are skipped - if keypath[-1] == "filter": + # This ensures that metric filters are skipped + if keypath[-1] == "filter" or len(keypath) > 1 and keypath[-2] == "filter": return False elif self._is_norender_key(keypath[0:]): return False elif self.key == "saved_queries": + # This ensures that saved query filters are skipped if keypath[0] == "query_params" and len(keypath) > 1 and keypath[1] == "where": return False elif self._is_norender_key(keypath[0:]): diff --git a/core/dbt/parser/schema_yaml_readers.py b/core/dbt/parser/schema_yaml_readers.py index 8b1a780a0c6..b7c047d01dd 100644 --- a/core/dbt/parser/schema_yaml_readers.py +++ b/core/dbt/parser/schema_yaml_readers.py @@ -764,6 +764,22 @@ def parse_saved_query(self, unparsed: UnparsedSavedQuery) -> None: group=config.group, ) + for export in parsed.exports: + self.schema_parser.update_parsed_node_relation_names(export, export.config.to_dict()) # type: ignore + + if not export.config.schema_name: + export.config.schema_name = getattr(export, "schema", None) + delattr(export, "schema") + + export.config.database = getattr(export, "database", None) or export.config.database + delattr(export, "database") + + if not export.config.alias: + export.config.alias = getattr(export, "alias", None) + delattr(export, "alias") + + delattr(export, "relation_name") + # Only add thes saved query if it's enabled, otherwise we track it with other diabled nodes if parsed.config.enabled: self.manifest.add_saved_query(self.yaml.file, parsed) diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 9c67cfff665..838939b83fc 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -562,6 +562,14 @@ def validate_and_rename(data): validate_and_rename(column) def patch_node_config(self, node, patch): + if "access" in patch.config: + if AccessType.is_valid(patch.config["access"]): + patch.config["access"] = AccessType(patch.config["access"]) + else: + raise InvalidAccessTypeError( + unique_id=node.unique_id, + field_value=patch.config["access"], + ) # Get the ContextConfig that's used in calculating the config # This must match the model resource_type that's being 
patched config = ContextConfig( @@ -889,10 +897,43 @@ def patch_constraints(self, node, constraints): f"Type must be one of {[ct.value for ct in ConstraintType]}" ) - node.constraints = [ModelLevelConstraint.from_dict(c) for c in constraints] + self._validate_pk_constraints(node, constraints) + node.constraints = [ModelLevelConstraint.from_dict(c) for c in constraints] - def _validate_constraint_prerequisites(self, model_node: ModelNode): + def _validate_pk_constraints(self, model_node: ModelNode, constraints: List[Dict[str, Any]]): + errors = [] + # check for primary key constraints defined at the column level + pk_col: List[str] = [] + for col in model_node.columns.values(): + for constraint in col.constraints: + if constraint.type == ConstraintType.primary_key: + pk_col.append(col.name) + + if len(pk_col) > 1: + errors.append( + f"Found {len(pk_col)} columns ({pk_col}) with primary key constraints defined. " + "Primary keys for multiple columns must be defined as a model level constraint." + ) + + if len(pk_col) > 0 and ( + any( + constraint.type == ConstraintType.primary_key + for constraint in model_node.constraints + ) + or any(constraint["type"] == ConstraintType.primary_key for constraint in constraints) + ): + errors.append( + "Primary key constraints defined at the model level and the columns level. " + "Primary keys can be defined at the model level or the column level, not both." + ) + if errors: + raise ParsingError( + f"Primary key constraint error: ({model_node.original_file_path})\n" + + "\n".join(errors) + ) + + def _validate_constraint_prerequisites(self, model_node: ModelNode): column_warn_unsupported = [ constraint.warn_unsupported for column in model_node.columns.values() diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 596745d632d..1f57efe79ce 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -132,14 +132,15 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition: refs = ParserRef.from_target(table) unique_id = target.unique_id description = table.description or "" - meta = table.meta or {} source_description = source.description or "" loaded_at_field = table.loaded_at_field or source.loaded_at_field freshness = merge_freshness(source.freshness, table.freshness) quoting = source.quoting.merged(table.quoting) # path = block.path.original_file_path + table_meta = table.meta or {} source_meta = source.meta or {} + meta = {**source_meta, **table_meta} # make sure we don't do duplicate tags from source + table tags = sorted(set(itertools.chain(source.tags, table.tags))) @@ -201,7 +202,7 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition: # runtime. fire_event( FreshnessConfigProblem( - msg=f"The configured adapter does not support metadata-based freshness. A loaded_at_field must be specified for source '{source.name}'." + msg=f"The configured adapter does not support metadata-based freshness. A loaded_at_field must be specified for source '{source.name}.{table.name}'." 
) ) diff --git a/core/dbt/parser/unit_tests.py b/core/dbt/parser/unit_tests.py index eaad22308bb..added31cfd1 100644 --- a/core/dbt/parser/unit_tests.py +++ b/core/dbt/parser/unit_tests.py @@ -14,16 +14,16 @@ from dbt.context.providers import generate_parse_exposure, get_rendered from dbt.contracts.files import FileHash, SchemaSourceFile from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.model_config import UnitTestNodeConfig, ModelConfig +from dbt.contracts.graph.model_config import UnitTestNodeConfig +from dbt.artifacts.resources import ModelConfig, UnitTestConfig, UnitTestFormat from dbt.contracts.graph.nodes import ( ModelNode, UnitTestNode, UnitTestDefinition, DependsOn, - UnitTestConfig, UnitTestSourceDefinition, ) -from dbt.contracts.graph.unparsed import UnparsedUnitTest, UnitTestFormat +from dbt.contracts.graph.unparsed import UnparsedUnitTest from dbt.exceptions import ParsingError, InvalidUnitTestGivenInput from dbt.graph import UniqueId from dbt.node_types import NodeType @@ -68,6 +68,15 @@ def parse_unit_test_case(self, test_case: UnitTestDefinition): name = test_case.name if tested_node.is_versioned: name = name + f"_v{tested_node.version}" + expected_sql: Optional[str] = None + if test_case.expect.format == UnitTestFormat.SQL: + expected_rows: List[Dict[str, Any]] = [] + expected_sql = test_case.expect.rows # type: ignore + else: + assert isinstance(test_case.expect.rows, List) + expected_rows = deepcopy(test_case.expect.rows) + + assert isinstance(expected_rows, List) unit_test_node = UnitTestNode( name=name, resource_type=NodeType.Unit, @@ -76,8 +85,7 @@ def parse_unit_test_case(self, test_case: UnitTestDefinition): original_file_path=test_case.original_file_path, unique_id=test_case.unique_id, config=UnitTestNodeConfig( - materialized="unit", - expected_rows=deepcopy(test_case.expect.rows), # type:ignore + materialized="unit", expected_rows=expected_rows, expected_sql=expected_sql ), raw_code=tested_node.raw_code, database=tested_node.database, @@ -120,17 +128,24 @@ def parse_unit_test_case(self, test_case: UnitTestDefinition): original_input_node = self._get_original_input_node( given.input, tested_node, test_case.name ) + input_name = original_input_node.name common_fields = { "resource_type": NodeType.Model, - "original_file_path": original_input_node.original_file_path, + # root directory for input and output fixtures + "original_file_path": unit_test_node.original_file_path, "config": ModelConfig(materialized="ephemeral"), "database": original_input_node.database, "alias": original_input_node.identifier, "schema": original_input_node.schema, "fqn": original_input_node.fqn, "checksum": FileHash.empty(), - "raw_code": self._build_fixture_raw_code(given.rows, None), + "raw_code": self._build_fixture_raw_code(given.rows, None, given.format), + "package_name": original_input_node.package_name, + "unique_id": f"model.{original_input_node.package_name}.{input_name}", + "name": input_name, + "path": f"{input_name}.sql", + "defer_relation": original_input_node.defer_relation, } if original_input_node.resource_type in ( @@ -138,15 +153,7 @@ def parse_unit_test_case(self, test_case: UnitTestDefinition): NodeType.Seed, NodeType.Snapshot, ): - input_name = original_input_node.name - input_node = ModelNode( - **common_fields, - package_name=original_input_node.package_name, - unique_id=f"model.{original_input_node.package_name}.{input_name}", - name=input_name, - path=original_input_node.path or f"{input_name}.sql", - 
defer_relation=original_input_node.defer_relation, - ) + input_node = ModelNode(**common_fields) if ( original_input_node.resource_type == NodeType.Model and original_input_node.version @@ -157,13 +164,8 @@ def parse_unit_test_case(self, test_case: UnitTestDefinition): # We are reusing the database/schema/identifier from the original source, # but that shouldn't matter since this acts as an ephemeral model which just # wraps a CTE around the unit test node. - input_name = original_input_node.name input_node = UnitTestSourceDefinition( **common_fields, - package_name=original_input_node.package_name, - unique_id=f"model.{original_input_node.package_name}.{input_name}", - name=original_input_node.name, # must be the same name for source lookup to work - path=input_name + ".sql", # for writing out compiled_code source_name=original_input_node.source_name, # needed for source lookup ) # Sources need to go in the sources dictionary in order to create the right lookup @@ -179,12 +181,15 @@ def parse_unit_test_case(self, test_case: UnitTestDefinition): # Add unique ids of input_nodes to depends_on unit_test_node.depends_on.nodes.append(input_node.unique_id) - def _build_fixture_raw_code(self, rows, column_name_to_data_types) -> str: + def _build_fixture_raw_code(self, rows, column_name_to_data_types, fixture_format) -> str: # We're not currently using column_name_to_data_types, but leaving here for # possible future use. - return ("{{{{ get_fixture_sql({rows}, {column_name_to_data_types}) }}}}").format( - rows=rows, column_name_to_data_types=column_name_to_data_types - ) + if fixture_format == UnitTestFormat.SQL: + return rows + else: + return ("{{{{ get_fixture_sql({rows}, {column_name_to_data_types}) }}}}").format( + rows=rows, column_name_to_data_types=column_name_to_data_types + ) def _get_original_input_node(self, input: str, tested_node: ModelNode, test_case_name: str): """ @@ -359,13 +364,29 @@ def _validate_and_normalize_rows(self, ut_fixture, unit_test_definition, fixture ) if ut_fixture.fixture: - # find fixture file object and store unit_test_definition unique_id - fixture = self._get_fixture(ut_fixture.fixture, self.project.project_name) - fixture_source_file = self.manifest.files[fixture.file_id] - fixture_source_file.unit_tests.append(unit_test_definition.unique_id) - ut_fixture.rows = fixture.rows + ut_fixture.rows = self.get_fixture_file_rows( + ut_fixture.fixture, self.project.project_name, unit_test_definition.unique_id + ) else: ut_fixture.rows = self._convert_csv_to_list_of_dicts(ut_fixture.rows) + elif ut_fixture.format == UnitTestFormat.SQL: + if not (isinstance(ut_fixture.rows, str) or isinstance(ut_fixture.fixture, str)): + raise ParsingError( + f"Unit test {unit_test_definition.name} has {fixture_type} rows or fixtures " + f"which do not match format {ut_fixture.format}. Expected string." 
+ ) + + if ut_fixture.fixture: + ut_fixture.rows = self.get_fixture_file_rows( + ut_fixture.fixture, self.project.project_name, unit_test_definition.unique_id + ) + + def get_fixture_file_rows(self, fixture_name, project_name, utdef_unique_id): + # find fixture file object and store unit_test_definition unique_id + fixture = self._get_fixture(fixture_name, project_name) + fixture_source_file = self.manifest.files[fixture.file_id] + fixture_source_file.unit_tests.append(utdef_unique_id) + return fixture.rows def _convert_csv_to_list_of_dicts(self, csv_string: str) -> List[Dict[str, Any]]: dummy_file = StringIO(csv_string) @@ -421,7 +442,6 @@ def find_tested_model_node( def process_models_for_unit_test( manifest: Manifest, current_project: str, unit_test_def: UnitTestDefinition, models_to_versions ): - # If the unit tests doesn't have a depends_on.nodes[0] then we weren't able to resolve # the model, either because versions hadn't been processed yet, or it's not a valid model name if not unit_test_def.depends_on.nodes: @@ -439,6 +459,31 @@ def process_models_for_unit_test( target_model_id = unit_test_def.depends_on.nodes[0] target_model = manifest.nodes[target_model_id] assert isinstance(target_model, ModelNode) + + target_model_is_incremental = "macro.dbt.is_incremental" in target_model.depends_on.macros + unit_test_def_has_incremental_override = unit_test_def.overrides and isinstance( + unit_test_def.overrides.macros.get("is_incremental"), bool + ) + + if target_model_is_incremental and (not unit_test_def_has_incremental_override): + raise ParsingError( + f"Boolean override for 'is_incremental' must be provided for unit test '{unit_test_def.name}' in model '{target_model.name}'" + ) + + unit_test_def_incremental_override_true = ( + unit_test_def.overrides and unit_test_def.overrides.macros.get("is_incremental") + ) + unit_test_def_has_this_input = "this" in [i.input for i in unit_test_def.given] + + if ( + target_model_is_incremental + and unit_test_def_incremental_override_true + and (not unit_test_def_has_this_input) + ): + raise ParsingError( + f"Unit test '{unit_test_def.name}' for incremental model '{target_model.name}' must have a 'this' input" + ) + # unit_test_versions = unit_test_def.versions # We're setting up unit tests for versioned models, so if # the model isn't versioned, we don't need to do anything diff --git a/core/dbt/task/README.md b/core/dbt/task/README.md index 9de939e4cc4..2b32f5dbfa8 100644 --- a/core/dbt/task/README.md +++ b/core/dbt/task/README.md @@ -1 +1,43 @@ # Task README + +### Task Hierarchy +``` +BaseTask + ┣ CleanTask + ┣ ConfiguredTask + ┃ ┣ GraphRunnableTask + ┃ ┃ ┣ CloneTask + ┃ ┃ ┣ CompileTask + ┃ ┃ ┃ ┣ GenerateTask + ┃ ┃ ┃ ┣ RunTask + ┃ ┃ ┃ ┃ ┣ BuildTask + ┃ ┃ ┃ ┃ ┣ FreshnessTask + ┃ ┃ ┃ ┃ ┣ SeedTask + ┃ ┃ ┃ ┃ ┣ SnapshotTask + ┃ ┃ ┃ ┃ ┗ TestTask + ┃ ┃ ┃ ┗ ShowTask + ┃ ┃ ┗ ListTask + ┃ ┣ RetryTask + ┃ ┣ RunOperationTask + ┃ ┗ ServeTask + ┣ DebugTask + ┣ DepsTask + ┗ InitTask +``` + +### Runner Hierarchy +``` +BaseRunner + ┣ CloneRunner + ┣ CompileRunner + ┃ ┣ GenericSqlRunner + ┃ ┃ ┣ SqlCompileRunner + ┃ ┃ ┗ SqlExecuteRunner + ┃ ┣ ModelRunner + ┃ ┃ ┣ SeedRunner + ┃ ┃ ┗ SnapshotRunner + ┃ ┣ ShowRunner + ┃ ┗ TestRunner + ┣ FreshnessRunner + ┗ SavedQueryRunner +``` diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index 690ae36a71b..d4c206b023c 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -6,16 +6,18 @@ from contextlib import nullcontext from datetime import datetime from pathlib import Path -from typing import Any, Dict, 
List, Optional, Type, Union +from typing import Any, Dict, List, Optional, Set from dbt.compilation import Compiler import dbt_common.exceptions.base import dbt.exceptions from dbt import tracking -from dbt.config import RuntimeConfig, Project +from dbt.cli.flags import Flags +from dbt.config import RuntimeConfig from dbt.config.profile import read_profile from dbt.constants import DBT_PROJECT_FILE_NAME from dbt.contracts.graph.manifest import Manifest +from dbt.artifacts.resources.types import NodeType from dbt.artifacts.schemas.results import TimingInfo, collect_timing_info from dbt.artifacts.schemas.results import NodeStatus, RunningStatus, RunStatus from dbt.artifacts.schemas.run import RunResult @@ -48,12 +50,6 @@ from dbt.task.printer import print_run_result_error -class NoneConfig: - @classmethod - def from_args(cls, args): - return None - - def read_profiles(profiles_dir=None): """This is only used for some error handling""" if profiles_dir is None: @@ -70,15 +66,11 @@ def read_profiles(profiles_dir=None): class BaseTask(metaclass=ABCMeta): - ConfigType: Union[Type[NoneConfig], Type[Project]] = NoneConfig - - def __init__(self, args, config, project=None) -> None: + def __init__(self, args: Flags) -> None: self.args = args - self.config = config - self.project = config if isinstance(config, Project) else project @classmethod - def pre_init_hook(cls, args): + def pre_init_hook(cls, args: Flags): """A hook called before the task is initialized.""" if args.log_format == "json": log_manager.format_json() @@ -92,23 +84,6 @@ def set_log_format(cls): else: log_manager.format_text() - @classmethod - def from_args(cls, args, *pargs, **kwargs): - try: - # This is usually RuntimeConfig - config = cls.ConfigType.from_args(args) - except dbt.exceptions.DbtProjectError as exc: - fire_event(LogDbtProjectError(exc=str(exc))) - - tracking.track_invalid_invocation(args=args, result_type=exc.result_type) - raise dbt_common.exceptions.DbtRuntimeError("Could not run dbt") from exc - except dbt.exceptions.DbtProfileError as exc: - all_profile_names = list(read_profiles(get_flags().PROFILES_DIR).keys()) - fire_event(LogDbtProfileError(exc=str(exc), profiles=all_profile_names)) - tracking.track_invalid_invocation(args=args, result_type=exc.result_type) - raise dbt_common.exceptions.DbtRuntimeError("Could not run dbt") from exc - return cls(args, config, *pargs, **kwargs) - @abstractmethod def run(self): raise dbt_common.exceptions.base.NotImplementedError("Not Implemented") @@ -152,10 +127,11 @@ def move_to_nearest_project_dir(project_dir: Optional[str]) -> Path: # produce the same behavior. currently this class only contains manifest compilation, # holding a manifest, and moving direcories. 
class ConfiguredTask(BaseTask): - ConfigType = RuntimeConfig - - def __init__(self, args, config, manifest: Optional[Manifest] = None) -> None: - super().__init__(args, config) + def __init__( + self, args: Flags, config: RuntimeConfig, manifest: Optional[Manifest] = None + ) -> None: + super().__init__(args) + self.config = config self.graph: Optional[Graph] = None self.manifest = manifest self.compiler = Compiler(self.config) @@ -173,9 +149,22 @@ def compile_manifest(self): dbt.tracking.track_runnable_timing({"graph_compilation_elapsed": compile_time}) @classmethod - def from_args(cls, args, *pargs, **kwargs): + def from_args(cls, args: Flags, *pargs, **kwargs): move_to_nearest_project_dir(args.project_dir) - return super().from_args(args, *pargs, **kwargs) + try: + # This is usually RuntimeConfig + config = RuntimeConfig.from_args(args) + except dbt.exceptions.DbtProjectError as exc: + fire_event(LogDbtProjectError(exc=str(exc))) + + tracking.track_invalid_invocation(args=args, result_type=exc.result_type) + raise dbt_common.exceptions.DbtRuntimeError("Could not run dbt") from exc + except dbt.exceptions.DbtProfileError as exc: + all_profile_names = list(read_profiles(get_flags().PROFILES_DIR).keys()) + fire_event(LogDbtProfileError(exc=str(exc), profiles=all_profile_names)) + tracking.track_invalid_invocation(args=args, result_type=exc.result_type) + raise dbt_common.exceptions.DbtRuntimeError("Could not run dbt") from exc + return cls(args, config, *pargs, **kwargs) class ExecutionContext: @@ -206,6 +195,9 @@ def __init__(self, config, adapter, node, node_index, num_nodes) -> None: def compile(self, manifest: Manifest) -> Any: pass + def _node_build_path(self) -> Optional[str]: + return self.node.build_path if hasattr(self.node, "build_path") else None + def get_result_status(self, result) -> Dict[str, str]: if result.status == NodeStatus.Error: return {"node_status": "error", "node_error": str(result.message)} @@ -338,7 +330,7 @@ def _handle_catchable_exception(self, e, ctx): def _handle_internal_exception(self, e, ctx): fire_event( InternalErrorOnRun( - build_path=self.node.build_path, exc=str(e), node_info=get_node_info() + build_path=self._node_build_path(), exc=str(e), node_info=get_node_info() ) ) return str(e) @@ -346,7 +338,7 @@ def _handle_internal_exception(self, e, ctx): def _handle_generic_exception(self, e, ctx): fire_event( GenericExceptionOnRun( - build_path=self.node.build_path, + build_path=self._node_build_path(), unique_id=self.node.unique_id, exc=str(e), node_info=get_node_info(), @@ -480,3 +472,30 @@ def on_skip(self): def do_skip(self, cause=None): self.skip = True self.skip_cause = cause + + +def resource_types_from_args( + args: Flags, all_resource_values: Set[NodeType], default_resource_values: Set[NodeType] +) -> Set[NodeType]: + + if not args.resource_types: + resource_types = default_resource_values + else: + # This is a list of strings, not NodeTypes + arg_resource_types = set(args.resource_types) + + if "all" in arg_resource_types: + arg_resource_types.remove("all") + arg_resource_types.update(all_resource_values) + if "default" in arg_resource_types: + arg_resource_types.remove("default") + arg_resource_types.update(default_resource_values) + # Convert to a set of NodeTypes now that the non-NodeType strings are gone + resource_types = set([NodeType(rt) for rt in list(arg_resource_types)]) + + if args.exclude_resource_types: + # Convert from a list of strings to a set of NodeTypes + exclude_resource_types = set([NodeType(rt) for rt in 
args.exclude_resource_types]) + resource_types = resource_types - exclude_resource_types + + return resource_types diff --git a/core/dbt/task/build.py b/core/dbt/task/build.py index e3ac5a6baa8..57f11c71bd5 100644 --- a/core/dbt/task/build.py +++ b/core/dbt/task/build.py @@ -8,50 +8,38 @@ from dbt.artifacts.schemas.results import NodeStatus, RunStatus from dbt.artifacts.schemas.run import RunResult +from dbt.cli.flags import Flags +from dbt.config.runtime import RuntimeConfig +from dbt.contracts.graph.manifest import Manifest from dbt.graph import ResourceTypeSelector, GraphQueue, Graph from dbt.node_types import NodeType from dbt.task.test import TestSelector -from dbt.task.base import BaseRunner +from dbt.task.base import BaseRunner, resource_types_from_args from dbt_common.events.functions import fire_event -from dbt.events.types import LogStartLine, LogModelResult -from dbt_common.events.base_types import EventLevel +from dbt.events.types import LogNodeNoOpResult from dbt.exceptions import DbtInternalError class SavedQueryRunner(BaseRunner): - # A no-op Runner for Saved Queries + # Stub. No-op Runner for Saved Queries, which require MetricFlow for execution. @property def description(self): - return "Saved Query {}".format(self.node.unique_id) + return f"saved query {self.node.name}" def before_execute(self): - fire_event( - LogStartLine( - description=self.description, - index=self.node_index, - total=self.num_nodes, - node_info=self.node.node_info, - ) - ) + pass def compile(self, manifest): return self.node def after_execute(self, result): - if result.status == NodeStatus.Error: - level = EventLevel.ERROR - else: - level = EventLevel.INFO fire_event( - LogModelResult( + LogNodeNoOpResult( description=self.description, - status=result.status, index=self.node_index, total=self.num_nodes, - execution_time=result.execution_time, node_info=self.node.node_info, - ), - level=level, + ) ) def execute(self, compiled_node, manifest): @@ -61,8 +49,8 @@ def execute(self, compiled_node, manifest): status=RunStatus.Success, timing=[], thread_id=threading.current_thread().name, - execution_time=0.1, - message="done", + execution_time=0, + message="NO-OP", adapter_response={}, failures=0, agate_table=None, @@ -85,27 +73,19 @@ class BuildTask(RunTask): NodeType.Seed: seed_runner, NodeType.Test: test_runner, NodeType.Unit: test_runner, + NodeType.SavedQuery: SavedQueryRunner, } ALL_RESOURCE_VALUES = frozenset({x for x in RUNNER_MAP.keys()}) - def __init__(self, args, config, manifest) -> None: + def __init__(self, args: Flags, config: RuntimeConfig, manifest: Manifest) -> None: super().__init__(args, config, manifest) self.selected_unit_tests: Set = set() self.model_to_unit_test_map: Dict[str, List] = {} def resource_types(self, no_unit_tests=False): - if self.args.include_saved_query: - self.RUNNER_MAP[NodeType.SavedQuery] = SavedQueryRunner - self.ALL_RESOURCE_VALUES = self.ALL_RESOURCE_VALUES.union({NodeType.SavedQuery}) - - if not self.args.resource_types: - resource_types = list(self.ALL_RESOURCE_VALUES) - else: - resource_types = set(self.args.resource_types) - - if "all" in resource_types: - resource_types.remove("all") - resource_types.update(self.ALL_RESOURCE_VALUES) + resource_types = resource_types_from_args( + self.args, set(self.ALL_RESOURCE_VALUES), set(self.ALL_RESOURCE_VALUES) + ) # First we get selected_nodes including unit tests, then without, # and do a set difference. 
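A note on `resource_types_from_args`, added to task/base.py above and adopted by BuildTask here (CloneTask follows): the sentinel strings `"all"` and `"default"` expand to the corresponding NodeType sets before `--exclude-resource-types` is subtracted. A standalone re-implementation for illustration only, with a toy NodeType:

```python
from enum import Enum
from typing import List, Optional, Set

class NodeType(str, Enum):  # tiny stand-in for dbt's NodeType
    Model = "model"
    Seed = "seed"
    Test = "test"

def resolve_resource_types(
    requested: Optional[List[str]],
    excluded: Optional[List[str]],
    all_values: Set[NodeType],
    default_values: Set[NodeType],
) -> Set[NodeType]:
    if not requested:
        types = set(default_values)
    else:
        raw = set(requested)
        if "all" in raw:
            raw.remove("all")
            raw.update(t.value for t in all_values)
        if "default" in raw:
            raw.remove("default")
            raw.update(t.value for t in default_values)
        # convert to NodeTypes once the sentinel strings are gone
        types = {NodeType(t) for t in raw}
    if excluded:
        types -= {NodeType(t) for t in excluded}
    return types

ALL = {NodeType.Model, NodeType.Seed, NodeType.Test}
assert resolve_resource_types(["all"], ["test"], ALL, ALL) == {NodeType.Model, NodeType.Seed}
assert resolve_resource_types(None, None, ALL, {NodeType.Model}) == {NodeType.Model}
```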
diff --git a/core/dbt/task/clean.py b/core/dbt/task/clean.py index efae26bf6e1..c4e98f5db2b 100644 --- a/core/dbt/task/clean.py +++ b/core/dbt/task/clean.py @@ -9,6 +9,8 @@ FinishedCleanPaths, ) from dbt_common.exceptions import DbtRuntimeError +from dbt.cli.flags import Flags +from dbt.config.project import Project from dbt.task.base import ( BaseTask, move_to_nearest_project_dir, @@ -16,6 +18,11 @@ class CleanTask(BaseTask): + def __init__(self, args: Flags, config: Project): + super().__init__(args) + self.config = config + self.project = config + def run(self): """ This function takes all the paths in the target file diff --git a/core/dbt/task/clone.py b/core/dbt/task/clone.py index 720e992cd83..e245efc19cf 100644 --- a/core/dbt/task/clone.py +++ b/core/dbt/task/clone.py @@ -8,8 +8,8 @@ from dbt_common.dataclass_schema import dbtClassMixin from dbt_common.exceptions import DbtInternalError, CompilationError from dbt.graph import ResourceTypeSelector -from dbt.node_types import NodeType, REFABLE_NODE_TYPES -from dbt.task.base import BaseRunner +from dbt.node_types import REFABLE_NODE_TYPES +from dbt.task.base import BaseRunner, resource_types_from_args from dbt.task.run import _validate_materialization_relations_dict from dbt.task.runnable import GraphRunnableTask @@ -124,18 +124,13 @@ def before_run(self, adapter, selected_uids: AbstractSet[str]): @property def resource_types(self): - if not self.args.resource_types: - return REFABLE_NODE_TYPES - - values = set(self.args.resource_types) - - if "all" in values: - values.remove("all") - values.update(REFABLE_NODE_TYPES) - - values = [NodeType(val) for val in values if val in REFABLE_NODE_TYPES] + resource_types = resource_types_from_args( + self.args, set(REFABLE_NODE_TYPES), set(REFABLE_NODE_TYPES) + ) - return list(values) + # filter out any non-refable node types + resource_types = [rt for rt in resource_types if rt in REFABLE_NODE_TYPES] + return list(resource_types) def get_node_selector(self) -> ResourceTypeSelector: resource_types = self.resource_types diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py index ea0f636bd6c..b388e4336ba 100644 --- a/core/dbt/task/debug.py +++ b/core/dbt/task/debug.py @@ -19,6 +19,7 @@ import dbt.exceptions import dbt_common.exceptions from dbt.adapters.factory import get_adapter, register_adapter +from dbt.cli.flags import Flags from dbt.config import PartialProject, Project, Profile from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.artifacts.schemas.results import RunStatus @@ -77,8 +78,8 @@ class DebugRunStatus(Flag): class DebugTask(BaseTask): - def __init__(self, args, config) -> None: - super().__init__(args, config) + def __init__(self, args: Flags) -> None: + super().__init__(args) self.profiles_dir = args.PROFILES_DIR self.profile_path = os.path.join(self.profiles_dir, "profiles.yml") try: @@ -97,13 +98,6 @@ def __init__(self, args, config) -> None: self.profile: Optional[Profile] = None self.raw_profile_data: Optional[Dict[str, Any]] = None self.profile_name: Optional[str] = None - self.project: Optional[Project] = None - - @property - def project_profile(self): - if self.project is None: - return None - return self.project.profile_name def run(self) -> bool: # WARN: this is a legacy workflow that is not compatible with other runtime flags diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py index 72787cd6565..0f8e45f073f 100644 --- a/core/dbt/task/deps.py +++ b/core/dbt/task/deps.py @@ -13,7 +13,7 @@ from dbt.deps.base import downloads_directory 
from dbt.deps.resolver import resolve_lock_packages, resolve_packages from dbt.deps.registry import RegistryPinnedPackage -from dbt.contracts.project import Package +from dbt.contracts.project import PackageSpec from dbt_common.events.functions import fire_event @@ -44,7 +44,7 @@ def increase_indent(self, flow=False, indentless=False): return super(dbtPackageDumper, self).increase_indent(flow, False) -def _create_sha1_hash(packages: List[Package]) -> str: +def _create_sha1_hash(packages: List[PackageSpec]) -> str: """Create a SHA1 hash of the packages list, this is used to determine if the packages for current execution matches the previous lock. @@ -94,14 +94,15 @@ def _create_packages_yml_entry(package: str, version: Optional[str], source: str class DepsTask(BaseTask): def __init__(self, args: Any, project: Project) -> None: + super().__init__(args=args) # N.B. This is a temporary fix for a bug when using relative paths via # --project-dir with deps. A larger overhaul of our path handling methods # is needed to fix this the "right" way. # See GH-7615 project.project_root = str(Path(project.project_root).resolve()) + self.project = project move_to_nearest_project_dir(project.project_root) - super().__init__(args=args, config=None, project=project) self.cli_vars = args.vars def track_package_install( @@ -201,7 +202,7 @@ def lock(self) -> None: ) with open(lock_filepath, "w") as lock_obj: - yaml.safe_dump(packages_installed, lock_obj) + yaml.dump(packages_installed, lock_obj, Dumper=dbtPackageDumper) fire_event(DepsLockUpdating(lock_filepath=lock_filepath)) diff --git a/core/dbt/task/docs/generate.py b/core/dbt/task/docs/generate.py index a8beb145918..800f997268d 100644 --- a/core/dbt/task/docs/generate.py +++ b/core/dbt/task/docs/generate.py @@ -283,7 +283,8 @@ def run(self) -> CatalogArtifact: node for node in self.manifest.nodes.values() if (node.is_relational and not node.is_ephemeral_model) - ] + ], + self.manifest.sources.values(), ) used_schemas = self.manifest.get_used_schemas() catalog_table, exceptions = adapter.get_filtered_catalog( diff --git a/core/dbt/task/docs/index.html b/core/dbt/task/docs/index.html index 3da725b2e59..6ccb6e73107 100644 --- a/core/dbt/task/docs/index.html +++ b/core/dbt/task/docs/index.html @@ -1,154 +1,30 @@
[core/dbt/task/docs/index.html: regenerated single-page docs bundle; the remaining hunks are rebuilt page markup plus minified vendored JavaScript (cytoscape.js among others) and are omitted here.]
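One last note, on the parser/sources.py hunk earlier in this diff: source-level and table-level `meta` are now merged with dict unpacking, so table-level keys win on conflict. A one-line demonstration:

```python
source_meta = {"owner": "data-eng", "pii": False}
table_meta = {"pii": True}

# later keys win: table-level meta overrides source-level meta
meta = {**source_meta, **table_meta}
assert meta == {"owner": "data-eng", "pii": True}
```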
0;a=s={},s.renderedBoundingBox=function(e){var t=this.boundingBox(e),n=this.cy(),r=n.zoom(),i=n.pan(),o=t.x1*r+i.x,a=t.x2*r+i.x,s=t.y1*r+i.y,l=t.y2*r+i.y;return{x1:o,x2:a,y1:s,y2:l,w:a-o,h:l-s}},s.dirtyCompoundBoundsCache=function(){var e=this.cy();return e.styleEnabled()&&e.hasCompoundNodes()?(this.forEachUp((function(e){e._private.compoundBoundsClean=!1,e.isParent()&&e.emit("bounds")})),this):this},s.updateCompoundBounds=function(){var e=this.cy();if(!e.styleEnabled()||!e.hasCompoundNodes())return this;if(e.batching())return this;var t=[];function n(e){if(e.isParent()){var n=e._private,r=e.children(),i="include"===e.pstyle("compound-sizing-wrt-labels").value,o={width:{val:e.pstyle("min-width").pfValue,left:e.pstyle("min-width-bias-left"),right:e.pstyle("min-width-bias-right")},height:{val:e.pstyle("min-height").pfValue,top:e.pstyle("min-height-bias-top"),bottom:e.pstyle("min-height-bias-bottom")}},a=r.boundingBox({includeLabels:i,includeOverlays:!1,useCache:!1}),s=n.position;0!==a.w&&0!==a.h||((a={w:e.pstyle("width").pfValue,h:e.pstyle("height").pfValue}).x1=s.x-a.w/2,a.x2=s.x+a.w/2,a.y1=s.y-a.h/2,a.y2=s.y+a.h/2);var l=o.width.left.value;"px"===o.width.left.units&&o.width.val>0&&(l=100*l/o.width.val);var c=o.width.right.value;"px"===o.width.right.units&&o.width.val>0&&(c=100*c/o.width.val);var u=o.height.top.value;"px"===o.height.top.units&&o.height.val>0&&(u=100*u/o.height.val);var d=o.height.bottom.value;"px"===o.height.bottom.units&&o.height.val>0&&(d=100*d/o.height.val);var p=b(o.width.val-a.w,l,c),f=p.biasDiff,h=p.biasComplementDiff,g=b(o.height.val-a.h,u,d),m=g.biasDiff,v=g.biasComplementDiff;n.autoPadding=function(e,t,n,r){if("%"!==n.units)return"px"===n.units?n.pfValue:0;switch(r){case"width":return e>0?n.pfValue*e:0;case"height":return t>0?n.pfValue*t:0;case"average":return e>0&&t>0?n.pfValue*(e+t)/2:0;case"min":return e>0&&t>0?e>t?n.pfValue*t:n.pfValue*e:0;case"max":return e>0&&t>0?e>t?n.pfValue*e:n.pfValue*t:0;default:return 0}}(a.w,a.h,e.pstyle("padding"),e.pstyle("padding-relative-to").value),n.autoWidth=Math.max(a.w,o.width.val),s.x=(-f+a.x1+a.x2+h)/2,n.autoHeight=Math.max(a.h,o.height.val),s.y=(-m+a.y1+a.y2+v)/2,t.push(e)}function b(e,t,n){var r=0,i=0,o=t+n;return e>0&&o>0&&(r=t/o*e,i=n/o*e),{biasDiff:r,biasComplementDiff:i}}}for(var r=0;re.x2?r:e.x2,e.y1=ne.y2?i:e.y2)},u=function(e,t,n){return i.getPrefixedProperty(e,t,n)},d=function(e,t,n){if(!t.cy().headless()){var r=t._private.rstyle,i=r.arrowWidth/2,o=void 0,a=void 0;"none"!==t.pstyle(n+"-arrow-shape").value&&("source"===n?(o=r.srcX,a=r.srcY):"target"===n?(o=r.tgtX,a=r.tgtY):(o=r.midX,a=r.midY),c(e,o-i,a-i,o+i,a+i))}},p=function(e,t,n){if(!t.cy().headless()){var r=void 0;r=n?n+"-":"";var i=t._private,o=i.rstyle;if(t.pstyle(r+"label").strValue){var a=t.pstyle("text-halign"),s=t.pstyle("text-valign"),l=u(o,"labelWidth",n),d=u(o,"labelHeight",n),p=u(o,"labelX",n),f=u(o,"labelY",n),h=t.pstyle(r+"text-margin-x").pfValue,g=t.pstyle(r+"text-margin-y").pfValue,m=t.isEdge(),v=t.pstyle(r+"text-rotation"),b=t.pstyle("text-outline-width").pfValue,y=t.pstyle("text-border-width").pfValue/2,x=t.pstyle("text-background-padding").pfValue,w=d+2*x,k=l+2*x,A=k/2,E=w/2,S=void 0,$=void 0,C=void 0,_=void 0;if(m)S=p-A,$=p+A,C=f-E,_=f+E;else{switch(a.value){case"left":S=p-k,$=p;break;case"center":S=p-A,$=p+A;break;case"right":S=p,$=p+k}switch(s.value){case"top":C=f-w,_=f;break;case"center":C=f-E,_=f+E;break;case"bottom":C=f,_=f+w}}var O=m&&"autorotate"===v.strValue,T=null!=v.pfValue&&0!==v.pfValue;if(O||T){var 
j=O?u(i.rstyle,"labelAngle",n):v.pfValue,P=Math.cos(j),D=Math.sin(j),R=function(e,t){return{x:(e-=p)*P-(t-=f)*D+p,y:e*D+t*P+f}},I=R(S,C),N=R(S,_),M=R($,C),z=R($,_);S=Math.min(I.x,N.x,M.x,z.x),$=Math.max(I.x,N.x,M.x,z.x),C=Math.min(I.y,N.y,M.y,z.y),_=Math.max(I.y,N.y,M.y,z.y)}S+=h-Math.max(b,y),$+=h+Math.max(b,y),C+=g-Math.max(b,y),_+=g+Math.max(b,y),c(e,S,C,$,_)}return e}},f=function(e){return e?"t":"f"},h=function(e){var t="";return t+=f(e.incudeNodes),t+=f(e.includeEdges),t+=f(e.includeLabels),t+=f(e.includeOverlays)},g=function(e,t){var n=e._private,r=void 0,i=e.cy().headless(),a=t===m?v:h(t);return t.useCache&&!i&&n.bbCache&&n.bbCache[a]?r=n.bbCache[a]:(r=function(e,t){var n=e._private.cy,r=n.styleEnabled(),i=n.headless(),a={x1:1/0,y1:1/0,x2:-1/0,y2:-1/0},s=e._private,u=r?e.pstyle("display").value:"element",f=e.isNode(),h=e.isEdge(),g=void 0,m=void 0,v=void 0,b=void 0,y=void 0,x=void 0,w="none"!==u;if(w){var k=0;r&&t.includeOverlays&&0!==e.pstyle("overlay-opacity").value&&(k=e.pstyle("overlay-padding").value);var A=0;if(r&&(A=e.pstyle("width").pfValue/2),f&&t.includeNodes){var E=e.position();y=E.x,x=E.y;var S=e.outerWidth()/2,$=e.outerHeight()/2;c(a,g=y-S-k,v=x-$-k,m=y+S+k,b=x+$+k)}else if(h&&t.includeEdges){var C=s.rstyle||{};if(r&&!i&&(g=Math.min(C.srcX,C.midX,C.tgtX),m=Math.max(C.srcX,C.midX,C.tgtX),v=Math.min(C.srcY,C.midY,C.tgtY),b=Math.max(C.srcY,C.midY,C.tgtY),c(a,g-=A,v-=A,m+=A,b+=A)),r&&!i&&"haystack"===e.pstyle("curve-style").strValue){var _=C.haystackPts||[];if(g=_[0].x,v=_[0].y,g>(m=_[1].x)){var O=g;g=m,m=O}if(v>(b=_[1].y)){var T=v;v=b,b=T}c(a,g-A,v-A,m+A,b+A)}else{for(var j=C.bezierPts||C.linePts||[],P=0;P(m=I.x)){var N=g;g=m,m=N}if((v=R.y)>(b=I.y)){var M=v;v=b,b=M}c(a,g-=A,v-=A,m+=A,b+=A)}}}if(r&&t.includeEdges&&h&&(d(a,e,"mid-source"),d(a,e,"mid-target"),d(a,e,"source"),d(a,e,"target")),r&&"yes"===e.pstyle("ghost").value){var z=e.pstyle("ghost-offset-x").pfValue,L=e.pstyle("ghost-offset-y").pfValue;c(a,a.x1+z,a.y1+L,a.x2+z,a.y2+L)}r&&(g=a.x1,m=a.x2,v=a.y1,b=a.y2,c(a,g-k,v-k,m+k,b+k)),r&&t.includeLabels&&(p(a,e,null),h&&(p(a,e,"source"),p(a,e,"target")))}return a.x1=l(a.x1),a.y1=l(a.y1),a.x2=l(a.x2),a.y2=l(a.y2),a.w=l(a.x2-a.x1),a.h=l(a.y2-a.y1),a.w>0&&a.h>0&&w&&o.expandBoundingBox(a,1),a}(e,t),i||(n.bbCache=n.bbCache||{},n.bbCache[a]=r)),r},m={includeNodes:!0,includeEdges:!0,includeLabels:!0,includeOverlays:!0,useCache:!0},v=h(m);function b(e){return{includeNodes:i.default(e.includeNodes,m.includeNodes),includeEdges:i.default(e.includeEdges,m.includeEdges),includeLabels:i.default(e.includeLabels,m.includeLabels),includeOverlays:i.default(e.includeOverlays,m.includeOverlays),useCache:i.default(e.useCache,m.useCache)}}s.boundingBox=function(e){if(1===this.length&&this[0]._private.bbCache&&(void 0===e||void 0===e.useCache||!0===e.useCache))return e=void 0===e?m:b(e),g(this[0],e);var t={x1:1/0,y1:1/0,x2:-1/0,y2:-1/0},n=b(e=e||i.staticEmptyObject()),r=this.cy().styleEnabled();r&&this.recalculateRenderedStyle(n.useCache),this.updateCompoundBounds();for(var o,a,s={},u=0;u1&&!a){var s=this.length-1,l=this[s],c=l._private.data.id;this[s]=void 0,this[o]=l,r.set(c,{ele:l,index:o})}return this.length--,this},unmerge:function(e){var t=this._private.cy;if(!e)return this;if(e&&r.string(e)){var n=e;e=t.mutableElements().filter(n)}for(var i=0;in&&(n=a,r=o)}return{value:n,ele:r}},min:function(e,t){for(var n=1/0,r=void 0,i=0;i=0&&i0&&t.push(u[0]),t.push(s[0])}return this.spawn(t,{unique:!0}).filter(e)}),"neighborhood"),closedNeighborhood:function(e){return 
this.neighborhood().add(this).filter(e)},openNeighborhood:function(e){return this.neighborhood(e)}}),o.neighbourhood=o.neighborhood,o.closedNeighbourhood=o.closedNeighborhood,o.openNeighbourhood=o.openNeighborhood,r.extend(o,{source:a((function(e){var t=this[0],n=void 0;return t&&(n=t._private.source||t.cy().collection()),n&&e?n.filter(e):n}),"source"),target:a((function(e){var t=this[0],n=void 0;return t&&(n=t._private.target||t.cy().collection()),n&&e?n.filter(e):n}),"target"),sources:u({attr:"source"}),targets:u({attr:"target"})}),r.extend(o,{edgesWith:a(d(),"edgesWith"),edgesTo:a(d({thisIsSrc:!0}),"edgesTo")}),r.extend(o,{connectedEdges:a((function(e){for(var t=[],n=0;n0);return i.map((function(e){var t=e.connectedEdges().stdFilter((function(t){return e.anySame(t.source())&&e.anySame(t.target())}));return e.union(t)}))}}),e.exports=o},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(7),a=n(14),s={add:function(e){var t=void 0,n=this;if(r.elementOrCollection(e)){var s=e;if(s._private.cy===n)t=s.restore();else{for(var l=[],c=0;c=0;t--)(0,e[t])();e.splice(0,e.length)},f=s.length-1;f>=0;f--){var h=s[f],g=h._private;g.stopped?(s.splice(f,1),g.hooked=!1,g.playing=!1,g.started=!1,p(g.frames)):(g.playing||g.applying)&&(g.playing&&g.applying&&(g.applying=!1),g.started||i(t,h,e,n),r(t,h,e,n),g.applying&&(g.applying=!1),p(g.frames),h.completed()&&(s.splice(f,1),g.hooked=!1,g.playing=!1,g.started=!1,p(g.completes)),c=!0)}return n||0!==s.length||0!==l.length||o.push(t),c}for(var s=!1,l=0;l0?(n.dirtyCompoundBoundsCache(),t.notify({type:"draw",eles:n})):t.notify({type:"draw"})),n.unmerge(o),t.emit("step")}},function(e,t,n){"use strict";var r=n(73),i=n(76),o=n(0);function a(e,t){return!!(null!=e&&null!=t&&(o.number(e)&&o.number(t)||e&&t))}e.exports=function(e,t,n,s){var l=!s,c=e._private,u=t._private,d=u.easing,p=u.startTime,f=(s?e:e.cy()).style();if(!u.easingImpl)if(null==d)u.easingImpl=r.linear;else{var h=void 0;h=o.string(d)?f.parse("transition-timing-function",d).value:d;var g=void 0,m=void 0;o.string(h)?(g=h,m=[]):(g=h[1],m=h.slice(2).map((function(e){return+e}))),m.length>0?("spring"===g&&m.push(u.duration),u.easingImpl=r[g].apply(null,m)):u.easingImpl=r[g]}var v=u.easingImpl,b=void 0;if(b=0===u.duration?1:(n-p)/u.duration,u.applying&&(b=u.progress),b<0?b=0:b>1&&(b=1),null==u.delay){var y=u.startPosition,x=u.position;if(x&&l&&!e.locked()){var w=e.position();a(y.x,x.x)&&(w.x=i(y.x,x.x,b,v)),a(y.y,x.y)&&(w.y=i(y.y,x.y,b,v)),e.emit("position")}var k=u.startPan,A=u.pan,E=c.pan,S=null!=A&&s;S&&(a(k.x,A.x)&&(E.x=i(k.x,A.x,b,v)),a(k.y,A.y)&&(E.y=i(k.y,A.y,b,v)),e.emit("pan"));var $=u.startZoom,C=u.zoom,_=null!=C&&s;_&&(a($,C)&&(c.zoom=i($,C,b,v)),e.emit("zoom")),(S||_)&&e.emit("viewport");var O=u.style;if(O&&O.length>0&&l){for(var T=0;T0?i=l:r=l}while(Math.abs(o)>a&&++c=o?b(t,s):0===u?s:x(t,r,r+c)}var k=!1;function A(){k=!0,e===t&&n===r||y()}var E=function(i){return k||A(),e===t&&n===r?i:0===i?0:1===i?1:m(w(i),t,r)};E.getControlPoints=function(){return[{x:e,y:t},{x:n,y:r}]};var S="generateBezier("+[e,t,n,r]+")";return E.toString=function(){return S},E}},function(e,t,n){"use strict"; +/*! Runge-Kutta spring physics function generator. Adapted from Framer.js, copyright Koen Bok. 
MIT License: http://en.wikipedia.org/wiki/MIT_License */var r=function(){function e(e){return-e.tension*e.x-e.friction*e.v}function t(t,n,r){var i={x:t.x+r.dx*n,v:t.v+r.dv*n,tension:t.tension,friction:t.friction};return{dx:i.v,dv:e(i)}}function n(n,r){var i={dx:n.v,dv:e(n)},o=t(n,.5*r,i),a=t(n,.5*r,o),s=t(n,r,a),l=1/6*(i.dx+2*(o.dx+a.dx)+s.dx),c=1/6*(i.dv+2*(o.dv+a.dv)+s.dv);return n.x=n.x+l*r,n.v=n.v+c*r,n}return function e(t,r,i){var o,a={x:-1,v:0,tension:null,friction:null},s=[0],l=0,c=void 0,u=void 0;for(t=parseFloat(t)||500,r=parseFloat(r)||20,i=i||null,a.tension=t,a.friction=r,c=(o=null!==i)?(l=e(t,r))/i*.016:.016;u=n(u||a,c),s.push(1+u.x),l+=16,Math.abs(u.x)>1e-4&&Math.abs(u.v)>1e-4;);return o?function(e){return s[e*(s.length-1)|0]}:l}}();e.exports=r},function(e,t,n){"use strict";var r=n(0);function i(e,t,n,r,i){if(1===r)return n;var o=i(t,n,r);return null==e||((e.roundValue||e.color)&&(o=Math.round(o)),void 0!==e.min&&(o=Math.max(o,e.min)),void 0!==e.max&&(o=Math.min(o,e.max))),o}function o(e,t){return null!=e.pfValue||null!=e.value?null==e.pfValue||null!=t&&"%"===t.type.units?e.value:e.pfValue:e}e.exports=function(e,t,n,a,s){var l=null!=s?s.type:null;n<0?n=0:n>1&&(n=1);var c=o(e,s),u=o(t,s);if(r.number(c)&&r.number(u))return i(l,c,u,n,a);if(r.array(c)&&r.array(u)){for(var d=[],p=0;p0},startBatch:function(){var e=this._private;return null==e.batchCount&&(e.batchCount=0),0===e.batchCount&&(e.batchingStyle=e.batchingNotify=!0,e.batchStyleEles=this.collection(),e.batchNotifyEles=this.collection(),e.batchNotifyTypes=[],e.batchNotifyTypes.ids={}),e.batchCount++,this},endBatch:function(){var e=this._private;return e.batchCount--,0===e.batchCount&&(e.batchingStyle=!1,e.batchStyleEles.updateStyle(),e.batchingNotify=!1,this.notify({type:e.batchNotifyTypes,eles:e.batchNotifyEles})),this},batch:function(e){return this.startBatch(),e(),this.endBatch(),this},batchData:function(e){var t=this;return this.batch((function(){for(var n=Object.keys(e),r=0;r0;)e.removeChild(e.childNodes[0]);this._private.renderer=null},onRender:function(e){return this.on("render",e)},offRender:function(e){return this.off("render",e)}};i.invalidateDimensions=i.resize,e.exports=i},function(e,t,n){"use strict";var r=n(0),i=n(7),o={collection:function(e,t){return r.string(e)?this.$(e):r.elementOrCollection(e)?e.collection():r.array(e)?new i(this,e,t):new i(this)},nodes:function(e){var t=this.$((function(e){return e.isNode()}));return e?t.filter(e):t},edges:function(e){var t=this.$((function(e){return e.isEdge()}));return e?t.filter(e):t},$:function(e){var t=this._private.elements;return e?t.filter(e):t.spawnSelf()},mutableElements:function(){return this._private.elements}};o.elements=o.filter=o.$,e.exports=o},function(e,t,n){"use strict";var r=n(0),i=n(18),o={style:function(e){return e&&this.setStyle(e).update(),this._private.style},setStyle:function(e){var t=this._private;return r.stylesheet(e)?t.style=e.generateStyle(this):r.array(e)?t.style=i.fromJson(this,e):r.string(e)?t.style=i.fromString(this,e):t.style=i(this),t.style}};e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(5),a={apply:function(e){var t=this._private,n=t.cy.collection();t.newStyle&&(t.contextStyles={},t.propDiffs={},this.cleanElements(e,!0));for(var r=0;r0;if(c||u){var d=void 0;c&&u||c?d=l.properties:u&&(d=l.mappedProperties);for(var p=0;p0){n=!0;break}t.hasPie=n;var 
i=e.pstyle("text-transform").strValue,o=e.pstyle("label").strValue,a=e.pstyle("source-label").strValue,s=e.pstyle("target-label").strValue,l=e.pstyle("font-style").strValue,c=e.pstyle("font-size").pfValue+"px",u=e.pstyle("font-family").strValue,d=e.pstyle("font-weight").strValue,p=l+"$"+c+"$"+u+"$"+d+"$"+i+"$"+e.pstyle("text-valign").strValue+"$"+e.pstyle("text-valign").strValue+"$"+e.pstyle("text-outline-width").pfValue+"$"+e.pstyle("text-wrap").strValue+"$"+e.pstyle("text-max-width").pfValue;t.labelStyleKey=p,t.sourceLabelKey=p+"$"+a,t.targetLabelKey=p+"$"+s,t.labelKey=p+"$"+o,t.fontKey=l+"$"+d+"$"+c+"$"+u,t.styleKey=Date.now()}},applyParsedProperty:function(e,t){var n=this,o=t,a=e._private.style,s=void 0,l=n.types,c=n.properties[o.name].type,u=o.bypass,d=a[o.name],p=d&&d.bypass,f=e._private,h=function(){n.checkZOrderTrigger(e,o.name,d?d.value:null,o.value)};if("curve-style"===t.name&&"haystack"===t.value&&e.isEdge()&&(e.isLoop()||e.source().isParent()||e.target().isParent())&&(o=t=this.parse(t.name,"bezier",u)),o.delete)return a[o.name]=void 0,h(),!0;if(o.deleteBypassed)return d?!!d.bypass&&(d.bypassed=void 0,h(),!0):(h(),!0);if(o.deleteBypass)return d?!!d.bypass&&(a[o.name]=d.bypassed,h(),!0):(h(),!0);var g=function(){r.error("Do not assign mappings to elements without corresponding data (e.g. ele `"+e.id()+"` for property `"+o.name+"` with data field `"+o.field+"`); try a `["+o.field+"]` selector to limit scope to elements with `"+o.field+"` defined")};switch(o.mapped){case l.mapData:for(var m=o.field.split("."),v=f.data,b=0;b1&&(y=1),c.color){var x=o.valueMin[0],w=o.valueMax[0],k=o.valueMin[1],A=o.valueMax[1],E=o.valueMin[2],S=o.valueMax[2],$=null==o.valueMin[3]?1:o.valueMin[3],C=null==o.valueMax[3]?1:o.valueMax[3],_=[Math.round(x+(w-x)*y),Math.round(k+(A-k)*y),Math.round(E+(S-E)*y),Math.round($+(C-$)*y)];s={bypass:o.bypass,name:o.name,value:_,strValue:"rgb("+_[0]+", "+_[1]+", "+_[2]+")"}}else{if(!c.number)return!1;var O=o.valueMin+(o.valueMax-o.valueMin)*y;s=this.parse(o.name,O,o.bypass,"mapping")}s||(s=this.parse(o.name,d.strValue,o.bypass,"mapping")),s||g(),s.mapping=o,o=s;break;case l.data:var T=o.field.split("."),j=f.data;if(j)for(var P=0;P0&&l>0){for(var u={},d=!1,p=0;p0?e.delayAnimation(c).play().promise().then(t):t()})).then((function(){return e.animation({style:u,duration:l,easing:e.pstyle("transition-timing-function").value,queue:!1}).play().promise()})).then((function(){r.removeBypasses(e,s),e.emitAndNotify("style"),a.transitioning=!1}))}else a.transitioning&&(this.removeBypasses(e,s),e.emitAndNotify("style"),a.transitioning=!1)},checkZOrderTrigger:function(e,t,n,r){var i=this.properties[t];null==i.triggersZOrder||null!=n&&!i.triggersZOrder(n,r)||this._private.cy.notify({type:"zorder",eles:e})}};e.exports=a},function(e,t,n){"use strict";var r=n(0),i=n(1),o={applyBypass:function(e,t,n,o){var a=[];if("*"===t||"**"===t){if(void 0!==n)for(var s=0;sn.length?t.substr(n.length):""}function l(){o=o.length>a.length?o.substr(a.length):""}for(t=t.replace(/[/][*](\s|.)+?[*][/]/g,"");!t.match(/^\s*$/);){var c=t.match(/^\s*((?:.|\s)+?)\s*\{((?:.|\s)+?)\}/);if(!c){r.error("Halting stylesheet parsing: String stylesheet contains more to parse but no selector and block found in: "+t);break}n=c[0];var u=c[1];if("core"!==u&&new i(u)._private.invalid)r.error("Skipping parsing of block: Invalid selector found in string stylesheet: "+u),s();else{var d=c[2],p=!1;o=d;for(var f=[];!o.match(/^\s*$/);){var h=o.match(/^\s*(.+?)\s*:\s*(.+?)\s*;/);if(!h){r.error("Skipping parsing of block: Invalid 
formatting of style property and value definitions found in:"+d),p=!0;break}a=h[0];var g=h[1],m=h[2];this.properties[g]?this.parse(g,m)?(f.push({name:g,val:m}),l()):(r.error("Skipping property: Invalid property definition in: "+a),l()):(r.error("Skipping property: Invalid property name in: "+a),l())}if(p){s();break}this.selector(u);for(var v=0;v node").css({shape:"rectangle",padding:10,"background-color":"#eee","border-color":"#ccc","border-width":1}).selector("edge").css({width:3,"curve-style":"haystack"}).selector(":parent <-> node").css({"curve-style":"bezier","source-endpoint":"outside-to-line","target-endpoint":"outside-to-line"}).selector(":selected").css({"background-color":"#0169D9","line-color":"#0169D9","source-arrow-color":"#0169D9","target-arrow-color":"#0169D9","mid-source-arrow-color":"#0169D9","mid-target-arrow-color":"#0169D9"}).selector("node:parent:selected").css({"background-color":"#CCE1F9","border-color":"#aec8e5"}).selector(":active").css({"overlay-color":"black","overlay-padding":10,"overlay-opacity":.25}).selector("core").css({"selection-box-color":"#ddd","selection-box-opacity":.65,"selection-box-border-color":"#aaa","selection-box-border-width":1,"active-bg-color":"black","active-bg-opacity":.15,"active-bg-size":30,"outside-texture-bg-color":"#000","outside-texture-bg-opacity":.125}),this.defaultLength=this.length},e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(2),a={parse:function(e,t,n,o){if(i.fn(t))return this.parseImplWarn(e,t,n,o);var a=[e,t,n,"mapping"===o||!0===o||!1===o||null==o?"dontcare":o].join("$"),s=this.propCache=this.propCache||{},l=void 0;return(l=s[a])||(l=s[a]=this.parseImplWarn(e,t,n,o)),(n||"mapping"===o)&&(l=r.copy(l))&&(l.value=r.copy(l.value)),l},parseImplWarn:function(e,t,n,i){var o=this.parseImpl(e,t,n,i);return o||null==t||r.error("The style property `%s: %s` is invalid",e,t),o},parseImpl:function(e,t,n,a){e=r.camel2dash(e);var s=this.properties[e],l=t,c=this.types;if(!s)return null;if(void 0===t)return null;s.alias&&(s=s.pointsTo,e=s.name);var u=i.string(t);u&&(t=t.trim());var d=s.type;if(!d)return null;if(n&&(""===t||null===t))return{name:e,value:t,bypass:!0,deleteBypass:!0};if(i.fn(t))return{name:e,value:t,strValue:"fn",mapped:c.fn,bypass:n};var p=void 0,f=void 0;if(!u||a);else{if(p=new RegExp(c.data.regex).exec(t)){if(n)return!1;var h=c.data;return{name:e,value:p,strValue:""+t,mapped:h,field:p[1],bypass:n}}if(f=new RegExp(c.mapData.regex).exec(t)){if(n)return!1;if(d.multiple)return!1;var g=c.mapData;if(!d.color&&!d.number)return!1;var m=this.parse(e,f[4]);if(!m||m.mapped)return!1;var v=this.parse(e,f[5]);if(!v||v.mapped)return!1;if(m.value===v.value)return!1;if(d.color){var b=m.value,y=v.value;if(!(b[0]!==y[0]||b[1]!==y[1]||b[2]!==y[2]||b[3]!==y[3]&&(null!=b[3]&&1!==b[3]||null!=y[3]&&1!==y[3])))return!1}return{name:e,value:f,strValue:""+t,mapped:g,field:f[1],fieldMin:parseFloat(f[2]),fieldMax:parseFloat(f[3]),valueMin:m.value,valueMax:v.value,bypass:n}}}if(d.multiple&&"multiple"!==a){var x=void 0;if(x=u?t.split(/\s+/):i.array(t)?t:[t],d.evenMultiple&&x.length%2!=0)return null;for(var w=[],k=[],A=[],E=!1,S=0;Sd.max||d.strictMax&&t===d.max))return null;var P={name:e,value:t,strValue:""+t+(_||""),units:_,bypass:n};return d.unitless||"px"!==_&&"em"!==_?P.pfValue=t:P.pfValue="px"!==_&&_?this.getEmSizeInPixels()*t:t,"ms"!==_&&"s"!==_||(P.pfValue="ms"===_?t:1e3*t),"deg"!==_&&"rad"!==_||(P.pfValue="rad"===_?t:o.deg2rad(t)),"%"===_&&(P.pfValue=t/100),P}if(d.propList){var D=[],R=""+t;if("none"===R);else{for(var 
I=R.split(","),N=0;N0&&s>0&&!isNaN(n.w)&&!isNaN(n.h)&&n.w>0&&n.h>0)return{zoom:l=(l=(l=Math.min((a-2*t)/n.w,(s-2*t)/n.h))>this._private.maxZoom?this._private.maxZoom:l)t.maxZoom?t.maxZoom:s)t.maxZoom||!t.zoomingEnabled?a=!0:(t.zoom=l,o.push("zoom"))}if(i&&(!a||!e.cancelOnFailedZoom)&&t.panningEnabled){var c=e.pan;r.number(c.x)&&(t.pan.x=c.x,s=!1),r.number(c.y)&&(t.pan.y=c.y,s=!1),s||o.push("pan")}return o.length>0&&(o.push("viewport"),this.emit(o.join(" ")),this.notify({type:"viewport"})),this},center:function(e){var t=this.getCenterPan(e);return t&&(this._private.pan=t,this.emit("pan viewport"),this.notify({type:"viewport"})),this},getCenterPan:function(e,t){if(this._private.panningEnabled){if(r.string(e)){var n=e;e=this.mutableElements().filter(n)}else r.elementOrCollection(e)||(e=this.mutableElements());if(0!==e.length){var i=e.boundingBox(),o=this.width(),a=this.height();return{x:(o-(t=void 0===t?this._private.zoom:t)*(i.x1+i.x2))/2,y:(a-t*(i.y1+i.y2))/2}}}},reset:function(){return this._private.panningEnabled&&this._private.zoomingEnabled?(this.viewport({pan:{x:0,y:0},zoom:1}),this):this},invalidateSize:function(){this._private.sizeCache=null},size:function(){var e,t,n=this._private,r=n.container;return n.sizeCache=n.sizeCache||(r?(e=i.getComputedStyle(r),t=function(t){return parseFloat(e.getPropertyValue(t))},{width:r.clientWidth-t("padding-left")-t("padding-right"),height:r.clientHeight-t("padding-top")-t("padding-bottom")}):{width:1,height:1})},width:function(){return this.size().width},height:function(){return this.size().height},extent:function(){var e=this._private.pan,t=this._private.zoom,n=this.renderedExtent(),r={x1:(n.x1-e.x)/t,x2:(n.x2-e.x)/t,y1:(n.y1-e.y)/t,y2:(n.y2-e.y)/t};return r.w=r.x2-r.x1,r.h=r.y2-r.y1,r},renderedExtent:function(){var e=this.width(),t=this.height();return{x1:0,y1:0,x2:e,y2:t,w:e,h:t}}};a.centre=a.center,a.autolockNodes=a.autolock,a.autoungrabifyNodes=a.autoungrabify,e.exports=a},function(e,t,n){"use strict";var r=n(1),i=n(4),o=n(7),a=n(12),s=n(95),l=n(0),c=n(11),u={},d={};function p(e,t,n){var s=n,d=function(n){r.error("Can not register `"+t+"` for `"+e+"` since `"+n+"` already exists in the prototype and can not be overridden")};if("core"===e){if(a.prototype[t])return d(t);a.prototype[t]=n}else if("collection"===e){if(o.prototype[t])return d(t);o.prototype[t]=n}else if("layout"===e){for(var p=function(e){this.options=e,n.call(this,e),l.plainObject(this._private)||(this._private={}),this._private.cy=e.cy,this._private.listeners=[],this.createEmitter()},h=p.prototype=Object.create(n.prototype),g=[],m=0;m0;)m();c=n.collection();for(var v=function(e){var t=h[e],n=t.maxDegree(!1),r=t.filter((function(e){return e.degree(!1)===n}));c=c.add(r)},b=0;by.length-1;)y.push([]);y[J].push(X),Z.depth=J,Z.index=y[J].length-1}N()}var K=0;if(t.avoidOverlap)for(var ee=0;eec||0===t)&&(r+=l/u,i++)}return r/=i=Math.max(1,i),0===i&&(r=void 0),ie[e.id()]=r,r},ae=function(e,t){return oe(e)-oe(t)},se=0;se<3;se++){for(var le=0;le0&&y[0].length<=3?u/2:0),p=2*Math.PI/y[i].length*o;return 0===i&&1===y[0].length&&(d=1),{x:de+d*Math.cos(p),y:pe+d*Math.sin(p)}}return{x:de+(o+1-(a+1)/2)*s,y:(i+1)*c}}var f={x:de+(o+1-(a+1)/2)*s,y:(i+1)*c};return f},he={},ge=y.length-1;ge>=0;ge--)for(var me=y[ge],ve=0;ve1&&t.avoidOverlap){f*=1.75;var b=Math.cos(d)-Math.cos(0),y=Math.sin(d)-Math.sin(0),x=Math.sqrt(f*f/(b*b+y*y));p=Math.max(x,p)}return s.layoutPositions(this,t,(function(e,n){var 
r=t.startAngle+n*d*(a?1:-1),i=p*Math.cos(r),o=p*Math.sin(r);return{x:c+i,y:u+o}})),this},e.exports=s},function(e,t,n){"use strict";var r=n(1),i=n(2),o={fit:!0,padding:30,startAngle:1.5*Math.PI,sweep:void 0,clockwise:!0,equidistant:!1,minNodeSpacing:10,boundingBox:void 0,avoidOverlap:!0,nodeDimensionsIncludeLabels:!1,height:void 0,width:void 0,spacingFactor:void 0,concentric:function(e){return e.degree()},levelWidth:function(e){return e.maxDegree()/4},animate:!1,animationDuration:500,animationEasing:void 0,animateFilter:function(e,t){return!0},ready:void 0,stop:void 0,transform:function(e,t){return t}};function a(e){this.options=r.extend({},o,e)}a.prototype.run=function(){for(var e=this.options,t=e,n=void 0!==t.counterclockwise?!t.counterclockwise:t.clockwise,r=e.cy,o=t.eles.nodes().not(":parent"),a=i.makeBoundingBox(t.boundingBox?t.boundingBox:{x1:0,y1:0,w:r.width(),h:r.height()}),s=a.x1+a.w/2,l=a.y1+a.h/2,c=[],u=(t.startAngle,0),d=0;d0&&Math.abs(b[0].value-x.value)>=m&&(b=[],v.push(b)),b.push(x)}var w=u+t.minNodeSpacing;if(!t.avoidOverlap){var k=v.length>0&&v[0].length>1,A=(Math.min(a.w,a.h)/2-w)/(v.length+k?1:0);w=Math.min(w,A)}for(var E=0,S=0;S1&&t.avoidOverlap){var O=Math.cos(_)-Math.cos(0),T=Math.sin(_)-Math.sin(0),j=Math.sqrt(w*w/(O*O+T*T));E=Math.max(j,E)}$.r=E,E+=w}if(t.equidistant){for(var P=0,D=0,R=0;R0)var c=(p=r.nodeOverlap*s)*i/(b=Math.sqrt(i*i+o*o)),d=p*o/b;else{var p,f=u(e,i,o),h=u(t,-1*i,-1*o),g=h.x-f.x,m=h.y-f.y,v=g*g+m*m,b=Math.sqrt(v);c=(p=(e.nodeRepulsion+t.nodeRepulsion)/v)*g/b,d=p*m/b}e.isLocked||(e.offsetX-=c,e.offsetY-=d),t.isLocked||(t.offsetX+=c,t.offsetY+=d)}},l=function(e,t,n,r){if(n>0)var i=e.maxX-t.minX;else i=t.maxX-e.minX;if(r>0)var o=e.maxY-t.minY;else o=t.maxY-e.minY;return i>=0&&o>=0?Math.sqrt(i*i+o*o):0},u=function(e,t,n){var r=e.positionX,i=e.positionY,o=e.height||1,a=e.width||1,s=n/t,l=o/a,c={};return 0===t&&0n?(c.x=r,c.y=i+o/2,c):0t&&-1*l<=s&&s<=l?(c.x=r-a/2,c.y=i-a*n/2/t,c):0=l)?(c.x=r+o*t/2/n,c.y=i+o/2,c):0>n&&(s<=-1*l||s>=l)?(c.x=r-o*t/2/n,c.y=i-o/2,c):c},d=function(e,t){for(var n=0;n1){var h=t.gravity*d/f,g=t.gravity*p/f;u.offsetX+=h,u.offsetY+=g}}}}},f=function(e,t){var n=[],r=0,i=-1;for(n.push.apply(n,e.graphSet[0]),i+=e.graphSet[0].length;r<=i;){var o=n[r++],a=e.idToIndex[o],s=e.layoutNodes[a],l=s.children;if(0n)var i={x:n*e/r,y:n*t/r};else i={x:e,y:t};return i},m=function e(t,n){var r=t.parentId;if(null!=r){var i=n.layoutNodes[n.idToIndex[r]],o=!1;return(null==i.maxX||t.maxX+i.padRight>i.maxX)&&(i.maxX=t.maxX+i.padRight,o=!0),(null==i.minX||t.minX-i.padLefti.maxY)&&(i.maxY=t.maxY+i.padBottom,o=!0),(null==i.minY||t.minY-i.padTopg&&(p+=h+t.componentSpacing,d=0,f=0,h=0)}}}(0,i),r})).then((function(e){d.layoutNodes=e.layoutNodes,o.stop(),b()}));var b=function(){!0===e.animate||!1===e.animate?v({force:!0,next:function(){n.one("layoutstop",e.stop),n.emit({type:"layoutstop",layout:n})}}):e.eles.nodes().layoutPositions(n,e,(function(e){var t=d.layoutNodes[d.idToIndex[e.data("id")]];return{x:t.positionX,y:t.positionY}}))};return this},c.prototype.stop=function(){return this.stopped=!0,this.thread&&this.thread.stop(),this.emit("layoutstop"),this},c.prototype.destroy=function(){return this.thread&&this.thread.stop(),this};var u=function(e,t,n){for(var 
r=n.eles.edges(),i=n.eles.nodes(),s={isCompound:e.hasCompoundNodes(),layoutNodes:[],idToIndex:{},nodeSize:i.size(),graphSet:[],indexToGraph:[],layoutEdges:[],edgeSize:r.size(),temperature:n.initialTemp,clientWidth:e.width(),clientHeight:e.width(),boundingBox:o.makeBoundingBox(n.boundingBox?n.boundingBox:{x1:0,y1:0,w:e.width(),h:e.height()})},l=n.eles.components(),c={},u=0;u0)for(s.graphSet.push(A),u=0;ur.count?0:r.graph},p=function e(t,n,r,i){var o=i.graphSet[r];if(-1a){var h=u(),g=d();(h-1)*g>=a?u(h-1):(g-1)*h>=a&&d(g-1)}else for(;c*l=a?d(v+1):u(m+1)}var b=o.w/c,y=o.h/l;if(t.condense&&(b=0,y=0),t.avoidOverlap)for(var x=0;x=c&&(j=0,T++)},D={},R=0;R(r=i.sqdistToFiniteLine(e,t,w[k],w[k+1],w[k+2],w[k+3])))return b(n,r),!0}else if("bezier"===a.edgeType||"multibezier"===a.edgeType||"self"===a.edgeType||"compound"===a.edgeType)for(w=a.allpts,k=0;k+5(r=i.sqdistToQuadraticBezier(e,t,w[k],w[k+1],w[k+2],w[k+3],w[k+4],w[k+5])))return b(n,r),!0;v=v||o.source,x=x||o.target;var A=l.getArrowWidth(s,u),E=[{name:"source",x:a.arrowStartX,y:a.arrowStartY,angle:a.srcArrowAngle},{name:"target",x:a.arrowEndX,y:a.arrowEndY,angle:a.tgtArrowAngle},{name:"mid-source",x:a.midX,y:a.midY,angle:a.midsrcArrowAngle},{name:"mid-target",x:a.midX,y:a.midY,angle:a.midtgtArrowAngle}];for(k=0;k0&&(y(v),y(x))}function w(e,t,n){return o.getPrefixedProperty(e,t,n)}function k(n,r){var o,a=n._private,s=m;o=r?r+"-":"";var l=n.pstyle(o+"label").value;if("yes"===n.pstyle("text-events").strValue&&l){var c=a.rstyle,u=n.pstyle("text-border-width").pfValue,d=n.pstyle("text-background-padding").pfValue,p=w(c,"labelWidth",r)+u+2*s+2*d,f=w(c,"labelHeight",r)+u+2*s+2*d,h=w(c,"labelX",r),g=w(c,"labelY",r),v=w(a.rscratch,"labelAngle",r),y=h-p/2,x=h+p/2,k=g-f/2,A=g+f/2;if(v){var E=Math.cos(v),S=Math.sin(v),$=function(e,t){return{x:(e-=h)*E-(t-=g)*S+h,y:e*S+t*E+g}},C=$(y,k),_=$(y,A),O=$(x,k),T=$(x,A),j=[C.x,C.y,O.x,O.y,T.x,T.y,_.x,_.y];if(i.pointInsidePolygonPoints(e,t,j))return b(n),!0}else{var P={w:p,h:f,x1:y,x2:x,y1:k,y2:A};if(i.inBoundingBox(P,e,t))return b(n),!0}}}n&&(u=u.interactive);for(var A=u.length-1;A>=0;A--){var E=u[A];E.isNode()?y(E)||k(E):x(E)||k(E)||k(E,"source")||k(E,"target")}return d},getAllInBox:function(e,t,n,r){var o=this.getCachedZSortedEles().interactive,a=[],s=Math.min(e,n),l=Math.max(e,n),c=Math.min(t,r),u=Math.max(t,r);e=s,n=l,t=c,r=u;for(var d=i.makeBoundingBox({x1:e,y1:t,x2:n,y2:r}),p=0;pb?b+"$-$"+v:v+"$-$"+b,g&&(t="unbundled$-$"+h.id);var y=u[t];null==y&&(y=u[t]=[],d.push(t)),y.push(Bt),g&&(y.hasUnbundled=!0),m&&(y.hasBezier=!0)}else p.push(Bt)}for(var x=0;xGt.id()){var k=Ht;Ht=Gt,Gt=k}Wt=Ht.position(),Yt=Gt.position(),Xt=Ht.outerWidth(),Qt=Ht.outerHeight(),Zt=Gt.outerWidth(),Jt=Gt.outerHeight(),n=l.nodeShapes[this.getNodeShape(Ht)],o=l.nodeShapes[this.getNodeShape(Gt)],s=!1;var A={north:0,west:0,south:0,east:0,northwest:0,southwest:0,northeast:0,southeast:0},E=Wt.x,S=Wt.y,$=Xt,C=Qt,_=Yt.x,O=Yt.y,T=Zt,j=Jt,P=w.length;for(f=0;f=d||w){f={cp:b,segment:x};break}}if(f)break}b=f.cp;var k=(d-g)/(x=f.segment).length,A=x.t1-x.t0,E=u?x.t0+A*k:x.t1-A*k;E=r.bound(0,E,1),t=r.qbezierPtAt(b.p0,b.p1,b.p2,E),c=function(e,t,n,i){var o=r.bound(0,i-.001,1),a=r.bound(0,i+.001,1),s=r.qbezierPtAt(e,t,n,o),l=r.qbezierPtAt(e,t,n,a);return p(s,l)}(b.p0,b.p1,b.p2,E);break;case"straight":case"segments":case"haystack":var 
S,$,C,_,O=0,T=i.allpts.length;for(v=0;v+3=d));v+=2);E=(d-$)/S,E=r.bound(0,E,1),t=r.lineAt(C,_,E),c=p(C,_)}l("labelX",o,t.x),l("labelY",o,t.y),l("labelAutoAngle",o,c)}};c("source"),c("target"),this.applyLabelDimensions(e)}},applyLabelDimensions:function(e){this.applyPrefixedLabelDimensions(e),e.isEdge()&&(this.applyPrefixedLabelDimensions(e,"source"),this.applyPrefixedLabelDimensions(e,"target"))},applyPrefixedLabelDimensions:function(e,t){var n=e._private,r=this.getLabelText(e,t),i=this.calculateLabelDimensions(e,r);o.setPrefixedProperty(n.rstyle,"labelWidth",t,i.width),o.setPrefixedProperty(n.rscratch,"labelWidth",t,i.width),o.setPrefixedProperty(n.rstyle,"labelHeight",t,i.height),o.setPrefixedProperty(n.rscratch,"labelHeight",t,i.height)},getLabelText:function(e,t){var n=e._private,r=t?t+"-":"",i=e.pstyle(r+"label").strValue,a=e.pstyle("text-transform").value,s=function(e,r){return r?(o.setPrefixedProperty(n.rscratch,e,t,r),r):o.getPrefixedProperty(n.rscratch,e,t)};"none"==a||("uppercase"==a?i=i.toUpperCase():"lowercase"==a&&(i=i.toLowerCase()));var l=e.pstyle("text-wrap").value;if("wrap"===l){var c=s("labelKey");if(c&&s("labelWrapKey")===c)return s("labelWrapCachedText");for(var u=i.split("\n"),d=e.pstyle("text-max-width").pfValue,p=[],f=0;fd){for(var g=h.split(/\s+/),m="",v=0;vd);k++)x+=i[k],k===i.length-1&&(w=!0);return w||(x+="…"),x}return i},calculateLabelDimensions:function(e,t,n){var r=e._private.labelStyleKey+"$@$"+t;n&&(r+="$@$"+n);var i=this.labelDimCache||(this.labelDimCache={});if(i[r])return i[r];var o=e.pstyle("font-style").strValue,a=1*e.pstyle("font-size").pfValue+"px",s=e.pstyle("font-family").strValue,l=e.pstyle("font-weight").strValue,c=this.labelCalcDiv;c||(c=this.labelCalcDiv=document.createElement("div"),document.body.appendChild(c));var u=c.style;return u.fontFamily=s,u.fontStyle=o,u.fontSize=a,u.fontWeight=l,u.position="absolute",u.left="-9999px",u.top="-9999px",u.zIndex="-1",u.visibility="hidden",u.pointerEvents="none",u.padding="0",u.lineHeight="1","wrap"===e.pstyle("text-wrap").value?u.whiteSpace="pre":u.whiteSpace="normal",c.textContent=t,i[r]={width:Math.ceil(c.clientWidth/1),height:Math.ceil(c.clientHeight/1)},i[r]},calculateLabelAngles:function(e){var t=e._private.rscratch,n=e.isEdge(),r=e.pstyle("text-rotation"),i=r.strValue;"none"===i?t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle=0:n&&"autorotate"===i?(t.labelAngle=Math.atan(t.midDispY/t.midDispX),t.sourceLabelAngle=t.sourceLabelAutoAngle,t.targetLabelAngle=t.targetLabelAutoAngle):t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle="autorotate"===i?0:r.pfValue}};e.exports=a},function(e,t,n){"use strict";var r={getNodeShape:function(e){var t=e.pstyle("shape").value;if(e.isParent())return"rectangle"===t||"roundrectangle"===t||"cutrectangle"===t||"barrel"===t?t:"rectangle";if("polygon"===t){var n=e.pstyle("shape-polygon-points").value;return this.nodeShapes.makePolygon(n).name}return t}};e.exports=r},function(e,t,n){"use strict";var r={registerCalculationListeners:function(){var e=this.cy,t=e.collection(),n=this,r=function(e,n){var r=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];t.merge(e);for(var i=0;i=e.desktopTapThreshold2}var C=n(i);b&&(e.hoverData.tapholdCancelled=!0),s=!0,t(v,["mousemove","vmousemove","tapdrag"],i,{position:{x:f[0],y:f[1]}});var _=function(){e.data.bgActivePosistion=void 0,e.hoverData.selecting||l.emit("boxstart"),m[4]=1,e.hoverData.selecting=!0,e.redrawHint("select",!0),e.redraw()};if(3===e.hoverData.which){if(b){var 
O={originalEvent:i,type:"cxtdrag",position:{x:f[0],y:f[1]}};x?x.emit(O):l.emit(O),e.hoverData.cxtDragged=!0,e.hoverData.cxtOver&&v===e.hoverData.cxtOver||(e.hoverData.cxtOver&&e.hoverData.cxtOver.emit({originalEvent:i,type:"cxtdragout",position:{x:f[0],y:f[1]}}),e.hoverData.cxtOver=v,v&&v.emit({originalEvent:i,type:"cxtdragover",position:{x:f[0],y:f[1]}}))}}else if(e.hoverData.dragging){if(s=!0,l.panningEnabled()&&l.userPanningEnabled()){var j;if(e.hoverData.justStartedPan){var P=e.hoverData.mdownPos;j={x:(f[0]-P[0])*c,y:(f[1]-P[1])*c},e.hoverData.justStartedPan=!1}else j={x:w[0]*c,y:w[1]*c};l.panBy(j),e.hoverData.dragged=!0}f=e.projectIntoViewport(i.clientX,i.clientY)}else if(1!=m[4]||null!=x&&!x.isEdge()){if(x&&x.isEdge()&&x.active()&&x.unactivate(),x&&x.grabbed()||v==y||(y&&t(y,["mouseout","tapdragout"],i,{position:{x:f[0],y:f[1]}}),v&&t(v,["mouseover","tapdragover"],i,{position:{x:f[0],y:f[1]}}),e.hoverData.last=v),x)if(b){if(l.boxSelectionEnabled()&&C)x&&x.grabbed()&&(p(k),x.emit("free")),_();else if(x&&x.grabbed()&&e.nodeIsDraggable(x)){var D=!e.dragData.didDrag;D&&e.redrawHint("eles",!0),e.dragData.didDrag=!0;var R=[];e.hoverData.draggingEles||u(l.collection(k),{inDragLayer:!0});for(var I=0;I0&&e.redrawHint("eles",!0),e.dragData.possibleDragElements=l=[]),t(s,["mouseup","tapend","vmouseup"],r,{position:{x:o[0],y:o[1]}}),e.dragData.didDrag||e.hoverData.dragged||e.hoverData.selecting||e.hoverData.isOverThresholdDrag||t(c,["click","tap","vclick"],r,{position:{x:o[0],y:o[1]}}),s!=c||e.dragData.didDrag||e.hoverData.selecting||null!=s&&s._private.selectable&&(e.hoverData.dragging||("additive"===i.selectionType()||u?s.selected()?s.unselect():s.select():u||(i.$(":selected").unmerge(s).unselect(),s.select())),e.redrawHint("eles",!0)),e.hoverData.selecting){var h=i.collection(e.getAllInBox(a[0],a[1],a[2],a[3]));e.redrawHint("select",!0),h.length>0&&e.redrawHint("eles",!0),i.emit("boxend");var g=function(e){return e.selectable()&&!e.selected()};"additive"===i.selectionType()||u||i.$(":selected").unmerge(h).unselect(),h.emit("box").stdFilter(g).select().emit("boxselect"),e.redraw()}if(e.hoverData.dragging&&(e.hoverData.dragging=!1,e.redrawHint("select",!0),e.redrawHint("eles",!0),e.redraw()),!a[4]){e.redrawHint("drag",!0),e.redrawHint("eles",!0);var m=c&&c.grabbed();p(l),m&&c.emit("free")}}a[4]=0,e.hoverData.down=null,e.hoverData.cxtStarted=!1,e.hoverData.draggingEles=!1,e.hoverData.selecting=!1,e.hoverData.isOverThresholdDrag=!1,e.dragData.didDrag=!1,e.hoverData.dragged=!1,e.hoverData.dragDelta=[],e.hoverData.mdownPos=null,e.hoverData.mdownGPos=null}}),!1),e.registerBinding(e.container,"wheel",(function(t){if(!e.scrollingPage){var 
n,r=e.cy,i=e.projectIntoViewport(t.clientX,t.clientY),o=[i[0]*r.zoom()+r.pan().x,i[1]*r.zoom()+r.pan().y];e.hoverData.draggingEles||e.hoverData.dragging||e.hoverData.cxtStarted||0!==e.selection[4]?t.preventDefault():r.panningEnabled()&&r.userPanningEnabled()&&r.zoomingEnabled()&&r.userZoomingEnabled()&&(t.preventDefault(),e.data.wheelZooming=!0,clearTimeout(e.data.wheelTimeout),e.data.wheelTimeout=setTimeout((function(){e.data.wheelZooming=!1,e.redrawHint("eles",!0),e.redraw()}),150),n=null!=t.deltaY?t.deltaY/-250:null!=t.wheelDeltaY?t.wheelDeltaY/1e3:t.wheelDelta/1e3,n*=e.wheelSensitivity,1===t.deltaMode&&(n*=33),r.zoom({level:r.zoom()*Math.pow(10,n),renderedPosition:{x:o[0],y:o[1]}}))}}),!0),e.registerBinding(window,"scroll",(function(t){e.scrollingPage=!0,clearTimeout(e.scrollingPageTimeout),e.scrollingPageTimeout=setTimeout((function(){e.scrollingPage=!1}),250)}),!0),e.registerBinding(e.container,"mouseout",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseout",position:{x:n[0],y:n[1]}})}),!1),e.registerBinding(e.container,"mouseover",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseover",position:{x:n[0],y:n[1]}})}),!1);var j,P,D,R,I=function(e,t,n,r){return Math.sqrt((n-e)*(n-e)+(r-t)*(r-t))},N=function(e,t,n,r){return(n-e)*(n-e)+(r-t)*(r-t)};if(e.registerBinding(e.container,"touchstart",j=function(n){if(T(n)){e.touchData.capture=!0,e.data.bgActivePosistion=void 0;var r=e.cy,i=e.touchData.now,o=e.touchData.earlier;if(n.touches[0]){var a=e.projectIntoViewport(n.touches[0].clientX,n.touches[0].clientY);i[0]=a[0],i[1]=a[1]}if(n.touches[1]&&(a=e.projectIntoViewport(n.touches[1].clientX,n.touches[1].clientY),i[2]=a[0],i[3]=a[1]),n.touches[2]&&(a=e.projectIntoViewport(n.touches[2].clientX,n.touches[2].clientY),i[4]=a[0],i[5]=a[1]),n.touches[1]){p(e.dragData.touchDragEles);var s=e.findContainerClientCoords();S=s[0],$=s[1],C=s[2],_=s[3],v=n.touches[0].clientX-S,b=n.touches[0].clientY-$,y=n.touches[1].clientX-S,x=n.touches[1].clientY-$,O=0<=v&&v<=C&&0<=y&&y<=C&&0<=b&&b<=_&&0<=x&&x<=_;var c=r.pan(),f=r.zoom();if(w=I(v,b,y,x),k=N(v,b,y,x),E=[((A=[(v+y)/2,(b+x)/2])[0]-c.x)/f,(A[1]-c.y)/f],k<4e4&&!n.touches[2]){var h=e.findNearestElement(i[0],i[1],!0,!0),g=e.findNearestElement(i[2],i[3],!0,!0);return h&&h.isNode()?(h.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=h):g&&g.isNode()?(g.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=g):r.emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxt=!0,e.touchData.cxtDragged=!1,e.data.bgActivePosistion=void 0,void e.redraw()}}if(n.touches[2]);else if(n.touches[1]);else if(n.touches[0]){var m=e.findNearestElements(i[0],i[1],!0,!0),j=m[0];if(null!=j&&(j.activate(),e.touchData.start=j,e.touchData.starts=m,e.nodeIsGrabbable(j))){var P=e.dragData.touchDragEles=[],D=null;e.redrawHint("eles",!0),e.redrawHint("drag",!0),j.selected()?(D=r.$((function(t){return t.selected()&&e.nodeIsGrabbable(t)})),u(D,{addToList:P})):d(j,{addToList:P}),l(j);var 
R=function(e){return{originalEvent:n,type:e,position:{x:i[0],y:i[1]}}};j.emit(R("grabon")),D?D.forEach((function(e){e.emit(R("grab"))})):j.emit(R("grab"))}t(j,["touchstart","tapstart","vmousedown"],n,{position:{x:i[0],y:i[1]}}),null==j&&(e.data.bgActivePosistion={x:a[0],y:a[1]},e.redrawHint("select",!0),e.redraw()),e.touchData.singleTouchMoved=!1,e.touchData.singleTouchStartTime=+new Date,clearTimeout(e.touchData.tapholdTimeout),e.touchData.tapholdTimeout=setTimeout((function(){!1!==e.touchData.singleTouchMoved||e.pinching||e.touchData.selecting||(t(e.touchData.start,["taphold"],n,{position:{x:i[0],y:i[1]}}),e.touchData.start||r.$(":selected").unselect())}),e.tapholdDuration)}if(n.touches.length>=1){for(var M=e.touchData.startPosition=[],z=0;z=e.touchTapThreshold2}if(i&&e.touchData.cxt){n.preventDefault();var D=n.touches[0].clientX-S,R=n.touches[0].clientY-$,M=n.touches[1].clientX-S,z=n.touches[1].clientY-$,L=N(D,R,M,z);if(L/k>=2.25||L>=22500){e.touchData.cxt=!1,e.data.bgActivePosistion=void 0,e.redrawHint("select",!0);var B={originalEvent:n,type:"cxttapend",position:{x:c[0],y:c[1]}};e.touchData.start?(e.touchData.start.unactivate().emit(B),e.touchData.start=null):l.emit(B)}}if(i&&e.touchData.cxt){B={originalEvent:n,type:"cxtdrag",position:{x:c[0],y:c[1]}},e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),e.touchData.start?e.touchData.start.emit(B):l.emit(B),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxtDragged=!0;var F=e.findNearestElement(c[0],c[1],!0,!0);e.touchData.cxtOver&&F===e.touchData.cxtOver||(e.touchData.cxtOver&&e.touchData.cxtOver.emit({originalEvent:n,type:"cxtdragout",position:{x:c[0],y:c[1]}}),e.touchData.cxtOver=F,F&&F.emit({originalEvent:n,type:"cxtdragover",position:{x:c[0],y:c[1]}}))}else if(i&&n.touches[2]&&l.boxSelectionEnabled())n.preventDefault(),e.data.bgActivePosistion=void 0,this.lastThreeTouch=+new Date,e.touchData.selecting||l.emit("boxstart"),e.touchData.selecting=!0,e.redrawHint("select",!0),s&&0!==s.length&&void 0!==s[0]?(s[2]=(c[0]+c[2]+c[4])/3,s[3]=(c[1]+c[3]+c[5])/3):(s[0]=(c[0]+c[2]+c[4])/3,s[1]=(c[1]+c[3]+c[5])/3,s[2]=(c[0]+c[2]+c[4])/3+1,s[3]=(c[1]+c[3]+c[5])/3+1),s[4]=1,e.touchData.selecting=!0,e.redraw();else if(i&&n.touches[1]&&l.zoomingEnabled()&&l.panningEnabled()&&l.userZoomingEnabled()&&l.userPanningEnabled()){if(n.preventDefault(),e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),ee=e.dragData.touchDragEles){e.redrawHint("drag",!0);for(var q=0;q0)return h[0]}return null},f=Object.keys(d),h=0;h0?p:r.roundRectangleIntersectLine(o,a,e,t,n,i,s)},checkPoint:function(e,t,n,i,o,a,s){var l=r.getRoundRectangleRadius(i,o),c=2*l;if(r.pointInsidePolygon(e,t,this.points,a,s,i,o-c,[0,-1],n))return!0;if(r.pointInsidePolygon(e,t,this.points,a,s,i-c,o,[0,-1],n))return!0;var u=i/2+2*n,d=o/2+2*n,p=[a-u,s-d,a-u,s,a+u,s,a+u,s-d];return!!r.pointInsidePolygonPoints(e,t,p)||!!r.checkInEllipse(e,t,c,c,a+i/2-l,s+o/2-l,n)||!!r.checkInEllipse(e,t,c,c,a-i/2+l,s+o/2-l,n)}}},registerNodeShapes:function(){var 
[Minified vendored bundle (dbt docs index.html), part 1 of 3 — cytoscape.js canvas renderer: node-shape generation (ellipse, polygons, round/cut/bottom-round rectangle, barrel, star), per-element and per-layer texture caches with level-of-detail queueing, retirement, and recycling, edge/arrowhead/label drawing (dash styles, ghost offsets, text background/border/outline/wrap), node drawing (background images, pie slices, overlays), pixel-ratio and paint-cache helpers, the main render loop (motion-blur buffers, texture-on-viewport, selection box, FPS overlay), polygon path helpers, PNG/JPEG export, and a stylesheet builder; followed by a setImmediate polyfill, lodash debounce, and a binary-heap priority queue. Minified source omitted; comparison operators were stripped in extraction.]
[Minified vendored bundle, part 2 of 3 — cytoscape extensions and layout: the cytoscape-context-menus plugin (menu item API: appendMenuItem, removeMenuItem, hide/show/enable/disable, destroy), the cytoscape-dagre layout adapter, supporting lodash internals (ListCache/MapCache/Stack, merge, sortBy comparators, uniq/find helpers), graphlib algorithms (shortest paths, minimum spanning tree, greedy feedback arc set), and the dagre layout pipeline: acyclic, nesting graph, ranking via network simplex, normalization, parent dummy chains, border segments, ordering (barycenter sort, cross counting, layer graphs), Brandes-Köpf positioning, coordinate-system adjustment, graph translation, node-intersect assignment, and self-edge handling. Minified source omitted.]
[Minified vendored bundle, part 3 of 3 — AngularJS "dbt" docs application: directives and their templateCache entries under /components/ (model_tree/model_tree_line.html, search/search.html, table_details/table_details.html, column_details/column_details.html, code_block/code_block.html with Prism highlighting and copy-to-clipboard, macro_arguments/index.html, references/index.html), an adapter quote-char helper, inline CSS snippets, and the per-resource controllers: ModelCtrl, SourceCtrl, SeedCtrl, SnapshotCtrl, TestCtrl, MacroCtrl, AnalysisCtrl, ExposureCtrl, MetricCtrl, SemanticModelCtrl, OperationCtrl, GraphCtrl, and MainController (tree loading, search state, keyboard shortcuts, route transitions). Minified source and HTML templates omitted; template markup was stripped to text in extraction.]
d=r.updateSelected(i.unique_id);e.tree.database=d.database,e.tree.groups=d.groups,e.tree.project=d.project,e.tree.sources=d.sources,e.search.query="",p(i.unique_id),setTimeout((function(){f(i.unique_id)}))}u&&c.track_pageview()})),e.$watch("search.query",(function(t){e.search.results=function(t){if(""===e.search.query)return t;let n={name:10,tags:5,description:3,raw_code:2,columns:1};return i.each(t,(function(t){t.overallWeight=0,i.each(Object.keys(n),(function(r){if(null!=t.model[r]){let o=0,a=t.model[r],s=e.search.query.toLowerCase();if("columns"===r)i.each(a,(function(e){if(e.name){let t=e.name.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}}));else if("tags"===r)i.each(a,(function(e){let t=e.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}));else{a=a.toLowerCase();let e=0;for(;-1!=e;)e=a.indexOf(s,e),-1!=e&&(o++,e++)}t.overallWeight+=o*n[r]}}))})),t}(r.search(t))})),r.init(),r.ready((function(t){e.project=t,e.search.results=r.search("");var o=i.unique(i.pluck(i.values(t.nodes),"package_name")).sort(),a=[null];i.each(t.nodes,(function(e){var t=e.tags;a=i.union(a,t).sort()})),l.init({packages:o,tags:a}),p(n.params.unique_id);var d=u.parseState(n.params);d.show_graph&&s.ready((function(){i.assign(l.selection.dirty,d.selected);var e=l.updateSelection();s.updateGraph(e)}));var f=t.metadata||{};c.init({track:f.send_anonymous_usage_stats,project_id:f.project_id})}))}])},function(e,t){e.exports="data:image/svg+xml,%3Csvg width='242' height='90' viewBox='0 0 242 90' fill='none' xmlns='http://www.w3.org/2000/svg'%3E %3Cpath d='M240.384 74.5122L239.905 75.8589H239.728L239.249 74.5156V75.8589H238.941V74.0234H239.324L239.816 75.3872L240.309 74.0234H240.691V75.8589H240.384V74.5122ZM238.671 74.3003H238.169V75.8589H237.858V74.3003H237.352V74.0234H238.671V74.3003Z' fill='%23262A38'/%3E %3Cpath d='M154.123 13.915V75.3527H141.672V69.0868C140.37 71.2839 138.499 73.0742 136.22 74.2134C133.779 75.434 131.012 76.085 128.246 76.085C124.828 76.1664 121.41 75.1899 118.562 73.2369C115.633 71.2839 113.354 68.5986 111.889 65.425C110.262 61.7631 109.448 57.8572 109.529 53.8698C109.448 49.8825 110.262 45.9765 111.889 42.3961C113.354 39.3038 115.633 36.6185 118.481 34.7469C121.41 32.8753 124.828 31.9801 128.246 32.0615C130.931 32.0615 133.616 32.6311 135.976 33.8517C138.255 34.991 140.126 36.6999 141.428 38.8156V18.0651L154.123 13.915ZM139.15 63.2279C140.777 61.1121 141.672 58.0199 141.672 54.0326C141.672 50.0452 140.859 47.0344 139.15 44.9187C137.441 42.8029 134.755 41.5823 131.989 41.6637C129.222 41.5009 126.537 42.7215 124.746 44.8373C123.038 46.953 122.142 49.9639 122.142 53.8698C122.142 57.8572 123.038 60.9494 124.746 63.1465C126.455 65.3436 129.222 66.5642 131.989 66.4828C135.081 66.4828 137.522 65.3436 139.15 63.2279Z' fill='%23262A38'/%3E %3Cpath d='M198.635 34.6655C201.564 36.5371 203.843 39.2225 205.226 42.3147C206.853 45.8952 207.667 49.8011 207.586 53.7885C207.667 57.7758 206.853 61.7632 205.226 65.3436C203.761 68.5172 201.483 71.2026 198.553 73.1556C195.705 75.0272 192.287 76.0037 188.87 75.9223C186.103 76.0037 183.336 75.3527 180.895 74.0507C178.617 72.9114 176.745 71.1212 175.524 68.9241V75.2713H162.993V18.0651L175.606 13.915V38.9783C176.826 36.7812 178.698 34.991 180.976 33.8517C183.418 32.5498 186.103 31.8988 188.87 31.9801C192.287 31.8988 195.705 32.8753 198.635 34.6655ZM192.45 63.1465C194.159 60.9494 194.973 57.8572 194.973 53.7885C194.973 49.8825 194.159 46.8716 192.45 44.7559C190.741 42.6402 188.381 41.5823 185.289 41.5823C182.523 41.4196 179.837 42.6402 178.047 
44.8373C176.338 47.0344 175.524 50.0452 175.524 53.9512C175.524 57.9386 176.338 61.0308 178.047 63.1465C179.756 65.3436 182.441 66.5642 185.289 66.4015C188.056 66.5642 190.741 65.3436 192.45 63.1465Z' fill='%23262A38'/%3E %3Cpath d='M225 42.4774V58.915C225 61.2749 225.651 62.9838 226.791 64.0416C228.093 65.1809 229.801 65.7505 231.592 65.6691C232.975 65.6691 234.44 65.425 235.742 65.0995V74.8644C233.382 75.6782 230.941 76.085 228.499 76.0037C223.292 76.0037 219.304 74.5389 216.537 71.6094C213.771 68.68 212.387 64.5299 212.387 59.1592V23.1103L225 19.0416V33.038H235.742V42.4774H225Z' fill='%23262A38'/%3E %3Cpath d='M86.1754 3.74322C88.2911 5.77758 89.6745 8.46293 90 11.3924C90 12.613 89.6745 13.4268 88.9421 14.9729C88.2098 16.519 79.1772 32.1429 76.4919 36.4557C74.9458 38.9783 74.132 41.9892 74.132 44.9186C74.132 47.9295 74.9458 50.859 76.4919 53.3816C79.1772 57.6944 88.2098 73.3996 88.9421 74.9457C89.6745 76.4919 90 77.2242 90 78.4448C89.6745 81.3743 88.3725 84.0597 86.2568 86.0127C84.2224 88.1284 81.5371 89.5118 78.689 89.7559C77.4684 89.7559 76.6546 89.4304 75.1899 88.698C73.7251 87.9656 57.7758 79.1772 53.4629 76.4919C53.1374 76.3291 52.8119 76.085 52.4051 75.9222L31.085 63.3092C31.5732 67.3779 33.3635 71.2839 36.2929 74.132C36.8626 74.7016 37.4322 75.1899 38.0832 75.6781C37.5949 75.9222 37.0253 76.1664 36.5371 76.4919C32.2242 79.1772 16.519 88.2098 14.9729 88.9421C13.4268 89.6745 12.6944 90 11.3924 90C8.46293 89.6745 5.77758 88.3725 3.82459 86.2568C1.70886 84.2224 0.325497 81.5371 0 78.6076C0.0813743 77.387 0.406872 76.1664 1.05787 75.1085C1.79024 73.5624 10.8228 57.8571 13.5081 53.5443C15.0542 51.0217 15.868 48.0922 15.868 45.0814C15.868 42.0705 15.0542 39.141 13.5081 36.6184C10.8228 32.1429 1.70886 16.4376 1.05787 14.8915C0.406872 13.8336 0.0813743 12.613 0 11.3924C0.325497 8.46293 1.62749 5.77758 3.74322 3.74322C5.77758 1.62749 8.46293 0.325497 11.3924 0C12.613 0.0813743 13.8336 0.406872 14.9729 1.05787C16.2749 1.62749 27.7486 8.30018 33.8517 11.8807L35.2351 12.6944C35.7233 13.0199 36.1302 13.264 36.4557 13.4268L37.1067 13.8336L58.8336 26.6908C58.3454 21.8083 55.8228 17.3327 51.9168 14.3219C52.4051 14.0778 52.9747 13.8336 53.4629 13.5081C57.7758 10.8228 73.481 1.70886 75.0271 1.05787C76.085 0.406872 77.3056 0.0813743 78.6076 0C81.4557 0.325497 84.1411 1.62749 86.1754 3.74322ZM46.1392 50.7776L50.7776 46.1392C51.4286 45.4882 51.4286 44.5118 50.7776 43.8608L46.1392 39.2224C45.4882 38.5714 44.5118 38.5714 43.8608 39.2224L39.2224 43.8608C38.5714 44.5118 38.5714 45.4882 39.2224 46.1392L43.8608 50.7776C44.4304 51.3472 45.4882 51.3472 46.1392 50.7776Z' fill='%23FF694A'/%3E %3C/svg%3E"},function(e,t,n){"use strict";n.r(t);var r=n(63),i=n.n(r);n(462),n(463),n(464),n(465),n(467);const o=n(9),a=(n(33),n(21));window.Prism=i.a,o.module("dbt").factory("code",["$sce",function(e){var t={copied:!1,highlight:function(t,n="sql"){if("sql"==n)var r=i.a.highlight(t,i.a.languages.sql,"sql");else if("python"==n)r=i.a.highlight(t,i.a.languages.python,"python");return e.trustAsHtml(r)},copy_to_clipboard:function(e){var t=document.createElement("textarea");t.value=e,t.setAttribute("readonly",""),t.style.position="absolute",t.style.left="-9999px",document.body.appendChild(t),t.select(),document.execCommand("copy"),document.body.removeChild(t)},generateSourceSQL:function(e){var t=["select"],n=a.size(e.columns),r=a.keys(e.columns);a.each(r,(function(e,r){var i=" "+e;r+1!=n&&(i+=","),t.push(i)}));const i=(e.database?e.database+".":"")+e.schema+"."+e.identifier;return t.push("from "+i),t.join("\n")}};return 
t}])},function(e,t){Prism.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},identifier:{pattern:/(^|[^@\\])`(?:\\[\s\S]|[^`\\]|``)*`/,greedy:!0,lookbehind:!0,inside:{punctuation:/^`|`$/}},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:COL|_INSERT)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:ING|S)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS 
LIKE|XOR)\b/i,punctuation:/[;[\]()`,.]/}},function(e,t){Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t ]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python},function(e,t){!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document){var e=/\n(?!$)/g,t=Prism.plugins.lineNumbers={getLine:function(e,t){if("PRE"===e.tagName&&e.classList.contains("line-numbers")){var n=e.querySelector(".line-numbers-rows");if(n){var r=parseInt(e.getAttribute("data-start"),10)||1,i=r+(n.children.length-1);ti&&(t=i);var o=t-r;return n.children[o]}}},resize:function(e){r([e])},assumeViewportIndependence:!0},n=void 0;window.addEventListener("resize",(function(){t.assumeViewportIndependence&&n===window.innerWidth||(n=window.innerWidth,r(Array.prototype.slice.call(document.querySelectorAll("pre.line-numbers"))))})),Prism.hooks.add("complete",(function(t){if(t.code){var n=t.element,i=n.parentNode;if(i&&/pre/i.test(i.nodeName)&&!n.querySelector(".line-numbers-rows")&&Prism.util.isActive(n,"line-numbers")){n.classList.remove("line-numbers"),i.classList.add("line-numbers");var o,a=t.code.match(e),s=a?a.length+1:1,l=new Array(s+1).join("");(o=document.createElement("span")).setAttribute("aria-hidden","true"),o.className="line-numbers-rows",o.innerHTML=l,i.hasAttribute("data-start")&&(i.style.counterReset="linenumber "+(parseInt(i.getAttribute("data-start"),10)-1)),t.element.appendChild(o),r([i]),Prism.hooks.run("line-numbers",t)}}})),Prism.hooks.add("line-numbers",(function(e){e.plugins=e.plugins||{},e.plugins.lineNumbers=!0}))}function 
r(t){if(0!=(t=t.filter((function(e){var t=function(e){if(!e)return null;return window.getComputedStyle?getComputedStyle(e):e.currentStyle||null}(e)["white-space"];return"pre-wrap"===t||"pre-line"===t}))).length){var n=t.map((function(t){var n=t.querySelector("code"),r=t.querySelector(".line-numbers-rows");if(n&&r){var i=t.querySelector(".line-numbers-sizer"),o=n.textContent.split(e);i||((i=document.createElement("span")).className="line-numbers-sizer",n.appendChild(i)),i.innerHTML="0",i.style.display="block";var a=i.getBoundingClientRect().height;return i.innerHTML="",{element:t,lines:o,lineHeights:[],oneLinerHeight:a,sizer:i}}})).filter(Boolean);n.forEach((function(e){var t=e.sizer,n=e.lines,r=e.lineHeights,i=e.oneLinerHeight;r[n.length-1]=void 0,n.forEach((function(e,n){if(e&&e.length>1){var o=t.appendChild(document.createElement("span"));o.style.display="block",o.textContent=e}else r[n]=i}))})),n.forEach((function(e){for(var t=e.sizer,n=e.lineHeights,r=0,i=0;i code {\n\tposition: relative;\n\twhite-space: inherit;\n}\n\n.line-numbers .line-numbers-rows {\n\tposition: absolute;\n\tpointer-events: none;\n\ttop: 0;\n\tfont-size: 100%;\n\tleft: -3.8em;\n\twidth: 3em; /* works for line-numbers below 1000 lines */\n\tletter-spacing: -1px;\n\tborder-right: 1px solid #999;\n\n\t-webkit-user-select: none;\n\t-moz-user-select: none;\n\t-ms-user-select: none;\n\tuser-select: none;\n\n}\n\n\t.line-numbers-rows > span {\n\t\tdisplay: block;\n\t\tcounter-increment: linenumber;\n\t}\n\n\t\t.line-numbers-rows > span:before {\n\t\t\tcontent: counter(linenumber);\n\t\t\tcolor: #999;\n\t\t\tdisplay: block;\n\t\t\tpadding-right: 0.8em;\n\t\t\ttext-align: right;\n\t\t}\n',""])},function(e,t,n){var r=n(468);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,'/**\n * GHColors theme by Avi Aryan (http://aviaryan.in)\n * Inspired by Github syntax coloring\n */\n\ncode[class*="language-"],\npre[class*="language-"] {\n\tcolor: #393A34;\n\tfont-family: "Consolas", "Bitstream Vera Sans Mono", "Courier New", Courier, monospace;\n\tdirection: ltr;\n\ttext-align: left;\n\twhite-space: pre;\n\tword-spacing: normal;\n\tword-break: normal;\n\tfont-size: .9em;\n\tline-height: 1.2em;\n\n\t-moz-tab-size: 4;\n\t-o-tab-size: 4;\n\ttab-size: 4;\n\n\t-webkit-hyphens: none;\n\t-moz-hyphens: none;\n\t-ms-hyphens: none;\n\thyphens: none;\n}\n\npre > code[class*="language-"] {\n\tfont-size: 1em;\n}\n\npre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,\ncode[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {\n\tbackground: #b3d4fc;\n}\n\npre[class*="language-"]::selection, pre[class*="language-"] ::selection,\ncode[class*="language-"]::selection, code[class*="language-"] ::selection {\n\tbackground: #b3d4fc;\n}\n\n/* Code blocks */\npre[class*="language-"] {\n\tpadding: 1em;\n\tmargin: .5em 0;\n\toverflow: auto;\n\tborder: 1px solid #dddddd;\n\tbackground-color: white;\n}\n\n/* Inline code */\n:not(pre) > code[class*="language-"] {\n\tpadding: .2em;\n\tpadding-top: 1px;\n\tpadding-bottom: 1px;\n\tbackground: #f8f8f8;\n\tborder: 1px solid #dddddd;\n}\n\n.token.comment,\n.token.prolog,\n.token.doctype,\n.token.cdata {\n\tcolor: #999988;\n\tfont-style: italic;\n}\n\n.token.namespace {\n\topacity: .7;\n}\n\n.token.string,\n.token.attr-value {\n\tcolor: #e3116c;\n}\n\n.token.punctuation,\n.token.operator {\n\tcolor: #393A34; /* no highlight 
*/\n}\n\n.token.entity,\n.token.url,\n.token.symbol,\n.token.number,\n.token.boolean,\n.token.variable,\n.token.constant,\n.token.property,\n.token.regex,\n.token.inserted {\n\tcolor: #36acaa;\n}\n\n.token.atrule,\n.token.keyword,\n.token.attr-name,\n.language-autohotkey .token.selector {\n\tcolor: #00a4db;\n}\n\n.token.function,\n.token.deleted,\n.language-autohotkey .token.tag {\n\tcolor: #9a050f;\n}\n\n.token.tag,\n.token.selector,\n.language-autohotkey .token.keyword {\n\tcolor: #00009f;\n}\n\n.token.important,\n.token.function,\n.token.bold {\n\tfont-weight: bold;\n}\n\n.token.italic {\n\tfont-style: italic;\n}\n',""])},function(e,t,n){n(33);const r=n(21),i=n(148),o=n(203),a=n(470);angular.module("dbt").factory("graph",["$state","$window","$q","selectorService","project","locationService",function(e,t,n,s,l,c){var u={vertical:{userPanningEnabled:!1,boxSelectionEnabled:!1,maxZoom:1.5},horizontal:{userPanningEnabled:!0,boxSelectionEnabled:!1,maxZoom:1,minZoom:.05}},d={none:{name:"null"},left_right:{name:"dagre",rankDir:"LR",rankSep:200,edgeSep:30,nodeSep:50},top_down:{name:"preset",positions:function(t){var n=e.params.unique_id;if(!n)return{x:0,y:0};var a=p.graph.pristine.dag,s=r.sortBy(o.ancestorNodes(a,n,1)),l=r.sortBy(o.descendentNodes(a,n,1)),c=r.partial(r.includes,s),u=r.partial(r.includes,l),d=a.filterNodes(c),f=a.filterNodes(u);return function(e,t,n,i){var o,a=100/(1+Math.max(t.length,n.length));if(e==i)return{x:0,y:0};if(r.includes(t,i))o={set:t,index:r.indexOf(t,i),factor:-1,type:"parent"};else{if(!r.includes(n,i))return{x:0,y:0};o={set:n,index:r.indexOf(n,i),factor:1,type:"child"}}var s=o.set.length;if("parent"==o.type)var l={x:(0+o.index)*a,y:-200-100*(s-o.index-1)};else l={x:(0+o.index)*a,y:200+100*(s-o.index-1)};return l}(n,i.alg.topsort(d),i.alg.topsort(f).reverse(),t.data("id"))}}},p={loading:!0,loaded:n.defer(),graph_element:null,orientation:"sidebar",expanded:!1,graph:{options:u.vertical,pristine:{nodes:{},edges:{},dag:null},elements:[],layout:d.none,style:[{selector:"edge.vertical",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#027599","arrow-scale":1.5,"line-color":"#027599",width:3,"target-distance-from-node":"5px","source-endpoint":"0% 50%","target-endpoint":"0deg"}},{selector:"edge.horizontal",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#006f8a","arrow-scale":1.5,"target-distance-from-node":"10px","source-distance-from-node":"5px","line-color":"#006f8a",width:3,"source-endpoint":"50% 0%","target-endpoint":"270deg"}},{selector:"edge[selected=1]",style:{"line-color":"#bd6bb6","target-arrow-color":"#bd6bb6","z-index":1}},{selector:'node[display="none"]',style:{display:"none"}},{selector:"node.vertical",style:{"text-margin-x":"5px","background-color":"#0094b3","border-color":"#0094b3","font-size":"16px",shape:"ellipse",color:"#fff",width:"5px",height:"5px",padding:"5px",content:"data(label)","font-weight":300,"text-valign":"center","text-halign":"right"}},{selector:"node.horizontal",style:{"background-color":"#0094b3","border-color":"#0094b3","font-size":"24px",shape:"roundrectangle",color:"#fff",width:"label",height:"label",padding:"12px",content:"data(label)","font-weight":300,"font-family":'-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Fira Sans", "Droid Sans", "Helvetica Neue", Helvetica, Arial, 
sans-serif',"text-valign":"center","text-halign":"center",ghost:"yes","ghost-offset-x":"2px","ghost-offset-y":"4px","ghost-opacity":.5,"text-outline-color":"#000","text-outline-width":"1px","text-outline-opacity":.2}},{selector:'node[resource_type="source"]',style:{"background-color":"#5fb825","border-color":"#5fb825"}},{selector:'node[resource_type="exposure"]',style:{"background-color":"#ff694b","border-color":"#ff694b"}},{selector:'node[resource_type="metric"]',style:{"background-color":"#ff5688","border-color":"#ff5688"}},{selector:'node[resource_type="semantic_model"]',style:{"background-color":"#ffa8c2","border-color":"#ffa8c2"}},{selector:'node[language="python"]',style:{"background-color":"#6a5acd","border-color":"#6a5acd"}},{selector:"node[node_color]",style:{"background-color":"data(node_color)","border-color":"data(node_color)"}},{selector:"node[selected=1]",style:{"background-color":"#bd6bb6","border-color":"#bd6bb6"}},{selector:"node.horizontal[selected=1]",style:{"background-color":"#88447d","border-color":"#88447d"}},{selector:"node.horizontal.dirty",style:{"background-color":"#919599","border-color":"#919599"}},{selector:"node[hidden=1]",style:{"background-color":"#919599","border-color":"#919599","background-opacity":.5}},{selector:'node[access="private"]',style:{"background-opacity":.2,"border-width":2,ghost:"no"}}],ready:function(e){console.log("graph ready")}}};function f(e,t,n){var i=r.map(e,(function(e){return p.graph.pristine.nodes[e]})),o=[];r.flatten(r.each(e,(function(t){var n=p.graph.pristine.edges[t];r.each(n,(function(t){r.includes(e,t.data.target)&&r.includes(e,t.data.source)&&o.push(t)}))})));var s=r.compact(i).concat(r.compact(o));return r.each(p.graph.elements,(function(e){e.data.display="none",e.data.selected=0,e.data.hidden=0,e.classes=n})),r.each(s,(function(e){e.data.display="element",e.classes=n,t&&r.includes(t,e.data.unique_id)&&(e.data.selected=1),r.get(e,["data","docs","show"],!0)||(e.data.hidden=1);var i=r.get(e,["data","docs","node_color"]);i&&a.isValidColor(i)&&(e.data.node_color=i)})),p.graph.elements=r.filter(s,(function(e){return"element"==e.data.display})),e}function h(e,t,n){var r=p.graph.pristine.dag;if(r){var i=p.graph.pristine.nodes,o=s.selectNodes(r,i,e),a=n?o.matched:[];return f(o.selected,a,t)}}return p.setGraphReady=function(e){p.loading=!1,p.loaded.resolve(),p.graph_element=e},p.ready=function(e){p.loaded.promise.then((function(){e(p)}))},p.manifest={},p.packages=[],p.selected_node=null,p.getCanvasHeight=function(){return.8*t.innerHeight+"px"},l.ready((function(e){p.manifest=e,p.packages=r.uniq(r.map(p.manifest.nodes,"package_name")),r.each(r.filter(p.manifest.nodes,(function(e){var t=r.includes(["model","seed","source","snapshot","analysis","exposure","metric","semantic_model","operation"],e.resource_type),n="test"==e.resource_type&&!e.hasOwnProperty("test_metadata");return t||n})),(function(e){var t={group:"nodes",data:r.assign(e,{parent:e.package_name,id:e.unique_id,is_group:"false"})};p.graph.pristine.nodes[e.unique_id]=t})),r.each(p.manifest.parent_map,(function(e,t){r.each(e,(function(e){var n=p.manifest.nodes[e],i=p.manifest.nodes[t];if(r.includes(["model","source","seed","snapshot","metric","semantic_model"],n.resource_type)&&("test"!=i.resource_type||!i.hasOwnProperty("test_metadata"))){var o=n.unique_id+"|"+i.unique_id,a={group:"edges",data:{source:n.unique_id,target:i.unique_id,unique_id:o}},s=i.unique_id;p.graph.pristine.edges[s]||(p.graph.pristine.edges[s]=[]),p.graph.pristine.edges[s].push(a)}}))}));var t=new 
i.Graph({directed:!0});r.each(p.graph.pristine.nodes,(function(e){t.setNode(e.data.unique_id,e.data.name)})),r.each(p.graph.pristine.edges,(function(e){r.each(e,(function(e){t.setEdge(e.data.source,e.data.target)}))})),p.graph.pristine.dag=t,p.graph.elements=r.flatten(r.values(p.graph.pristine.nodes).concat(r.values(p.graph.pristine.edges))),f(t.nodes())})),p.hideGraph=function(){p.orientation="sidebar",p.expanded=!1},p.showVerticalGraph=function(e,t){p.orientation="sidebar",t&&(p.expanded=!0);var n=h(r.assign({},s.options,{include:"+"+e+"+",exclude:"",hops:1}),"vertical",!0);return p.graph.layout=d.top_down,p.graph.options=u.vertical,n},p.showFullGraph=function(e){p.orientation="fullscreen",p.expanded=!0;var t=r.assign({},s.options);e?(t.include="+"+e+"+",t.exclude=""):(t.include="",t.exclude="");var n=h(t,"horizontal",!0);return p.graph.layout=d.left_right,p.graph.options=u.horizontal,c.setState(t),n},p.updateGraph=function(e){p.orientation="fullscreen",p.expanded=!0;var t=h(e,"horizontal",!1);return p.graph.layout=d.left_right,p.graph.options=u.horizontal,c.setState(e),t},p.deselectNodes=function(){"fullscreen"==p.orientation&&p.graph_element.elements().data("selected",0)},p.selectNode=function(e){if("fullscreen"==p.orientation){p.graph.pristine.nodes[e];var t=p.graph.pristine.dag,n=r.indexBy(o.ancestorNodes(t,e)),i=r.indexBy(o.descendentNodes(t,e));n[e]=e,i[e]=e;var a=p.graph_element;r.each(p.graph.elements,(function(t){var r=a.$id(t.data.id);n[t.data.source]&&n[t.data.target]||i[t.data.source]&&i[t.data.target]||t.data.unique_id==e?r.data("selected",1):r.data("selected",0)}))}},p.markDirty=function(e){p.markAllClean(),r.each(e,(function(e){p.graph_element.$id(e).addClass("dirty")}))},p.markAllClean=function(){p.graph_element&&p.graph_element.elements().removeClass("dirty")},p}])},function(e,t,n){"use strict";n.r(t),n.d(t,"isValidColor",(function(){return i}));const r=new 
Set(["aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","ghostwhite","gold","goldenrod","gray","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightpink","lightsalmon","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumslateblue","mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","rebeccapurple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen"]);function i(e){if(!e)return!1;const t=e.trim().toLowerCase();if(""===t)return!1;const n=t.match(/^#([A-Fa-f0-9]{3}){1,2}$/),i=r.has(t);return Boolean(n)||i}},function(e,t,n){n(33);const r=n(21),i=n(472);angular.module("dbt").factory("selectorService",["$state",function(e){var t={include:"",exclude:"",packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","unit_test","analysis","exposure","metric","semantic_model"],depth:1},n={view_node:null,selection:{clean:r.clone(t),dirty:r.clone(t)},options:{packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric","semantic_model","unit_test"]},init:function(e){r.each(e,(function(e,r){n.options[r]=e,t[r]=e,n.selection.clean[r]=e,n.selection.dirty[r]=e}))},resetSelection:function(e){var i={include:e&&r.includes(["model","seed","snapshot"],e.resource_type)?"+"+e.name+"+":e&&"source"==e.resource_type?"+source:"+e.source_name+"."+e.name+"+":e&&"exposure"==e.resource_type?"+exposure:"+e.name:e&&"metric"==e.resource_type?"+metric:"+e.name:e&&"semantic_model"==e.resource_type?"+semantic_model:"+e.name:e&&r.includes(["analysis","test","unit_test"],e.resource_type)?"+"+e.name:""},o=r.assign({},t,i);n.selection.clean=r.clone(o),n.selection.dirty=r.clone(o),n.view_node=e},getViewNode:function(){return n.view_node},excludeNode:function(e,t){var r,i=n.selection.dirty.exclude,o=t.parents?"+":"",a=t.children?"+":"",s=i.length>0?" 
":"";"source"==e.resource_type?(o+="source:",r=e.source_name+"."+e.name):["exposure","metric","semantic_model"].indexOf(e.resource_type)>-1?(o+=e.resource_type+":",r=e.name):r=e.name;var l=i+s+o+r+a;return n.selection.dirty.exclude=l,n.updateSelection()},selectSource:function(e,t){var r="source:"+e+(t.children?"+":"");return n.selection.dirty.include=r,n.updateSelection()},clearViewNode:function(){n.view_node=null},isDirty:function(){return!r.isEqual(n.selection.clean,n.selection.dirty)},updateSelection:function(){return n.selection.clean=r.clone(n.selection.dirty),n.selection.clean},selectNodes:function(e,t,n){return i.selectNodes(e,t,n)}};return n}])},function(e,t,n){const r=n(21),i=n(473);function o(e,t){return t||(t=" "),r.filter(r.uniq(e.split(t)),(function(e){return e.length>0}))}function a(e){var t={raw:e,select_at:!1,select_children:!1,children_depth:null,select_parents:!1,parents_depth:null};const n=new RegExp(""+/^/.source+/(?(\@))?/.source+/(?((?(\d*))\+))?/.source+/((?([\w.]+)):)?/.source+/(?(.*?))/.source+/(?(\+(?(\d*))))?/.source+/$/.source).exec(e).groups;t.select_at="@"==n.childs_parents,t.select_parents=!!n.parents,t.select_children=!!n.children,n.parents_depth&&(t.parents_depth=parseInt(n.parents_depth)),n.children_depth&&(t.children_depth=parseInt(n.children_depth));var r=n.method,i=n.value;return r?-1!=r.indexOf(".")&&([r,selector_modifier]=r.split(".",2),i={config:selector_modifier,value:i}):r="implicit",t.selector_type=r,t.selector_value=i,t}function s(e){var t=o(e," ");return r.map(t,(function(e){var t=o(e,",");return t.length>1?{method:"intersect",selectors:r.map(t,a)}:{method:"none",selectors:r.map([e],a)}}))}function l(e,t){var n=s(e),i=null,o=null;return r.each(n,(function(e){var n="intersect"==e.method?r.intersection:r.union;r.each(e.selectors,(function(e){var r=t(e);null===i?(i=r.matched,o=r.selected):(i=n(i,r.matched),o=n(o,r.selected))}))})),{matched:i||[],selected:o||[]}}e.exports={splitSpecs:o,parseSpec:a,parseSpecs:s,buildSpec:function(e,t,n){return{include:s(e),exclude:s(t),hops:n}},applySpec:l,selectNodes:function(e,t,n){n.include,n.exclude;var o,a=r.partial(i.getNodesFromSpec,e,t,n.hops);r.values(t),o=0==n.include.trim().length?{selected:e.nodes(),matched:[]}:l(n.include,a);var s=l(n.exclude,a),c=o.selected,u=o.matched;c=r.difference(c,s.selected),u=r.difference(u,s.matched);var d=[];return r.each(c,(function(e){var i=t[e];i.data.tags||(i.data.tags=[]);var o=r.includes(n.packages,i.data.package_name),a=r.intersection(n.tags,i.data.tags).length>0,s=r.includes(n.tags,null)&&0==i.data.tags.length,l=r.includes(n.resource_types,i.data.resource_type);o&&(a||s)&&l||d.push(i.data.unique_id)})),{selected:r.difference(c,d),matched:r.difference(u,d)}}}},function(e,t,n){const r=n(21),i=n(203);var o="fqn",a="tag",s="source",l="exposure",c="metric",u="semantic_model",d="group",p="path",f="file",h="package",g="config",m="test_name",v="test_type",b={};function y(e,t,n){var r=e.slice(-1)[0],i=e.slice(-2,-1)[0];if(t===r)return!0;if(version_options=[i,i+"_"+r,i+"."+r],n&&version_options.includes(t))return!0;var o=e.reduce((e,t)=>e.concat(t.split(".")),[]),a=t.split(".");if(o.length-1||!r.hasOwnProperty("test_metadata")&&["data","singular"].indexOf(t)>-1)&&n.push(r)})),n}function _(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("source"==r.resource_type){var i,o,a=r.source_name,s=r.name;-1!=t.indexOf(".")?[i,o]=t.split(".",2):(i=t,o=null),("*"==i||i==a&&"*"===o||i==a&&o===s||i==a&&null===o)&&n.push(e.data)}})),n}b["implicit"]=function(e,t){var 
n=x(e,t),i=w(e,t),o=[];t.toLowerCase().endsWith(".sql")&&(o=k(e,t));var a=r.uniq([].concat(r.map(n,"unique_id"),r.map(i,"unique_id"),r.map(o,"unique_id")));return r.map(a,t=>e[t].data)},b[o]=x,b[a]=A,b[s]=_,b[l]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("exposure"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},b[c]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("metric"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},b[u]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("semantic_model"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},b[d]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;r.group==t&&n.push(r)})),n},b[p]=w,b[f]=k,b[h]=E,b[g]=S,b[m]=$,b[v]=C,e.exports={isFQNMatch:y,getNodesByFQN:x,getNodesByTag:A,getNodesBySource:_,getNodesByPath:w,getNodesByPackage:E,getNodesByConfig:S,getNodesByTestName:$,getNodesByTestType:C,getNodesFromSpec:function(e,t,n,o){const a=b[o.selector_type];if(!a)return{selected:[],matched:[]};var s=a(t,o.selector_value),l=[],c=[];return r.each(s,(function(t){var a=t.unique_id;c.push(t.unique_id);var s=[],u=[],d=[];if(o.select_at&&(d=r.union(i.selectAt(e,a))),o.select_parents){var p=n||o.parents_depth;s=i.ancestorNodes(e,a,p)}if(o.select_children){p=n||o.children_depth;u=i.descendentNodes(e,a,p)}l=r.union([a],l,u,s,d)})),{selected:l,matched:c}}}},function(e,t,n){const r=n(9);n(475);r.module("dbt").factory("trackingService",["$location","selectorService","$rootScope",function(e,t,n){var r={initialized:!1,snowplow:null,project_id:null,init:function(e){r.initialized||(r.initialized=!0,r.project_id=e.project_id,!0===e.track&&r.turn_on_tracking())},isHosted:function(){return hostedgetdbt=window.location.hostname.indexOf(".getdbt.com")>-1,hosteddbt=window.location.hostname.indexOf(".dbt.com")>-1,hostedgetdbt||hosteddbt},turn_on_tracking:function(){var e,t,n,i,o,a;e=window,t=document,n="script",e[i="snowplow"]||(e.GlobalSnowplowNamespace=e.GlobalSnowplowNamespace||[],e.GlobalSnowplowNamespace.push(i),e[i]=function(){(e[i].q=e[i].q||[]).push(arguments)},e[i].q=e[i].q||[],o=t.createElement(n),a=t.getElementsByTagName(n)[0],o.async=1,o.src="//d1fc8wv8zag5ca.cloudfront.net/2.9.0/sp.js",a.parentNode.insertBefore(o,a));var s={appId:"dbt-docs",forceSecureTracker:!0,respectDoNotTrack:!0,userFingerprint:!1,contexts:{webPage:!0}};r.isHosted()&&(window.location.hostname.indexOf(".getdbt.com")>-1?s.cookieDomain=".getdbt.com":s.cookieDomain=".dbt.com"),r.snowplow=window.snowplow,r.snowplow("newTracker","sp","fishtownanalytics.sinter-collect.com",s),r.snowplow("enableActivityTracking",30,30),r.track_pageview()},fuzzUrls:function(){r.isHosted()||(r.snowplow("setCustomUrl","https://fuzzed.getdbt.com/"),r.snowplow("setReferrerUrl","https://fuzzed.getdbt.com/"))},getContext:function(){return[{schema:"iglu:com.dbt/dbt_docs/jsonschema/1-0-0",data:{is_cloud_hosted:r.isHosted(),core_project_id:r.project_id}}]},track_pageview:function(){if(r.snowplow){r.fuzzUrls();r.snowplow("trackPageView",null,r.getContext())}},track_event:function(e,t,n,i){r.snowplow&&(r.fuzzUrls(),r.snowplow("trackStructEvent","dbt-docs",e,t,n,i,r.getContext()))},track_graph_interaction:function(e,t){r.snowplow&&(r.fuzzUrls(),r.track_event("graph","interact",e,t))}};return r}])},function(e,t,n){var 
r,i,o,a,s;r=n(476),i=n(204).utf8,o=n(477),a=n(204).bin,(s=function(e,t){e.constructor==String?e=t&&"binary"===t.encoding?a.stringToBytes(e):i.stringToBytes(e):o(e)?e=Array.prototype.slice.call(e,0):Array.isArray(e)||e.constructor===Uint8Array||(e=e.toString());for(var n=r.bytesToWords(e),l=8*e.length,c=1732584193,u=-271733879,d=-1732584194,p=271733878,f=0;f>>24)|4278255360&(n[f]<<24|n[f]>>>8);n[l>>>5]|=128<>>9<<4)]=l;var h=s._ff,g=s._gg,m=s._hh,v=s._ii;for(f=0;f>>0,u=u+y>>>0,d=d+x>>>0,p=p+w>>>0}return r.endian([c,u,d,p])})._ff=function(e,t,n,r,i,o,a){var s=e+(t&n|~t&r)+(i>>>0)+a;return(s<>>32-o)+t},s._gg=function(e,t,n,r,i,o,a){var s=e+(t&r|n&~r)+(i>>>0)+a;return(s<>>32-o)+t},s._hh=function(e,t,n,r,i,o,a){var s=e+(t^n^r)+(i>>>0)+a;return(s<>>32-o)+t},s._ii=function(e,t,n,r,i,o,a){var s=e+(n^(t|~r))+(i>>>0)+a;return(s<>>32-o)+t},s._blocksize=16,s._digestsize=16,e.exports=function(e,t){if(null==e)throw new Error("Illegal argument "+e);var n=r.wordsToBytes(s(e,t));return t&&t.asBytes?n:t&&t.asString?a.bytesToString(n):r.bytesToHex(n)}},function(e,t){var n,r;n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",r={rotl:function(e,t){return e<>>32-t},rotr:function(e,t){return e<<32-t|e>>>t},endian:function(e){if(e.constructor==Number)return 16711935&r.rotl(e,8)|4278255360&r.rotl(e,24);for(var t=0;t0;e--)t.push(Math.floor(256*Math.random()));return t},bytesToWords:function(e){for(var t=[],n=0,r=0;n>>5]|=e[n]<<24-r%32;return t},wordsToBytes:function(e){for(var t=[],n=0;n<32*e.length;n+=8)t.push(e[n>>>5]>>>24-n%32&255);return t},bytesToHex:function(e){for(var t=[],n=0;n>>4).toString(16)),t.push((15&e[n]).toString(16));return t.join("")},hexToBytes:function(e){for(var t=[],n=0;n>>6*(3-o)&63)):t.push("=");return t.join("")},base64ToBytes:function(e){e=e.replace(/[^A-Z0-9+\/]/gi,"");for(var t=[],r=0,i=0;r>>6-2*i);return t}},e.exports=r},function(e,t){function n(e){return!!e.constructor&&"function"==typeof e.constructor.isBuffer&&e.constructor.isBuffer(e)} +/*! 
+ * Determine if an object is a Buffer + * + * @author Feross Aboukhadijeh + * @license MIT + */ +e.exports=function(e){return null!=e&&(n(e)||function(e){return"function"==typeof e.readFloatLE&&"function"==typeof e.slice&&n(e.slice(0,0))}(e)||!!e._isBuffer)}},function(e,t,n){n(9).module("dbt").factory("locationService",["$state",function(e){var t={};return t.parseState=function(e){return function(e){return{selected:{include:e.g_i||"",exclude:e.g_e||""},show_graph:!!e.g_v}}(e)},t.setState=function(t){var n=function(e){var t={g_v:1};return t.g_i=e.include,t.g_e=e.exclude,t}(t),r=e.current.name;e.go(r,n)},t.clearState=function(){var t=e.current.name;e.go(t,{g_i:null,g_e:null,g_v:null})},t}])},function(e,t,n){"use strict";const r=n(9),i=n(202);r.module("dbt").controller("OverviewCtrl",["$scope","$state","project",function(e,t,n){e.overview_md="(loading)",n.ready((function(n){let r=t.params.project_name?t.params.project_name:null;var o=n.docs["doc.dbt.__overview__"],a=i.filter(n.docs,{name:"__overview__"});if(i.each(a,(function(e){"dbt"!=e.package_name&&(o=e)})),null!==r){o=n.docs[`doc.${r}.__${r}__`]||o;let e=i.filter(n.docs,{name:`__${r}__`});i.each(e,e=>{e.package_name!==r&&(o=e)})}e.overview_md=o.block_contents}))}])},function(e,t,n){"use strict";n(9).module("dbt").controller("SourceListCtrl",["$scope","$state","project",function(e,t,n){e.source=t.params.source,e.model={},e.extra_table_fields=[],e.has_more_info=function(e){return(e.description||"").length},e.toggle_source_expanded=function(t){e.has_more_info(t)&&(t.expanded=!t.expanded)},n.ready((function(t){var n=_.filter(t.nodes,(function(t){return t.source_name==e.source}));if(0!=n.length){n.sort((e,t)=>e.name.localeCompare(t.name));var r=n[0];e.model={name:e.source,source_description:r.source_description,sources:n};var i=_.uniq(_.map(n,"metadata.owner")),o=_.uniq(_.map(n,"database")),a=_.uniq(_.map(n,"schema"));e.extra_table_fields=[{name:"Loader",value:r.loader},{name:1==i.length?"Owner":"Owners",value:i.join(", ")},{name:1==o.length?"Database":"Databases",value:o.join(", ")},{name:1==a.length?"Schema":"Schemas",value:a.join(", ")},{name:"Tables",value:n.length}]}}))}])},function(e,t,n){const r=n(9),i={main:n(482),overview:n(483),graph:n(484),source:n(205),source_list:n(485),model:n(486),source:n(205),snapshot:n(487),seed:n(488),unit_test:n(489),test:n(490),analysis:n(491),macro:n(492),exposure:n(493),metric:n(494),semantic_model:n(495),operation:n(496)};r.module("dbt").config(["$stateProvider","$urlRouterProvider",function(e,t){var 
n="g_v&g_i&g_e&g_p&g_n";t.otherwise("/overview"),e.state("dbt",{url:"/",abstract:!0,controller:"MainController",templateUrl:i.main}).state("dbt.overview",{url:"overview?"+n,controller:"OverviewCtrl",templateUrl:i.overview}).state("dbt.project_overview",{url:"overview/:project_name?"+n,controller:"OverviewCtrl",templateUrl:i.overview,params:{project_name:{type:"string"}}}).state("dbt.graph",{url:"graph",controller:"GraphCtrl",templateUrl:i.graph}).state("dbt.model",{url:"model/:unique_id?section&"+n,controller:"ModelCtrl",templateUrl:i.model,params:{unique_id:{type:"string"}}}).state("dbt.seed",{url:"seed/:unique_id?section&"+n,controller:"SeedCtrl",templateUrl:i.seed,params:{unique_id:{type:"string"}}}).state("dbt.snapshot",{url:"snapshot/:unique_id?section&"+n,controller:"SnapshotCtrl",templateUrl:i.snapshot,params:{unique_id:{type:"string"}}}).state("dbt.unit_test",{url:"unit_test/:unique_id?section&"+n,controller:"TestCtrl",templateUrl:i.unit_test,params:{unique_id:{type:"string"}}}).state("dbt.test",{url:"test/:unique_id?section&"+n,controller:"TestCtrl",templateUrl:i.test,params:{unique_id:{type:"string"}}}).state("dbt.analysis",{url:"analysis/:unique_id?section&"+n,controller:"AnalysisCtrl",templateUrl:i.analysis,params:{unique_id:{type:"string"}}}).state("dbt.source",{url:"source/:unique_id?section&"+n,controller:"SourceCtrl",templateUrl:i.source,params:{unique_id:{type:"string"}}}).state("dbt.source_list",{url:"source_list/:source?section&"+n,controller:"SourceListCtrl",templateUrl:i.source_list,params:{source:{type:"string"}}}).state("dbt.macro",{url:"macro/:unique_id?section",controller:"MacroCtrl",templateUrl:i.macro,params:{unique_id:{type:"string"}}}).state("dbt.exposure",{url:"exposure/:unique_id?section&"+n,controller:"ExposureCtrl",templateUrl:i.exposure,params:{unique_id:{type:"string"}}}).state("dbt.metric",{url:"metric/:unique_id?section&"+n,controller:"MetricCtrl",templateUrl:i.metric,params:{unique_id:{type:"string"}}}).state("dbt.semantic_model",{url:"semantic_model/:unique_id?section&"+n,controller:"SemanticModelCtrl",templateUrl:i.semantic_model,params:{unique_id:{type:"string"}}}).state("dbt.operation",{url:"operation/:unique_id?section&"+n,controller:"OperationCtrl",templateUrl:i.operation,params:{unique_id:{type:"string"}}})}])},function(e,t){var n="/main/main.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'\n\n
    \n \n\n
    \n
    \n
    \n
    \n
    \n
    \n \n
    \n
    \n
    \n
    \n
    \n \n
    \n
    \n
    \n
    \n
    \n
    \n \n
    \n
    \n \n
    \n
    \n
    \n
    \n \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t){var n="/overview/overview.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n \n
    \n
    \n

    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t){var n="/graph/graph.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n
    \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t){var n="/sources/source_list.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'\n\n
    \n \n
    \n
    \n
    \n
    \n \n
    \n\n
    \n
    \n
    \n
    Description
    \n
    \n
    \n
    \n
    This {{ model.resource_type }} is not currently documented
    \n
    \n
    \n
    \n
    \n\n\n
    \n
    \n
    \n
    Source Tables
    \n
    \n
    \n
    \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    SourceTableDescriptionLinkMore?
    \n
    \n {{ source.source_name }}\n
    \n
    \n {{ source.name }}

    \n
    \n {{ source.description }}\n \n View docs\n \n \n \n \n \n \n \n \n \n
    \n
    \n
    \n
    Description
    \n \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t){var n="/docs/model.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'\n\n
    \n \n
    \n
    \n
    \n
    \n \n
    \n\n
    \n
    \n
    \n
    Description
    \n
    \n
    \n
    \n
    This {{ model.resource_type }} is not currently documented
    \n
    \n
    \n
    \n
    \n\n
    \n
    \n
    \n
    Columns
    \n \n
    \n
    \n\n
    \n
    \n
    \n
    Referenced By
    \n \n
    \n
    \n\n
    \n
    \n
    \n
    Depends On
    \n \n
    \n
    \n\n
    \n
    \n
    \n \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t){var n="/docs/snapshot.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'\n\n
    \n \n
    \n
    \n
    \n
    \n \n
    \n\n
    \n
    \n
    \n
    Description
    \n
    \n
    \n
    \n
    This {{ model.resource_type }} is not currently documented
    \n
    \n
    \n
    \n
    \n\n
    \n
    \n
    \n
    Columns
    \n \n
    \n
    \n\n
    \n
    \n
    \n
    Referenced By
    \n \n
    \n
    \n\n
    \n
    \n
    \n
    Depends On
    \n \n
    \n
    \n\n
    \n
    \n
    \n \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t){var n="/docs/seed.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'\n\n
    \n \n
    \n
    \n
    \n
    \n \n
    \n\n
    \n
    \n
    \n
    Description
    \n
    \n
    \n
    \n
    This {{ model.resource_type }} is not currently documented
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    Columns
    \n \n
    \n
    \n\n
    \n
    \n
    \n
    Referenced By
[remainder of minified docs-site bundle elided: regenerated main.js $templateCache entries for /docs/unit_test.html, /docs/test.html, /docs/analysis.html, /docs/macro.html, /docs/exposure.html, /docs/metric.html, /docs/semantic_model.html, and /docs/operation.html (each a Description / Depends On template), ending with the added line +//# sourceMappingURL=main.js.map]
diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index ff1159ab6e6..b1fe7581c30 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -1,7 +1,7 @@ import os import threading import time -from typing import Optional, List +from typing import Optional, List, AbstractSet, Dict from .base import BaseRunner from .printer import ( @@ -28,6 +28,8 @@ from dbt.adapters.capability import Capability from dbt.adapters.contracts.connection import AdapterResponse +from dbt.adapters.base.relation import BaseRelation +from dbt.adapters.base.impl import FreshnessResponse from dbt.contracts.graph.nodes import SourceDefinition, HookNode from dbt_common.events.base_types import EventLevel from dbt.graph import ResourceTypeSelector @@ -36,6 +38,15 @@ class FreshnessRunner(BaseRunner): + def __init__(self, config, adapter, node, node_index, num_nodes) -> None: + super().__init__(config, adapter, node, node_index, num_nodes) + self._metadata_freshness_cache: Dict[BaseRelation, FreshnessResult] = {} + + def set_metadata_freshness_cache( + self, metadata_freshness_cache: Dict[BaseRelation, FreshnessResult] + ) -> None: + self._metadata_freshness_cache = metadata_freshness_cache + def on_skip(self): raise DbtRuntimeError("Freshness: nodes cannot be skipped!") @@ -105,7 +116,7 @@ def execute(self, compiled_node, manifest): with self.adapter.connection_named(compiled_node.unique_id, compiled_node): self.adapter.clear_transaction() adapter_response: Optional[AdapterResponse] = None - freshness = None + freshness: Optional[FreshnessResponse] = None if compiled_node.loaded_at_field is not None: adapter_response, freshness = self.adapter.calculate_freshness( @@ -120,21 +131,24 @@ def execute(self, compiled_node, manifest): if compiled_node.freshness.filter is not None: fire_event( Note( - f"A filter cannot be applied to a metadata freshness check on source '{compiled_node.name}'.", - EventLevel.WARN, - ) + msg=f"A filter cannot be applied to a metadata freshness check on source '{compiled_node.name}'." + ), + EventLevel.WARN, ) - adapter_response, freshness = self.adapter.calculate_freshness_from_metadata( - relation, - macro_resolver=manifest, - ) + metadata_source = self.adapter.Relation.create_from(self.config, compiled_node) + if metadata_source in self._metadata_freshness_cache: + freshness = self._metadata_freshness_cache[metadata_source] + else: + adapter_response, freshness = self.adapter.calculate_freshness_from_metadata( + relation, + macro_resolver=manifest, + ) status = compiled_node.freshness.status(freshness["age"]) else: - status = FreshnessStatus.Warn - fire_event( - Note(f"Skipping freshness for source {compiled_node.name}."), + raise DbtRuntimeError( + f"Could not compute freshness for source {compiled_node.name}: no 'loaded_at_field' provided and {self.adapter.type()} adapter does not support metadata-based freshness checks."
) # adapter_response was not returned in previous versions, so this will be None @@ -172,6 +186,10 @@ def node_is_match(self, node): class FreshnessTask(RunTask): + def __init__(self, args, config, manifest) -> None: + super().__init__(args, config, manifest) + self._metadata_freshness_cache: Dict[BaseRelation, FreshnessResult] = {} + def result_path(self): if self.args.output: return os.path.realpath(self.args.output) @@ -191,6 +209,17 @@ def get_node_selector(self): resource_types=[NodeType.Source], ) + def before_run(self, adapter, selected_uids: AbstractSet[str]) -> None: + super().before_run(adapter, selected_uids) + if adapter.supports(Capability.TableLastModifiedMetadataBatch): + self.populate_metadata_freshness_cache(adapter, selected_uids) + + def get_runner(self, node) -> BaseRunner: + freshness_runner = super().get_runner(node) + assert isinstance(freshness_runner, FreshnessRunner) + freshness_runner.set_metadata_freshness_cache(self._metadata_freshness_cache) + return freshness_runner + def get_runner_type(self, _): return FreshnessRunner @@ -215,3 +244,40 @@ def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]: return super().get_hooks_by_type(hook_type) else: return [] + + def populate_metadata_freshness_cache(self, adapter, selected_uids: AbstractSet[str]) -> None: + if self.manifest is None: + raise DbtInternalError("Manifest must be set to populate metadata freshness cache") + + batch_metadata_sources: List[BaseRelation] = [] + for selected_source_uid in list(selected_uids): + source = self.manifest.sources.get(selected_source_uid) + if source and source.loaded_at_field is None: + metadata_source = adapter.Relation.create_from(self.config, source) + batch_metadata_sources.append(metadata_source) + + fire_event( + Note( + msg=f"Pulling freshness from warehouse metadata tables for {len(batch_metadata_sources)} sources" + ), + EventLevel.INFO, + ) + + try: + _, metadata_freshness_results = adapter.calculate_freshness_from_metadata_batch( + batch_metadata_sources + ) + self._metadata_freshness_cache.update(metadata_freshness_results) + except Exception as e: + # This error handling is intentionally very coarse. + # If anything goes wrong during batch metadata calculation, we can safely + # leave _metadata_freshness_cache unpopulated. + # Downstream, this will be gracefully handled as a cache miss and non-batch + # metadata-based freshness will still be performed on a source-by-source basis. 
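[editor's note: the populate/fallback flow in the freshness.py hunk above — one batched metadata query up front, per-source lookup on a cache miss — can be sketched on its own. A minimal illustration; `client.batch_last_modified` and `client.last_modified` are hypothetical stand-ins for the adapter methods, not dbt's API:

from typing import Dict, Iterable

class FreshnessCache:
    """Batch-then-fallback cache; `client` is a hypothetical adapter stand-in."""

    def __init__(self, client) -> None:
        self._client = client
        self._cache: Dict[str, float] = {}

    def populate(self, sources: Iterable[str]) -> None:
        # One batched metadata query; on any failure leave the cache empty,
        # so every source below degrades to an individual lookup.
        try:
            self._cache.update(self._client.batch_last_modified(list(sources)))
        except Exception as exc:
            print(f"warning: batch freshness failed, falling back per-source: {exc}")

    def age_of(self, source: str) -> float:
        if source in self._cache:  # cache hit: no per-source warehouse query
            return self._cache[source]
        return self._client.last_modified(source)  # cache miss: fall back

The coarse except clause mirrors the intent stated in the diff's own comment: a batch failure is never fatal, only slower. end note]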
+ fire_event( + Note(msg=f"Metadata freshness could not be computed in batch: {e}"), + EventLevel.WARN, + ) + + def get_freshness_metadata_cache(self) -> Dict[BaseRelation, FreshnessResult]: + return self._metadata_freshness_cache diff --git a/core/dbt/task/init.py b/core/dbt/task/init.py index 0b6f4fb22d6..aa7e942c206 100644 --- a/core/dbt/task/init.py +++ b/core/dbt/task/init.py @@ -32,10 +32,6 @@ ProjectCreated, ) -from dbt.include.starter_project import PACKAGE_PATH as starter_project_directory - -from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME - from dbt.task.base import BaseTask, move_to_nearest_project_dir DOCS_URL = "https://docs.getdbt.com/docs/configure-your-profile" @@ -57,7 +53,10 @@ class InitTask(BaseTask): - def copy_starter_repo(self, project_name): + def copy_starter_repo(self, project_name: str) -> None: + # Lazy import to avoid ModuleNotFoundError + from dbt.include.starter_project import PACKAGE_PATH as starter_project_directory + fire_event(StarterProjectPath(dir=starter_project_directory)) shutil.copytree( starter_project_directory, project_name, ignore=shutil.ignore_patterns(*IGNORE_FILES) @@ -265,6 +264,10 @@ def setup_profile(self, profile_name: str) -> None: def get_valid_project_name(self) -> str: """Returns a valid project name, either from CLI arg or user prompt.""" + + # Lazy import to avoid ModuleNotFoundError + from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME + name = self.args.project_name internal_package_names = {GLOBAL_PROJECT_NAME} available_adapters = list(_get_adapter_plugin_names()) diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index d0bb7628c78..e345bc78d94 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -4,11 +4,16 @@ Exposure, SourceDefinition, Metric, + SavedQuery, SemanticModel, UnitTestDefinition, ) +from dbt.cli.flags import Flags +from dbt.config.runtime import RuntimeConfig +from dbt.contracts.graph.manifest import Manifest from dbt.flags import get_flags from dbt.graph import ResourceTypeSelector +from dbt.task.base import resource_types_from_args from dbt.task.runnable import GraphRunnableTask from dbt.task.test import TestSelector from dbt.node_types import NodeType @@ -31,6 +36,7 @@ class ListTask(GraphRunnableTask): NodeType.Source, NodeType.Exposure, NodeType.Metric, + NodeType.SavedQuery, NodeType.SemanticModel, NodeType.Unit, ) @@ -51,7 +57,7 @@ class ListTask(GraphRunnableTask): ) ) - def __init__(self, args, config, manifest) -> None: + def __init__(self, args: Flags, config: RuntimeConfig, manifest: Manifest) -> None: super().__init__(args, config, manifest) if self.args.models: if self.args.select: @@ -83,10 +89,12 @@ def _iterate_selected_nodes(self): yield self.manifest.semantic_models[unique_id] elif unique_id in self.manifest.unit_tests: yield self.manifest.unit_tests[unique_id] + elif unique_id in self.manifest.saved_queries: + yield self.manifest.saved_queries[unique_id] else: raise DbtRuntimeError( f'Got an unexpected result from node selection: "{unique_id}"' - f"Expected a source or a node!" + f"Listing this node type is not yet supported!" 
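[editor's note: the init.py hunk above replaces two module-level imports with imports inside the functions that need them, per its "Lazy import to avoid ModuleNotFoundError" comments. The pattern in isolation — `optional_pkg` is a hypothetical module name, not a dbt package:

import shutil

def copy_starter_files(destination: str) -> None:
    # Imported here, not at module top level, so merely importing this
    # module never raises ModuleNotFoundError when `optional_pkg` is absent;
    # the import only runs (and can only fail) on this code path.
    from optional_pkg import PACKAGE_PATH  # hypothetical

    shutil.copytree(PACKAGE_PATH, destination)

end note]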
) def generate_selectors(self): @@ -106,6 +114,10 @@ def generate_selectors(self): # metrics are searched for by pkg.metric_name metric_selector = ".".join([node.package_name, node.name]) yield f"metric:{metric_selector}" + elif node.resource_type == NodeType.SavedQuery: + assert isinstance(node, SavedQuery) + saved_query_selector = ".".join([node.package_name, node.name]) + yield f"saved_query:{saved_query_selector}" elif node.resource_type == NodeType.SemanticModel: assert isinstance(node, SemanticModel) semantic_model_selector = ".".join([node.package_name, node.name]) @@ -175,17 +187,11 @@ def resource_types(self): if self.args.models: return [NodeType.Model] - if not self.args.resource_types: - return list(self.DEFAULT_RESOURCE_VALUES) - - values = set(self.args.resource_types) - if "default" in values: - values.remove("default") - values.update(self.DEFAULT_RESOURCE_VALUES) - if "all" in values: - values.remove("all") - values.update(self.ALL_RESOURCE_VALUES) - return list(values) + resource_types = resource_types_from_args( + self.args, set(self.ALL_RESOURCE_VALUES), set(self.DEFAULT_RESOURCE_VALUES) + ) + + return list(resource_types) @property def selection_arg(self): diff --git a/core/dbt/task/printer.py b/core/dbt/task/printer.py index dc8f4c483d1..1e4d37878ab 100644 --- a/core/dbt/task/printer.py +++ b/core/dbt/task/printer.py @@ -33,11 +33,11 @@ def get_counts(flat_nodes) -> str: if node.resource_type == NodeType.Model: t = "{} {}".format(node.get_materialization(), t) elif node.resource_type == NodeType.Operation: - t = "hook" + t = "project hook" counts[t] = counts.get(t, 0) + 1 - stat_line = ", ".join([pluralize(v, k) for k, v in counts.items()]) + stat_line = ", ".join([pluralize(v, k).replace("_", " ") for k, v in counts.items()]) return stat_line @@ -79,6 +79,10 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals with TextOnly(): fire_event(Formatting("")) + # set node_info for logging events + node_info = None + if hasattr(result, "node") and result.node: + node_info = result.node.node_info if result.status == NodeStatus.Fail or (is_warning and result.status == NodeStatus.Warn): if is_warning: fire_event( @@ -86,6 +90,7 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals resource_type=result.node.resource_type, node_name=result.node.name, path=result.node.original_file_path, + node_info=node_info, ) ) else: @@ -94,29 +99,32 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals resource_type=result.node.resource_type, node_name=result.node.name, path=result.node.original_file_path, + node_info=node_info, ) ) if result.message: if is_warning: - fire_event(RunResultWarningMessage(msg=result.message)) + fire_event(RunResultWarningMessage(msg=result.message, node_info=node_info)) else: - fire_event(RunResultError(msg=result.message)) + fire_event(RunResultError(msg=result.message, node_info=node_info)) else: - fire_event(RunResultErrorNoMessage(status=result.status)) + fire_event(RunResultErrorNoMessage(status=result.status, node_info=node_info)) - if result.node.build_path is not None: + if result.node.compiled_path is not None: with TextOnly(): fire_event(Formatting("")) - fire_event(SQLCompiledPath(path=result.node.compiled_path)) + fire_event(SQLCompiledPath(path=result.node.compiled_path, node_info=node_info)) if result.node.should_store_failures: with TextOnly(): fire_event(Formatting("")) - fire_event(CheckNodeTestFailure(relation_name=result.node.relation_name)) + 
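[editor's note: the list.py resource_types property above now delegates to resource_types_from_args; the "default"/"all" alias expansion it replaces is visible in the removed lines and amounts to this simplified reconstruction, not the exact helper source:

from typing import Set

def expand_resource_types(
    requested: Set[str], all_values: Set[str], default_values: Set[str]
) -> Set[str]:
    if not requested:
        return set(default_values)
    values = set(requested)
    if "default" in values:
        values.remove("default")
        values |= default_values
    if "all" in values:
        values.remove("all")
        values |= all_values
    return values

# e.g. expand_resource_types({"all"}, {"model", "seed", "test"}, {"model"})
# returns {"model", "seed", "test"}

end note]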
fire_event( + CheckNodeTestFailure(relation_name=result.node.relation_name, node_info=node_info) + ) elif result.message is not None: - fire_event(RunResultError(msg=result.message)) + fire_event(RunResultError(msg=result.message, node_info=node_info)) def print_run_end_messages(results, keyboard_interrupt: bool = False) -> None: diff --git a/core/dbt/task/retry.py b/core/dbt/task/retry.py index 9aadf9ead97..57724f455e0 100644 --- a/core/dbt/task/retry.py +++ b/core/dbt/task/retry.py @@ -63,7 +63,7 @@ class RetryTask(ConfiguredTask): - def __init__(self, args, config) -> None: + def __init__(self, args: Flags, config: RuntimeConfig) -> None: # load previous run results state_path = args.state or config.target_path self.previous_results = load_result_state( diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 38b5209cbbf..1bd3c0e4081 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -15,12 +15,15 @@ from dbt import tracking from dbt import utils from dbt.adapters.base import BaseRelation +from dbt.cli.flags import Flags from dbt.clients.jinja import MacroGenerator +from dbt.config.runtime import RuntimeConfig from dbt.context.providers import generate_runtime_model_context -from dbt.contracts.graph.model_config import Hook from dbt.contracts.graph.nodes import HookNode, ResultNode +from dbt.contracts.graph.manifest import Manifest from dbt.artifacts.schemas.results import NodeStatus, RunStatus, RunningStatus, BaseResult from dbt.artifacts.schemas.run import RunResult +from dbt.artifacts.resources import Hook from dbt.exceptions import ( CompilationError, DbtInternalError, @@ -33,6 +36,7 @@ HooksRunning, FinishedRunningStats, ) +from dbt_common.events.contextvars import log_contextvars from dbt_common.events.functions import fire_event, get_invocation_id from dbt_common.events.types import Formatting from dbt_common.events.base_types import EventLevel @@ -304,7 +308,7 @@ def execute(self, model, manifest): class RunTask(CompileTask): - def __init__(self, args, config, manifest) -> None: + def __init__(self, args: Flags, config: RuntimeConfig, manifest: Manifest) -> None: super().__init__(args, config, manifest) self.ran_hooks: List[HookNode] = [] self._total_executed = 0 @@ -361,48 +365,51 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context) -> None: finishctx = TimestampNamed("node_finished_at") for idx, hook in enumerate(ordered_hooks, start=1): - hook.update_event_status( - started_at=datetime.utcnow().isoformat(), node_status=RunningStatus.Started - ) - sql = self.get_hook_sql(adapter, hook, idx, num_hooks, extra_context) - - hook_text = "{}.{}.{}".format(hook.package_name, hook_type, hook.index) - hook_meta_ctx = HookMetadata(hook, self.index_offset(idx)) - with UniqueID(hook.unique_id): - with hook_meta_ctx, startctx: - fire_event( - LogHookStartLine( - statement=hook_text, - index=idx, - total=num_hooks, - node_info=hook.node_info, + # We want to include node_info in the appropriate log files, so use + # log_contextvars + with log_contextvars(node_info=hook.node_info): + hook.update_event_status( + started_at=datetime.utcnow().isoformat(), node_status=RunningStatus.Started + ) + sql = self.get_hook_sql(adapter, hook, idx, num_hooks, extra_context) + + hook_text = "{}.{}.{}".format(hook.package_name, hook_type, hook.index) + hook_meta_ctx = HookMetadata(hook, self.index_offset(idx)) + with UniqueID(hook.unique_id): + with hook_meta_ctx, startctx: + fire_event( + LogHookStartLine( + statement=hook_text, + index=idx, + total=num_hooks, + 
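[editor's note: run_hooks in the run.py hunk below wraps each hook in log_contextvars(node_info=hook.node_info) so that events fired anywhere inside the block carry the hook's metadata. A generic contextvars sketch of that scoping — simplified, not dbt_common's implementation:

import contextlib
from contextvars import ContextVar
from typing import Any, Dict, Iterator

_NODE_INFO: ContextVar[Dict[str, Any]] = ContextVar("node_info", default={})

@contextlib.contextmanager
def node_info_scope(**info: Any) -> Iterator[None]:
    token = _NODE_INFO.set(info)
    try:
        yield
    finally:
        _NODE_INFO.reset(token)  # metadata never leaks past the block

def fire(msg: str) -> None:
    # Any event emitter can enrich its payload from the context variable.
    print({"msg": msg, "node_info": _NODE_INFO.get()})

with node_info_scope(unique_id="operation.proj.hook-1"):
    fire("hook started")  # carries node_info
fire("done")  # node_info is {} again

end note]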
node_info=hook.node_info, + ) ) - ) - - with Timer() as timer: - if len(sql.strip()) > 0: - response, _ = adapter.execute(sql, auto_begin=False, fetch=False) - status = response._message - else: - status = "OK" - - self.ran_hooks.append(hook) - hook.update_event_status(finished_at=datetime.utcnow().isoformat()) - with finishctx, DbtModelState({"node_status": "passed"}): - hook.update_event_status(node_status=RunStatus.Success) - fire_event( - LogHookEndLine( - statement=hook_text, - status=status, - index=idx, - total=num_hooks, - execution_time=timer.elapsed, - node_info=hook.node_info, + + with Timer() as timer: + if len(sql.strip()) > 0: + response, _ = adapter.execute(sql, auto_begin=False, fetch=False) + status = response._message + else: + status = "OK" + + self.ran_hooks.append(hook) + hook.update_event_status(finished_at=datetime.utcnow().isoformat()) + with finishctx, DbtModelState({"node_status": "passed"}): + hook.update_event_status(node_status=RunStatus.Success) + fire_event( + LogHookEndLine( + statement=hook_text, + status=status, + index=idx, + total=num_hooks, + execution_time=timer.elapsed, + node_info=hook.node_info, + ) ) - ) - # `_event_status` dict is only used for logging. Make sure - # it gets deleted when we're done with it - hook.clear_event_status() + # `_event_status` dict is only used for logging. Make sure + # it gets deleted when we're done with it + hook.clear_event_status() self._total_executed += len(ordered_hooks) diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py index 7b217853ce7..1c6c5002e27 100644 --- a/core/dbt/task/run_operation.py +++ b/core/dbt/task/run_operation.py @@ -2,8 +2,7 @@ import threading import traceback from datetime import datetime - -import agate +from typing import TYPE_CHECKING import dbt_common.exceptions from dbt.adapters.factory import get_adapter @@ -24,6 +23,10 @@ RESULT_FILE_NAME = "run_results.json" +if TYPE_CHECKING: + import agate + + class RunOperationTask(ConfiguredTask): def _get_macro_parts(self): macro_name = self.args.macro @@ -34,7 +37,7 @@ def _get_macro_parts(self): return package_name, macro_name - def _run_unsafe(self, package_name, macro_name) -> agate.Table: + def _run_unsafe(self, package_name, macro_name) -> "agate.Table": adapter = get_adapter(self.config) macro_kwargs = self.args.args diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 8c2697e7e93..71ad5d0a85d 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -15,6 +15,9 @@ import dbt.utils from dbt.adapters.base import BaseRelation from dbt.adapters.factory import get_adapter +from dbt.cli.flags import Flags +from dbt.config.runtime import RuntimeConfig +from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.nodes import ResultNode from dbt.artifacts.schemas.results import NodeStatus, RunningStatus, RunStatus, BaseResult from dbt.artifacts.schemas.run import RunExecutionResult, RunResult @@ -31,6 +34,7 @@ ConcurrencyLine, EndRunResult, NothingToDo, + GenericExceptionOnRun, ) from dbt.exceptions import ( DbtInternalError, @@ -49,7 +53,6 @@ ModelMetadata, NodeCount, ) -from dbt.node_types import NodeType from dbt.parser.manifest import write_manifest from dbt.task.base import ConfiguredTask, BaseRunner from .printer import ( @@ -64,8 +67,9 @@ class GraphRunnableTask(ConfiguredTask): MARK_DEPENDENT_ERRORS_STATUSES = [NodeStatus.Error] - def __init__(self, args, config, manifest) -> None: + def __init__(self, args: Flags, config: RuntimeConfig, manifest: Manifest) -> 
None: super().__init__(args, config, manifest) + self.config = config self._flattened_nodes: Optional[List[ResultNode]] = None self._raise_next_tick: Optional[DbtRuntimeError] = None self._skipped_children: Dict[str, Optional[RunResult]] = {} @@ -116,7 +120,9 @@ def get_selection_spec(self) -> SelectionSpec: # This is what's used with no default selector and no selection # use --select and --exclude args spec = parse_difference(self.selection_arg, self.exclusion_arg, indirect_selection) - return spec + # mypy complains because the return values of get_selector and parse_difference + # are different + return spec # type: ignore @abstractmethod def get_node_selector(self) -> NodeSelector: @@ -195,23 +201,47 @@ def call_runner(self, runner: BaseRunner) -> RunResult: ) ) status: Dict[str, str] = {} + result = None + thread_exception = None try: result = runner.run_with_hooks(self.manifest) - except Exception as exc: - raise DbtInternalError(f"Unable to execute node: {exc}") + except Exception as e: + thread_exception = e finally: finishctx = TimestampNamed("finished_at") with finishctx, DbtModelState(status): - fire_event( - NodeFinished( - node_info=runner.node.node_info, - run_result=result.to_msg_dict(), + if result is not None: + fire_event( + NodeFinished( + node_info=runner.node.node_info, + run_result=result.to_msg_dict(), + ) ) - ) + else: + msg = f"Exception on worker thread. {thread_exception}" + + fire_event( + GenericExceptionOnRun( + unique_id=runner.node.unique_id, + exc=str(thread_exception), + node_info=runner.node.node_info, + ) + ) + + result = RunResult( + status=RunStatus.Error, # type: ignore + timing=[], + thread_id="", + execution_time=0.0, + adapter_response={}, + message=msg, + failures=None, + node=runner.node, + ) + # `_event_status` dict is only used for logging. 
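[editor's note: the call_runner rewrite above stops re-raising worker-thread exceptions as DbtInternalError; instead it fires GenericExceptionOnRun and synthesizes an error RunResult so downstream bookkeeping still sees one result per node. Reduced to its skeleton, with a hypothetical Result type standing in for RunResult:

from dataclasses import dataclass
from typing import Callable, Optional

@dataclass
class Result:
    status: str
    message: Optional[str] = None

def call_runner(run: Callable[[], Result], unique_id: str) -> Result:
    try:
        return run()
    except Exception as exc:
        # Log and degrade to an error result rather than killing the run.
        print(f"warning: exception on worker thread for {unique_id}: {exc}")
        return Result(status="error", message=f"Exception on worker thread. {exc}")

end note]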
Make sure - # it gets deleted when we're done with it, except for unit tests - if not runner.node.resource_type == NodeType.Unit: - runner.node.clear_event_status() + # it gets deleted when we're done with it + runner.node.clear_event_status() fail_fast = get_flags().FAIL_FAST @@ -572,7 +602,9 @@ def create_schema(relation: BaseRelation) -> None: list_futures = [] create_futures = [] - with dbt_common.utils.executor(self.config) as tpe: + # TODO: following has a mypy issue because profile and project config + # defines threads as int and HasThreadingConfig defines it as Optional[int] + with dbt_common.utils.executor(self.config) as tpe: # type: ignore for req in required_databases: if req.database is None: name = "list_schemas" diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index 9548f633eb7..8a82c7f1243 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -1,21 +1,24 @@ -from distutils.util import strtobool - -import agate import daff import io import json import re from dataclasses import dataclass -from dbt.utils import _coerce_decimal +from dbt.utils import _coerce_decimal, strtobool from dbt_common.events.format import pluralize from dbt_common.dataclass_schema import dbtClassMixin import threading -from typing import Dict, Any, Optional, Union, List +from typing import Dict, Any, Optional, Union, List, TYPE_CHECKING, Tuple from .compile import CompileRunner from .run import RunTask -from dbt.contracts.graph.nodes import TestNode, UnitTestDefinition, UnitTestNode +from dbt.contracts.graph.nodes import ( + TestNode, + UnitTestDefinition, + UnitTestNode, + GenericTestNode, + SingularTestNode, +) from dbt.contracts.graph.manifest import Manifest from dbt.artifacts.schemas.results import TestStatus from dbt.artifacts.schemas.run import RunResult @@ -39,6 +42,10 @@ from dbt_common.ui import green, red +if TYPE_CHECKING: + import agate + + @dataclass class UnitTestDiff(dbtClassMixin): actual: List[Dict[str, Any]] @@ -80,6 +87,7 @@ class UnitTestResultData(dbtClassMixin): class TestRunner(CompileRunner): _ANSI_ESCAPE = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") + _LOG_TEST_RESULT_EVENTS = LogTestResult def describe_node_name(self): if self.node.resource_type == NodeType.Unit: @@ -95,7 +103,7 @@ def print_result_line(self, result): model = result.node fire_event( - LogTestResult( + self._LOG_TEST_RESULT_EVENTS( name=self.describe_node_name(), status=str(result.status), index=self.node_index, @@ -179,7 +187,7 @@ def build_unit_test_manifest_from_test( def execute_unit_test( self, unit_test_def: UnitTestDefinition, manifest: Manifest - ) -> UnitTestResultData: + ) -> Tuple[UnitTestNode, UnitTestResultData]: unit_test_manifest = self.build_unit_test_manifest_from_test(unit_test_def, manifest) @@ -189,6 +197,7 @@ def execute_unit_test( # Compile the node unit_test_node = self.compiler.compile_node(unit_test_node, unit_test_manifest, {}) + assert isinstance(unit_test_node, UnitTestNode) # generate_runtime_unit_test_context not strictly needed - this is to run the 'unit' # materialization, not compile the node.compiled_code @@ -242,18 +251,21 @@ def execute_unit_test( rendered=rendered, ) - return UnitTestResultData( + unit_test_result_data = UnitTestResultData( diff=diff, should_error=should_error, adapter_response=adapter_response, ) - def execute(self, test: Union[TestNode, UnitTestDefinition], manifest: Manifest): + return unit_test_node, unit_test_result_data + + def execute(self, test: Union[TestNode, UnitTestNode], manifest: Manifest): if isinstance(test, 
UnitTestDefinition): - unit_test_result = self.execute_unit_test(test, manifest) - return self.build_unit_test_run_result(test, unit_test_result) + unit_test_node, unit_test_result = self.execute_unit_test(test, manifest) + return self.build_unit_test_run_result(unit_test_node, unit_test_result) else: # Note: manifest here is a normal manifest + assert isinstance(test, (SingularTestNode, GenericTestNode)) test_result = self.execute_data_test(test, manifest) return self.build_test_run_result(test, test_result) @@ -269,7 +281,9 @@ def build_test_run_result(self, test: TestNode, result: TestResultData) -> RunRe message = f"Got {num_errors}, configured to fail if {test.config.error_if}" failures = result.failures elif result.should_warn: - if get_flags().WARN_ERROR: + if get_flags().WARN_ERROR or get_flags().WARN_ERROR_OPTIONS.includes( + self._LOG_TEST_RESULT_EVENTS.__name__ + ): status = TestStatus.Fail message = f"Got {num_errors}, configured to fail if {test.config.warn_if}" else: @@ -292,7 +306,7 @@ def build_test_run_result(self, test: TestNode, result: TestResultData) -> RunRe return run_result def build_unit_test_run_result( - self, test: UnitTestDefinition, result: UnitTestResultData + self, test: UnitTestNode, result: UnitTestResultData ) -> RunResult: thread_id = threading.current_thread().name @@ -305,7 +319,7 @@ def build_unit_test_run_result( failures = 1 return RunResult( - node=test, # type: ignore + node=test, status=status, timing=[], thread_id=thread_id, @@ -327,7 +341,7 @@ def _get_unit_test_agate_table(self, result_table, actual_or_expected: str): return unit_test_table.select(columns) def _get_daff_diff( - self, expected: agate.Table, actual: agate.Table, ordered: bool = False + self, expected: "agate.Table", actual: "agate.Table", ordered: bool = False ) -> daff.TableDiff: expected_daff_table = daff.PythonTableView(list_rows_from_table(expected)) @@ -390,7 +404,7 @@ def get_runner_type(self, _): # This was originally in agate_helper, but that was moved out into dbt_common -def json_rows_from_table(table: agate.Table) -> List[Dict[str, Any]]: +def json_rows_from_table(table: "agate.Table") -> List[Dict[str, Any]]: "Convert a table to a list of row dict objects" output = io.StringIO() table.to_json(path=output) # type: ignore @@ -399,7 +413,7 @@ def json_rows_from_table(table: agate.Table) -> List[Dict[str, Any]]: # This was originally in agate_helper, but that was moved out into dbt_common -def list_rows_from_table(table: agate.Table) -> List[Any]: +def list_rows_from_table(table: "agate.Table") -> List[Any]: "Convert a table to a list of lists, where the first element represents the header" rows = [[col.name for col in table.columns]] for row in table.rows: diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index 3ef25c55789..252f11116c1 100644 --- a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -1,5 +1,7 @@ import os from pathlib import Path +from typing import Mapping + import pytest # type: ignore import random from argparse import Namespace @@ -493,11 +495,18 @@ def get_tables_in_schema(self): return {model_name: materialization for (model_name, materialization) in result} +@pytest.fixture(scope="class") +def environment() -> Mapping[str, str]: + # By default, fixture initialization is done with the following environment + # from the os, but this fixture provides a way to customize the environment. 
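[editor's note: both run_operation.py and test.py above move agate behind a typing-only import and quote the annotations that reference it. The pattern in isolation — agate is then never loaded at runtime unless some other code path actually needs it:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import agate  # evaluated only by mypy/IDEs, never at runtime

def row_count(table: "agate.Table") -> int:
    # The quoted (forward-reference) annotation keeps this module importable
    # even in environments where agate is not installed; only calling the
    # function requires a real agate.Table.
    return len(table.rows)

end note]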
+ return os.environ + + # Housekeeping that needs to be done before we start setting up any test fixtures. @pytest.fixture(scope="class") -def initialization() -> None: +def initialization(environment) -> None: # Create an "invocation context," which dbt application code relies on. - set_invocation_context(os.environ) + set_invocation_context(environment) # Enable caches used between test runs, for better testing performance. enable_test_caching() diff --git a/core/dbt/tracking.py b/core/dbt/tracking.py index e73a089154b..ff7b9e7ebb9 100644 --- a/core/dbt/tracking.py +++ b/core/dbt/tracking.py @@ -9,7 +9,9 @@ import logbook import pytz import requests +from packaging.version import Version from snowplow_tracker import Emitter, SelfDescribingJson, Subject, Tracker +from snowplow_tracker import __version__ as snowplow_version # type: ignore from snowplow_tracker import logger as sp_logger from dbt import version as dbt_version @@ -49,17 +51,25 @@ RUN_MODEL_SPEC = "iglu:com.dbt/run_model/jsonschema/1-0-3" PLUGIN_GET_NODES = "iglu:com.dbt/plugin_get_nodes/jsonschema/1-0-0" +SNOWPLOW_TRACKER_VERSION = Version(snowplow_version) + +# workaround in case real snowplow tracker is in the env +# the argument was renamed in https://github.com/snowplow/snowplow-python-tracker/commit/39fd50a3aff98a5efdd5c5c7fb5518fe4761305b +INIT_KW_ARGS = ( + {"buffer_size": 30} if SNOWPLOW_TRACKER_VERSION < Version("0.13.0") else {"batch_size": 30} +) + class TimeoutEmitter(Emitter): def __init__(self) -> None: super().__init__( COLLECTOR_URL, protocol=COLLECTOR_PROTOCOL, - buffer_size=30, on_failure=self.handle_failure, method="post", # don't set this. byte_limit=None, + **INIT_KW_ARGS, ) @staticmethod @@ -104,7 +114,7 @@ def http_get(self, payload): emitter = TimeoutEmitter() tracker = Tracker( - emitter, + emitters=emitter, namespace="cf", app_id="dbt", ) diff --git a/core/dbt/utils.py b/core/dbt/utils.py index b4d510da8bb..8f7509a5dec 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -369,3 +369,21 @@ def args_to_dict(args): dict_args[key] = var_args[key] return dict_args + + +# Taken from https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py +# This is a copy of the function from distutils.util, which was removed in Python 3.12. +def strtobool(val: str) -> bool: + """Convert a string representation of truth to True or False. + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. 
+ """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return True + elif val in ("n", "no", "f", "false", "off", "0"): + return False + else: + raise ValueError("invalid truth value %r" % (val,)) diff --git a/core/dbt/version.py b/core/dbt/version.py index dcfb070569e..5d515185ae6 100644 --- a/core/dbt/version.py +++ b/core/dbt/version.py @@ -229,5 +229,5 @@ def _get_adapter_plugin_names() -> Iterator[str]: yield plugin_name -__version__ = "1.8.0a1" +__version__ = "1.8.0b3" installed = get_installed_version() diff --git a/core/setup.py b/core/setup.py index 3a575747f54..1f103c19534 100644 --- a/core/setup.py +++ b/core/setup.py @@ -25,7 +25,7 @@ package_name = "dbt-core" -package_version = "1.8.0a1" +package_version = "1.8.0b3" description = """With dbt, data analysts and engineers can build analytics \ the way engineers build applications.""" @@ -49,9 +49,9 @@ # ---- # dbt-core uses these packages deeply, throughout the codebase, and there have been breaking changes in past patch releases (even though these are major-version-one). # Pin to the patch or minor version, and bump in each new minor version of dbt-core. - "agate~=1.7.0", - "Jinja2~=3.1.2", - "mashumaro[msgpack]~=3.9", + "agate>=1.7.0,<1.8", + "Jinja2>=3.1.3,<4", + "mashumaro[msgpack]>=3.9,<4.0", # ---- # Legacy: This package has not been updated since 2019, and it is unused in dbt's logging system (since v1.0) # The dependency here will be removed along with the removal of 'legacy logging', in a future release of dbt-core @@ -59,25 +59,27 @@ # ---- # dbt-core uses these packages in standard ways. Pin to the major version, and check compatibility # with major versions in each new minor version of dbt-core. - "click>=8.0.2,<9", - "networkx>=2.3,<4", + "click>=8.0.2,<9.0", + "networkx>=2.3,<4.0", + "protobuf>=4.0.0,<5", "requests<3.0.0", # should match dbt-common # ---- # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes) # and check compatibility / bump in each new minor version of dbt-core. - "pathspec>=0.9,<0.12", - "sqlparse>=0.2.3,<0.5", + "pathspec>=0.9,<0.13", + "sqlparse>=0.5.0,<0.6.0", # ---- - # These are major-version-0 packages also maintained by dbt-labs. Accept patches. - "dbt-extractor~=0.5.0", - "minimal-snowplow-tracker~=0.0.2", - "dbt-semantic-interfaces~=0.5.0a2", - "dbt-common~=0.1.3", - "dbt-adapters~=0.1.0a2", + # These are major-version-0 packages also maintained by dbt-labs. + # Accept patches but avoid automatically updating past a set minor version range. + "dbt-extractor>=0.5.0,<=0.6", + "minimal-snowplow-tracker>=0.0.2,<0.1", + "dbt-semantic-interfaces>=0.5.1,<0.6", + # Minor versions for these are expected to be backwards-compatible + "dbt-common>=1.0.1,<2.0", + "dbt-adapters>=0.1.0a2,<2.0", # ---- # Expect compatibility with all new versions of these packages, so lower bounds only. 
"packaging>20.9", - "protobuf>=4.0.0", "pytz>=2015.7", "pyyaml>=6.0", "daff>=1.3.46", @@ -95,6 +97,7 @@ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", ], python_requires=">=3.8", ) diff --git a/dev-requirements.txt b/dev-requirements.txt index 0770769c18c..b7827c7003c 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,21 +1,22 @@ -git+https://github.com/dbt-labs/dbt-adapters.git@main +git+https://github.com/dbt-labs/dbt-adapters.git +git+https://github.com/dbt-labs/dbt-adapters.git@main#subdirectory=dbt-tests-adapter +git+https://github.com/dbt-labs/dbt-common.git@main git+https://github.com/dbt-labs/dbt-postgres.git@main -black==23.3.0 +black>=24.3.0,<25.0 bumpversion ddtrace==2.3.0 docutils flake8 flaky -freezegun==0.3.12 +freezegun>=1.4.0,<1.5 hypothesis ipdb mypy==1.4.1 pip-tools pre-commit -protobuf>=4.0.0 -pytest~=7.4 +pytest>=7.4,<8.0 pytest-cov -pytest-csv~=3.0 +pytest-csv>=3.0,<4.0 pytest-dotenv pytest-logbook pytest-mock @@ -28,7 +29,7 @@ types-docutils types-PyYAML types-Jinja2 types-mock -types-protobuf +types-protobuf>=4.0.0,<5.0.0 types-pytz types-requests types-setuptools diff --git a/docker/Dockerfile b/docker/Dockerfile index 8fbda897fe5..5b07514d76b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,133 +1,59 @@ -## -# Generic dockerfile for dbt image building. -# See README for operational details -## - -# Top level build args -ARG build_for=linux/amd64 - -## -# base image (abstract) -## -# Please do not upgrade beyond python3.10.7 currently as dbt-spark does not support -# 3.11py and images do not get made properly -FROM --platform=$build_for python:3.10.7-slim-bullseye as base - -# N.B. 
The refs updated automagically every release via bumpversion -ARG dbt_core_ref=dbt-core@v1.8.0a1 -ARG dbt_postgres_ref=dbt-postgres@v1.8.0a1 -ARG dbt_redshift_ref=dbt-redshift@v1.8.0a1 -ARG dbt_bigquery_ref=dbt-bigquery@v1.8.0a1 -ARG dbt_snowflake_ref=dbt-snowflake@v1.8.0a1 -ARG dbt_spark_ref=dbt-spark@v1.8.0a1 -# special case args -ARG dbt_spark_version=all -ARG dbt_third_party +ARG py_version=3.11.2 + +FROM python:$py_version-slim-bullseye as base -# System setup RUN apt-get update \ && apt-get dist-upgrade -y \ && apt-get install -y --no-install-recommends \ - git \ - ssh-client \ - software-properties-common \ - make \ - build-essential \ - ca-certificates \ - libpq-dev \ + build-essential=12.9 \ + ca-certificates=20210119 \ + git=1:2.30.2-1+deb11u2 \ + libpq-dev=13.14-0+deb11u1 \ + make=4.3-4.1 \ + openssh-client=1:8.4p1-5+deb11u3 \ + software-properties-common=0.96.20.2-2.1 \ && apt-get clean \ && rm -rf \ /var/lib/apt/lists/* \ /tmp/* \ /var/tmp/* -# Env vars ENV PYTHONIOENCODING=utf-8 ENV LANG=C.UTF-8 -# Update python -RUN python -m pip install --upgrade pip setuptools wheel --no-cache-dir +RUN python -m pip install --upgrade "pip==24.0" "setuptools==69.2.0" "wheel==0.43.0" --no-cache-dir -# Set docker basics -WORKDIR /usr/app/dbt/ -ENTRYPOINT ["dbt"] -## -# dbt-core -## FROM base as dbt-core -RUN python -m pip install --no-cache-dir "git+https://github.com/dbt-labs/${dbt_core_ref}#egg=dbt-core&subdirectory=core" -## -# dbt-postgres -## -FROM base as dbt-postgres -RUN python -m pip install --no-cache-dir "git+https://github.com/dbt-labs/${dbt_postgres_ref}#egg=dbt-postgres" +ARG commit_ref=main +HEALTHCHECK CMD dbt --version || exit 1 -## -# dbt-redshift -## -FROM base as dbt-redshift -RUN python -m pip install --no-cache-dir "git+https://github.com/dbt-labs/${dbt_redshift_ref}#egg=dbt-redshift" +WORKDIR /usr/app/dbt/ +ENTRYPOINT ["dbt"] +RUN python -m pip install --no-cache-dir "dbt-core @ git+https://github.com/dbt-labs/dbt-core@${commit_ref}#subdirectory=core" -## -# dbt-bigquery -## -FROM base as dbt-bigquery -RUN python -m pip install --no-cache-dir "git+https://github.com/dbt-labs/${dbt_bigquery_ref}#egg=dbt-bigquery" +FROM base as dbt-postgres -## -# dbt-snowflake -## -FROM base as dbt-snowflake -RUN python -m pip install --no-cache-dir "git+https://github.com/dbt-labs/${dbt_snowflake_ref}#egg=dbt-snowflake" +ARG commit_ref=main -## -# dbt-spark -## -FROM base as dbt-spark -RUN apt-get update \ - && apt-get dist-upgrade -y \ - && apt-get install -y --no-install-recommends \ - python-dev \ - libsasl2-dev \ - gcc \ - unixodbc-dev \ - && apt-get clean \ - && rm -rf \ - /var/lib/apt/lists/* \ - /tmp/* \ - /var/tmp/* -RUN python -m pip install --no-cache-dir "git+https://github.com/dbt-labs/${dbt_spark_ref}#egg=dbt-spark[${dbt_spark_version}]" +HEALTHCHECK CMD dbt --version || exit 1 + +WORKDIR /usr/app/dbt/ +ENTRYPOINT ["dbt"] + +RUN python -m pip install --no-cache-dir "dbt-postgres @ git+https://github.com/dbt-labs/dbt-core@${commit_ref}#subdirectory=plugins/postgres" -## -# dbt-third-party -## FROM dbt-core as dbt-third-party -RUN python -m pip install --no-cache-dir "${dbt_third_party}" -## -# dbt-all -## -FROM base as dbt-all -RUN apt-get update \ - && apt-get dist-upgrade -y \ - && apt-get install -y --no-install-recommends \ - python-dev \ - libsasl2-dev \ - gcc \ - unixodbc-dev \ - && apt-get clean \ - && rm -rf \ - /var/lib/apt/lists/* \ - /tmp/* \ - /var/tmp/* - RUN python -m pip install --no-cache "git+https://github.com/dbt-labs/${dbt_redshift_ref}#egg=dbt-redshift" 
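[editor's note: earlier in this diff, setup.py trades compatible-release pins such as agate~=1.7.0 for explicit ranges like >=1.7.0,<1.8. packaging's SpecifierSet can confirm the two forms accept the same versions — an illustrative check, not part of dbt:

from packaging.specifiers import SpecifierSet

compatible = SpecifierSet("~=1.7.0")      # PEP 440: >=1.7.0, ==1.7.*
explicit = SpecifierSet(">=1.7.0,<1.8")   # the spelled-out equivalent

for candidate in ("1.7.0", "1.7.9", "1.8.0"):
    assert (candidate in compatible) == (candidate in explicit)

end note]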
- RUN python -m pip install --no-cache "git+https://github.com/dbt-labs/${dbt_bigquery_ref}#egg=dbt-bigquery" - RUN python -m pip install --no-cache "git+https://github.com/dbt-labs/${dbt_snowflake_ref}#egg=dbt-snowflake" - RUN python -m pip install --no-cache "git+https://github.com/dbt-labs/${dbt_spark_ref}#egg=dbt-spark[${dbt_spark_version}]" - RUN python -m pip install --no-cache "git+https://github.com/dbt-labs/${dbt_postgres_ref}#egg=dbt-postgres" +ARG dbt_third_party + +RUN if [ "$dbt_third_party" ]; then \ + python -m pip install --no-cache-dir "${dbt_third_party}"; \ + else \ + echo "No third party adapter provided"; \ + fi \ diff --git a/docker/README.md b/docker/README.md index e4af582a29a..d05184146ed 100644 --- a/docker/README.md +++ b/docker/README.md @@ -5,13 +5,9 @@ This docker file is suitable for building dbt Docker images locally or using wit ## Building an image: This Dockerfile can create images for the following targets, each named after the database they support: * `dbt-core` _(no db-adapter support)_ -* `dbt-postgres` -* `dbt-redshift` -* `dbt-bigquery` -* `dbt-snowflake` -* `dbt-spark` * `dbt-third-party` _(requires additional build-arg)_ -* `dbt-all` _(installs all of the above in a single image)_ + +For platform-specific images, please refer to that platform's repository (eg. `dbt-labs/dbt-postgres`) In order to build a new image, run the following docker command. ``` @@ -22,53 +18,27 @@ docker build --tag --target --- -By default the images will be populated with the most recent release of `dbt-core` and whatever database adapter you select. If you need to use a different version you can specify it by git ref using the `--build-arg` flag: +By default the images will be populated with `dbt-core` on `main`. +If you need to use a different version you can specify it by git ref (tag, branch, sha) using the `--build-arg` flag: ``` docker build --tag \ --target \ - --build-arg = \ + --build-arg commit_ref= \ ``` -valid arg names for versioning are: -* `dbt_core_ref` -* `dbt_postgres_ref` -* `dbt_redshift_ref` -* `dbt_bigquery_ref` -* `dbt_snowflake_ref` -* `dbt_spark_ref` - ---- ->**NOTE:** Only override a _single_ build arg for each build. Using multiple overrides may lead to a non-functioning image. ---- - -If you wish to build an image with a third-party adapter you can use the `dbt-third-party` target. This target requires you provide a path to the adapter that can be processed by `pip` by using the `dbt_third_party` build arg: +If you wish to build an image with a third-party adapter you can use the `dbt-third-party` target. +This target requires you provide a path to the adapter that can be processed by `pip` by using the `dbt_third_party` build arg: ``` docker build --tag \ --target dbt-third-party \ --build-arg dbt_third_party= \ ``` +This can also be combined with the `commit_ref` build arg to specify a version of `dbt-core`. ### Examples: -To build an image named "my-dbt" that supports redshift using the latest releases: -``` -cd dbt-core/docker -docker build --tag my-dbt --target dbt-redshift . -``` - -To build an image named "my-other-dbt" that supports bigquery using `dbt-core` version 0.21.latest and the bigquery adapter version 1.0.0b1: -``` -cd dbt-core/docker -docker build \ - --tag my-other-dbt \ - --target dbt-bigquery \ - --build-arg dbt_bigquery_ref=dbt-bigquery@v1.0.0b1 \ - --build-arg dbt_core_ref=dbt-core@0.21.latest \ - . 
-``` - -To build an image named "my-third-party-dbt" that uses [Materilize third party adapter](https://github.com/MaterializeInc/materialize/tree/main/misc/dbt-materialize) and the latest release of `dbt-core`: +To build an image named "my-third-party-dbt" that uses the latest release of [Materialize third party adapter](https://github.com/MaterializeInc/materialize/tree/main/misc/dbt-materialize) and the latest dev version of `dbt-core`: ``` cd dbt-core/docker docker build --tag my-third-party-dbt \ @@ -78,27 +48,6 @@ docker build --tag my-third-party-dbt \ ``` -## Special cases -There are a few special cases worth noting: -* The `dbt-spark` database adapter comes in three different versions named `PyHive`, `ODBC`, and the default `all`. If you wish to overide this you can use the `--build-arg` flag with the value of `dbt_spark_version=`. See the [docs](https://docs.getdbt.com/reference/warehouse-profiles/spark-profile) for more information. - -``` -docker build --tag my_dbt \ - --target dbt-postgres \ - --build-arg dbt_postgres_ref=dbt-core@1.0.0b1 \ - -``` - -* If you need to build against another architecture (linux/arm64 in this example) you can overide the `build_for` build arg: -``` -docker build --tag my_dbt \ - --target dbt-postgres \ - --build-arg build_for=linux/arm64 \ - -``` - -Supported architectures can be found in the python docker [dockerhub page](https://hub.docker.com/_/python). - ## Running an image in a container: The `ENTRYPOINT` for this Dockerfile is the command `dbt` so you can bind-mount your project to `/usr/app` and use dbt as normal: ``` diff --git a/pytest.ini b/pytest.ini index 0760d49a55a..800dd6b9ece 100644 --- a/pytest.ini +++ b/pytest.ini @@ -7,3 +7,4 @@ env_files = testpaths = tests/functional tests/unit +pythonpath = core diff --git a/tests/functional/adapter/query_comment/fixtures.py b/tests/functional/adapter/query_comment/fixtures.py index d8848dc089e..ccaf329209c 100644 --- a/tests/functional/adapter/query_comment/fixtures.py +++ b/tests/functional/adapter/query_comment/fixtures.py @@ -10,7 +10,6 @@ {%- set comment_dict = dict( app='dbt++', macro_version='0.1.0', - dbt_version=dbt_version, message='blah: '~ message) -%} {{ return(comment_dict) }} {%- endmacro -%} diff --git a/tests/functional/adapter/query_comment/test_query_comment.py b/tests/functional/adapter/query_comment/test_query_comment.py index 18d66ffda7d..5651e54b39b 100644 --- a/tests/functional/adapter/query_comment/test_query_comment.py +++ b/tests/functional/adapter/query_comment/test_query_comment.py @@ -1,7 +1,6 @@ import pytest import json from dbt.exceptions import DbtRuntimeError -from dbt.version import __version__ as dbt_version from dbt.tests.util import run_dbt_and_capture from tests.functional.adapter.query_comment.fixtures import MACROS__MACRO_SQL, MODELS__X_SQL @@ -59,7 +58,6 @@ def test_matches_comment(self, project) -> bool: logs = self.run_get_json() expected_dct = { "app": "dbt++", - "dbt_version": dbt_version, "macro_version": "0.1.0", "message": f"blah: {project.adapter.config.target_name}", } diff --git a/tests/functional/artifacts/test_previous_version_state.py b/tests/functional/artifacts/test_previous_version_state.py index e8eb3b251c8..a8511de5a2b 100644 --- a/tests/functional/artifacts/test_previous_version_state.py +++ b/tests/functional/artifacts/test_previous_version_state.py @@ -7,7 +7,7 @@ from dbt.artifacts.schemas.base import get_artifact_schema_version from dbt.contracts.graph.manifest import WritableManifest from dbt.artifacts.schemas.run import 
RunResultsArtifact -from dbt.exceptions import IncompatibleSchemaError +from dbt.artifacts.exceptions import IncompatibleSchemaError from dbt.tests.util import run_dbt, get_manifest # This project must have one of each kind of node type, plus disabled versions, for diff --git a/tests/functional/configs/test_contract_configs.py b/tests/functional/configs/test_contract_configs.py index ad9da69d8b5..10a3e778904 100644 --- a/tests/functional/configs/test_contract_configs.py +++ b/tests/functional/configs/test_contract_configs.py @@ -108,6 +108,59 @@ def model(dbt, _): data_type: date """ +model_pk_model_column_schema_yml = """ +models: + - name: my_model + config: + contract: + enforced: true + constraints: + - type: primary_key + columns: [id] + columns: + - name: id + data_type: integer + description: hello + constraints: + - type: not_null + - type: primary_key + - type: check + expression: (id > 0) + data_tests: + - unique + - name: color + data_type: string + - name: date_day + data_type: date +""" + +model_pk_mult_column_schema_yml = """ +models: + - name: my_model + config: + contract: + enforced: true + columns: + - name: id + quote: true + data_type: integer + description: hello + constraints: + - type: not_null + - type: primary_key + - type: check + expression: (id > 0) + data_tests: + - unique + - name: color + data_type: string + constraints: + - type: not_null + - type: primary_key + - name: date_day + data_type: date +""" + model_schema_alias_types_false_yml = """ models: - name: my_model @@ -514,3 +567,35 @@ def test__missing_column_contract_error(self, project): "This model has an enforced contract, and its 'columns' specification is missing" ) assert expected_error in results[0].message + + +# test primary key defined across model and column level constraints, expect error +class TestPrimaryKeysModelAndColumnLevelConstraints: + @pytest.fixture(scope="class") + def models(self): + return { + "constraints_schema.yml": model_pk_model_column_schema_yml, + "my_model.sql": my_model_sql, + } + + def test_model_column_pk_error(self, project): + expected_error = "Primary key constraints defined at the model level and the columns level" + with pytest.raises(ParsingError) as exc_info: + run_dbt(["run"]) + assert expected_error in str(exc_info.value) + + +# test primary key defined across multiple columns, expect error +class TestPrimaryKeysMultipleColumns: + @pytest.fixture(scope="class") + def models(self): + return { + "constraints_schema.yml": model_pk_mult_column_schema_yml, + "my_model.sql": my_model_sql, + } + + def test_pk_multiple_columns(self, project): + expected_error = "Found 2 columns (['id', 'color']) with primary key constraints defined" + with pytest.raises(ParsingError) as exc_info: + run_dbt(["run"]) + assert expected_error in str(exc_info.value) diff --git a/tests/functional/configs/test_versioned_model_constraint.py b/tests/functional/configs/test_versioned_model_constraint.py index 05634a0e0fa..eb135df5d61 100644 --- a/tests/functional/configs/test_versioned_model_constraint.py +++ b/tests/functional/configs/test_versioned_model_constraint.py @@ -1,5 +1,6 @@ import pytest from dbt.tests.util import run_dbt, rm_file, write_file, get_manifest +from dbt.exceptions import ParsingError schema_yml = """ @@ -25,6 +26,10 @@ select 1 as id, 'alice' as user_name """ +foo_v2_sql = """ +select 1 as id, 'alice' as user_name, 2 as another_pk +""" + versioned_schema_yml = """ models: - name: foo @@ -47,6 +52,69 @@ - v: 1 """ +versioned_pk_model_column_schema_yml = """ +models: + - 
name: foo + latest_version: 2 + config: + materialized: table + contract: + enforced: true + constraints: + - type: primary_key + columns: [id] + columns: + - name: id + data_type: int + constraints: + - type: not_null + - name: user_name + data_type: text + versions: + - v: 1 + - v: 2 + columns: + - name: id + data_type: int + constraints: + - type: not_null + - type: primary_key + - name: user_name + data_type: text +""" + +versioned_pk_mult_columns_schema_yml = """ +models: + - name: foo + latest_version: 2 + config: + materialized: table + contract: + enforced: true + columns: + - name: id + data_type: int + constraints: + - type: not_null + - type: primary_key + - name: user_name + data_type: text + versions: + - v: 1 + - v: 2 + columns: + - name: id + data_type: int + constraints: + - type: not_null + - type: primary_key + - name: user_name + data_type: text + constraints: + - type: primary_key + +""" + class TestVersionedModelConstraints: @pytest.fixture(scope="class") @@ -74,3 +142,85 @@ def test_versioned_model_constraints(self, project): model_node = manifest.nodes["model.test.foo.v1"] assert model_node.contract.enforced is True assert len(model_node.constraints) == 1 + + +# test primary key defined across model and column level constraints, expect error +class TestPrimaryKeysModelAndColumnLevelConstraints: + @pytest.fixture(scope="class") + def models(self): + return { + "foo.sql": foo_sql, + "schema.yml": schema_yml, + } + + def test_model_column_pk_error(self, project): + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + model_node = manifest.nodes["model.test.foo"] + assert len(model_node.constraints) == 1 + + # remove foo.sql and create foo_v1.sql + rm_file(project.project_root, "models", "foo.sql") + write_file(foo_sql, project.project_root, "models", "foo_v1.sql") + write_file(versioned_schema_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["run"]) + assert len(results) == 1 + + manifest = get_manifest(project.project_root) + model_node = manifest.nodes["model.test.foo.v1"] + assert model_node.contract.enforced is True + assert len(model_node.constraints) == 1 + + # add foo_v2.sql + write_file(foo_sql, project.project_root, "models", "foo_v2.sql") + write_file( + versioned_pk_model_column_schema_yml, project.project_root, "models", "schema.yml" + ) + + expected_error = "Primary key constraints defined at the model level and the columns level" + with pytest.raises(ParsingError) as exc_info: + run_dbt(["run"]) + assert expected_error in str(exc_info.value) + + +# test primary key defined across multiple columns, expect error +class TestPrimaryKeysMultipleColumns: + @pytest.fixture(scope="class") + def models(self): + return { + "foo.sql": foo_sql, + "schema.yml": schema_yml, + } + + def test_pk_multiple_columns(self, project): + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + model_node = manifest.nodes["model.test.foo"] + assert len(model_node.constraints) == 1 + + # remove foo.sql and create foo_v1.sql + rm_file(project.project_root, "models", "foo.sql") + write_file(foo_sql, project.project_root, "models", "foo_v1.sql") + write_file(versioned_schema_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["run"]) + assert len(results) == 1 + + manifest = get_manifest(project.project_root) + model_node = manifest.nodes["model.test.foo.v1"] + assert model_node.contract.enforced is True + assert len(model_node.constraints) == 1 
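[editor's note: the primary-key test classes in this hunk and its continuation below expect parse-time failures when a primary key is declared both at the model level and on columns, or on more than one column. A simplified reconstruction of that validation rule, using the error messages the tests assert on; this is not dbt's parser code:

from typing import Dict, List

def check_primary_keys(
    model_constraints: List[dict], columns: Dict[str, List[dict]]
) -> None:
    model_pk = any(c.get("type") == "primary_key" for c in model_constraints)
    pk_columns = [
        name
        for name, constraints in columns.items()
        if any(c.get("type") == "primary_key" for c in constraints)
    ]
    if model_pk and pk_columns:
        raise ValueError(
            "Primary key constraints defined at the model level and the columns level"
        )
    if len(pk_columns) > 1:
        raise ValueError(
            f"Found {len(pk_columns)} columns ({pk_columns}) "
            "with primary key constraints defined"
        )

end note]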
+ + # add foo_v2.sql + write_file(foo_sql, project.project_root, "models", "foo_v2.sql") + write_file( + versioned_pk_mult_columns_schema_yml, project.project_root, "models", "schema.yml" + ) + + expected_error = ( + "Found 2 columns (['id', 'user_name']) with primary key constraints defined" + ) + with pytest.raises(ParsingError) as exc_info: + run_dbt(["run"]) + assert expected_error in str(exc_info.value) diff --git a/tests/functional/context_methods/test_cli_vars.py b/tests/functional/context_methods/test_cli_vars.py index d3d5dfc8197..1d72e8c5021 100644 --- a/tests/functional/context_methods/test_cli_vars.py +++ b/tests/functional/context_methods/test_cli_vars.py @@ -3,7 +3,13 @@ from tests.fixtures.dbt_integration_project import dbt_integration_project # noqa: F401 -from dbt.tests.util import run_dbt, get_artifact, write_config_file +from dbt.tests.util import ( + run_dbt, + run_dbt_and_capture, + get_logging_events, + get_artifact, + write_config_file, +) from dbt.tests.fixtures.project import write_project_files from dbt.exceptions import DbtRuntimeError, CompilationError @@ -206,3 +212,76 @@ def test_vars_in_selectors(self, project): # Var in cli_vars works results = run_dbt(["run", "--vars", "snapshot_target: dev"]) assert len(results) == 1 + + +models_scrubbing__schema_yml = """ +version: 2 +models: +- name: simple_model + columns: + - name: simple + data_tests: + - accepted_values: + values: + - abc +""" + +models_scrubbing__simple_model_sql = """ +select + '{{ var("DBT_ENV_SECRET_simple") }}'::varchar as simple +""" + + +class TestCLIVarsScrubbing: + @pytest.fixture(scope="class") + def models(self): + return { + "schema.yml": models_scrubbing__schema_yml, + "simple_model.sql": models_scrubbing__simple_model_sql, + } + + def test__run_results_scrubbing(self, project): + results, output = run_dbt_and_capture( + [ + "--debug", + "--log-format", + "json", + "run", + "--vars", + "{DBT_ENV_SECRET_simple: abc, unused: def}", + ] + ) + assert len(results) == 1 + + run_results = get_artifact(project.project_root, "target", "run_results.json") + assert run_results["args"]["vars"] == { + "DBT_ENV_SECRET_simple": "*****", + "unused": "def", + } + + log_events = get_logging_events(log_output=output, event_name="StateCheckVarsHash") + assert len(log_events) == 1 + assert ( + log_events[0]["data"]["vars"] == "{'DBT_ENV_SECRET_simple': '*****', 'unused': 'def'}" + ) + + def test__exception_scrubbing(self, project): + results, output = run_dbt_and_capture( + [ + "--debug", + "--log-format", + "json", + "run", + "--vars", + "{DBT_ENV_SECRET_unused: abc, unused: def}", + ], + False, + ) + assert len(results) == 1 + + log_events = get_logging_events(log_output=output, event_name="CatchableExceptionOnRun") + assert len(log_events) == 1 + assert ( + '{\n "DBT_ENV_SECRET_unused": "*****",\n "unused": "def"\n }' + in log_events[0]["info"]["msg"] + ) diff --git a/tests/functional/context_methods/test_env_vars.py b/tests/functional/context_methods/test_env_vars.py index 506ed40d31c..33feb3b5de1 100644 --- a/tests/functional/context_methods/test_env_vars.py +++ b/tests/functional/context_methods/test_env_vars.py @@ -191,3 +191,4 @@ def test_env_vars_secrets(self, project): assert not ("secret_variable" in log_output) assert "regular_variable" in log_output + del os.environ["DBT_DEBUG"] diff --git a/tests/functional/dbt_runner/test_dbt_runner.py b/tests/functional/dbt_runner/test_dbt_runner.py index b951820d4e8..c332490e2d7 100644 --- a/tests/functional/dbt_runner/test_dbt_runner.py +++ 
b/tests/functional/dbt_runner/test_dbt_runner.py @@ -8,6 +8,7 @@ from dbt.adapters.factory import reset_adapters, FACTORY from dbt.tests.util import read_file, write_file from dbt.version import __version__ as dbt_version +from dbt_common.events.contextvars import get_node_info class TestDbtRunner: @@ -93,6 +94,12 @@ def test_pass_in_manifest(self, project, dbt): assert result.success assert len(FACTORY.adapters) == 1 + def test_pass_in_args_variable(self, dbt): + args = ["--log-format", "text"] + args_before = args.copy() + dbt.invoke(args) + assert args == args_before + class TestDbtRunnerQueryComments: @pytest.fixture(scope="class") @@ -120,3 +127,20 @@ def test_query_comment_saved_manifest(self, project, logs_dir): dbt.invoke(["build", "--select", "models"]) log_file = read_file(logs_dir, "dbt.log") assert f"comment: {dbt_version}" in log_file + + +class TestDbtRunnerHooks: + @pytest.fixture(scope="class") + def models(self): + return { + "models.sql": "select 1 as id", + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return {"on-run-end": ["select 1;"]} + + def test_node_info_non_persistence(self, project): + dbt = dbtRunner() + dbt.invoke(["run", "--select", "models"]) + assert get_node_info() == {} diff --git a/tests/functional/defer_state/test_defer_state.py b/tests/functional/defer_state/test_defer_state.py index 5f2ac50620f..1a40fbca73a 100644 --- a/tests/functional/defer_state/test_defer_state.py +++ b/tests/functional/defer_state/test_defer_state.py @@ -297,6 +297,9 @@ def test_defer_state_flag(self, project, unique_schema, other_schema): expect_pass=False, ) + # Test that retry of a defer command works + run_dbt(["retry"], expect_pass=False) + # this will fail because we haven't passed in --state with pytest.raises( DbtRuntimeError, match="Got a state selector method, but no comparison manifest" diff --git a/tests/functional/dependencies/test_dependency_options.py b/tests/functional/dependencies/test_dependency_options.py index a6a200bf3d5..7e451555ac3 100644 --- a/tests/functional/dependencies/test_dependency_options.py +++ b/tests/functional/dependencies/test_dependency_options.py @@ -35,10 +35,10 @@ def test_deps_lock(self, clean_start): assert ( contents == """packages: -- package: fivetran/fivetran_utils - version: 0.4.7 -- package: dbt-labs/dbt_utils - version: 1.1.1 + - package: fivetran/fivetran_utils + version: 0.4.7 + - package: dbt-labs/dbt_utils + version: 1.1.1 sha1_hash: 71304bca2138cf8004070b3573a1e17183c0c1a8 """ ) @@ -52,10 +52,10 @@ def test_deps_default(self, clean_start): assert ( contents == """packages: -- package: fivetran/fivetran_utils - version: 0.4.7 -- package: dbt-labs/dbt_utils - version: 1.1.1 + - package: fivetran/fivetran_utils + version: 0.4.7 + - package: dbt-labs/dbt_utils + version: 1.1.1 sha1_hash: 71304bca2138cf8004070b3573a1e17183c0c1a8 """ ) diff --git a/tests/functional/dependencies/test_simple_dependency.py b/tests/functional/dependencies/test_simple_dependency.py index ad33e5c59ef..de06452ec30 100644 --- a/tests/functional/dependencies/test_simple_dependency.py +++ b/tests/functional/dependencies/test_simple_dependency.py @@ -241,12 +241,12 @@ def test_git_with_multiple_subdir(self, project): run_dbt(["deps"]) assert os.path.exists("package-lock.yml") expected = """packages: -- git: https://github.com/dbt-labs/dbt-multipe-packages.git - revision: 53782f3ede8fdf307ee1d8e418aa65733a4b72fa - subdirectory: dbt-utils-main -- git: https://github.com/dbt-labs/dbt-multipe-packages.git - revision: 
53782f3ede8fdf307ee1d8e418aa65733a4b72fa - subdirectory: dbt-date-main + - git: https://github.com/dbt-labs/dbt-multipe-packages.git + revision: 53782f3ede8fdf307ee1d8e418aa65733a4b72fa + subdirectory: dbt-utils-main + - git: https://github.com/dbt-labs/dbt-multipe-packages.git + revision: 53782f3ede8fdf307ee1d8e418aa65733a4b72fa + subdirectory: dbt-date-main sha1_hash: b9c8042f29446c55a33f9f211737f445a640c7a1 """ with open("package-lock.yml") as fp: diff --git a/tests/functional/deprecations/test_deprecations.py b/tests/functional/deprecations/test_deprecations.py index ba90fec8bb7..d93cb5f9a10 100644 --- a/tests/functional/deprecations/test_deprecations.py +++ b/tests/functional/deprecations/test_deprecations.py @@ -120,7 +120,7 @@ def test_exposure_name_fail(self, project): assert expected_msg in exc_str -class TestPrjectFlagsMovedDeprecation: +class TestProjectFlagsMovedDeprecation: @pytest.fixture(scope="class") def profiles_config_update(self): return { diff --git a/tests/functional/docs/test_generate.py b/tests/functional/docs/test_generate.py index 4129fc0abb5..7597fdb41f3 100644 --- a/tests/functional/docs/test_generate.py +++ b/tests/functional/docs/test_generate.py @@ -153,6 +153,56 @@ def test_select_source(self, project): assert len(catalog.nodes) == 0 +class TestGenerateSelectOverMaxSchemaMetadataRelations(TestBaseGenerate): + @pytest.fixture(scope="class") + def seeds(self): + return { + "sample_seed.csv": sample_seed, + "second_seed.csv": sample_seed, + "source_from_seed.csv": sample_seed, + } + + def test_select_source(self, project): + run_dbt(["build"]) + + project.run_sql("create table {}.sample_source (id int)".format(project.test_schema)) + project.run_sql("create table {}.second_source (id int)".format(project.test_schema)) + + with mock.patch.object(type(project.adapter), "MAX_SCHEMA_METADATA_RELATIONS", 1): + # more relations than MAX_SCHEMA_METADATA_RELATIONS -> all sources and nodes correctly returned + catalog = run_dbt(["docs", "generate"]) + assert len(catalog.sources) == 3 + assert len(catalog.nodes) == 5 + + # full source selection respected + catalog = run_dbt(["docs", "generate", "--select", "source:*"]) + assert len(catalog.sources) == 3 + assert len(catalog.nodes) == 0 + + # full node selection respected + catalog = run_dbt(["docs", "generate", "--exclude", "source:*"]) + assert len(catalog.sources) == 0 + assert len(catalog.nodes) == 5 + + # granular source selection respected (> MAX_SCHEMA_METADATA_RELATIONS selected sources) + catalog = run_dbt( + [ + "docs", + "generate", + "--select", + "source:test.my_source_schema.sample_source", + "source:test.my_source_schema.second_source", + ] + ) + assert len(catalog.sources) == 2 + assert len(catalog.nodes) == 0 + + # granular node selection respected (> MAX_SCHEMA_METADATA_RELATIONS selected nodes) + catalog = run_dbt(["docs", "generate", "--select", "my_model", "alt_model"]) + assert len(catalog.sources) == 0 + assert len(catalog.nodes) == 2 + + class TestGenerateSelectSeed(TestBaseGenerate): @pytest.fixture(scope="class") def seeds(self): diff --git a/tests/functional/list/fixtures.py b/tests/functional/list/fixtures.py index ae5514c6245..7681e1c5632 100644 --- a/tests/functional/list/fixtures.py +++ b/tests/functional/list/fixtures.py @@ -145,6 +145,24 @@ """ +saved_queries__sq_yml = """ +saved_queries: + - name: my_saved_query + label: My Saved Query + query_params: + metrics: + - total_outer + group_by: + - "Dimension('my_entity__created_at')" + exports: + - name: my_export + config: + alias: 
my_export_alias + export_as: table + schema: my_export_schema_name +""" + + @pytest.fixture(scope="class") def snapshots(): return {"snapshot.sql": snapshots__snapshot_sql} @@ -164,6 +182,7 @@ def models(): "docs.md": models__docs_md, "outer.sql": models__outer_sql, "metricflow_time_spine.sql": models__metric_flow, + "sq.yml": saved_queries__sq_yml, "sm.yml": semantic_models__sm_yml, "m.yml": metrics__m_yml, "sub": {"inner.sql": models__sub__inner_sql}, @@ -195,6 +214,11 @@ def metrics(): return {"m.yml": metrics__m_yml} +@pytest.fixture(scope="class") +def saved_queries(): + return {"sq.yml": saved_queries__sq_yml} + + @pytest.fixture(scope="class") def project_files( project_root, diff --git a/tests/functional/list/test_list.py b/tests/functional/list/test_list.py index 77eedfb2c03..4eb8cb8e2f9 100644 --- a/tests/functional/list/test_list.py +++ b/tests/functional/list/test_list.py @@ -14,6 +14,7 @@ analyses, semantic_models, metrics, + saved_queries, project_files, ) @@ -36,6 +37,13 @@ def project_config_update(self): }, } + def test_packages_install_path_does_not_exist(self, project): + run_dbt(["list"]) + packages_install_path = "dbt_packages" + + # the packages-install-path should not be created by `dbt list` + assert not os.path.exists(packages_install_path) + def run_dbt_ls(self, args=None, expect_pass=True): log_manager.stdout_console() full_args = ["ls"] @@ -596,6 +604,7 @@ def expect_all_output(self): "test.t", "semantic_model:test.my_sm", "metric:test.total_outer", + "saved_query:test.my_saved_query", } # analyses have their type inserted into their fqn like tests expected_all = expected_default | {"test.analysis.a"} @@ -626,6 +635,9 @@ def expect_select(self): results = self.run_dbt_ls(["--resource-type", "metric"]) assert set(results) == {"metric:test.total_outer"} + results = self.run_dbt_ls(["--resource-type", "saved_query"]) + assert set(results) == {"saved_query:test.my_saved_query"} + results = self.run_dbt_ls(["--resource-type", "model", "--select", "outer+"]) assert set(results) == {"test.outer", "test.sub.inner"} @@ -698,6 +710,34 @@ def expect_resource_type_multiple(self): "test.outer", } + def expect_resource_type_env_var(self): + """Expect selected resources when the DBT_RESOURCE_TYPES and DBT_EXCLUDE_RESOURCE_TYPES env vars are set""" + os.environ["DBT_RESOURCE_TYPES"] = "test model" + results = self.run_dbt_ls() + assert set(results) == { + "test.ephemeral", + "test.incremental", + "test.not_null_outer_id", + "test.outer", + "test.sub.inner", + "test.metricflow_time_spine", + "test.t", + "test.unique_outer_id", + } + del os.environ["DBT_RESOURCE_TYPES"] + os.environ[ + "DBT_EXCLUDE_RESOURCE_TYPES" + ] = "test saved_query metric source semantic_model snapshot seed" + results = self.run_dbt_ls() + assert set(results) == { + "test.ephemeral", + "test.incremental", + "test.outer", + "test.sub.inner", + "test.metricflow_time_spine", + } + del os.environ["DBT_EXCLUDE_RESOURCE_TYPES"] + def expect_selected_keys(self, project): """Expect selected fields of the selected model""" expectations = [ @@ -793,6 +833,7 @@ def test_ls(self, project): self.expect_test_output() self.expect_select() self.expect_resource_type_multiple() + self.expect_resource_type_env_var() self.expect_all_output() self.expect_selected_keys(project) diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py index b4e64966e16..c4799f609d9 100644 --- a/tests/functional/logging/test_logging.py +++ b/tests/functional/logging/test_logging.py @@ -100,3 +100,39 @@ def
test_invalid_event_value(project, logs_dir): fire_event(InvalidOptionYAML(option_name=1)) assert str(excinfo.value) == "[InvalidOptionYAML]: Unable to parse dict {'option_name': 1}" + + +class TestNodeInfo: + @pytest.fixture(scope="class") + def models(self): + return {"my_model.sql": "select not_found as id"} + + def test_node_info_on_results(self, project, logs_dir): + results = run_dbt(["--log-format=json", "run"], expect_pass=False) + assert len(results) == 1 + # get log file + log_file = read_file(logs_dir, "dbt.log") + task_printer_events = [ + "RunResultWarning", + "RunResultFailure", + "RunResultWarningMessage", + "RunResultError", + "RunResultErrorNoMessage", + "SQLCompiledPath", + "CheckNodeTestFailure", + ] + count = 0 + for log_line in log_file.split("\n"): + # skip empty lines + if len(log_line) == 0: + continue + # The adapter logging also shows up, so skip non-json lines + if "[debug]" in log_line: + continue + log_dct = json.loads(log_line) + log_data = log_dct["data"] + log_event = log_dct["info"]["name"] + if log_event in task_printer_events: + assert "node_info" in log_data + count += 1 + assert count > 0 diff --git a/tests/functional/manifest_validations/test_check_for_spaces_in_model_names.py b/tests/functional/manifest_validations/test_check_for_spaces_in_model_names.py new file mode 100644 index 00000000000..45ca4bab307 --- /dev/null +++ b/tests/functional/manifest_validations/test_check_for_spaces_in_model_names.py @@ -0,0 +1,106 @@ +import pytest + +from dataclasses import dataclass, field +from dbt.cli.main import dbtRunner +from dbt_common.events.base_types import BaseEvent, EventLevel, EventMsg +from dbt.events.types import SpacesInModelNameDeprecation, TotalModelNamesWithSpacesDeprecation +from dbt.tests.util import update_config_file +from typing import Dict, List + + +@dataclass +class EventCatcher: + event_to_catch: BaseEvent + caught_events: List[EventMsg] = field(default_factory=list) + + def catch(self, event: EventMsg): + if event.info.name == self.event_to_catch.__name__: + self.caught_events.append(event) + + +class TestSpacesInModelNamesHappyPath: + def test_no_warnings_when_no_spaces_in_name(self, project) -> None: + event_catcher = EventCatcher(SpacesInModelNameDeprecation) + runner = dbtRunner(callbacks=[event_catcher.catch]) + runner.invoke(["parse"]) + assert len(event_catcher.caught_events) == 0 + + +class TestSpacesInModelNamesSadPath: + @pytest.fixture(scope="class") + def models(self) -> Dict[str, str]: + return { + "my model.sql": "select 1 as id", + } + + def tests_warning_when_spaces_in_name(self, project) -> None: + event_catcher = EventCatcher(SpacesInModelNameDeprecation) + total_catcher = EventCatcher(TotalModelNamesWithSpacesDeprecation) + runner = dbtRunner(callbacks=[event_catcher.catch, total_catcher.catch]) + runner.invoke(["parse"]) + + assert len(total_catcher.caught_events) == 1 + assert len(event_catcher.caught_events) == 1 + event = event_catcher.caught_events[0] + assert "Model `my model` has spaces in its name. 
This is deprecated" in event.info.msg + assert event.info.level == EventLevel.WARN + + +class TestSpaceInModelNamesWithDebug: + @pytest.fixture(scope="class") + def models(self) -> Dict[str, str]: + return { + "my model.sql": "select 1 as id", + "my model2.sql": "select 1 as id", + } + + def tests_debug_when_spaces_in_name(self, project) -> None: + spaces_check_catcher = EventCatcher(SpacesInModelNameDeprecation) + total_catcher = EventCatcher(TotalModelNamesWithSpacesDeprecation) + runner = dbtRunner(callbacks=[spaces_check_catcher.catch, total_catcher.catch]) + runner.invoke(["parse"]) + assert len(spaces_check_catcher.caught_events) == 1 + assert len(total_catcher.caught_events) == 1 + assert ( + "Spaces in model names found in 2 model(s)" in total_catcher.caught_events[0].info.msg + ) + assert ( + "Run again with `--debug` to see them all." in total_catcher.caught_events[0].info.msg + ) + + spaces_check_catcher = EventCatcher(SpacesInModelNameDeprecation) + total_catcher = EventCatcher(TotalModelNamesWithSpacesDeprecation) + runner = dbtRunner(callbacks=[spaces_check_catcher.catch, total_catcher.catch]) + runner.invoke(["parse", "--debug"]) + assert len(spaces_check_catcher.caught_events) == 2 + assert len(total_catcher.caught_events) == 1 + assert ( + "Run again with `--debug` to see them all." + not in total_catcher.caught_events[0].info.msg + ) + + +class TestAllowSpacesInModelNamesFalse: + @pytest.fixture(scope="class") + def models(self) -> Dict[str, str]: + return { + "my model.sql": "select 1 as id", + } + + def test_dont_allow_spaces_in_model_names(self, project): + spaces_check_catcher = EventCatcher(SpacesInModelNameDeprecation) + runner = dbtRunner(callbacks=[spaces_check_catcher.catch]) + runner.invoke(["parse"]) + assert len(spaces_check_catcher.caught_events) == 1 + assert spaces_check_catcher.caught_events[0].info.level == EventLevel.WARN + + config_patch = {"flags": {"allow_spaces_in_model_names": False}} + update_config_file(config_patch, project.project_root, "dbt_project.yml") + + spaces_check_catcher = EventCatcher(SpacesInModelNameDeprecation) + runner = dbtRunner(callbacks=[spaces_check_catcher.catch]) + result = runner.invoke(["parse"]) + assert not result.success + assert "Model names cannot contain spaces" in result.exception.__str__() + assert len(spaces_check_catcher.caught_events) == 1 + assert spaces_check_catcher.caught_events[0].info.level == EventLevel.ERROR diff --git a/tests/functional/materializations/conftest.py b/tests/functional/materializations/conftest.py index b808c1a6a7b..8441e72c0b2 100644 --- a/tests/functional/materializations/conftest.py +++ b/tests/functional/materializations/conftest.py @@ -325,6 +325,21 @@ {%- endmaterialization -%} """ +custom_materialization_dep__dbt_project_yml = """ +name: custom_materialization_default +macro-paths: ['macros'] +""" + +custom_materialization_sql = """ +{% materialization custom_materialization, default %} + {%- set target_relation = this.incorporate(type='table') %} + {% call statement('main') -%} + select 1 as column1 + {%- endcall %} + {{ return({'relations': [target_relation]}) }} +{% endmaterialization %} +""" + @pytest.fixture(scope="class") def override_view_adapter_pass_dep(project_root): @@ -368,3 +383,12 @@ def override_view_return_no_relation(project_root): }, } write_project_files(project_root, "override-view-return-no-relation", files) + + +@pytest.fixture(scope="class") +def custom_materialization_dep(project_root): + files = { + "dbt_project.yml": 
custom_materialization_dep__dbt_project_yml, + "macros": {"custom_materialization.sql": custom_materialization_sql}, + } + write_project_files(project_root, "custom-materialization-dep", files) diff --git a/tests/functional/materializations/test_custom_materialization.py b/tests/functional/materializations/test_custom_materialization.py index 838eb68bb01..2c3ec4e74c2 100644 --- a/tests/functional/materializations/test_custom_materialization.py +++ b/tests/functional/materializations/test_custom_materialization.py @@ -1,7 +1,7 @@ import pytest from dbt.tests.util import run_dbt - +from dbt import deprecations models__model_sql = """ {{ config(materialized='view') }} @@ -10,11 +10,24 @@ """ +models_custom_materialization__model_sql = """ +{{ config(materialized='custom_materialization') }} +select 1 as id + +""" + + @pytest.fixture(scope="class") def models(): return {"model.sql": models__model_sql} +@pytest.fixture(scope="class") +def set_up_deprecations(): + deprecations.reset_deprecations() + assert deprecations.active_deprecations == set() + + class TestOverrideAdapterDependency: # make sure that if there's a dependency with an adapter-specific # materialization, we honor that materialization @@ -22,22 +35,171 @@ class TestOverrideAdapterDependency: def packages(self): return {"packages": [{"local": "override-view-adapter-dep"}]} - def test_adapter_dependency(self, project, override_view_adapter_dep): + def test_adapter_dependency(self, project, override_view_adapter_dep, set_up_deprecations): + run_dbt(["deps"]) + # this should error because the override is buggy + run_dbt(["run"], expect_pass=False) + + # overriding a built-in materialization scoped to adapter from package is deprecated + assert deprecations.active_deprecations == {"package-materialization-override"} + + +class TestOverrideAdapterDependencyDeprecated: + # make sure that if there's a dependency with an adapter-specific + # materialization, we honor that materialization + @pytest.fixture(scope="class") + def packages(self): + return {"packages": [{"local": "override-view-adapter-dep"}]} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "flags": { + "require_explicit_package_overrides_for_builtin_materializations": True, + }, + } + + def test_adapter_dependency_deprecate_overrides( + self, project, override_view_adapter_dep, set_up_deprecations + ): + run_dbt(["deps"]) + # this should pass because the override is buggy and unused + run_dbt(["run"]) + + # no deprecation warning -- flag used correctly + assert deprecations.active_deprecations == set() + + +class TestOverrideAdapterDependencyLegacy: + # make sure that if there's a dependency with an adapter-specific + # materialization, we honor that materialization + @pytest.fixture(scope="class") + def packages(self): + return {"packages": [{"local": "override-view-adapter-dep"}]} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "flags": { + "require_explicit_package_overrides_for_builtin_materializations": False, + }, + } + + def test_adapter_dependency(self, project, override_view_adapter_dep, set_up_deprecations): run_dbt(["deps"]) # this should error because the override is buggy run_dbt(["run"], expect_pass=False) + # overriding a built-in materialization scoped to adapter from package is deprecated + assert deprecations.active_deprecations == {"package-materialization-override"} + class TestOverrideDefaultDependency: @pytest.fixture(scope="class") def packages(self): return {"packages": [{"local": 
"override-view-default-dep"}]} - def test_default_dependency(self, project, override_view_default_dep): + def test_default_dependency(self, project, override_view_default_dep, set_up_deprecations): + run_dbt(["deps"]) + # this should error because the override is buggy + run_dbt(["run"], expect_pass=False) + + # overriding a built-in materialization from package is deprecated + assert deprecations.active_deprecations == {"package-materialization-override"} + + +class TestOverrideDefaultDependencyDeprecated: + @pytest.fixture(scope="class") + def packages(self): + return {"packages": [{"local": "override-view-default-dep"}]} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "flags": { + "require_explicit_package_overrides_for_builtin_materializations": True, + }, + } + + def test_default_dependency_deprecated( + self, project, override_view_default_dep, set_up_deprecations + ): + run_dbt(["deps"]) + # this should pass because the override is buggy and unused + run_dbt(["run"]) + + # overriding a built-in materialization from package is deprecated + assert deprecations.active_deprecations == set() + + +class TestOverrideDefaultDependencyLegacy: + @pytest.fixture(scope="class") + def packages(self): + return {"packages": [{"local": "override-view-default-dep"}]} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "flags": { + "require_explicit_package_overrides_for_builtin_materializations": False, + }, + } + + def test_default_dependency(self, project, override_view_default_dep, set_up_deprecations): + run_dbt(["deps"]) + # this should error because the override is buggy + run_dbt(["run"], expect_pass=False) + + # overriding a built-in materialization from package is deprecated + assert deprecations.active_deprecations == {"package-materialization-override"} + + +root_view_override_macro = """ +{% materialization view, default %} + {{ return(view_default_override.materialization_view_default()) }} +{% endmaterialization %} +""" + + +class TestOverrideDefaultDependencyRootOverride: + @pytest.fixture(scope="class") + def packages(self): + return {"packages": [{"local": "override-view-default-dep"}]} + + @pytest.fixture(scope="class") + def macros(self): + return {"my_view.sql": root_view_override_macro} + + def test_default_dependency_with_root_override( + self, project, override_view_default_dep, set_up_deprecations + ): run_dbt(["deps"]) # this should error because the override is buggy run_dbt(["run"], expect_pass=False) + # using an package-overriden built-in materialization in a root matereialization is _not_ deprecated + assert deprecations.active_deprecations == set() + + +class TestCustomMaterializationDependency: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": models_custom_materialization__model_sql} + + @pytest.fixture(scope="class") + def packages(self): + return {"packages": [{"local": "custom-materialization-dep"}]} + + def test_custom_materialization_deopendency( + self, project, custom_materialization_dep, set_up_deprecations + ): + run_dbt(["deps"]) + # custom materilization is valid + run_dbt(["run"]) + + # using a custom materialization is from an installed package is _not_ deprecated + assert deprecations.active_deprecations == set() + class TestOverrideAdapterDependencyPassing: @pytest.fixture(scope="class") diff --git a/tests/functional/metrics/fixtures.py b/tests/functional/metrics/fixtures.py index a4bd35d4d44..c9d2d0ab190 100644 --- a/tests/functional/metrics/fixtures.py +++ 
b/tests/functional/metrics/fixtures.py @@ -88,6 +88,14 @@ metrics: - average_tenure expr: "average_tenure + 1" + + - name: tenured_people + label: Tenured People + description: People who have been here more than 1 year + type: simple + type_params: + measure: people + filter: "{{ Metric('collective_tenure', ['id']) }} > 2" """ metricflow_time_spine_sql = """ @@ -748,3 +756,86 @@ conversion_measure: num_orders entity: purchase """ + +filtered_metrics_yml = """ +version: 2 + +metrics: + + - name: collective_tenure_measure_filter_str + label: "Collective tenure1" + description: Total number of years of team experience + type: simple + type_params: + measure: + name: "years_tenure" + filter: "{{ Dimension('id__loves_dbt') }} is true" + + - name: collective_tenure_measure_filter_list + label: "Collective tenure2" + description: Total number of years of team experience + type: simple + type_params: + measure: + name: "years_tenure" + filter: + - "{{ Dimension('id__loves_dbt') }} is true" + + - name: collective_tenure_metric_filter_str + label: Collective tenure3 + description: Total number of years of team experience + type: simple + type_params: + measure: + name: "years_tenure" + filter: "{{ Dimension('id__loves_dbt') }} is true" + + - name: collective_tenure_metric_filter_list + label: Collective tenure4 + description: Total number of years of team experience + type: simple + type_params: + measure: + name: "years_tenure" + filter: + - "{{ Dimension('id__loves_dbt') }} is true" + + - name: average_tenure_filter_str + label: Average tenure of people who love dbt1 + description: Average tenure of people who love dbt + type: derived + type_params: + expr: "average_tenure" + metrics: + - name: average_tenure + filter: "{{ Dimension('id__loves_dbt') }} is true" + + - name: average_tenure_filter_list + label: Average tenure of people who love dbt2 + description: Average tenure of people who love dbt + type: derived + type_params: + expr: "average_tenure" + metrics: + - name: average_tenure + filter: + - "{{ Dimension('id__loves_dbt') }} is true" +""" + +duplicate_measure_metric_yml = """ +metrics: + # Simple metrics + - name: people_with_tenure + description: "Count of people with tenure" + type: simple + label: People with tenure + type_params: + measure: people + - name: ratio_tenure_to_people + description: People to years of tenure + label: New customers to all customers + type: ratio + type_params: + numerator: people_with_tenure + denominator: number_of_people +""" diff --git a/tests/functional/metrics/test_metrics.py b/tests/functional/metrics/test_metrics.py index 70ebcfa35b8..f87cc56b9b3 100644 --- a/tests/functional/metrics/test_metrics.py +++ b/tests/functional/metrics/test_metrics.py @@ -28,6 +28,9 @@ semantic_model_people_yml, semantic_model_purchasing_yml, purchasing_model_sql, + filtered_metrics_yml, + basic_metrics_yml, + duplicate_measure_metric_yml, ) @@ -76,7 +79,7 @@ def test_simple_metric( "metric.test.average_tenure_minus_people" ].type_params.input_measures ) - == 3 + == 2 ) @@ -399,3 +402,90 @@ def test_conversion_metric( ].type_params.conversion_type_params.entity == "purchase" ) + + +class TestFilterParsing: + @pytest.fixture(scope="class") + def models(self): + return { + "basic_metrics.yml": basic_metrics_yml, + "filtered_metrics.yml": filtered_metrics_yml, + "metricflow_time_spine.sql": metricflow_time_spine_sql, + "semantic_model_people.yml": semantic_model_people_yml, + "people.sql": models_people_sql, + } + + # Tests that filters are parsed to their appropriate 
types + def test_filter_parsing( + self, + project, + ): + runner = dbtRunner() + result = runner.invoke(["parse"]) + assert result.success + assert isinstance(result.result, Manifest) + + manifest = get_manifest(project.project_root) + assert manifest + + # Test metrics with input measure filters. + filters1 = ( + manifest.metrics["metric.test.collective_tenure_measure_filter_str"] + .input_measures[0] + .filter.where_filters + ) + assert len(filters1) == 1 + assert filters1[0].where_sql_template == "{{ Dimension('id__loves_dbt') }} is true" + filters2 = ( + manifest.metrics["metric.test.collective_tenure_measure_filter_list"] + .input_measures[0] + .filter.where_filters + ) + assert len(filters2) == 1 + assert filters2[0].where_sql_template == "{{ Dimension('id__loves_dbt') }} is true" + + # Test metrics with metric-level filters. + filters3 = manifest.metrics[ + "metric.test.collective_tenure_metric_filter_str" + ].filter.where_filters + assert len(filters3) == 1 + assert filters3[0].where_sql_template == "{{ Dimension('id__loves_dbt') }} is true" + filters4 = manifest.metrics[ + "metric.test.collective_tenure_metric_filter_list" + ].filter.where_filters + assert len(filters4) == 1 + assert filters4[0].where_sql_template == "{{ Dimension('id__loves_dbt') }} is true" + + # Test derived metrics with input metric filters. + filters5 = ( + manifest.metrics["metric.test.average_tenure_filter_str"] + .input_metrics[0] + .filter.where_filters + ) + assert len(filters5) == 1 + assert filters5[0].where_sql_template == "{{ Dimension('id__loves_dbt') }} is true" + filters6 = ( + manifest.metrics["metric.test.average_tenure_filter_list"] + .input_metrics[0] + .filter.where_filters + ) + assert len(filters6) == 1 + assert filters6[0].where_sql_template == "{{ Dimension('id__loves_dbt') }} is true" + + +class TestDuplicateInputMeasures: + @pytest.fixture(scope="class") + def models(self): + return { + "basic_metrics.yml": basic_metrics_yml, + "filtered_metrics.yml": duplicate_measure_metric_yml, + "metricflow_time_spine.sql": metricflow_time_spine_sql, + "semantic_model_people.yml": semantic_model_people_yml, + "people.sql": models_people_sql, + } + + def test_duplicate_input_measures(self, project): + runner = dbtRunner() + result = runner.invoke(["parse"]) + assert result.success + assert isinstance(result.result, Manifest) diff --git a/tests/functional/minimal_cli/test_minimal_cli.py b/tests/functional/minimal_cli/test_minimal_cli.py index 8408354df78..d47f8b911c5 100644 --- a/tests/functional/minimal_cli/test_minimal_cli.py +++ b/tests/functional/minimal_cli/test_minimal_cli.py @@ -35,7 +35,7 @@ def test_ls(self, runner, project): ls_result = runner.invoke(cli, ["ls"]) assert "1 seed" in ls_result.output assert "1 model" in ls_result.output - assert "5 data_tests" in ls_result.output + assert "5 data tests" in ls_result.output assert "1 snapshot" in ls_result.output diff --git a/tests/functional/partial_parsing/fixtures.py b/tests/functional/partial_parsing/fixtures.py index f76d90ad216..c7a53982ec3 100644 --- a/tests/functional/partial_parsing/fixtures.py +++ b/tests/functional/partial_parsing/fixtures.py @@ -452,6 +452,60 @@ agg_time_dimension: created_at """ +people_sl_yml = """ +version: 2 + +semantic_models: + - name: semantic_people + model: ref('people') + dimensions: + - name: favorite_color + type: categorical + - name: created_at + type: TIME + type_params: + time_granularity: day + measures: + - name: years_tenure + agg: SUM + expr: tenure + - name: people + agg: count + expr: id + 
entities: + - name: id + type: primary + defaults: + agg_time_dimension: created_at + +metrics: + + - name: number_of_people + description: Total count of people + label: "Number of people" + type: simple + type_params: + measure: people + meta: + my_meta: 'testing' + + - name: collective_tenure + description: Total number of years of team experience + label: "Collective tenure" + type: simple + type_params: + measure: + name: years_tenure + filter: "{{ Dimension('id__loves_dbt') }} is true" + + - name: average_tenure + label: Average Tenure + type: ratio + type_params: + numerator: collective_tenure + denominator: number_of_people +""" + env_var_metrics_yml = """ metrics: diff --git a/tests/functional/partial_parsing/test_partial_parsing.py b/tests/functional/partial_parsing/test_partial_parsing.py index aa38944e678..6b5ba8895cd 100644 --- a/tests/functional/partial_parsing/test_partial_parsing.py +++ b/tests/functional/partial_parsing/test_partial_parsing.py @@ -11,6 +11,7 @@ run_dbt_and_capture, rename_dir, ) +import yaml from tests.functional.utils import up_one from dbt.tests.fixtures.project import write_project_files from tests.functional.partial_parsing.fixtures import ( @@ -824,3 +825,31 @@ def test_pp_renamed_project_dir_changed_project_contents(self, project): run_dbt(["deps"]) len(run_dbt(["--partial-parse", "seed"])) == 1 len(run_dbt(["--partial-parse", "run"])) == 3 + + +class TestProfileChanges: + @pytest.fixture(scope="class") + def models(self): + return { + "model.sql": "select 1 as id", + } + + def test_profile_change(self, project, dbt_profile_data): + # First run does not use partial parsing + _, stdout = run_dbt_and_capture(["parse"]) + assert "Unable to do partial parsing because saved manifest not found" in stdout + + _, stdout = run_dbt_and_capture(["parse"]) + assert "Unable to do partial parsing" not in stdout + + # Change dbname, which is included in the connection_info + dbt_profile_data["test"]["outputs"]["default"]["dbname"] = "dbt2" + write_file(yaml.safe_dump(dbt_profile_data), project.profiles_dir, "profiles.yml") + _, stdout = run_dbt_and_capture(["parse"]) + assert "Unable to do partial parsing because profile has changed" in stdout + + # Change the password, which is not included in the connection_info + dbt_profile_data["test"]["outputs"]["default"]["pass"] = "another_password" + write_file(yaml.safe_dump(dbt_profile_data), project.profiles_dir, "profiles.yml") + _, stdout = run_dbt_and_capture(["parse"]) + assert "Unable to do partial parsing" not in stdout diff --git a/tests/functional/partial_parsing/test_pp_metrics.py b/tests/functional/partial_parsing/test_pp_metrics.py index da994e09808..19da625604b 100644 --- a/tests/functional/partial_parsing/test_pp_metrics.py +++ b/tests/functional/partial_parsing/test_pp_metrics.py @@ -1,6 +1,8 @@ import pytest -from dbt.tests.util import run_dbt, write_file, get_manifest +from dbt.cli.main import dbtRunner +from dbt.contracts.graph.manifest import Manifest +from dbt.tests.util import run_dbt, rm_file, write_file, get_manifest from tests.functional.partial_parsing.fixtures import ( people_sql, metricflow_time_spine_sql, people_metrics_yml, people_metrics2_yml, metric_model_a_sql, people_metrics3_yml, + people_sl_yml, ) from dbt.exceptions import CompilationError @@ -84,3 +87,29 @@ def test_metrics(self, project): # We use "parse" here and not "run" because we're checking that the CompilationError # occurs at parse time, not compilation results = run_dbt(["parse"]) + + +class TestDeleteFileWithMetricsAndSemanticModels: +
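+ # Removing a schema file that defines both metrics and semantic models should not break the next partial parse.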
@pytest.fixture(scope="class") + def models(self): + return { + "people.sql": people_sql, + "metricflow_time_spine.sql": metricflow_time_spine_sql, + "people_sl.yml": people_sl_yml, + } + + def test_metrics(self, project): + # Initial parsing + runner = dbtRunner() + result = runner.invoke(["parse"]) + assert result.success + manifest = result.result + assert isinstance(manifest, Manifest) + assert len(manifest.metrics) == 3 + + # Remove metric file + rm_file(project.project_root, "models", "people_sl.yml") + + # Rerun parse, shouldn't fail + result = runner.invoke(["parse"]) + assert result.exception is None, result.exception diff --git a/tests/functional/partial_parsing/test_pp_vars.py b/tests/functional/partial_parsing/test_pp_vars.py index a01e78c6458..e55592f8dd2 100644 --- a/tests/functional/partial_parsing/test_pp_vars.py +++ b/tests/functional/partial_parsing/test_pp_vars.py @@ -264,11 +264,16 @@ def test_env_vars_models(self, project): class TestProjectEnvVars: + @pytest.fixture(scope="class") + def environment(self): + custom_env = os.environ.copy() + custom_env["ENV_VAR_NAME"] = "Jane Smith" + return custom_env + @pytest.fixture(scope="class") def project_config_update(self): # Need to set the environment variable here initially because # the project fixture loads the config. - os.environ["ENV_VAR_NAME"] = "Jane Smith" return {"models": {"+meta": {"meta_name": "{{ env_var('ENV_VAR_NAME') }}"}}} @pytest.fixture(scope="class") @@ -279,6 +284,7 @@ def models(self): def test_project_env_vars(self, project): # Initial run + os.environ["ENV_VAR_NAME"] = "Jane Smith" results = run_dbt(["run"]) assert len(results) == 1 manifest = get_manifest(project.project_root) @@ -308,46 +314,40 @@ def models(self): "model_one.sql": model_one_sql, } + @pytest.fixture(scope="class") + def environment(self): + custom_env = os.environ.copy() + custom_env["ENV_VAR_HOST"] = "localhost" + return custom_env + @pytest.fixture(scope="class") def dbt_profile_target(self): - # Need to set these here because the base integration test class - # calls 'load_config' before the tests are run. - # Note: only the specified profile is rendered, so there's no - # point it setting env_vars in non-used profiles. - os.environ["ENV_VAR_USER"] = "root" - os.environ["ENV_VAR_PASS"] = "password" return { "type": "postgres", "threads": 4, - "host": "localhost", + "host": "{{ env_var('ENV_VAR_HOST') }}", "port": 5432, - "user": "{{ env_var('ENV_VAR_USER') }}", - "pass": "{{ env_var('ENV_VAR_PASS') }}", + "user": "root", + "pass": "password", "dbname": "dbt", } def test_profile_env_vars(self, project, logs_dir): # Initial run - os.environ["ENV_VAR_USER"] = "root" - os.environ["ENV_VAR_PASS"] = "password" + os.environ["ENV_VAR_HOST"] = "localhost" run_dbt(["run"]) - manifest = get_manifest(project.project_root) - env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum # Change env_vars, the user doesn't exist, this should fail - os.environ["ENV_VAR_USER"] = "fake_user" + os.environ["ENV_VAR_HOST"] = "wrong_host" # N.B. 
run_dbt_and_capture won't work here because FailedToConnectError ends the test entirely with pytest.raises(FailedToConnectError): run_dbt(["run"], expect_pass=False) log_output = Path(logs_dir, "dbt.log").read_text() - assert "env vars used in profiles.yml have changed" in log_output - - manifest = get_manifest(project.project_root) - assert env_vars_checksum != manifest.state_check.profile_env_vars_hash.checksum + assert "Unable to do partial parsing because profile has changed" in log_output class TestProfileSecretEnvVars: diff --git a/tests/functional/retry/test_retry.py b/tests/functional/retry/test_retry.py index 8890a99ac16..7bf8d8a0ef5 100644 --- a/tests/functional/retry/test_retry.py +++ b/tests/functional/retry/test_retry.py @@ -1,3 +1,4 @@ +from pathlib import Path from shutil import copytree, move import pytest @@ -327,3 +328,40 @@ def test_retry(self, project): # ...and so should this one, since the effect of the full-refresh parameter should persist. results = run_dbt(["retry"], expect_pass=False) assert len(results) == 1 + + +class TestRetryTargetPathEnvVar: + @pytest.fixture(scope="class") + def models(self): + return { + "sample_model.sql": models__sample_model, + } + + def test_retry_target_path_env_var(self, project, monkeypatch): + monkeypatch.setenv("DBT_TARGET_PATH", "artifacts") + run_dbt(["run"], expect_pass=False) + + write_file(models__second_model, "models", "sample_model.sql") + + results = run_dbt(["retry"]) + assert len(results) == 1 + + +class TestRetryTargetPathFlag: + @pytest.fixture(scope="class") + def models(self): + return { + "sample_model.sql": models__sample_model, + } + + def test_retry_target_path_flag(self, project): + run_dbt(["run", "--target-path", "target"], expect_pass=False) + + project_root = project.project_root + move(project_root / "target", project_root / "artifacts") + + write_file(models__second_model, "models", "sample_model.sql") + + results = run_dbt(["retry", "--state", "artifacts", "--target-path", "my_target_path"]) + assert len(results) == 1 + assert Path("my_target_path").is_dir() diff --git a/tests/functional/saved_queries/fixtures.py b/tests/functional/saved_queries/fixtures.py index 90cb53e7a79..e938760a12e 100644 --- a/tests/functional/saved_queries/fixtures.py +++ b/tests/functional/saved_queries/fixtures.py @@ -17,6 +17,7 @@ where: - "{{ Dimension('user__ds', 'DAY') }} <= now()" - "{{ Dimension('user__ds', 'DAY') }} >= '2023-01-01'" + - "{{ Metric('txn_revenue', ['id']) }} > 1" exports: - name: my_export config: @@ -25,6 +26,62 @@ schema: my_export_schema_name """ +saved_queries_with_defaults_yml = """ +version: 2 + +saved_queries: + - name: test_saved_query + description: "{{ doc('saved_query_description') }}" + label: Test Saved Query + query_params: + metrics: + - simple_metric + group_by: + - "Dimension('user__ds')" + where: + - "{{ Dimension('user__ds', 'DAY') }} <= now()" + - "{{ Dimension('user__ds', 'DAY') }} >= '2023-01-01'" + - "{{ Metric('txn_revenue', ['id']) }} > 1" + exports: + - name: my_export + config: + alias: my_export_alias + export_as: table +""" + +saved_queries_with_diff_filters_yml = """ +version: 2 + +saved_queries: + - name: test_saved_query_where_list + description: "{{ doc('saved_query_description') }}" + label: Test Saved Query + query_params: + metrics: + - simple_metric + group_by: + - "Dimension('user__ds')" + where: + - "{{ Dimension('user__ds', 'DAY') }} <= now()" + - "{{ Dimension('user__ds', 'DAY') }} >= '2023-01-01'" + exports: + - name: my_export + config: + alias: my_export_alias + 
export_as: table + schema: my_export_schema_name + + - name: test_saved_query_where_str + description: "{{ doc('saved_query_description') }}" + label: Test Saved Query2 + query_params: + metrics: + - simple_metric + group_by: + - "Dimension('user__ds')" + where: "{{ Dimension('user__ds', 'DAY') }} <= now()" +""" + saved_query_with_extra_config_attributes_yml = """ version: 2 diff --git a/tests/functional/saved_queries/test_configs.py b/tests/functional/saved_queries/test_configs.py index 4c55c54a9eb..ef63888441a 100644 --- a/tests/functional/saved_queries/test_configs.py +++ b/tests/functional/saved_queries/test_configs.py @@ -12,6 +12,7 @@ saved_query_with_extra_config_attributes_yml, saved_query_with_export_configs_defined_at_saved_query_level_yml, saved_query_without_export_configs_defined_yml, + saved_queries_with_defaults_yml, ) from tests.functional.semantic_models.fixtures import ( fct_revenue_sql, @@ -121,6 +122,33 @@ def test_extra_config_properties_dont_break_parsing(self, project): assert saved_query.exports[0].config.__dict__.get("my_random_config") is None +class TestExportConfigsWithDefaultProperties(BaseConfigProject): + @pytest.fixture(scope="class") + def models(self): + return { + "saved_queries.yml": saved_queries_with_defaults_yml, + "schema.yml": schema_yml, + "fct_revenue.sql": fct_revenue_sql, + "metricflow_time_spine.sql": metricflow_time_spine_sql, + "docs.md": saved_query_description, + } + + def test_default_properties(self, project): + runner = dbtTestRunner() + + # parse with default fixture project config + result = runner.invoke(["parse"]) + assert result.success + assert isinstance(result.result, Manifest) + assert len(result.result.saved_queries) == 1 + saved_query = result.result.saved_queries["saved_query.test.test_saved_query"] + assert len(saved_query.exports) == 1 + export = saved_query.exports[0] + assert export.config.alias == "my_export_alias" + assert export.config.schema_name == project.test_schema + assert export.config.database == project.database + + class TestInheritingExportConfigFromSavedQueryConfig(BaseConfigProject): @pytest.fixture(scope="class") def models(self): @@ -152,6 +180,7 @@ def test_export_config_inherits_from_saved_query(self, project): assert export1.config.export_as != saved_query.config.export_as assert export1.config.schema_name == "my_custom_export_schema" assert export1.config.schema_name != saved_query.config.schema + assert export1.config.database == project.database # assert Export `my_export` has its configs defined from the saved_query because they should take priority export2 = next( @@ -162,6 +191,7 @@ def test_export_config_inherits_from_saved_query(self, project): assert export2.config.export_as == saved_query.config.export_as assert export2.config.schema_name == "my_default_export_schema" assert export2.config.schema_name == saved_query.config.schema + assert export2.config.database == project.database class TestInheritingExportConfigsFromProject(BaseConfigProject): diff --git a/tests/functional/saved_queries/test_saved_query_build.py b/tests/functional/saved_queries/test_saved_query_build.py index ffe37761521..2f721b15337 100644 --- a/tests/functional/saved_queries/test_saved_query_build.py +++ b/tests/functional/saved_queries/test_saved_query_build.py @@ -28,11 +28,8 @@ def packages(self): version: 1.1.1 """ - def test_semantic_model_parsing(self, project): + def test_build_saved_queries(self, project): run_dbt(["deps"]) result = run_dbt(["build"]) - assert len(result.results) == 2 - assert "test_saved_query" 
not in [r.node.name for r in result.results] - result = run_dbt(["build", "--include-saved-query"]) assert len(result.results) == 3 - assert "test_saved_query" in [r.node.name for r in result.results] + assert "NO-OP" in [r.message for r in result.results] diff --git a/tests/functional/saved_queries/test_saved_query_parsing.py b/tests/functional/saved_queries/test_saved_query_parsing.py index 84c8c78b5eb..9c02b7c5418 100644 --- a/tests/functional/saved_queries/test_saved_query_parsing.py +++ b/tests/functional/saved_queries/test_saved_query_parsing.py @@ -7,7 +7,11 @@ from dbt.tests.util import write_file from dbt_semantic_interfaces.type_enums.export_destination_type import ExportDestinationType from tests.functional.assertions.test_runner import dbtTestRunner -from tests.functional.saved_queries.fixtures import saved_queries_yml, saved_query_description +from tests.functional.saved_queries.fixtures import ( + saved_queries_yml, + saved_query_description, + saved_queries_with_diff_filters_yml, +) from tests.functional.semantic_models.fixtures import ( fct_revenue_sql, metricflow_time_spine_sql, @@ -37,7 +41,7 @@ def test_semantic_model_parsing(self, project): assert saved_query.name == "test_saved_query" assert len(saved_query.query_params.metrics) == 1 assert len(saved_query.query_params.group_by) == 1 - assert len(saved_query.query_params.where.where_filters) == 2 + assert len(saved_query.query_params.where.where_filters) == 3 assert len(saved_query.depends_on.nodes) == 1 assert saved_query.description == "My SavedQuery Description" assert len(saved_query.exports) == 1 @@ -63,12 +67,38 @@ class TestSavedQueryPartialParsing: def models(self): return { "saved_queries.yml": saved_queries_yml, + "saved_queries_with_diff_filters.yml": saved_queries_with_diff_filters_yml, "schema.yml": schema_yml, "fct_revenue.sql": fct_revenue_sql, "metricflow_time_spine.sql": metricflow_time_spine_sql, "docs.md": saved_query_description, } + def test_saved_query_filter_types(self, project): + runner = dbtTestRunner() + result = runner.invoke(["parse"]) + assert result.success + + manifest = result.result + saved_query1 = manifest.saved_queries["saved_query.test.test_saved_query_where_list"] + saved_query2 = manifest.saved_queries["saved_query.test.test_saved_query_where_str"] + + # List filter + assert len(saved_query1.query_params.where.where_filters) == 2 + assert { + where_filter.where_sql_template + for where_filter in saved_query1.query_params.where.where_filters + } == { + "{{ Dimension('user__ds', 'DAY') }} <= now()", + "{{ Dimension('user__ds', 'DAY') }} >= '2023-01-01'", + } + # String filter + assert len(saved_query2.query_params.where.where_filters) == 1 + assert ( + saved_query2.query_params.where.where_filters[0].where_sql_template + == "{{ Dimension('user__ds', 'DAY') }} <= now()" + ) + def test_saved_query_metrics_changed(self, project): # First, use the default saved_queries.yml to define our saved_queries, and # run the dbt parse command diff --git a/tests/functional/sources/test_source_freshness.py b/tests/functional/sources/test_source_freshness.py index e204bf76142..0e58b33b555 100644 --- a/tests/functional/sources/test_source_freshness.py +++ b/tests/functional/sources/test_source_freshness.py @@ -5,6 +5,8 @@ import yaml import dbt.version +from dbt.artifacts.schemas.freshness import FreshnessResult +from dbt.artifacts.schemas.results import FreshnessStatus from dbt.cli.main import dbtRunner from tests.functional.sources.common_source_setup import BaseSourcesTest from 
tests.functional.sources.fixtures import ( @@ -385,7 +387,7 @@ class TestMetadataFreshnessFails: def models(self): return {"schema.yml": freshness_via_metadata_schema_yml} - def test_metadata_freshness_fails(self, project): + def test_metadata_freshness_unsupported_parse_warning(self, project): """Since the default test adapter (postgres) does not support metadata based source freshness checks, trying to use that mechanism should result in a parse-time warning.""" @@ -401,6 +403,16 @@ def warning_probe(e): assert got_warning + def test_metadata_freshness_unsupported_error_when_run(self, project): + runner = dbtRunner() + result = runner.invoke(["source", "freshness"]) + assert isinstance(result.result, FreshnessResult) + assert len(result.result.results) == 1 + freshness_result = result.result.results[0] + assert freshness_result.status == FreshnessStatus.RuntimeErr + assert "Could not compute freshness for source test_table" in freshness_result.message + class TestHooksInSourceFreshness(SuccessfulSourceFreshnessTest): @pytest.fixture(scope="class") @@ -430,6 +442,44 @@ def test_hooks_do_run_for_source_freshness( assert "on-run-end" in log_output +class TestHooksInSourceFreshnessError: + @pytest.fixture(scope="class") + def models(self): + return { + "schema.yml": error_models_schema_yml, + "model.sql": error_models_model_sql, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "on-run-start": ["select fake_column from table_does_not_exist"], + "flags": { + "source_freshness_run_project_hooks": True, + }, + } + + def test_hooks_error_for_source_freshness( + self, + project, + ): + run_result_error = None + + def run_result_error_probe(e): + nonlocal run_result_error + if ( + e.info.name == "RunResultError" + and e.info.level == "error" + and "on-run-start" in e.info.msg + ): + run_result_error = e.info.msg + + runner = dbtRunner(callbacks=[run_result_error_probe]) + runner.invoke(["source", "freshness"]) + assert 'relation "table_does_not_exist" does not exist' in run_result_error + + class TestHooksInSourceFreshnessDisabled(SuccessfulSourceFreshnessTest): @pytest.fixture(scope="class") def project_config_update(self): diff --git a/tests/functional/test_singular_tests.py b/tests/functional/test_singular_tests.py new file mode 100644 index 00000000000..a4b9d05b510 --- /dev/null +++ b/tests/functional/test_singular_tests.py @@ -0,0 +1,34 @@ +import pytest + +from dbt.tests.util import run_dbt + +single_test_sql = """ +{{ config(warn_if = '>0', error_if = '> 10') }} + +select 1 as issue +""" + + +class TestSingularTestWarnError: + @pytest.fixture(scope="class") + def tests(self): + return {"single_test.sql": single_test_sql} + + def test_singular_test_warn_error(self, project): + results = run_dbt(["--warn-error", "test"], expect_pass=False) + assert results.results[0].status == "fail" + + def test_singular_test_warn_error_options(self, project): + results = run_dbt( + ["--warn-error-options", "{'include': 'all'}", "test"], expect_pass=False + ) + assert results.results[0].status == "fail" + + def test_singular_test_equals_warn_error(self, project): + results = run_dbt(["--warn-error", "test"], expect_pass=False) + warn_error_result = results.results[0].status + + results = run_dbt( + ["--warn-error-options", "{'include': 'all'}", "test"], expect_pass=False + ) + assert warn_error_result == results.results[0].status diff --git a/tests/functional/unit_testing/fixtures.py b/tests/functional/unit_testing/fixtures.py index
02af9b418ba..54f0497250a 100644 --- a/tests/functional/unit_testing/fixtures.py +++ b/tests/functional/unit_testing/fixtures.py @@ -266,7 +266,7 @@ {% endif %} """ -test_my_model_incremental_yml = """ +test_my_model_incremental_yml_basic = """ unit_tests: - name: incremental_false model: my_incremental_model @@ -300,6 +300,54 @@ - {event_time: "2020-01-03", event: 3} """ +test_my_model_incremental_yml_no_override = """ +unit_tests: + - name: incremental_false + model: my_incremental_model + given: + - input: ref('events') + rows: + - {event_time: "2020-01-01", event: 1} + expect: + rows: + - {event_time: "2020-01-01", event: 1} +""" + +test_my_model_incremental_yml_wrong_override = """ +unit_tests: + - name: incremental_false + model: my_incremental_model + overrides: + macros: + is_incremental: foobar + given: + - input: ref('events') + rows: + - {event_time: "2020-01-01", event: 1} + expect: + rows: + - {event_time: "2020-01-01", event: 1} +""" + +test_my_model_incremental_yml_no_this_input = """ +unit_tests: + - name: incremental_true + model: my_incremental_model + overrides: + macros: + is_incremental: true + given: + - input: ref('events') + rows: + - {event_time: "2020-01-01", event: 1} + - {event_time: "2020-01-02", event: 2} + - {event_time: "2020-01-03", event: 3} + expect: + rows: + - {event_time: "2020-01-02", event: 2} + - {event_time: "2020-01-03", event: 3} +""" + # -- inline csv tests test_my_model_csv_yml = """ diff --git a/tests/functional/unit_testing/test_sql_format.py b/tests/functional/unit_testing/test_sql_format.py new file mode 100644 index 00000000000..6b5af93e1ba --- /dev/null +++ b/tests/functional/unit_testing/test_sql_format.py @@ -0,0 +1,245 @@ +import pytest +from dbt.tests.util import run_dbt + +wizards_csv = """id,w_name,email,email_tld,phone,world +1,Albus Dumbledore,a.dumbledore@gmail.com,gmail.com,813-456-9087,1 +2,Gandalf,gandy811@yahoo.com,yahoo.com,551-329-8367,2 +3,Winifred Sanderson,winnie@hocuspocus.com,hocuspocus.com,,6 +4,Marnie Piper,cromwellwitch@gmail.com,gmail.com,,5 +5,Grace Goheen,grace.goheen@dbtlabs.com,dbtlabs.com,,3 +6,Glinda,glinda_good@hotmail.com,hotmail.com,912-458-3289,4 +""" + +top_level_email_domains_csv = """tld +gmail.com +yahoo.com +hocuspocus.com +dbtlabs.com +hotmail.com +""" + +worlds_csv = """id,name +1,The Wizarding World +2,Middle-earth +3,dbt Labs +4,Oz +5,Halloweentown +6,Salem +""" + +stg_wizards_sql = """ +select + id as wizard_id, + w_name as wizard_name, + email, + email_tld as email_top_level_domain, + phone as phone_number, + world as world_id +from {{ ref('wizards') }} +""" + +stg_worlds_sql = """ +select + id as world_id, + name as world_name +from {{ ref('worlds') }} +""" + +dim_wizards_sql = """ +with wizards as ( + + select * from {{ ref('stg_wizards') }} + +), + +worlds as ( + + select * from {{ ref('stg_worlds') }} + +), + +accepted_email_domains as ( + + select * from {{ ref('top_level_email_domains') }} + +), + +check_valid_emails as ( + + select + wizards.wizard_id, + wizards.wizard_name, + wizards.email, + wizards.phone_number, + wizards.world_id, + + coalesce ( + wizards.email ~ '^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,}$' + = true + and accepted_email_domains.tld is not null, + false) as is_valid_email_address + + from wizards + left join accepted_email_domains + on wizards.email_top_level_domain = lower(accepted_email_domains.tld) + +) + +select + check_valid_emails.wizard_id, + check_valid_emails.wizard_name, + check_valid_emails.email, + check_valid_emails.is_valid_email_address, + 
check_valid_emails.phone_number, + worlds.world_name +from check_valid_emails +left join worlds + on check_valid_emails.world_id = worlds.world_id +""" + +orig_schema_yml = """ +unit_tests: + - name: test_valid_email_address + model: dim_wizards + given: + - input: ref('stg_wizards') + rows: + - {email: cool@example.com, email_top_level_domain: example.com} + - {email: cool@unknown.com, email_top_level_domain: unknown.com} + - {email: badgmail.com, email_top_level_domain: gmail.com} + - {email: missingdot@gmailcom, email_top_level_domain: gmail.com} + - input: ref('top_level_email_domains') + rows: + - {tld: example.com} + - {tld: gmail.com} + - input: ref('stg_worlds') + rows: [] + expect: + rows: + - {email: cool@example.com, is_valid_email_address: true} + - {email: cool@unknown.com, is_valid_email_address: false} + - {email: badgmail.com, is_valid_email_address: false} + - {email: missingdot@gmailcom, is_valid_email_address: false} +""" + +schema_yml = """ +unit_tests: + - name: test_valid_email_address + model: dim_wizards + given: + - input: ref('stg_wizards') + format: sql + rows: | + select 1 as wizard_id, 'joe' as wizard_name, 'cool@example.com' as email, 'example.com' as email_top_level_domain, '123' as phone_number, 1 as world_id union all + select 2 as wizard_id, 'don' as wizard_name, 'cool@unknown.com' as email, 'unknown.com' as email_top_level_domain, '456' as phone_number, 2 as world_id union all + select 3 as wizard_id, 'mary' as wizard_name, 'badgmail.com' as email, 'gmail.com' as email_top_level_domain, '789' as phone_number, 3 as world_id union all + select 4 as wizard_id, 'jane' as wizard_name, 'missingdot@gmailcom' as email, 'gmail.com' as email_top_level_domain, '102' as phone_number, 4 as world_id + - input: ref('top_level_email_domains') + format: sql + rows: | + select 'example.com' as tld union all + select 'gmail.com' as tld + - input: ref('stg_worlds') + rows: [] + expect: + format: sql + rows: | + select 1 as wizard_id, 'joe' as wizard_name, 'cool@example.com' as email, true as is_valid_email_address, '123' as phone_number, null as world_name union all + select 2 as wizard_id, 'don' as wizard_name, 'cool@unknown.com' as email, false as is_valid_email_address, '456' as phone_number, null as world_name union all + select 3 as wizard_id, 'mary' as wizard_name, 'badgmail.com' as email, false as is_valid_email_address, '789' as phone_number, null as world_name union all + select 4 as wizard_id, 'jane' as wizard_name, 'missingdot@gmailcom' as email, false as is_valid_email_address, '102' as phone_number, null as world_name +""" + + +class TestSQLFormat: + @pytest.fixture(scope="class") + def seeds(self): + return { + "wizards.csv": wizards_csv, + "top_level_email_domains.csv": top_level_email_domains_csv, + "worlds.csv": worlds_csv, + } + + @pytest.fixture(scope="class") + def models(self): + return { + "stg_wizards.sql": stg_wizards_sql, + "stg_worlds.sql": stg_worlds_sql, + "dim_wizards.sql": dim_wizards_sql, + "schema.yml": schema_yml, + } + + def test_sql_format(self, project): + results = run_dbt(["build"]) + assert len(results) == 7 + + +stg_wizards_fixture_sql = """ + select 1 as wizard_id, 'joe' as wizard_name, 'cool@example.com' as email, 'example.com' as email_top_level_domain, '123' as phone_number, 1 as world_id union all + select 2 as wizard_id, 'don' as wizard_name, 'cool@unknown.com' as email, 'unknown.com' as email_top_level_domain, '456' as phone_number, 2 as world_id union all + select 3 as wizard_id, 'mary' as wizard_name, 'badgmail.com' as email, 
'gmail.com' as email_top_level_domain, '789' as phone_number, 3 as world_id union all + select 4 as wizard_id, 'jane' as wizard_name, 'missingdot@gmailcom' as email, 'gmail.com' as email_top_level_domain, '102' as phone_number, 4 as world_id +""" + +top_level_email_domains_fixture_sql = """ + select 'example.com' as tld union all + select 'gmail.com' as tld +""" + +test_valid_email_address_fixture_sql = """ + select 1 as wizard_id, 'joe' as wizard_name, 'cool@example.com' as email, true as is_valid_email_address, '123' as phone_number, null as world_name union all + select 2 as wizard_id, 'don' as wizard_name, 'cool@unknown.com' as email, false as is_valid_email_address, '456' as phone_number, null as world_name union all + select 3 as wizard_id, 'mary' as wizard_name, 'badgmail.com' as email, false as is_valid_email_address, '789' as phone_number, null as world_name union all + select 4 as wizard_id, 'jane' as wizard_name, 'missingdot@gmailcom' as email, false as is_valid_email_address, '102' as phone_number, null as world_name +""" + +fixture_schema_yml = """ +unit_tests: + - name: test_valid_email_address + model: dim_wizards + given: + - input: ref('stg_wizards') + format: sql + fixture: stg_wizards_fixture + - input: ref('top_level_email_domains') + format: sql + fixture: top_level_email_domains_fixture + - input: ref('stg_worlds') + rows: [] + expect: + format: sql + fixture: test_valid_email_address_fixture +""" + + +class TestSQLFormatFixtures: + @pytest.fixture(scope="class") + def tests(self): + return { + "fixtures": { + "test_valid_email_address_fixture.sql": test_valid_email_address_fixture_sql, + "top_level_email_domains_fixture.sql": top_level_email_domains_fixture_sql, + "stg_wizards_fixture.sql": stg_wizards_fixture_sql, + } + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "wizards.csv": wizards_csv, + "top_level_email_domains.csv": top_level_email_domains_csv, + "worlds.csv": worlds_csv, + } + + @pytest.fixture(scope="class") + def models(self): + return { + "stg_wizards.sql": stg_wizards_sql, + "stg_worlds.sql": stg_worlds_sql, + "dim_wizards.sql": dim_wizards_sql, + "schema.yml": fixture_schema_yml, + } + + def test_sql_format_fixtures(self, project): + results = run_dbt(["build"]) + assert len(results) == 7 diff --git a/tests/functional/unit_testing/test_unit_testing.py b/tests/functional/unit_testing/test_unit_testing.py index ffa3d0e34b2..887c1907e76 100644 --- a/tests/functional/unit_testing/test_unit_testing.py +++ b/tests/functional/unit_testing/test_unit_testing.py @@ -1,9 +1,13 @@ import pytest +import os from unittest import mock from dbt.tests.util import ( run_dbt, write_file, get_manifest, + run_dbt_and_capture, + read_file, + file_exists, ) from dbt.contracts.results import NodeStatus from dbt.exceptions import DuplicateResourceNameError, ParsingError @@ -18,14 +22,18 @@ datetime_test, my_incremental_model_sql, event_sql, - test_my_model_incremental_yml, + test_my_model_incremental_yml_basic, test_my_model_yml_invalid, test_my_model_yml_invalid_ref, valid_emails_sql, top_level_domains_sql, external_package__accounts_seed_csv, external_package, + test_my_model_incremental_yml_no_override, + test_my_model_incremental_yml_wrong_override, + test_my_model_incremental_yml_no_this_input, ) +from tests.unit.utils import normalize class TestUnitTests: @@ -50,12 +58,29 @@ def test_basic(self, project): results = run_dbt(["test", "--select", "my_model"], expect_pass=False) assert len(results) == 5 - results = run_dbt(["build", "--select", 
"my_model"], expect_pass=False) + results = run_dbt( + ["build", "--select", "my_model", "--resource-types", "model unit_test"], + expect_pass=False, + ) assert len(results) == 6 for result in results: if result.node.unique_id == "model.test.my_model": result.status == NodeStatus.Skipped + # Run build command but specify no unit tests + results = run_dbt( + ["build", "--select", "my_model", "--exclude-resource-types", "unit_test"], + expect_pass=True, + ) + assert len(results) == 1 + + # Exclude unit tests with environment variable + os.environ["DBT_EXCLUDE_RESOURCE_TYPES"] = "unit_test" + results = run_dbt(["build", "--select", "my_model"], expect_pass=True) + assert len(results) == 1 + + del os.environ["DBT_EXCLUDE_RESOURCE_TYPES"] + # Test select by test name results = run_dbt(["test", "--select", "test_name:test_my_model_string_concat"]) assert len(results) == 1 @@ -99,13 +124,13 @@ def test_basic(self, project): run_dbt(["run", "--no-partial-parse", "--select", "my_model"]) -class TestUnitTestIncrementalModel: +class TestUnitTestIncrementalModelBasic: @pytest.fixture(scope="class") def models(self): return { "my_incremental_model.sql": my_incremental_model_sql, "events.sql": event_sql, - "test_my_incremental_model.yml": test_my_model_incremental_yml, + "schema.yml": test_my_model_incremental_yml_basic, } def test_basic(self, project): @@ -117,6 +142,57 @@ def test_basic(self, project): assert len(results) == 2 +class TestUnitTestIncrementalModelNoOverride: + @pytest.fixture(scope="class") + def models(self): + return { + "my_incremental_model.sql": my_incremental_model_sql, + "events.sql": event_sql, + "schema.yml": test_my_model_incremental_yml_no_override, + } + + def test_no_override(self, project): + with pytest.raises( + ParsingError, + match="Boolean override for 'is_incremental' must be provided for unit test 'incremental_false' in model 'my_incremental_model'", + ): + run_dbt(["parse"]) + + +class TestUnitTestIncrementalModelWrongOverride: + @pytest.fixture(scope="class") + def models(self): + return { + "my_incremental_model.sql": my_incremental_model_sql, + "events.sql": event_sql, + "schema.yml": test_my_model_incremental_yml_wrong_override, + } + + def test_str_override(self, project): + with pytest.raises( + ParsingError, + match="Boolean override for 'is_incremental' must be provided for unit test 'incremental_false' in model 'my_incremental_model'", + ): + run_dbt(["parse"]) + + +class TestUnitTestIncrementalModelNoThisInput: + @pytest.fixture(scope="class") + def models(self): + return { + "my_incremental_model.sql": my_incremental_model_sql, + "events.sql": event_sql, + "schema.yml": test_my_model_incremental_yml_no_this_input, + } + + def test_no_this_input(self, project): + with pytest.raises( + ParsingError, + match="Unit test 'incremental_true' for incremental model 'my_incremental_model' must have a 'this' input", + ): + run_dbt(["parse"]) + + my_new_model = """ select my_favorite_seed.id, @@ -370,3 +446,60 @@ def test_unit_test_ext_nodes( run_dbt(["run"], expect_pass=True) results = run_dbt(["test", "--select", "valid_emails"], expect_pass=True) assert len(results) == 1 + + +subfolder_model_a_sql = """select 1 as id, 'blue' as color""" + +subfolder_model_b_sql = """ +select + id, + color +from {{ ref('model_a') }} +""" + +subfolder_my_model_yml = """ +unit_tests: + - name: my_unit_test + model: model_b + given: + - input: ref('model_a') + rows: + - { id: 1, color: 'blue' } + expect: + rows: + - { id: 1, color: 'red' } +""" + + +class TestUnitTestSubfolderPath: + 
@pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "subfolder": {
+                "model_a.sql": subfolder_model_a_sql,
+                "model_b.sql": subfolder_model_b_sql,
+                "my_model.yml": subfolder_my_model_yml,
+            }
+        }
+
+    def test_subfolder_unit_test(self, project):
+        results, output = run_dbt_and_capture(["build"], expect_pass=False)
+
+        # Test that input fixture doesn't overwrite the original model
+        assert (
+            read_file("target/compiled/test/models/subfolder/model_a.sql").strip()
+            == subfolder_model_a_sql.strip()
+        )
+
+        # Test that correct path is written in logs
+        assert (
+            normalize(
+                "target/compiled/test/models/subfolder/my_model.yml/models/subfolder/my_unit_test.sql"
+            )
+            in output
+        )
+        assert file_exists(
+            normalize(
+                "target/compiled/test/models/subfolder/my_model.yml/models/subfolder/my_unit_test.sql"
+            )
+        )
diff --git a/tests/functional/unit_testing/test_ut_ephemeral.py b/tests/functional/unit_testing/test_ut_ephemeral.py
new file mode 100644
index 00000000000..2898633ec40
--- /dev/null
+++ b/tests/functional/unit_testing/test_ut_ephemeral.py
@@ -0,0 +1,84 @@
+import pytest
+from dbt.tests.util import run_dbt, write_file
+from dbt.contracts.results import RunStatus, TestStatus
+
+
+ephemeral_model_sql = """
+{{ config(materialized="ephemeral") }}
+select 1 as id, 'Emily' as first_name
+"""
+
+nested_ephemeral_model_sql = """
+{{ config(materialized="ephemeral") }}
+select * from {{ ref('ephemeral_model') }}
+"""
+
+customers_sql = """
+select * from {{ ref('nested_ephemeral_model') }}
+"""
+
+test_sql_format_yml = """
+unit_tests:
+  - name: test_customers
+    model: customers
+    given:
+      - input: ref('nested_ephemeral_model')
+        format: sql
+        rows: |
+          select 1 as id, 'Emily' as first_name
+    expect:
+      rows:
+        - {id: 1, first_name: Emily}
+"""
+
+failing_test_sql_format_yml = """
+  - name: fail_test_customers
+    model: customers
+    given:
+      - input: ref('nested_ephemeral_model')
+        format: sql
+        rows: |
+          select 1 as id, 'Emily' as first_name
+    expect:
+      rows:
+        - {id: 1, first_name: Joan}
+"""
+
+
+class TestUnitTestEphemeralInput:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "customers.sql": customers_sql,
+            "ephemeral_model.sql": ephemeral_model_sql,
+            "nested_ephemeral_model.sql": nested_ephemeral_model_sql,
+            "tests.yml": test_sql_format_yml,
+        }
+
+    def test_ephemeral_input(self, project):
+        results = run_dbt(["run"])
+        assert len(results) == 1
+
+        results = run_dbt(["test", "--select", "test_type:unit"])
+        assert len(results) == 1
+
+        results = run_dbt(["build"])
+        assert len(results) == 2
+        result_unique_ids = [result.node.unique_id for result in results]
+        assert len(result_unique_ids) == 2
+        assert "unit_test.test.customers.test_customers" in result_unique_ids
+
+        # write failing unit test
+        write_file(
+            test_sql_format_yml + failing_test_sql_format_yml,
+            project.project_root,
+            "models",
+            "tests.yml",
+        )
+        results = run_dbt(["build"], expect_pass=False)
+        for result in results:
+            if result.node.unique_id == "model.test.customers":
+                assert result.status == RunStatus.Skipped
+            elif result.node.unique_id == "unit_test.test.customers.fail_test_customers":
+                assert result.status == TestStatus.Fail
+        assert len(results) == 3
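A note on the status checks above (illustrative only, not part of the diff): iterating and branching on unique_id works, but building a status map first makes the expected outcome of each node explicit. Here `results` is assumed to be the list returned by run_dbt(["build"], expect_pass=False) in the test above:

    # Map each result's node unique_id to its status for direct assertions.
    statuses = {result.node.unique_id: result.status for result in results}
    assert statuses["model.test.customers"] == RunStatus.Skipped
    assert statuses["unit_test.test.customers.fail_test_customers"] == TestStatus.Fail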
diff --git a/tests/functional/unit_testing/test_ut_names.py b/tests/functional/unit_testing/test_ut_names.py
index 27c0a56201c..d1721438576 100644
--- a/tests/functional/unit_testing/test_ut_names.py
+++ b/tests/functional/unit_testing/test_ut_names.py
@@ -27,33 +27,37 @@ def test_duplicate_test_names_across_models(self, project):
         # Select duplicate tests
         results, log_output = run_dbt_and_capture(["test"], expect_pass=True)
         assert len(results) == 2
-        assert ["my_model_a", "my_model_b"] == sorted([result.node.model for result in results])
+        assert {"model.test.my_model_a", "model.test.my_model_b"} == {
+            result.node.tested_node_unique_id for result in results
+        }
         assert "my_model_a::my_test_name" in log_output
         assert "my_model_b::my_test_name" in log_output

         # Test select duplicates by test name
         results = run_dbt(["test", "--select", "test_name:my_test_name"])
         assert len(results) == 2
-        assert ["my_model_a", "my_model_b"] == sorted([result.node.model for result in results])
+        assert {"model.test.my_model_a", "model.test.my_model_b"} == {
+            result.node.tested_node_unique_id for result in results
+        }
         assert "my_model_a::my_test_name" in log_output
         assert "my_model_b::my_test_name" in log_output

         results = run_dbt(["test", "--select", "my_model_a,test_name:my_test_name"])
         assert len(results) == 1
-        assert results[0].node.model == "my_model_a"
+        assert results[0].node.tested_node_unique_id == "model.test.my_model_a"

         results = run_dbt(["test", "--select", "my_model_b,test_name:my_test_name"])
         assert len(results) == 1
-        assert results[0].node.model == "my_model_b"
+        assert results[0].node.tested_node_unique_id == "model.test.my_model_b"

         # Test select by model name
         results = run_dbt(["test", "--select", "my_model_a"])
         assert len(results) == 1
-        assert results[0].node.model == "my_model_a"
+        assert results[0].node.tested_node_unique_id == "model.test.my_model_a"

         results = run_dbt(["test", "--select", "my_model_b"])
         assert len(results) == 1
-        assert results[0].node.model == "my_model_b"
+        assert results[0].node.tested_node_unique_id == "model.test.my_model_b"


 class TestUnitTestDuplicateTestNamesWithinModel:
diff --git a/tests/functional/unit_testing/test_ut_overrides.py b/tests/functional/unit_testing/test_ut_overrides.py
new file mode 100644
index 00000000000..c8102e47ce2
--- /dev/null
+++ b/tests/functional/unit_testing/test_ut_overrides.py
@@ -0,0 +1,69 @@
+import pytest
+from dbt.tests.util import run_dbt
+
+
+my_model_with_macros = """
+SELECT
+{{ current_timestamp() }} as global_current_timestamp,
+{{ dbt.current_timestamp() }} as dbt_current_timestamp,
+{{ dbt.type_int() }} as dbt_type_int,
+{{ my_macro() }} as user_defined_my_macro,
+{{ dbt_utils.generate_surrogate_key() }} as package_defined_macro
+"""
+
+test_my_model_with_macros = """
+unit_tests:
+  - name: test_macro_overrides
+    model: my_model_with_macros
+    overrides:
+      macros:
+        current_timestamp: "'current_timestamp_override'"
+        dbt.type_int: "'dbt_macro_override'"
+        my_macro: "'global_user_defined_macro_override'"
+        dbt_utils.generate_surrogate_key: "'package_macro_override'"
+    given: []
+    expect:
+      rows:
+        - global_current_timestamp: "current_timestamp_override"
+          dbt_current_timestamp: "current_timestamp_override"
+          dbt_type_int: "dbt_macro_override"
+          user_defined_my_macro: "global_user_defined_macro_override"
+          package_defined_macro: "package_macro_override"
+"""
+
+MY_MACRO_SQL = """
+{% macro my_macro() -%}
+    {{ test }}
+{%- endmacro %}
+"""
+
+
+class TestUnitTestingMacroOverrides:
+    @pytest.fixture(scope="class")
+    def packages(self):
+        return {
+            "packages": [
+                {
+                    "package": "dbt-labs/dbt_utils",
+                    "version": "1.1.1",
+                },
+            ]
+        }
+
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "my_model_with_macros.sql": my_model_with_macros,
+            "test_my_model_with_macros.yml": test_my_model_with_macros,
+        }
+
+    @pytest.fixture(scope="class")
+    def macros(self):
+        return {"my_macro.sql": MY_MACRO_SQL}
+
+    def test_macro_overrides(self, project):
+        run_dbt(["deps"])
+
+        # Select by model name
+        results = run_dbt(["test", "--select", "my_model_with_macros"], expect_pass=True)
+        assert len(results) == 1
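The doubled quoting in the `overrides: macros:` block above is easy to misread: the outer double quotes are YAML string quoting, while the inner single quotes are what make the substituted value a SQL string literal rather than a bare identifier. A minimal sketch of the effect (illustrative, not part of the diff):

    # The override value replaces the macro call's rendered output verbatim,
    # so producing a SQL string literal requires the value to carry its own
    # single quotes.
    override_value = "'dbt_macro_override'"  # outer quotes: Python/YAML; inner quotes: SQL
    rendered = "select {} as dbt_type_int".format(override_value)
    assert rendered == "select 'dbt_macro_override' as dbt_type_int"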
diff --git a/tests/functional/unit_testing/test_ut_snapshot_dependency.py b/tests/functional/unit_testing/test_ut_snapshot_dependency.py
new file mode 100644
index 00000000000..f1b20d3e554
--- /dev/null
+++ b/tests/functional/unit_testing/test_ut_snapshot_dependency.py
@@ -0,0 +1,163 @@
+import pytest
+from dbt.tests.util import run_dbt
+from dbt.contracts.results import RunStatus, TestStatus
+
+raw_customers_csv = """id,first_name,last_name,email,gender,ip_address,updated_at
+1,'Judith','Kennedy','(not provided)','Female','54.60.24.128','2015-12-24 12:19:28'
+2,'Arthur','Kelly','(not provided)','Male','62.56.24.215','2015-10-28 16:22:15'
+3,'Rachel','Moreno','rmoreno2@msu.edu','Female','31.222.249.23','2016-04-05 02:05:30'
+4,'Ralph','Turner','rturner3@hp.com','Male','157.83.76.114','2016-08-08 00:06:51'
+5,'Laura','Gonzales','lgonzales4@howstuffworks.com','Female','30.54.105.168','2016-09-01 08:25:38'
+6,'Katherine','Lopez','klopez5@yahoo.co.jp','Female','169.138.46.89','2016-08-30 18:52:11'
+7,'Jeremy','Hamilton','jhamilton6@mozilla.org','Male','231.189.13.133','2016-07-17 02:09:46'
+"""
+
+top_level_domains_csv = """id,domain
+3,'msu.edu'
+4,'hp.com'
+5,'howstuffworks.com'
+6,'yahoo.co.jp'
+7,'mozilla.org'
+"""
+
+snapshots_users__snapshot_sql = """
+{% snapshot snapshot_users %}
+
+    {{
+        config(
+            target_database=var('target_database', database),
+            target_schema=schema,
+            unique_key='id || ' ~ "'-'" ~ ' || first_name',
+            strategy='check',
+            check_cols=['email'],
+        )
+    }}
+    select *, split_part(email, '@', 2) as domain from {{target.database}}.{{schema}}.raw_customers
+
+{% endsnapshot %}
+"""
+
+unit_test_yml = """
+sources:
+  - name: seed_sources
+    schema: "{{ target.schema }}"
+    tables:
+      - name: top_level_domains
+        columns:
+          - name: id
+          - name: domain
+
+unit_tests:
+  - name: test_is_valid_email_address
+    model: customers
+    given:
+      - input: ref('snapshot_users')
+        rows:
+          - {id: 1, email: cool@example.com, domain: example.com}
+          - {id: 2, email: cool@unknown.com, domain: unknown.com}
+          - {id: 3, email: badgmail.com, domain: gmailcom}
+          - {id: 4, email: missingdot@gmailcom, domain: gmailcom}
+      - input: source('seed_sources', 'top_level_domains')
+        rows:
+          - {domain: example.com}
+          - {domain: gmail.com}
+    expect:
+      rows:
+        - {id: 1, is_valid_email_address: true}
+        - {id: 2, is_valid_email_address: false}
+        - {id: 3, is_valid_email_address: false}
+        - {id: 4, is_valid_email_address: false}
+
+  - name: fail_is_valid_email_address
+    model: customers
+    given:
+      - input: ref('snapshot_users')
+        rows:
+          - {id: 1, email: cool@example.com, domain: example.com}
+      - input: source('seed_sources', 'top_level_domains')
+        rows:
+          - {domain: example.com}
+          - {domain: gmail.com}
+    expect:
+      rows:
+        - {id: 1, is_valid_email_address: false}
+"""
+
+customers_sql = """
+with snapshot_users as (
+select * from {{ ref('snapshot_users') }}
+),
+
+top_level_domains as (
+select * from {{ source('seed_sources', 'top_level_domains') }}
+),
+matched_values as (
+    select
+        snapshot_users.*,
+        case when exists (
+            select 1 from top_level_domains
+            where top_level_domains.domain = snapshot_users.domain
+        ) then true else false end as is_valid_email_address
+    from
+        snapshot_users
+)
+
+select * from matched_values
+"""
+
+
+class TestUnitTestSnapshotDependency:
+    @pytest.fixture(scope="class")
+    def seeds(self):
+        return {
+            "raw_customers.csv": raw_customers_csv,
+            "top_level_domains.csv": top_level_domains_csv,
+        }
+
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "customers.sql": customers_sql,
+            "unit_tests.yml": unit_test_yml,
+        }
+
+    @pytest.fixture(scope="class")
+    def snapshots(self):
+        return {
+            "snapshot_users.sql": snapshots_users__snapshot_sql,
+        }
+
+    def test_snapshot_dependency(self, project):
+        seed_results = run_dbt(["seed"])
+        assert len(seed_results) == 2
+        snapshot_results = run_dbt(["snapshot"])
+        assert len(snapshot_results) == 1
+        model_results = run_dbt(["run"])
+        assert len(model_results) == 1
+
+        # test passing unit test
+        results = run_dbt(["test", "--select", "test_name:test_is_valid_email_address"])
+        assert len(results) == 1
+
+        # test failing unit test
+        results = run_dbt(
+            ["test", "--select", "test_name:fail_is_valid_email_address"], expect_pass=False
+        )
+        assert len(results) == 1
+        assert results[0].status == TestStatus.Fail
+
+        # test all with build
+        results = run_dbt(["build"], expect_pass=False)
+
+        for result in results:
+            if result.node.unique_id == "unit_test.test.customers.fail_is_valid_email_address":
+                # This will always fail, regardless of order executed
+                assert result.status == TestStatus.Fail
+            elif result.node.unique_id == "unit_test.test.customers.test_is_valid_email_address":
+                # there's no guarantee that the order of the results will be the same. If the
+                # failed test runs first this one gets skipped. If this runs first it passes.
+                assert result.status in [TestStatus.Pass, TestStatus.Skipped]
+            elif result.node.unique_id == "model.test.customers":
+                # This is always skipped because one test always fails
+                assert result.status == RunStatus.Skipped
+        assert len(results) == 6
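The `unique_key` in the snapshot config above mixes Jinja string concatenation (`~`) with SQL quoting, which is worth unpacking. A sketch of what it renders to (illustrative, not part of the diff):

    # 'id || ' ~ "'-'" ~ ' || first_name' concatenates three strings, yielding
    # the composite-key SQL expression the snapshot's check strategy keys on.
    rendered_unique_key = "id || " + "'-'" + " || first_name"
    assert rendered_unique_key == "id || '-' || first_name"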
diff --git a/tests/functional/unit_testing/test_ut_sources.py b/tests/functional/unit_testing/test_ut_sources.py
index 791217a39ff..2779d698790 100644
--- a/tests/functional/unit_testing/test_ut_sources.py
+++ b/tests/functional/unit_testing/test_ut_sources.py
@@ -98,6 +98,6 @@ def test_source_input(self, project):
         for result in results:
             if result.node.unique_id == "model.test.customers":
                 assert result.status == RunStatus.Skipped
-            elif result.node.unique_id == "model.test.customers":
+            elif result.node.unique_id == "unit_test.test.customers.fail_test_customers":
                 assert result.status == TestStatus.Fail
         assert len(results) == 6
diff --git a/tests/unit/test_cli_flags.py b/tests/unit/cli/test_flags.py
similarity index 96%
rename from tests/unit/test_cli_flags.py
rename to tests/unit/cli/test_flags.py
index dcc8f800659..6bf9d692e0e 100644
--- a/tests/unit/test_cli_flags.py
+++ b/tests/unit/cli/test_flags.py
@@ -18,7 +18,7 @@ class TestFlags:
     def make_dbt_context(
         self, context_name: str, args: List[str], parent: Optional[click.Context] = None
     ) -> click.Context:
-        ctx = cli.make_context(context_name, args, parent)
+        ctx = cli.make_context(context_name, args.copy(), parent)
         return ctx

     @pytest.fixture(scope="class")
@@ -29,6 +29,13 @@ def run_context(self) -> click.Context:
     def project_flags(self) -> ProjectFlags:
         return ProjectFlags()

+    def test_cli_args_unmodified(self):
+        args = ["--target", "my_target"]
+        args_before = args.copy()
+        self.make_dbt_context("context", args)
+
+        assert args == args_before
+
     def test_which(self, run_context):
         flags = Flags(run_context)
         assert flags.WHICH == "run"
@@ -105,6 +112,13 @@ def test_anonymous_usage_state(
         flags = Flags(run_context)
         assert flags.SEND_ANONYMOUS_USAGE_STATS == expected_anonymous_usage_stats
+    def test_resource_types(self, monkeypatch):
+        monkeypatch.setenv("DBT_RESOURCE_TYPES", "model")
+        build_context = self.make_dbt_context("build", ["build"])
+        build_context.params["resource_types"] = ("unit_test",)
+        flags = Flags(build_context)
+        assert flags.resource_types == ("unit_test",)
+
     def test_empty_project_flags_uses_default(self, run_context, project_flags):
         flags = Flags(run_context, project_flags)
         assert flags.USE_COLORS == run_context.params["use_colors"]
@@ -394,10 +408,12 @@ def test_from_dict__build(self):
         args_dict = {
             "print": True,
             "state": "some/path",
+            "defer_state": None,
         }
         result = self._create_flags_from_dict(Command.BUILD, args_dict)
         assert result.print is True
         assert "some/path" in str(result.state)
+        assert result.defer_state is None

     def test_from_dict__seed(self):
         args_dict = {"use_colors": False, "exclude": ["model_three"]}
diff --git a/tests/unit/test_cli.py b/tests/unit/cli/test_main.py
similarity index 100%
rename from tests/unit/test_cli.py
rename to tests/unit/cli/test_main.py
diff --git a/tests/unit/config/__init__.py b/tests/unit/config/__init__.py
new file mode 100644
index 00000000000..073cf3d6499
--- /dev/null
+++ b/tests/unit/config/__init__.py
@@ -0,0 +1,279 @@
+from contextlib import contextmanager
+import os
+import shutil
+import tempfile
+import unittest
+from argparse import Namespace
+
+
+import yaml
+
+import dbt.config
+import dbt.exceptions
+from dbt import flags
+from dbt.constants import PACKAGES_FILE_NAME
+
+
+from dbt.flags import set_from_args
+
+from tests.unit.utils import normalize
+
+INITIAL_ROOT = os.getcwd()
+
+
+@contextmanager
+def temp_cd(path):
+    current_path = os.getcwd()
+    os.chdir(path)
+    try:
+        yield
+    finally:
+        os.chdir(current_path)
+
+
+@contextmanager
+def raises_nothing():
+    yield
+
+
+def empty_profile_renderer():
+    return dbt.config.renderer.ProfileRenderer({})
+
+
+def empty_project_renderer():
+    return dbt.config.renderer.DbtProjectYamlRenderer()
+
+
+model_config = {
+    "my_package_name": {
+        "enabled": True,
+        "adwords": {
+            "adwords_ads": {"materialized": "table", "enabled": True, "schema": "analytics"}
+        },
+        "snowplow": {
+            "snowplow_sessions": {
+                "sort": "timestamp",
+                "materialized": "incremental",
+                "dist": "user_id",
+                "unique_key": "id",
+            },
+            "base": {
+                "snowplow_events": {
+                    "sort": ["timestamp", "userid"],
+                    "materialized": "table",
+                    "sort_type": "interleaved",
+                    "dist": "userid",
+                }
+            },
+        },
+    }
+}
+
+model_fqns = frozenset(
+    (
+        ("my_package_name", "snowplow", "snowplow_sessions"),
+        ("my_package_name", "snowplow", "base", "snowplow_events"),
+        ("my_package_name", "adwords", "adwords_ads"),
+    )
+)
+
+
+class Args:
+    def __init__(
+        self,
+        profiles_dir=None,
+        threads=None,
+        profile=None,
+        cli_vars=None,
+        version_check=None,
+        project_dir=None,
+        target=None,
+    ):
+        self.profile = profile
+        self.threads = threads
+        self.target = target
+        if profiles_dir is not None:
+            self.profiles_dir = profiles_dir
+            flags.PROFILES_DIR = profiles_dir
+        if cli_vars is not None:
+            self.vars = cli_vars
+        if version_check is not None:
+            self.version_check = version_check
+        if project_dir is not None:
+            self.project_dir = project_dir
+
+
+class BaseConfigTest(unittest.TestCase):
+    """Base class that writes a temporary dbt project and profile to disk in
+    setUp, and removes them again in tearDown.
+    """
+    def setUp(self):
+        # Write project
+        self.project_dir = normalize(tempfile.mkdtemp())
+        self.default_project_data = {
+            "version": "0.0.1",
+            "name": "my_test_project",
+            "profile": "default",
+        }
+        self.write_project(self.default_project_data)
+
+        # Write profile
+        self.profiles_dir = normalize(tempfile.mkdtemp())
+        self.default_profile_data = {
+            "default": {
+                "outputs": {
+                    "postgres": {
+                        "type": "postgres",
+                        "host": "postgres-db-hostname",
+                        "port": 5555,
+                        "user": "db_user",
+                        "pass": "db_pass",
+                        "dbname": "postgres-db-name",
+                        "schema": "postgres-schema",
+                        "threads": 7,
+                    },
+                    "with-vars": {
+                        "type": "{{ env_var('env_value_type') }}",
+                        "host": "{{ env_var('env_value_host') }}",
+                        "port": "{{ env_var('env_value_port') | as_number }}",
+                        "user": "{{ env_var('env_value_user') }}",
+                        "pass": "{{ env_var('env_value_pass') }}",
+                        "dbname": "{{ env_var('env_value_dbname') }}",
+                        "schema": "{{ env_var('env_value_schema') }}",
+                    },
+                    "cli-and-env-vars": {
+                        "type": "{{ env_var('env_value_type') }}",
+                        "host": "{{ var('cli_value_host') }}",
+                        "port": "{{ env_var('env_value_port') | as_number }}",
+                        "user": "{{ env_var('env_value_user') }}",
+                        "pass": "{{ env_var('env_value_pass') }}",
+                        "dbname": "{{ env_var('env_value_dbname') }}",
+                        "schema": "{{ env_var('env_value_schema') }}",
+                    },
+                },
+                "target": "postgres",
+            },
+            "other": {
+                "outputs": {
+                    "other-postgres": {
+                        "type": "postgres",
+                        "host": "other-postgres-db-hostname",
+                        "port": 4444,
+                        "user": "other_db_user",
+                        "pass": "other_db_pass",
+                        "dbname": "other-postgres-db-name",
+                        "schema": "other-postgres-schema",
+                        "threads": 2,
+                    }
+                },
+                "target": "other-postgres",
+            },
+            "empty_profile_data": {},
+        }
+        self.write_profile(self.default_profile_data)
+
+        self.args = Namespace(
+            profiles_dir=self.profiles_dir,
+            cli_vars={},
+            version_check=True,
+            project_dir=self.project_dir,
+            target=None,
+            threads=None,
+            profile=None,
+        )
+        set_from_args(self.args, None)
+        self.env_override = {
+            "env_value_type": "postgres",
+            "env_value_host": "env-postgres-host",
+            "env_value_port": "6543",
+            "env_value_user": "env-postgres-user",
+            "env_value_pass": "env-postgres-pass",
+            "env_value_dbname": "env-postgres-dbname",
+            "env_value_schema": "env-postgres-schema",
+            "env_value_profile": "default",
+        }
+
+    def assertRaisesOrReturns(self, exc):
+        if exc is None:
+            return raises_nothing()
+        else:
+            return self.assertRaises(exc)
+
+    def tearDown(self):
+        try:
+            shutil.rmtree(self.project_dir)
+        except EnvironmentError:
+            pass
+        try:
+            shutil.rmtree(self.profiles_dir)
+        except EnvironmentError:
+            pass
+
+    def project_path(self, name):
+        return os.path.join(self.project_dir, name)
+
+    def profile_path(self, name):
+        return os.path.join(self.profiles_dir, name)
+
+    def write_project(self, project_data=None):
+        if project_data is None:
+            project_data = self.project_data
+        with open(self.project_path("dbt_project.yml"), "w") as fp:
+            yaml.dump(project_data, fp)
+
+    def write_packages(self, package_data):
+        with open(self.project_path("packages.yml"), "w") as fp:
+            yaml.dump(package_data, fp)
+
+    def write_profile(self, profile_data=None):
+        if profile_data is None:
+            profile_data = self.profile_data
+        with open(self.profile_path("profiles.yml"), "w") as fp:
+            yaml.dump(profile_data, fp)
+
+    def write_empty_profile(self):
+        with open(self.profile_path("profiles.yml"), "w") as fp:
+            yaml.dump("", fp)
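For orientation, a minimal sketch of how a subclass typically uses this harness (illustrative, not part of the diff): setUp writes dbt_project.yml and profiles.yml to temporary directories, so tests mutate the default dicts and re-write them before loading the config under test.

    class ExampleProfileTest(BaseConfigTest):
        def test_points_at_other_profile(self):
            # Re-point the project at the "other" profile and persist the change
            # before any code reads dbt_project.yml from self.project_dir.
            self.default_project_data["profile"] = "other"
            self.write_project(self.default_project_data)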
+
+
+def project_from_config_norender(
+    cfg, packages=None, project_root="/invalid-root-path", verify_version=False
+):
+    if packages is None:
+        packages = {}
+    partial = dbt.config.project.PartialProject.from_dicts(
+        project_root,
+        project_dict=cfg,
+        packages_dict=packages,
+        selectors_dict={},
+        verify_version=verify_version,
+    )
+    # no rendering: the unrendered project/packages dicts are used as-is
+    partial.project_dict["project-root"] = project_root
+    rendered = dbt.config.project.RenderComponents(
+        project_dict=partial.project_dict,
+        packages_dict=partial.packages_dict,
+        selectors_dict=partial.selectors_dict,
+    )
+    return partial.create_project(rendered)
+
+
+def project_from_config_rendered(
+    cfg,
+    packages=None,
+    project_root="/invalid-root-path",
+    verify_version=False,
+    packages_specified_path=PACKAGES_FILE_NAME,
+):
+    if packages is None:
+        packages = {}
+    partial = dbt.config.project.PartialProject.from_dicts(
+        project_root,
+        project_dict=cfg,
+        packages_dict=packages,
+        selectors_dict={},
+        verify_version=verify_version,
+        packages_specified_path=packages_specified_path,
+    )
+    return partial.render(empty_project_renderer())
diff --git a/tests/unit/config/test_profile.py b/tests/unit/config/test_profile.py
new file mode 100644
index 00000000000..7c53b715ab9
--- /dev/null
+++ b/tests/unit/config/test_profile.py
@@ -0,0 +1,295 @@
+from copy import deepcopy
+
+import os
+from unittest import mock
+import dbt.config
+import dbt.exceptions
+
+from dbt.adapters.postgres import PostgresCredentials
+
+
+from dbt.flags import set_from_args
+from dbt.tests.util import safe_set_invocation_context
+
+
+from tests.unit.config import BaseConfigTest, empty_profile_renderer, project_from_config_norender
+
+
+class TestProfile(BaseConfigTest):
+    def from_raw_profiles(self):
+        renderer = empty_profile_renderer()
+        return dbt.config.Profile.from_raw_profiles(self.default_profile_data, "default", renderer)
+
+    def test_from_raw_profiles(self):
+        profile = self.from_raw_profiles()
+        self.assertEqual(profile.profile_name, "default")
+        self.assertEqual(profile.target_name, "postgres")
+        self.assertEqual(profile.threads, 7)
+        self.assertTrue(isinstance(profile.credentials, PostgresCredentials))
+        self.assertEqual(profile.credentials.type, "postgres")
+        self.assertEqual(profile.credentials.host, "postgres-db-hostname")
+        self.assertEqual(profile.credentials.port, 5555)
+        self.assertEqual(profile.credentials.user, "db_user")
+        self.assertEqual(profile.credentials.password, "db_pass")
+        self.assertEqual(profile.credentials.schema, "postgres-schema")
+        self.assertEqual(profile.credentials.database, "postgres-db-name")
+
+    def test_missing_type(self):
+        del self.default_profile_data["default"]["outputs"]["postgres"]["type"]
+        with self.assertRaises(dbt.exceptions.DbtProfileError) as exc:
+            self.from_raw_profiles()
+        self.assertIn("type", str(exc.exception))
+        self.assertIn("postgres", str(exc.exception))
+        self.assertIn("default", str(exc.exception))
+
+    def test_bad_type(self):
+        self.default_profile_data["default"]["outputs"]["postgres"]["type"] = "invalid"
+        with self.assertRaises(dbt.exceptions.DbtProfileError) as exc:
+            self.from_raw_profiles()
+        self.assertIn("Credentials", str(exc.exception))
+        self.assertIn("postgres", str(exc.exception))
+        self.assertIn("default", str(exc.exception))
+
+    def test_invalid_credentials(self):
+        del self.default_profile_data["default"]["outputs"]["postgres"]["host"]
+        with self.assertRaises(dbt.exceptions.DbtProfileError) as exc:
+            self.from_raw_profiles()
+        self.assertIn("Credentials", str(exc.exception))
+        self.assertIn("postgres", str(exc.exception))
+        self.assertIn("default", str(exc.exception))
+
+    def test_missing_target(self):
profile = self.default_profile_data["default"] + del profile["target"] + profile["outputs"]["default"] = profile["outputs"]["postgres"] + profile = self.from_raw_profiles() + self.assertEqual(profile.profile_name, "default") + self.assertEqual(profile.target_name, "default") + self.assertEqual(profile.credentials.type, "postgres") + + def test_extra_path(self): + self.default_project_data.update( + { + "model-paths": ["models"], + "source-paths": ["other-models"], + } + ) + with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: + project_from_config_norender(self.default_project_data, project_root=self.project_dir) + + self.assertIn("source-paths and model-paths", str(exc.exception)) + self.assertIn("cannot both be defined.", str(exc.exception)) + + def test_profile_invalid_project(self): + renderer = empty_profile_renderer() + with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: + dbt.config.Profile.from_raw_profiles( + self.default_profile_data, "invalid-profile", renderer + ) + + self.assertEqual(exc.exception.result_type, "invalid_project") + self.assertIn("Could not find", str(exc.exception)) + self.assertIn("invalid-profile", str(exc.exception)) + + def test_profile_invalid_target(self): + renderer = empty_profile_renderer() + with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: + dbt.config.Profile.from_raw_profiles( + self.default_profile_data, "default", renderer, target_override="nope" + ) + + self.assertIn("nope", str(exc.exception)) + self.assertIn("- postgres", str(exc.exception)) + self.assertIn("- with-vars", str(exc.exception)) + + def test_no_outputs(self): + renderer = empty_profile_renderer() + + with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: + dbt.config.Profile.from_raw_profiles( + {"some-profile": {"target": "blah"}}, "some-profile", renderer + ) + self.assertIn("outputs not specified", str(exc.exception)) + self.assertIn("some-profile", str(exc.exception)) + + def test_neq(self): + profile = self.from_raw_profiles() + self.assertNotEqual(profile, object()) + + def test_eq(self): + renderer = empty_profile_renderer() + profile = dbt.config.Profile.from_raw_profiles( + deepcopy(self.default_profile_data), "default", renderer + ) + + other = dbt.config.Profile.from_raw_profiles( + deepcopy(self.default_profile_data), "default", renderer + ) + self.assertEqual(profile, other) + + def test_invalid_env_vars(self): + self.env_override["env_value_port"] = "hello" + with mock.patch.dict(os.environ, self.env_override): + with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: + safe_set_invocation_context() + renderer = empty_profile_renderer() + dbt.config.Profile.from_raw_profile_info( + self.default_profile_data["default"], + "default", + renderer, + target_override="with-vars", + ) + self.assertIn("Could not convert value 'hello' into type 'number'", str(exc.exception)) + + +class TestProfileFile(BaseConfigTest): + def from_raw_profile_info(self, raw_profile=None, profile_name="default", **kwargs): + if raw_profile is None: + raw_profile = self.default_profile_data["default"] + renderer = empty_profile_renderer() + kw = { + "raw_profile": raw_profile, + "profile_name": profile_name, + "renderer": renderer, + } + kw.update(kwargs) + return dbt.config.Profile.from_raw_profile_info(**kw) + + def from_args(self, project_profile_name="default", **kwargs): + kw = { + "project_profile_name": project_profile_name, + "renderer": empty_profile_renderer(), + "threads_override": self.args.threads, + "target_override": 
self.args.target, + "profile_name_override": self.args.profile, + } + kw.update(kwargs) + return dbt.config.Profile.render(**kw) + + def test_profile_simple(self): + profile = self.from_args() + from_raw = self.from_raw_profile_info() + + self.assertEqual(profile.profile_name, "default") + self.assertEqual(profile.target_name, "postgres") + self.assertEqual(profile.threads, 7) + self.assertTrue(isinstance(profile.credentials, PostgresCredentials)) + self.assertEqual(profile.credentials.type, "postgres") + self.assertEqual(profile.credentials.host, "postgres-db-hostname") + self.assertEqual(profile.credentials.port, 5555) + self.assertEqual(profile.credentials.user, "db_user") + self.assertEqual(profile.credentials.password, "db_pass") + self.assertEqual(profile.credentials.schema, "postgres-schema") + self.assertEqual(profile.credentials.database, "postgres-db-name") + self.assertEqual(profile, from_raw) + + def test_profile_override(self): + self.args.profile = "other" + self.args.threads = 3 + set_from_args(self.args, None) + profile = self.from_args() + from_raw = self.from_raw_profile_info( + self.default_profile_data["other"], + "other", + threads_override=3, + ) + + self.assertEqual(profile.profile_name, "other") + self.assertEqual(profile.target_name, "other-postgres") + self.assertEqual(profile.threads, 3) + self.assertTrue(isinstance(profile.credentials, PostgresCredentials)) + self.assertEqual(profile.credentials.type, "postgres") + self.assertEqual(profile.credentials.host, "other-postgres-db-hostname") + self.assertEqual(profile.credentials.port, 4444) + self.assertEqual(profile.credentials.user, "other_db_user") + self.assertEqual(profile.credentials.password, "other_db_pass") + self.assertEqual(profile.credentials.schema, "other-postgres-schema") + self.assertEqual(profile.credentials.database, "other-postgres-db-name") + self.assertEqual(profile, from_raw) + + def test_env_vars(self): + self.args.target = "with-vars" + with mock.patch.dict(os.environ, self.env_override): + safe_set_invocation_context() # reset invocation context with new env + profile = self.from_args() + from_raw = self.from_raw_profile_info(target_override="with-vars") + + self.assertEqual(profile.profile_name, "default") + self.assertEqual(profile.target_name, "with-vars") + self.assertEqual(profile.threads, 1) + self.assertEqual(profile.credentials.type, "postgres") + self.assertEqual(profile.credentials.host, "env-postgres-host") + self.assertEqual(profile.credentials.port, 6543) + self.assertEqual(profile.credentials.user, "env-postgres-user") + self.assertEqual(profile.credentials.password, "env-postgres-pass") + self.assertEqual(profile, from_raw) + + def test_env_vars_env_target(self): + self.default_profile_data["default"]["target"] = "{{ env_var('env_value_target') }}" + self.write_profile(self.default_profile_data) + self.env_override["env_value_target"] = "with-vars" + with mock.patch.dict(os.environ, self.env_override): + safe_set_invocation_context() # reset invocation context with new env + profile = self.from_args() + from_raw = self.from_raw_profile_info(target_override="with-vars") + + self.assertEqual(profile.profile_name, "default") + self.assertEqual(profile.target_name, "with-vars") + self.assertEqual(profile.threads, 1) + self.assertEqual(profile.credentials.type, "postgres") + self.assertEqual(profile.credentials.host, "env-postgres-host") + self.assertEqual(profile.credentials.port, 6543) + self.assertEqual(profile.credentials.user, "env-postgres-user") + 
self.assertEqual(profile.credentials.password, "env-postgres-pass") + self.assertEqual(profile, from_raw) + + def test_invalid_env_vars(self): + self.env_override["env_value_port"] = "hello" + self.args.target = "with-vars" + with mock.patch.dict(os.environ, self.env_override): + with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: + safe_set_invocation_context() # reset invocation context with new env + self.from_args() + + self.assertIn("Could not convert value 'hello' into type 'number'", str(exc.exception)) + + def test_cli_and_env_vars(self): + self.args.target = "cli-and-env-vars" + self.args.vars = {"cli_value_host": "cli-postgres-host"} + renderer = dbt.config.renderer.ProfileRenderer({"cli_value_host": "cli-postgres-host"}) + with mock.patch.dict(os.environ, self.env_override): + safe_set_invocation_context() # reset invocation context with new env + profile = self.from_args(renderer=renderer) + from_raw = self.from_raw_profile_info( + target_override="cli-and-env-vars", + renderer=renderer, + ) + + self.assertEqual(profile.profile_name, "default") + self.assertEqual(profile.target_name, "cli-and-env-vars") + self.assertEqual(profile.threads, 1) + self.assertEqual(profile.credentials.type, "postgres") + self.assertEqual(profile.credentials.host, "cli-postgres-host") + self.assertEqual(profile.credentials.port, 6543) + self.assertEqual(profile.credentials.user, "env-postgres-user") + self.assertEqual(profile.credentials.password, "env-postgres-pass") + self.assertEqual(profile, from_raw) + + def test_no_profile(self): + with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: + self.from_args(project_profile_name=None) + self.assertIn("no profile was specified", str(exc.exception)) + + def test_empty_profile(self): + self.write_empty_profile() + with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: + self.from_args() + self.assertIn("profiles.yml is empty", str(exc.exception)) + + def test_profile_with_empty_profile_data(self): + renderer = empty_profile_renderer() + with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: + dbt.config.Profile.from_raw_profiles( + self.default_profile_data, "empty_profile_data", renderer + ) + self.assertIn("Profile empty_profile_data in profiles.yml is empty", str(exc.exception)) diff --git a/tests/unit/config/test_project.py b/tests/unit/config/test_project.py new file mode 100644 index 00000000000..46dbda6b909 --- /dev/null +++ b/tests/unit/config/test_project.py @@ -0,0 +1,501 @@ +from copy import deepcopy +import json +import os +import unittest +import pytest + +from unittest import mock + +import dbt.config +from dbt.constants import DEPENDENCIES_FILE_NAME +import dbt.exceptions +from dbt.adapters.factory import load_plugin +from dbt.adapters.contracts.connection import QueryComment, DEFAULT_QUERY_COMMENT +from dbt.contracts.project import PackageConfig, LocalPackage, GitPackage +from dbt.node_types import NodeType +from dbt_common.semver import VersionSpecifier + +from dbt.flags import set_from_args +from dbt.tests.util import safe_set_invocation_context + + +from tests.unit.config import ( + BaseConfigTest, + project_from_config_norender, + empty_project_renderer, + project_from_config_rendered, +) + + +class TestProject(BaseConfigTest): + def test_defaults(self): + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project.project_name, "my_test_project") + self.assertEqual(project.version, "0.0.1") + 
self.assertEqual(project.profile_name, "default") + self.assertEqual(project.project_root, self.project_dir) + self.assertEqual(project.model_paths, ["models"]) + self.assertEqual(project.macro_paths, ["macros"]) + self.assertEqual(project.seed_paths, ["seeds"]) + self.assertEqual(project.test_paths, ["tests"]) + self.assertEqual(project.analysis_paths, ["analyses"]) + self.assertEqual( + set(project.docs_paths), set(["models", "seeds", "snapshots", "analyses", "macros"]) + ) + self.assertEqual(project.asset_paths, []) + self.assertEqual(project.target_path, "target") + self.assertEqual(project.clean_targets, ["target"]) + self.assertEqual(project.log_path, "logs") + self.assertEqual(project.packages_install_path, "dbt_packages") + self.assertEqual(project.quoting, {}) + self.assertEqual(project.models, {}) + self.assertEqual(project.on_run_start, []) + self.assertEqual(project.on_run_end, []) + self.assertEqual(project.seeds, {}) + self.assertEqual(project.dbt_version, [VersionSpecifier.from_version_string(">=0.0.0")]) + self.assertEqual(project.packages, PackageConfig(packages=[])) + # just make sure str() doesn't crash anything, that's always + # embarrassing + str(project) + + def test_eq(self): + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + other = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project, other) + + def test_neq(self): + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertNotEqual(project, object()) + + def test_implicit_overrides(self): + self.default_project_data.update( + { + "model-paths": ["other-models"], + } + ) + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual( + set(project.docs_paths), + set(["other-models", "seeds", "snapshots", "analyses", "macros"]), + ) + + def test_hashed_name(self): + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project.hashed_name(), "754cd47eac1d6f50a5f7cd399ec43da4") + + def test_all_overrides(self): + # log-path is not tested because it is set exclusively from flags, not cfg + self.default_project_data.update( + { + "model-paths": ["other-models"], + "macro-paths": ["other-macros"], + "seed-paths": ["other-seeds"], + "test-paths": ["other-tests"], + "analysis-paths": ["other-analyses"], + "docs-paths": ["docs"], + "asset-paths": ["other-assets"], + "clean-targets": ["another-target"], + "packages-install-path": "other-dbt_packages", + "quoting": {"identifier": False}, + "models": { + "pre-hook": ["{{ logging.log_model_start_event() }}"], + "post-hook": ["{{ logging.log_model_end_event() }}"], + "my_test_project": { + "first": { + "enabled": False, + "sub": { + "enabled": True, + }, + }, + "second": { + "materialized": "table", + }, + }, + "third_party": { + "third": { + "materialized": "view", + }, + }, + }, + "on-run-start": [ + "{{ logging.log_run_start_event() }}", + ], + "on-run-end": [ + "{{ logging.log_run_end_event() }}", + ], + "seeds": { + "my_test_project": { + "enabled": True, + "schema": "seed_data", + "post-hook": "grant select on {{ this }} to bi_user", + }, + }, + "data_tests": {"my_test_project": {"fail_calc": "sum(failures)"}}, + "require-dbt-version": ">=0.1.0", + } + ) + packages = { + "packages": [ + { + "local": "foo", + }, + {"git": "git@example.com:dbt-labs/dbt-utils.git", 
"revision": "test-rev"}, + ], + } + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir, packages=packages + ) + self.assertEqual(project.project_name, "my_test_project") + self.assertEqual(project.version, "0.0.1") + self.assertEqual(project.profile_name, "default") + self.assertEqual(project.model_paths, ["other-models"]) + self.assertEqual(project.macro_paths, ["other-macros"]) + self.assertEqual(project.seed_paths, ["other-seeds"]) + self.assertEqual(project.test_paths, ["other-tests"]) + self.assertEqual(project.analysis_paths, ["other-analyses"]) + self.assertEqual(project.docs_paths, ["docs"]) + self.assertEqual(project.asset_paths, ["other-assets"]) + self.assertEqual(project.clean_targets, ["another-target"]) + self.assertEqual(project.packages_install_path, "other-dbt_packages") + self.assertEqual(project.quoting, {"identifier": False}) + self.assertEqual( + project.models, + { + "pre-hook": ["{{ logging.log_model_start_event() }}"], + "post-hook": ["{{ logging.log_model_end_event() }}"], + "my_test_project": { + "first": { + "enabled": False, + "sub": { + "enabled": True, + }, + }, + "second": { + "materialized": "table", + }, + }, + "third_party": { + "third": { + "materialized": "view", + }, + }, + }, + ) + self.assertEqual(project.on_run_start, ["{{ logging.log_run_start_event() }}"]) + self.assertEqual(project.on_run_end, ["{{ logging.log_run_end_event() }}"]) + self.assertEqual( + project.seeds, + { + "my_test_project": { + "enabled": True, + "schema": "seed_data", + "post-hook": "grant select on {{ this }} to bi_user", + }, + }, + ) + self.assertEqual( + project.data_tests, + { + "my_test_project": {"fail_calc": "sum(failures)"}, + }, + ) + self.assertEqual(project.dbt_version, [VersionSpecifier.from_version_string(">=0.1.0")]) + self.assertEqual( + project.packages, + PackageConfig( + packages=[ + LocalPackage(local="foo", unrendered={"local": "foo"}), + GitPackage( + git="git@example.com:dbt-labs/dbt-utils.git", + revision="test-rev", + unrendered={ + "git": "git@example.com:dbt-labs/dbt-utils.git", + "revision": "test-rev", + }, + ), + ] + ), + ) + str(project) # this does the equivalent of project.to_project_config(with_packages=True) + json.dumps(project.to_project_config()) + + def test_string_run_hooks(self): + self.default_project_data.update( + { + "on-run-start": "{{ logging.log_run_start_event() }}", + "on-run-end": "{{ logging.log_run_end_event() }}", + } + ) + project = project_from_config_rendered(self.default_project_data) + self.assertEqual(project.on_run_start, ["{{ logging.log_run_start_event() }}"]) + self.assertEqual(project.on_run_end, ["{{ logging.log_run_end_event() }}"]) + + def test_invalid_project_name(self): + self.default_project_data["name"] = "invalid-project-name" + with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: + project_from_config_norender(self.default_project_data, project_root=self.project_dir) + + self.assertIn("invalid-project-name", str(exc.exception)) + + def test_no_project(self): + os.remove(os.path.join(self.project_dir, "dbt_project.yml")) + renderer = empty_project_renderer() + with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: + dbt.config.Project.from_project_root(self.project_dir, renderer) + + self.assertIn("No dbt_project.yml", str(exc.exception)) + + def test_invalid_version(self): + self.default_project_data["require-dbt-version"] = "hello!" 
+ with self.assertRaises(dbt.exceptions.DbtProjectError): + project_from_config_norender(self.default_project_data, project_root=self.project_dir) + + def test_unsupported_version(self): + self.default_project_data["require-dbt-version"] = ">99999.0.0" + # allowed, because the RuntimeConfig checks, not the Project itself + project_from_config_norender(self.default_project_data, project_root=self.project_dir) + + def test_none_values(self): + self.default_project_data.update( + { + "models": None, + "seeds": None, + "on-run-end": None, + "on-run-start": None, + } + ) + project = project_from_config_rendered(self.default_project_data) + self.assertEqual(project.models, {}) + self.assertEqual(project.on_run_start, []) + self.assertEqual(project.on_run_end, []) + self.assertEqual(project.seeds, {}) + + def test_nested_none_values(self): + self.default_project_data.update( + { + "models": {"vars": None, "pre-hook": None, "post-hook": None}, + "seeds": {"vars": None, "pre-hook": None, "post-hook": None, "column_types": None}, + } + ) + project = project_from_config_rendered(self.default_project_data) + self.assertEqual(project.models, {"vars": {}, "pre-hook": [], "post-hook": []}) + self.assertEqual( + project.seeds, {"vars": {}, "pre-hook": [], "post-hook": [], "column_types": {}} + ) + + @pytest.mark.skipif(os.name == "nt", reason="crashes CI for Windows") + def test_cycle(self): + models = {} + models["models"] = models + self.default_project_data.update( + { + "models": models, + } + ) + with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: + project_from_config_rendered(self.default_project_data) + + assert "Cycle detected" in str(exc.exception) + + def test_query_comment_disabled(self): + self.default_project_data.update( + { + "query-comment": None, + } + ) + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project.query_comment.comment, "") + self.assertEqual(project.query_comment.append, False) + + self.default_project_data.update( + { + "query-comment": "", + } + ) + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project.query_comment.comment, "") + self.assertEqual(project.query_comment.append, False) + + def test_default_query_comment(self): + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project.query_comment, QueryComment()) + + def test_default_query_comment_append(self): + self.default_project_data.update( + { + "query-comment": {"append": True}, + } + ) + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project.query_comment.comment, DEFAULT_QUERY_COMMENT) + self.assertEqual(project.query_comment.append, True) + + def test_custom_query_comment_append(self): + self.default_project_data.update( + { + "query-comment": {"comment": "run by user test", "append": True}, + } + ) + project = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project.query_comment.comment, "run by user test") + self.assertEqual(project.query_comment.append, True) + + def test_packages_from_dependencies(self): + packages = { + "packages": [ + { + "git": "{{ env_var('some_package') }}", + "warn-unpinned": True, + } + ], + } + + project = project_from_config_rendered( + self.default_project_data, packages, 
packages_specified_path=DEPENDENCIES_FILE_NAME + ) + git_package = project.packages.packages[0] + # packages did not render because packages_specified_path=DEPENDENCIES_FILE_NAME + assert git_package.git == "{{ env_var('some_package') }}" + + +class TestProjectFile(BaseConfigTest): + def test_from_project_root(self): + renderer = empty_project_renderer() + project = dbt.config.Project.from_project_root(self.project_dir, renderer) + from_config = project_from_config_norender( + self.default_project_data, project_root=self.project_dir + ) + self.assertEqual(project, from_config) + self.assertEqual(project.version, "0.0.1") + self.assertEqual(project.project_name, "my_test_project") + + def test_with_invalid_package(self): + renderer = empty_project_renderer() + self.write_packages({"invalid": ["not a package of any kind"]}) + with self.assertRaises(dbt.exceptions.DbtProjectError): + dbt.config.Project.from_project_root(self.project_dir, renderer) + + +class TestVariableProjectFile(BaseConfigTest): + def setUp(self): + super().setUp() + self.default_project_data["version"] = "{{ var('cli_version') }}" + self.default_project_data["name"] = "blah" + self.default_project_data["profile"] = "{{ env_var('env_value_profile') }}" + self.write_project(self.default_project_data) + + def test_cli_and_env_vars(self): + renderer = dbt.config.renderer.DbtProjectYamlRenderer(None, {"cli_version": "0.1.2"}) + with mock.patch.dict(os.environ, self.env_override): + safe_set_invocation_context() # reset invocation context with new env + project = dbt.config.Project.from_project_root( + self.project_dir, + renderer, + ) + + self.assertEqual(renderer.ctx_obj.env_vars, {"env_value_profile": "default"}) + self.assertEqual(project.version, "0.1.2") + self.assertEqual(project.project_name, "blah") + self.assertEqual(project.profile_name, "default") + + +class TestVarLookups(unittest.TestCase): + def setUp(self): + self.initial_src_vars = { + # globals + "foo": 123, + "bar": "hello", + # project-scoped + "my_project": { + "bar": "goodbye", + "baz": True, + }, + "other_project": { + "foo": 456, + }, + } + self.src_vars = deepcopy(self.initial_src_vars) + self.dst = {"vars": deepcopy(self.initial_src_vars)} + + self.projects = ["my_project", "other_project", "third_project"] + load_plugin("postgres") + self.local_var_search = mock.MagicMock( + fqn=["my_project", "my_model"], resource_type=NodeType.Model, package_name="my_project" + ) + self.other_var_search = mock.MagicMock( + fqn=["other_project", "model"], + resource_type=NodeType.Model, + package_name="other_project", + ) + self.third_var_search = mock.MagicMock( + fqn=["third_project", "third_model"], + resource_type=NodeType.Model, + package_name="third_project", + ) + + def test_lookups(self): + vars_provider = dbt.config.project.VarProvider(self.initial_src_vars) + + expected = [ + (self.local_var_search, "foo", 123), + (self.other_var_search, "foo", 456), + (self.third_var_search, "foo", 123), + (self.local_var_search, "bar", "goodbye"), + (self.other_var_search, "bar", "hello"), + (self.third_var_search, "bar", "hello"), + (self.local_var_search, "baz", True), + (self.other_var_search, "baz", None), + (self.third_var_search, "baz", None), + ] + for node, key, expected_value in expected: + value = vars_provider.vars_for(node, "postgres").get(key) + assert value == expected_value + + +class TestMultipleProjectFlags(BaseConfigTest): + def setUp(self): + super().setUp() + + self.default_project_data.update( + { + "flags": { + "send_anonymous_usage_data": False, + } 
+ } + ) + self.write_project(self.default_project_data) + + self.default_profile_data.update( + { + "config": { + "send_anonymous_usage_data": False, + } + } + ) + self.write_profile(self.default_profile_data) + + def test_setting_multiple_flags(self): + with pytest.raises(dbt.exceptions.DbtProjectError): + set_from_args(self.args, None) diff --git a/tests/unit/config/test_runtime.py b/tests/unit/config/test_runtime.py new file mode 100644 index 00000000000..84220d53bbf --- /dev/null +++ b/tests/unit/config/test_runtime.py @@ -0,0 +1,329 @@ +import os +from argparse import Namespace + +from unittest import mock + +import dbt.config +import dbt.exceptions +from dbt import tracking +from dbt.contracts.project import PackageConfig + +from dbt.flags import set_from_args +from dbt.tests.util import safe_set_invocation_context + +from tests.unit.config import ( + BaseConfigTest, + empty_profile_renderer, + project_from_config_norender, + temp_cd, +) + + +class TestRuntimeConfig(BaseConfigTest): + def get_project(self): + return project_from_config_norender( + self.default_project_data, + project_root=self.project_dir, + verify_version=self.args.version_check, + ) + + def get_profile(self): + renderer = empty_profile_renderer() + return dbt.config.Profile.from_raw_profiles( + self.default_profile_data, self.default_project_data["profile"], renderer + ) + + def from_parts(self, exc=None): + with self.assertRaisesOrReturns(exc) as err: + project = self.get_project() + profile = self.get_profile() + + result = dbt.config.RuntimeConfig.from_parts(project, profile, self.args) + + if exc is None: + return result + else: + return err + + def test_from_parts(self): + project = self.get_project() + profile = self.get_profile() + config = dbt.config.RuntimeConfig.from_parts(project, profile, self.args) + + self.assertEqual(config.cli_vars, {}) + self.assertEqual(config.to_profile_info(), profile.to_profile_info()) + # we should have the default quoting set in the full config, but not in + # the project + # TODO(jeb): Adapters must assert that quoting is populated? 
+ expected_project = project.to_project_config() + self.assertEqual(expected_project["quoting"], {}) + + expected_project["quoting"] = { + "database": True, + "identifier": True, + "schema": True, + } + self.assertEqual(config.to_project_config(), expected_project) + + def test_str(self): + project = self.get_project() + profile = self.get_profile() + config = dbt.config.RuntimeConfig.from_parts(project, profile, {}) + + # to make sure nothing terrible happens + str(config) + + def test_supported_version(self): + self.default_project_data["require-dbt-version"] = ">0.0.0" + conf = self.from_parts() + self.assertEqual(set(x.to_version_string() for x in conf.dbt_version), {">0.0.0"}) + + def test_unsupported_version(self): + self.default_project_data["require-dbt-version"] = ">99999.0.0" + raised = self.from_parts(dbt.exceptions.DbtProjectError) + self.assertIn("This version of dbt is not supported", str(raised.exception)) + + def test_unsupported_version_no_check(self): + self.default_project_data["require-dbt-version"] = ">99999.0.0" + self.args.version_check = False + set_from_args(self.args, None) + conf = self.from_parts() + self.assertEqual(set(x.to_version_string() for x in conf.dbt_version), {">99999.0.0"}) + + def test_supported_version_range(self): + self.default_project_data["require-dbt-version"] = [">0.0.0", "<=99999.0.0"] + conf = self.from_parts() + self.assertEqual( + set(x.to_version_string() for x in conf.dbt_version), {">0.0.0", "<=99999.0.0"} + ) + + def test_unsupported_version_range(self): + self.default_project_data["require-dbt-version"] = [">0.0.0", "<=0.0.1"] + raised = self.from_parts(dbt.exceptions.DbtProjectError) + self.assertIn("This version of dbt is not supported", str(raised.exception)) + + def test_unsupported_version_range_bad_config(self): + self.default_project_data["require-dbt-version"] = [">0.0.0", "<=0.0.1"] + self.default_project_data["some-extra-field-not-allowed"] = True + raised = self.from_parts(dbt.exceptions.DbtProjectError) + self.assertIn("This version of dbt is not supported", str(raised.exception)) + + def test_unsupported_version_range_no_check(self): + self.default_project_data["require-dbt-version"] = [">0.0.0", "<=0.0.1"] + self.args.version_check = False + set_from_args(self.args, None) + conf = self.from_parts() + self.assertEqual( + set(x.to_version_string() for x in conf.dbt_version), {">0.0.0", "<=0.0.1"} + ) + + def test_impossible_version_range(self): + self.default_project_data["require-dbt-version"] = [">99999.0.0", "<=0.0.1"] + raised = self.from_parts(dbt.exceptions.DbtProjectError) + self.assertIn( + "The package version requirement can never be satisfied", str(raised.exception) + ) + + def test_unsupported_version_extra_config(self): + self.default_project_data["some-extra-field-not-allowed"] = True + raised = self.from_parts(dbt.exceptions.DbtProjectError) + self.assertIn("Additional properties are not allowed", str(raised.exception)) + + def test_archive_not_allowed(self): + self.default_project_data["archive"] = [ + { + "source_schema": "a", + "target_schema": "b", + "tables": [ + { + "source_table": "seed", + "target_table": "archive_actual", + "updated_at": "updated_at", + "unique_key": """id || '-' || first_name""", + }, + ], + } + ] + with self.assertRaises(dbt.exceptions.DbtProjectError): + self.get_project() + + def test__warn_for_unused_resource_config_paths_empty(self): + project = self.from_parts() + dbt.flags.WARN_ERROR = True + try: + project.warn_for_unused_resource_config_paths( + { + "models": frozenset( + 
( + ("my_test_project", "foo", "bar"), + ("my_test_project", "foo", "baz"), + ) + ) + }, + [], + ) + finally: + dbt.flags.WARN_ERROR = False + + @mock.patch.object(tracking, "active_user") + def test_get_metadata(self, mock_user): + project = self.get_project() + profile = self.get_profile() + config = dbt.config.RuntimeConfig.from_parts(project, profile, self.args) + + mock_user.id = "cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf" + set_from_args(Namespace(SEND_ANONYMOUS_USAGE_STATS=False), None) + + metadata = config.get_metadata() + # ensure user_id and send_anonymous_usage_stats are set correctly + self.assertEqual(metadata.user_id, mock_user.id) + self.assertFalse(metadata.send_anonymous_usage_stats) + + +class TestRuntimeConfigWithConfigs(BaseConfigTest): + def setUp(self): + self.profiles_dir = "/invalid-profiles-path" + self.project_dir = "/invalid-root-path" + super().setUp() + self.default_project_data["project-root"] = self.project_dir + self.default_project_data["models"] = { + "enabled": True, + "my_test_project": { + "foo": { + "materialized": "view", + "bar": { + "materialized": "table", + }, + }, + "baz": { + "materialized": "table", + }, + }, + } + self.used = { + "models": frozenset( + ( + ("my_test_project", "foo", "bar"), + ("my_test_project", "foo", "baz"), + ) + ) + } + + def get_project(self): + return project_from_config_norender( + self.default_project_data, project_root=self.project_dir, verify_version=True + ) + + def get_profile(self): + renderer = empty_profile_renderer() + return dbt.config.Profile.from_raw_profiles( + self.default_profile_data, self.default_project_data["profile"], renderer + ) + + def from_parts(self, exc=None): + with self.assertRaisesOrReturns(exc) as err: + project = self.get_project() + profile = self.get_profile() + + result = dbt.config.RuntimeConfig.from_parts(project, profile, self.args) + + if exc is None: + return result + else: + return err + + def test__warn_for_unused_resource_config_paths(self): + project = self.from_parts() + with mock.patch("dbt.config.runtime.warn_or_error") as warn_or_error_patch: + project.warn_for_unused_resource_config_paths(self.used, []) + warn_or_error_patch.assert_called_once() + event = warn_or_error_patch.call_args[0][0] + assert type(event).__name__ == "UnusedResourceConfigPath" + msg = event.message() + expected_msg = "- models.my_test_project.baz" + assert expected_msg in msg + + +class TestRuntimeConfigFiles(BaseConfigTest): + def test_from_args(self): + with temp_cd(self.project_dir): + config = dbt.config.RuntimeConfig.from_args(self.args) + self.assertEqual(config.version, "0.0.1") + self.assertEqual(config.profile_name, "default") + # on osx, for example, these are not necessarily equal due to /private + self.assertTrue(os.path.samefile(config.project_root, self.project_dir)) + self.assertEqual(config.model_paths, ["models"]) + self.assertEqual(config.macro_paths, ["macros"]) + self.assertEqual(config.seed_paths, ["seeds"]) + self.assertEqual(config.test_paths, ["tests"]) + self.assertEqual(config.analysis_paths, ["analyses"]) + self.assertEqual( + set(config.docs_paths), set(["models", "seeds", "snapshots", "analyses", "macros"]) + ) + self.assertEqual(config.asset_paths, []) + self.assertEqual(config.target_path, "target") + self.assertEqual(config.clean_targets, ["target"]) + self.assertEqual(config.log_path, "logs") + self.assertEqual(config.packages_install_path, "dbt_packages") + self.assertEqual(config.quoting, {"database": True, "identifier": True, "schema": True}) + 
self.assertEqual(config.models, {}) + self.assertEqual(config.on_run_start, []) + self.assertEqual(config.on_run_end, []) + self.assertEqual(config.seeds, {}) + self.assertEqual(config.packages, PackageConfig(packages=[])) + self.assertEqual(config.project_name, "my_test_project") + + +class TestVariableRuntimeConfigFiles(BaseConfigTest): + def setUp(self): + super().setUp() + self.default_project_data.update( + { + "version": "{{ var('cli_version') }}", + "name": "blah", + "profile": "{{ env_var('env_value_profile') }}", + "on-run-end": [ + "{{ env_var('env_value_profile') }}", + ], + "models": { + "foo": { + "post-hook": "{{ env_var('env_value_profile') }}", + }, + "bar": { + # just gibberish, make sure it gets interpreted + "materialized": "{{ env_var('env_value_profile') }}", + }, + }, + "seeds": { + "foo": { + "post-hook": "{{ env_var('env_value_profile') }}", + }, + "bar": { + # just gibberish, make sure it gets interpreted + "materialized": "{{ env_var('env_value_profile') }}", + }, + }, + } + ) + self.write_project(self.default_project_data) + + def test_cli_and_env_vars(self): + self.args.target = "cli-and-env-vars" + self.args.vars = {"cli_value_host": "cli-postgres-host", "cli_version": "0.1.2"} + self.args.project_dir = self.project_dir + set_from_args(self.args, None) + with mock.patch.dict(os.environ, self.env_override): + safe_set_invocation_context() # reset invocation context with new env + config = dbt.config.RuntimeConfig.from_args(self.args) + + self.assertEqual(config.version, "0.1.2") + self.assertEqual(config.project_name, "blah") + self.assertEqual(config.profile_name, "default") + self.assertEqual(config.credentials.host, "cli-postgres-host") + self.assertEqual(config.credentials.user, "env-postgres-user") + # make sure hooks are not interpreted + self.assertEqual(config.on_run_end, ["{{ env_var('env_value_profile') }}"]) + self.assertEqual(config.models["foo"]["post-hook"], "{{ env_var('env_value_profile') }}") + self.assertEqual(config.models["bar"]["materialized"], "default") # rendered! + self.assertEqual(config.seeds["foo"]["post-hook"], "{{ env_var('env_value_profile') }}") + self.assertEqual(config.seeds["bar"]["materialized"], "default") # rendered! diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py new file mode 100644 index 00000000000..5e9acb84907 --- /dev/null +++ b/tests/unit/conftest.py @@ -0,0 +1,32 @@ +import pytest + +from dbt.artifacts.resources import Quoting, SourceConfig +from dbt.artifacts.resources.types import NodeType +from dbt.contracts.graph.nodes import SourceDefinition + +# All manifest related fixtures. 
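+# The wildcard import re-exports the shared manifest fixtures so that pytest can discover them from any test module under tests/unit without explicit imports.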
+from tests.unit.utils.manifest import * # noqa + + +@pytest.fixture +def basic_parsed_source_definition_object(): + return SourceDefinition( + columns={}, + database="some_db", + description="", + fqn=["test", "source", "my_source", "my_source_table"], + identifier="my_source_table", + loader="stitch", + name="my_source_table", + original_file_path="/root/models/sources.yml", + package_name="test", + path="/root/models/sources.yml", + quoting=Quoting(), + resource_type=NodeType.Source, + schema="some_schema", + source_description="my source description", + source_name="my_source", + unique_id="test.source.my_source.my_source_table", + tags=[], + config=SourceConfig(), + ) diff --git a/tests/unit/test_context.py b/tests/unit/context/test_context.py similarity index 64% rename from tests/unit/test_context.py rename to tests/unit/context/test_context.py index 4ee923676cf..6070c24a1b7 100644 --- a/tests/unit/test_context.py +++ b/tests/unit/context/test_context.py @@ -1,4 +1,3 @@ -import unittest import os from typing import Set, Dict, Any from unittest import mock @@ -13,28 +12,32 @@ NodeConfig, DependsOn, Macro, + UnitTestNode, + UnitTestOverrides, ) from dbt.config.project import VarProvider -from dbt.context import base, providers, docs, manifest, macros +from dbt.context import base, providers, docs, macros, query_header from dbt.contracts.files import FileHash from dbt_common.events.functions import reset_metadata_vars +from dbt.flags import set_from_args from dbt.node_types import NodeType import dbt_common.exceptions -from .utils import ( + +from tests.unit.utils import ( config_from_parts_or_dicts, inject_adapter, clear_plugin, ) -from .mock_adapter import adapter_factory -from dbt.flags import set_from_args +from tests.unit.mock_adapter import adapter_factory from argparse import Namespace set_from_args(Namespace(WARN_ERROR=False), None) -class TestVar(unittest.TestCase): - def setUp(self): - self.model = ModelNode( +class TestVar: + @pytest.fixture + def model(self): + return ModelNode( alias="model_one", name="model_one", database="dbt", @@ -68,91 +71,114 @@ def setUp(self): columns={}, checksum=FileHash.from_contents(""), ) - self.context = mock.MagicMock() - self.provider = VarProvider({}) - self.config = mock.MagicMock( - config_version=2, vars=self.provider, cli_vars={}, project_name="root" - ) - def test_var_default_something(self): - self.config.cli_vars = {"foo": "baz"} - var = providers.RuntimeVar(self.context, self.config, self.model) - self.assertEqual(var("foo"), "baz") - self.assertEqual(var("foo", "bar"), "baz") + @pytest.fixture + def context(self): + return mock.MagicMock() + + @pytest.fixture + def provider(self): + return VarProvider({}) + + @pytest.fixture + def config(self, provider): + return mock.MagicMock(config_version=2, vars=provider, cli_vars={}, project_name="root") + + def test_var_default_something(self, model, config, context): + config.cli_vars = {"foo": "baz"} + var = providers.RuntimeVar(context, config, model) - def test_var_default_none(self): - self.config.cli_vars = {"foo": None} - var = providers.RuntimeVar(self.context, self.config, self.model) - self.assertEqual(var("foo"), None) - self.assertEqual(var("foo", "bar"), None) + assert var("foo") == "baz" + assert var("foo", "bar") == "baz" - def test_var_not_defined(self): - var = providers.RuntimeVar(self.context, self.config, self.model) + def test_var_default_none(self, model, config, context): + config.cli_vars = {"foo": None} + var = providers.RuntimeVar(context, config, model) - 
self.assertEqual(var("foo", "bar"), "bar") - with self.assertRaises(dbt_common.exceptions.CompilationError): + assert var("foo") is None + assert var("foo", "bar") is None + + def test_var_not_defined(self, model, config, context): + var = providers.RuntimeVar(context, config, model) + + assert var("foo", "bar") == "bar" + with pytest.raises(dbt_common.exceptions.CompilationError): var("foo") - def test_parser_var_default_something(self): - self.config.cli_vars = {"foo": "baz"} - var = providers.ParseVar(self.context, self.config, self.model) - self.assertEqual(var("foo"), "baz") - self.assertEqual(var("foo", "bar"), "baz") + def test_parser_var_default_something(self, model, config, context): + config.cli_vars = {"foo": "baz"} + var = providers.ParseVar(context, config, model) + assert var("foo") == "baz" + assert var("foo", "bar") == "baz" - def test_parser_var_default_none(self): - self.config.cli_vars = {"foo": None} - var = providers.ParseVar(self.context, self.config, self.model) - self.assertEqual(var("foo"), None) - self.assertEqual(var("foo", "bar"), None) + def test_parser_var_default_none(self, model, config, context): + config.cli_vars = {"foo": None} + var = providers.ParseVar(context, config, model) + assert var("foo") is None + assert var("foo", "bar") is None - def test_parser_var_not_defined(self): + def test_parser_var_not_defined(self, model, config, context): # at parse-time, we should not raise if we encounter a missing var # that way disabled models don't get parse errors - var = providers.ParseVar(self.context, self.config, self.model) + var = providers.ParseVar(context, config, model) - self.assertEqual(var("foo", "bar"), "bar") - self.assertEqual(var("foo"), None) + assert var("foo", "bar") == "bar" + assert var("foo") is None -class TestParseWrapper(unittest.TestCase): - def setUp(self): - self.mock_config = mock.MagicMock() - self.mock_mp_context = mock.MagicMock() +class TestParseWrapper: + @pytest.fixture + def mock_adapter(self): + mock_config = mock.MagicMock() + mock_mp_context = mock.MagicMock() adapter_class = adapter_factory() - self.mock_adapter = adapter_class(self.mock_config, self.mock_mp_context) - self.namespace = mock.MagicMock() - self.wrapper = providers.ParseDatabaseWrapper(self.mock_adapter, self.namespace) - self.responder = self.mock_adapter.responder - - def test_unwrapped_method(self): - self.assertEqual(self.wrapper.quote("test_value"), '"test_value"') - self.responder.quote.assert_called_once_with("test_value") - - def test_wrapped_method(self): - found = self.wrapper.get_relation("database", "schema", "identifier") - self.assertEqual(found, None) - self.responder.get_relation.assert_not_called() - - -class TestRuntimeWrapper(unittest.TestCase): - def setUp(self): - self.mock_config = mock.MagicMock() - self.mock_mp_context = mock.MagicMock() - self.mock_config.quoting = { + return adapter_class(mock_config, mock_mp_context) + + @pytest.fixture + def wrapper(self, mock_adapter): + namespace = mock.MagicMock() + return providers.ParseDatabaseWrapper(mock_adapter, namespace) + + @pytest.fixture + def responder(self, mock_adapter): + return mock_adapter.responder + + def test_unwrapped_method(self, wrapper, responder): + assert wrapper.quote("test_value") == '"test_value"' + responder.quote.assert_called_once_with("test_value") + + def test_wrapped_method(self, wrapper, responder): + found = wrapper.get_relation("database", "schema", "identifier") + assert found is None + responder.get_relation.assert_not_called() + + +class 
TestRuntimeWrapper: + @pytest.fixture + def mock_adapter(self): + mock_config = mock.MagicMock() + mock_config.quoting = { "database": True, "schema": True, "identifier": True, } + mock_mp_context = mock.MagicMock() adapter_class = adapter_factory() - self.mock_adapter = adapter_class(self.mock_config, self.mock_mp_context) - self.namespace = mock.MagicMock() - self.wrapper = providers.RuntimeDatabaseWrapper(self.mock_adapter, self.namespace) - self.responder = self.mock_adapter.responder + return adapter_class(mock_config, mock_mp_context) + + @pytest.fixture + def wrapper(self, mock_adapter): + namespace = mock.MagicMock() + return providers.RuntimeDatabaseWrapper(mock_adapter, namespace) + + @pytest.fixture + def responder(self, mock_adapter): + return mock_adapter.responder - def test_unwrapped_method(self): + def test_unwrapped_method(self, wrapper, responder): # the 'quote' method isn't wrapped, we should get our expected inputs - self.assertEqual(self.wrapper.quote("test_value"), '"test_value"') - self.responder.quote.assert_called_once_with("test_value") + assert wrapper.quote("test_value") == '"test_value"' + responder.quote.assert_called_once_with("test_value") def assert_has_keys(required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str, Any]): @@ -314,11 +340,15 @@ def mock_macro(name, package_name): return macro -def mock_manifest(config): +def mock_manifest(config, additional_macros=None): + default_macro_names = ["macro_a", "macro_b"] + default_macros = [mock_macro(name, config.project_name) for name in default_macro_names] + additional_macros = additional_macros or [] + all_macros = default_macros + additional_macros + manifest_macros = {} macros_by_package = {} - for name in ["macro_a", "macro_b"]: - macro = mock_macro(name, config.project_name) + for macro in all_macros: manifest_macros[macro.unique_id] = macro if macro.package_name not in macros_by_package: macros_by_package[macro.package_name] = {} @@ -370,6 +400,14 @@ def mock_model(): ) +def mock_unit_test_node(): + return mock.MagicMock( + __class__=UnitTestNode, + resource_type=NodeType.Unit, + tested_node_unique_id="model.root.model_one", + ) + + @pytest.fixture def get_adapter(): with mock.patch.object(providers, "get_adapter") as patch: @@ -403,7 +441,7 @@ def postgres_adapter(config_postgres, get_adapter): def test_query_header_context(config_postgres, manifest_fx): - ctx = manifest.generate_query_header_context( + ctx = query_header.generate_query_header_context( config=config_postgres, manifest=manifest_fx, ) @@ -534,3 +572,84 @@ def test_dbt_metadata_envs( # cleanup reset_metadata_vars() + + +def test_unit_test_runtime_context(config_postgres, manifest_fx, get_adapter, get_include_paths): + ctx = providers.generate_runtime_unit_test_context( + unit_test=mock_unit_test_node(), + config=config_postgres, + manifest=manifest_fx, + ) + assert_has_keys(REQUIRED_MODEL_KEYS, MAYBE_KEYS, ctx) + + +def test_unit_test_runtime_context_macro_overrides_global( + config_postgres, manifest_fx, get_adapter, get_include_paths +): + unit_test = mock_unit_test_node() + unit_test.overrides = UnitTestOverrides(macros={"macro_a": "override"}) + ctx = providers.generate_runtime_unit_test_context( + unit_test=unit_test, + config=config_postgres, + manifest=manifest_fx, + ) + assert ctx["macro_a"]() == "override" + + +def test_unit_test_runtime_context_macro_overrides_package( + config_postgres, manifest_fx, get_adapter, get_include_paths +): + unit_test = mock_unit_test_node() + unit_test.overrides = 
UnitTestOverrides(macros={"some_package.some_macro": "override"}) + + dbt_macro = mock_macro("some_macro", "some_package") + manifest_with_dbt_macro = mock_manifest(config_postgres, additional_macros=[dbt_macro]) + + ctx = providers.generate_runtime_unit_test_context( + unit_test=unit_test, + config=config_postgres, + manifest=manifest_with_dbt_macro, + ) + assert ctx["some_package"]["some_macro"]() == "override" + + +@pytest.mark.parametrize( + "overrides,expected_override_value", + [ + # override dbt macro at global level + ({"some_macro": "override"}, "override"), + # override dbt macro at dbt-namespaced level + ({"dbt.some_macro": "override"}, "override"), + # override dbt macro at both levels - global override should win + ( + {"some_macro": "dbt_global_override", "dbt.some_macro": "dbt_namespaced_override"}, + "dbt_global_override", + ), + # override dbt macro at both levels - global override should win, regardless of order + ( + {"dbt.some_macro": "dbt_namespaced_override", "some_macro": "dbt_global_override"}, + "dbt_global_override", + ), + ], +) +def test_unit_test_runtime_context_macro_overrides_dbt_macro( + overrides, + expected_override_value, + config_postgres, + manifest_fx, + get_adapter, + get_include_paths, +): + unit_test = mock_unit_test_node() + unit_test.overrides = UnitTestOverrides(macros=overrides) + + dbt_macro = mock_macro("some_macro", "dbt") + manifest_with_dbt_macro = mock_manifest(config_postgres, additional_macros=[dbt_macro]) + + ctx = providers.generate_runtime_unit_test_context( + unit_test=unit_test, + config=config_postgres, + manifest=manifest_with_dbt_macro, + ) + assert ctx["some_macro"]() == expected_override_value + assert ctx["dbt"]["some_macro"]() == expected_override_value diff --git a/tests/unit/test_providers.py b/tests/unit/context/test_providers.py similarity index 100% rename from tests/unit/test_providers.py rename to tests/unit/context/test_providers.py diff --git a/tests/unit/test_query_headers.py b/tests/unit/context/test_query_header.py similarity index 50% rename from tests/unit/test_query_headers.py rename to tests/unit/context/test_query_header.py index 2be9b59bd4d..aa9e99821a2 100644 --- a/tests/unit/test_query_headers.py +++ b/tests/unit/context/test_query_header.py @@ -1,8 +1,9 @@ +import pytest import re -from unittest import TestCase, mock +from unittest import mock from dbt.adapters.base.query_headers import MacroQueryStringSetter -from dbt.context.manifest import generate_query_header_context +from dbt.context.query_header import generate_query_header_context from tests.unit.utils import config_from_parts_or_dicts from dbt.flags import set_from_args @@ -11,9 +12,10 @@ set_from_args(Namespace(WARN_ERROR=False), None) -class TestQueryHeaders(TestCase): - def setUp(self): - self.profile_cfg = { +class TestQueryHeaderContext: + @pytest.fixture + def profile_cfg(self): + return { "outputs": { "test": { "type": "postgres", @@ -27,33 +29,40 @@ }, "target": "test", } - self.project_cfg = { + + @pytest.fixture + def project_cfg(self): + return { "name": "query_headers", "version": "0.1", "profile": "test", "config-version": 2, } - self.query = "SELECT 1;" - def test_comment_should_prepend_query_by_default(self): - config = config_from_parts_or_dicts(self.project_cfg, self.profile_cfg) + @pytest.fixture + def query(self): + return "SELECT 1;" + + def test_comment_should_prepend_query_by_default(self, profile_cfg, project_cfg, query): + config = config_from_parts_or_dicts(project_cfg, profile_cfg)
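+        # a MagicMock with an empty macro namespace stands in for the real manifest here; the default query comment renders without needing any project macros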
query_header_context = generate_query_header_context(config, mock.MagicMock(macros={})) query_header = MacroQueryStringSetter(config, query_header_context) - sql = query_header.add(self.query) - self.assertTrue(re.match(f"^\/\*.*\*\/\n{self.query}$", sql)) # noqa: [W605] + sql = query_header.add(query) + assert re.match(f"^\/\*.*\*\/\n{query}$", sql) # noqa: [W605] - def test_append_comment(self): - self.project_cfg.update({"query-comment": {"comment": "executed by dbt", "append": True}}) - config = config_from_parts_or_dicts(self.project_cfg, self.profile_cfg) + def test_append_comment(self, profile_cfg, project_cfg, query): + project_cfg.update({"query-comment": {"comment": "executed by dbt", "append": True}}) + config = config_from_parts_or_dicts(project_cfg, profile_cfg) query_header_context = generate_query_header_context(config, mock.MagicMock(macros={})) query_header = MacroQueryStringSetter(config, query_header_context) - sql = query_header.add(self.query) - self.assertEqual(sql, f"{self.query[:-1]}\n/* executed by dbt */;") + sql = query_header.add(query) + + assert sql == f"{query[:-1]}\n/* executed by dbt */;" - def test_disable_query_comment(self): - self.project_cfg.update({"query-comment": ""}) - config = config_from_parts_or_dicts(self.project_cfg, self.profile_cfg) + def test_disable_query_comment(self, profile_cfg, project_cfg, query): + project_cfg.update({"query-comment": ""}) + config = config_from_parts_or_dicts(project_cfg, profile_cfg) query_header = MacroQueryStringSetter(config, mock.MagicMock(macros={})) - self.assertEqual(query_header.add(self.query), self.query) + assert query_header.add(query) == query diff --git a/core/dbt/contracts/graph/searcher.py b/tests/unit/contracts/__init__.py similarity index 100% rename from core/dbt/contracts/graph/searcher.py rename to tests/unit/contracts/__init__.py diff --git a/tests/unit/test_compiler.py b/tests/unit/contracts/graph/__init__.py similarity index 100% rename from tests/unit/test_compiler.py rename to tests/unit/contracts/graph/__init__.py diff --git a/tests/unit/contracts/graph/test_semantic_manifest.py b/tests/unit/contracts/graph/test_semantic_manifest.py new file mode 100644 index 00000000000..4eb389ea0f5 --- /dev/null +++ b/tests/unit/contracts/graph/test_semantic_manifest.py @@ -0,0 +1,28 @@ +import pytest +from dbt.contracts.graph.semantic_manifest import SemanticManifest + + +# Overwrite the default nodes used to construct the manifest +@pytest.fixture +def nodes(metricflow_time_spine_model): + return [metricflow_time_spine_model] + + +@pytest.fixture +def semantic_models( + semantic_model, +) -> list: + return [semantic_model] + + +@pytest.fixture +def metrics( + metric, +) -> list: + return [metric] + + +class TestSemanticManifest: + def test_validate(self, manifest): + sm_manifest = SemanticManifest(manifest) + assert sm_manifest.validate() diff --git a/tests/unit/test_constraint_parsing.py b/tests/unit/contracts/graph/test_unparsed.py similarity index 100% rename from tests/unit/test_constraint_parsing.py rename to tests/unit/contracts/graph/test_unparsed.py diff --git a/tests/unit/fixtures.py b/tests/unit/fixtures.py new file mode 100644 index 00000000000..f74e381741e --- /dev/null +++ b/tests/unit/fixtures.py @@ -0,0 +1,79 @@ +from dbt.contracts.files import FileHash +from dbt.contracts.graph.nodes import ( + DependsOn, + InjectedCTE, + ModelNode, + ModelConfig, + GenericTestNode, +) +from dbt.node_types import NodeType + +from dbt.artifacts.resources import Contract, TestConfig, TestMetadata + + +def 
model_node(): + return ModelNode( + package_name="test", + path="/root/models/foo.sql", + original_file_path="models/foo.sql", + language="sql", + raw_code='select * from {{ ref("other") }}', + name="foo", + resource_type=NodeType.Model, + unique_id="model.test.foo", + fqn=["test", "models", "foo"], + refs=[], + sources=[], + metrics=[], + depends_on=DependsOn(), + description="", + database="test_db", + schema="test_schema", + alias="bar", + tags=[], + config=ModelConfig(), + contract=Contract(), + meta={}, + compiled=True, + extra_ctes=[InjectedCTE("whatever", "select * from other")], + extra_ctes_injected=True, + compiled_code="with whatever as (select * from other) select * from whatever", + checksum=FileHash.from_contents(""), + unrendered_config={}, + ) + + +def generic_test_node(): + return GenericTestNode( + package_name="test", + path="/root/x/path.sql", + original_file_path="/root/path.sql", + language="sql", + raw_code='select * from {{ ref("other") }}', + name="foo", + resource_type=NodeType.Test, + unique_id="model.test.foo", + fqn=["test", "models", "foo"], + refs=[], + sources=[], + metrics=[], + depends_on=DependsOn(), + description="", + database="test_db", + schema="dbt_test__audit", + alias="bar", + tags=[], + config=TestConfig(severity="warn"), + contract=Contract(), + meta={}, + compiled=True, + extra_ctes=[InjectedCTE("whatever", "select * from other")], + extra_ctes_injected=True, + compiled_code="with whatever as (select * from other) select * from whatever", + column_name="id", + test_metadata=TestMetadata(namespace=None, name="foo", kwargs={}), + checksum=FileHash.from_contents(""), + unrendered_config={ + "severity": "warn", + }, + ) diff --git a/tests/unit/parser/test_manifest.py b/tests/unit/parser/test_manifest.py new file mode 100644 index 00000000000..6a643e444f3 --- /dev/null +++ b/tests/unit/parser/test_manifest.py @@ -0,0 +1,93 @@ +import pytest +from unittest.mock import patch, MagicMock +from argparse import Namespace + + +from dbt.contracts.graph.manifest import Manifest +from dbt.parser.manifest import ManifestLoader +from dbt.config import RuntimeConfig +from dbt.flags import set_from_args + + +@pytest.fixture +def mock_project(): + mock_project = MagicMock(RuntimeConfig) + mock_project.cli_vars = {} + mock_project.args = MagicMock() + mock_project.args.profile = "test" + mock_project.args.target = "test" + mock_project.project_env_vars = {} + mock_project.profile_env_vars = {} + mock_project.project_target_path = "mock_target_path" + mock_project.credentials = MagicMock() + return mock_project + + +class TestPartialParse: + @patch("dbt.parser.manifest.ManifestLoader.build_manifest_state_check") + @patch("dbt.parser.manifest.os.path.exists") + @patch("dbt.parser.manifest.open") + def test_partial_parse_file_path(self, patched_open, patched_os_exist, patched_state_check): + mock_project = MagicMock(RuntimeConfig) + mock_project.project_target_path = "mock_target_path" + patched_os_exist.return_value = True + set_from_args(Namespace(), {}) + ManifestLoader(mock_project, {}) + # by default we use the project_target_path + patched_open.assert_called_with("mock_target_path/partial_parse.msgpack", "rb") + set_from_args(Namespace(partial_parse_file_path="specified_partial_parse_path"), {}) + ManifestLoader(mock_project, {}) + # if specified in flags, we use the specified path + patched_open.assert_called_with("specified_partial_parse_path", "rb") + + def test_profile_hash_change(self, mock_project): + # This test validates that the profile_hash is updated
when the connection keys change + profile_hash = "750bc99c1d64ca518536ead26b28465a224be5ffc918bf2a490102faa5a1bcf5" + mock_project.credentials.connection_info.return_value = "test" + set_from_args(Namespace(), {}) + manifest = ManifestLoader(mock_project, {}) + assert manifest.manifest.state_check.profile_hash.checksum == profile_hash + mock_project.credentials.connection_info.return_value = "test1" + manifest = ManifestLoader(mock_project, {}) + assert manifest.manifest.state_check.profile_hash.checksum != profile_hash + + +class TestFailedPartialParse: + @patch("dbt.tracking.track_partial_parser") + @patch("dbt.tracking.active_user") + @patch("dbt.parser.manifest.PartialParsing") + @patch("dbt.parser.manifest.ManifestLoader.read_manifest_for_partial_parse") + @patch("dbt.parser.manifest.ManifestLoader.build_manifest_state_check") + def test_partial_parse_safe_update_project_parser_files_partially( + self, + patched_state_check, + patched_read_manifest_for_partial_parse, + patched_partial_parsing, + patched_active_user, + patched_track_partial_parser, + ): + mock_instance = MagicMock() + mock_instance.skip_parsing.return_value = False + mock_instance.get_parsing_files.side_effect = KeyError("Whoopsie!") + patched_partial_parsing.return_value = mock_instance + + mock_project = MagicMock(RuntimeConfig) + mock_project.project_target_path = "mock_target_path" + + mock_saved_manifest = MagicMock(Manifest) + mock_saved_manifest.files = {} + patched_read_manifest_for_partial_parse.return_value = mock_saved_manifest + + set_from_args(Namespace(), {}) + loader = ManifestLoader(mock_project, {}) + loader.safe_update_project_parser_files_partially({}) + + patched_track_partial_parser.assert_called_once() + exc_info = patched_track_partial_parser.call_args[0][0] + assert "traceback" in exc_info + assert "exception" in exc_info + assert "code" in exc_info + assert "location" in exc_info + assert "full_reparse_reason" in exc_info + assert "KeyError: 'Whoopsie!'" == exc_info["exception"] + assert isinstance(exc_info["code"], str) or isinstance(exc_info["code"], type(None)) diff --git a/tests/unit/task/test_base.py b/tests/unit/task/test_base.py new file mode 100644 index 00000000000..b8f84fffa5e --- /dev/null +++ b/tests/unit/task/test_base.py @@ -0,0 +1,54 @@ +import os +from dbt.task.base import BaseRunner, ConfiguredTask +from dbt.contracts.graph.nodes import SourceDefinition +import dbt_common.exceptions + +from tests.unit.config import BaseConfigTest + +INITIAL_ROOT = os.getcwd() + + +class MockRunner(BaseRunner): + def compile(self): + pass + + +class TestBaseRunner: + def test_handle_generic_exception_handles_nodes_without_build_path( + self, basic_parsed_source_definition_object: SourceDefinition + ): + # Source definition nodes don't have `build_path` attributes. Thus, this + # test will fail if _handle_generic_exception doesn't account for this + runner = MockRunner( + config=None, + adapter=None, + node=basic_parsed_source_definition_object, + node_index=None, + num_nodes=None, + ) + assert not hasattr(basic_parsed_source_definition_object, "build_path") + runner._handle_generic_exception(Exception("bad thing happened"), ctx=None) + + +class InheritsFromConfiguredTask(ConfiguredTask): + def run(self): + pass + + +class TestConfiguredTask(BaseConfigTest): + def tearDown(self): + super().tearDown() + # These tests will change the directory to the project path, + # so it's necessary to change it back at the end. 
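+        # (ConfiguredTask.from_args performs the chdir into the project directory, as test_configured_task_dir_change below verifies)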
+            os.chdir(INITIAL_ROOT) + + def test_configured_task_dir_change(self): + self.assertEqual(os.getcwd(), INITIAL_ROOT) + self.assertNotEqual(INITIAL_ROOT, self.project_dir) + InheritsFromConfiguredTask.from_args(self.args) + self.assertEqual(os.path.realpath(os.getcwd()), os.path.realpath(self.project_dir)) + + def test_configured_task_dir_change_with_bad_path(self): + self.args.project_dir = "bad_path" + with self.assertRaises(dbt_common.exceptions.DbtRuntimeError): + InheritsFromConfiguredTask.from_args(self.args) diff --git a/tests/unit/task/test_freshness.py b/tests/unit/task/test_freshness.py new file mode 100644 index 00000000000..05c00df75da --- /dev/null +++ b/tests/unit/task/test_freshness.py @@ -0,0 +1,154 @@ +import datetime +import pytest +from unittest import mock + +from dbt.task.freshness import FreshnessTask, FreshnessResponse + + +class TestFreshnessTaskMetadataCache: + @pytest.fixture(scope="class") + def args(self): + mock_args = mock.Mock() + mock_args.state = None + mock_args.defer_state = None + mock_args.write_json = None + + return mock_args + + @pytest.fixture(scope="class") + def config(self): + mock_config = mock.Mock() + mock_config.threads = 1 + mock_config.target_name = "mock_config_target_name" + + return mock_config + + @pytest.fixture(scope="class") + def manifest(self): + return mock.Mock() + + @pytest.fixture(scope="class") + def source_with_loaded_at_field(self): + mock_source = mock.Mock() + mock_source.unique_id = "source_with_loaded_at_field" + mock_source.loaded_at_field = "loaded_at_field" + return mock_source + + @pytest.fixture(scope="class") + def source_no_loaded_at_field(self): + mock_source = mock.Mock() + mock_source.unique_id = "source_no_loaded_at_field" + return mock_source + + @pytest.fixture(scope="class") + def source_no_loaded_at_field2(self): + mock_source = mock.Mock() + mock_source.unique_id = "source_no_loaded_at_field2" + return mock_source + + @pytest.fixture(scope="class") + def adapter(self): + return mock.Mock() + + @pytest.fixture(scope="class") + def freshness_response(self): + return FreshnessResponse( + max_loaded_at=datetime.datetime(2020, 5, 2), + snapshotted_at=datetime.datetime(2020, 5, 4), + age=2, + ) + + def test_populate_metadata_freshness_cache( + self, args, config, manifest, adapter, source_no_loaded_at_field, freshness_response + ): + manifest.sources = {source_no_loaded_at_field.unique_id: source_no_loaded_at_field} + adapter.Relation.create_from.return_value = "source_relation" + adapter.calculate_freshness_from_metadata_batch.return_value = ( + [], + {"source_relation": freshness_response}, + ) + task = FreshnessTask(args=args, config=config, manifest=manifest) + + task.populate_metadata_freshness_cache(adapter, {source_no_loaded_at_field.unique_id}) + + assert task.get_freshness_metadata_cache() == {"source_relation": freshness_response} + + def test_populate_metadata_freshness_cache_multiple_sources( + self, + args, + config, + manifest, + adapter, + source_no_loaded_at_field, + source_no_loaded_at_field2, + freshness_response, + ): + manifest.sources = { + source_no_loaded_at_field.unique_id: source_no_loaded_at_field, + source_no_loaded_at_field2.unique_id: source_no_loaded_at_field2, + } + adapter.Relation.create_from.side_effect = ["source_relation1", "source_relation2"] + adapter.calculate_freshness_from_metadata_batch.return_value = ( + [], + {"source_relation1": freshness_response, "source_relation2": freshness_response}, + ) + task = FreshnessTask(args=args, config=config, manifest=manifest) + + 
task.populate_metadata_freshness_cache(adapter, {source_no_loaded_at_field.unique_id}) + + assert task.get_freshness_metadata_cache() == { + "source_relation1": freshness_response, + "source_relation2": freshness_response, + } + + def test_populate_metadata_freshness_cache_with_loaded_at_field( + self, args, config, manifest, adapter, source_with_loaded_at_field, freshness_response + ): + manifest.sources = { + source_with_loaded_at_field.unique_id: source_with_loaded_at_field, + } + adapter.Relation.create_from.return_value = "source_relation" + adapter.calculate_freshness_from_metadata_batch.return_value = ( + [], + {"source_relation": freshness_response}, + ) + task = FreshnessTask(args=args, config=config, manifest=manifest) + + task.populate_metadata_freshness_cache(adapter, {source_with_loaded_at_field.unique_id}) + + assert task.get_freshness_metadata_cache() == {"source_relation": freshness_response} + + def test_populate_metadata_freshness_cache_multiple_sources_mixed( + self, + args, + config, + manifest, + adapter, + source_no_loaded_at_field, + source_with_loaded_at_field, + freshness_response, + ): + manifest.sources = { + source_no_loaded_at_field.unique_id: source_no_loaded_at_field, + source_with_loaded_at_field.unique_id: source_with_loaded_at_field, + } + adapter.Relation.create_from.return_value = "source_relation" + adapter.calculate_freshness_from_metadata_batch.return_value = ( + [], + {"source_relation": freshness_response}, + ) + task = FreshnessTask(args=args, config=config, manifest=manifest) + + task.populate_metadata_freshness_cache(adapter, {source_no_loaded_at_field.unique_id}) + + assert task.get_freshness_metadata_cache() == {"source_relation": freshness_response} + + def test_populate_metadata_freshness_cache_adapter_exception( + self, args, config, manifest, adapter, source_no_loaded_at_field, freshness_response + ): + manifest.sources = {source_no_loaded_at_field.unique_id: source_no_loaded_at_field} + adapter.Relation.create_from.return_value = "source_relation" + adapter.calculate_freshness_from_metadata_batch.side_effect = Exception() + task = FreshnessTask(args=args, config=config, manifest=manifest) + + task.populate_metadata_freshness_cache(adapter, {source_no_loaded_at_field.unique_id}) + + assert task.get_freshness_metadata_cache() == {} diff --git a/tests/unit/test_graph_runnable_task.py b/tests/unit/task/test_runnable.py similarity index 100% rename from tests/unit/test_graph_runnable_task.py rename to tests/unit/task/test_runnable.py diff --git a/tests/unit/test_base_column.py b/tests/unit/test_base_column.py deleted file mode 100644 index aaff40621e6..00000000000 --- a/tests/unit/test_base_column.py +++ /dev/null @@ -1,30 +0,0 @@ -import unittest - -import decimal - -from dbt.adapters.base import Column - - -class TestStringType(unittest.TestCase): - def test__character_type(self): - col = Column("fieldname", "character", char_size=10) - - self.assertEqual(col.data_type, "character varying(10)") - - -class TestNumericType(unittest.TestCase): - def test__numeric_type(self): - col = Column( - "fieldname", - "numeric", - numeric_precision=decimal.Decimal("12"), - numeric_scale=decimal.Decimal("2"), - ) - - self.assertEqual(col.data_type, "numeric(12,2)") - - def test__numeric_type_with_no_precision(self): - # PostgreSQL, at least, will allow empty numeric precision - col = Column("fieldname", "numeric", numeric_precision=None) - - self.assertEqual(col.data_type, "numeric") diff --git a/tests/unit/test_cache.py b/tests/unit/test_cache.py deleted 
file mode 100644 index 3cc167fc783..00000000000 --- a/tests/unit/test_cache.py +++ /dev/null @@ -1,524 +0,0 @@ -from unittest import TestCase -from dbt.adapters.cache import RelationsCache -from dbt.adapters.base.relation import BaseRelation -from multiprocessing.dummy import Pool as ThreadPool -import dbt.exceptions - -import random -import time -from dbt.flags import set_from_args -from argparse import Namespace - -set_from_args(Namespace(WARN_ERROR=False), None) - - -def make_relation(database, schema, identifier): - return BaseRelation.create(database=database, schema=schema, identifier=identifier) - - -def make_mock_relationship(database, schema, identifier): - return BaseRelation.create( - database=database, schema=schema, identifier=identifier, type="view" - ) - - -class TestCache(TestCase): - def setUp(self): - self.cache = RelationsCache() - - def assert_relations_state(self, database, schema, identifiers): - relations = self.cache.get_relations(database, schema) - for identifier, expect in identifiers.items(): - found = any( - (r.identifier == identifier and r.schema == schema and r.database == database) - for r in relations - ) - msg = "{}.{}.{} was{} found in the cache!".format( - database, schema, identifier, "" if found else " not" - ) - self.assertEqual(expect, found, msg) - - def assert_relations_exist(self, database, schema, *identifiers): - self.assert_relations_state(database, schema, {k: True for k in identifiers}) - - def assert_relations_do_not_exist(self, database, schema, *identifiers): - self.assert_relations_state(database, schema, {k: False for k in identifiers}) - - -class TestEmpty(TestCache): - def test_empty(self): - self.assertEqual(len(self.cache.relations), 0) - self.assertEqual(len(self.cache.get_relations("dbt", "test")), 0) - - -class TestDrop(TestCache): - def setUp(self): - super().setUp() - self.cache.add(make_relation("dbt", "foo", "bar")) - - def test_missing_identifier_ignored(self): - self.cache.drop(make_relation("dbt", "foo", "bar1")) - self.assert_relations_exist("dbt", "foo", "bar") - self.assertEqual(len(self.cache.relations), 1) - - def test_missing_schema_ignored(self): - self.cache.drop(make_relation("dbt", "foo1", "bar")) - self.assert_relations_exist("dbt", "foo", "bar") - self.assertEqual(len(self.cache.relations), 1) - - def test_missing_db_ignored(self): - self.cache.drop(make_relation("dbt1", "foo", "bar")) - self.assert_relations_exist("dbt", "foo", "bar") - self.assertEqual(len(self.cache.relations), 1) - - def test_drop(self): - self.cache.drop(make_relation("dbt", "foo", "bar")) - self.assert_relations_do_not_exist("dbt", "foo", "bar") - self.assertEqual(len(self.cache.relations), 0) - - -class TestAddLink(TestCache): - def setUp(self): - super().setUp() - self.cache.add(make_relation("dbt", "schema", "foo")) - self.cache.add(make_relation("dbt_2", "schema", "bar")) - self.cache.add(make_relation("dbt", "schema_2", "bar")) - - def test_no_src(self): - self.assert_relations_exist("dbt", "schema", "foo") - self.assert_relations_do_not_exist("dbt", "schema", "bar") - - self.cache.add_link( - make_relation("dbt", "schema", "bar"), make_relation("dbt", "schema", "foo") - ) - - self.assert_relations_exist("dbt", "schema", "foo", "bar") - - def test_no_dst(self): - self.assert_relations_exist("dbt", "schema", "foo") - self.assert_relations_do_not_exist("dbt", "schema", "bar") - - self.cache.add_link( - make_relation("dbt", "schema", "foo"), make_relation("dbt", "schema", "bar") - ) - - self.assert_relations_exist("dbt", "schema", 
"foo", "bar") - - -class TestRename(TestCache): - def setUp(self): - super().setUp() - self.cache.add(make_relation("DBT", "schema", "foo")) - self.assert_relations_exist("DBT", "schema", "foo") - self.assertEqual(self.cache.schemas, {("dbt", "schema")}) - - def test_no_source_error(self): - # dest should be created anyway (it's probably a temp table) - self.cache.rename( - make_relation("DBT", "schema", "bar"), make_relation("DBT", "schema", "baz") - ) - - self.assertEqual(len(self.cache.relations), 2) - self.assert_relations_exist("DBT", "schema", "foo", "baz") - - def test_dest_exists_error(self): - foo = make_relation("DBT", "schema", "foo") - bar = make_relation("DBT", "schema", "bar") - self.cache.add(bar) - self.assert_relations_exist("DBT", "schema", "foo", "bar") - - with self.assertRaises(dbt.exceptions.DbtInternalError): - self.cache.rename(foo, bar) - - self.assert_relations_exist("DBT", "schema", "foo", "bar") - - def test_dest_different_db(self): - self.cache.rename( - make_relation("DBT", "schema", "foo"), make_relation("DBT_2", "schema", "foo") - ) - self.assert_relations_exist("DBT_2", "schema", "foo") - self.assert_relations_do_not_exist("DBT", "schema", "foo") - # we know about both schemas: dbt has nothing, dbt_2 has something. - self.assertEqual(self.cache.schemas, {("dbt_2", "schema"), ("dbt", "schema")}) - self.assertEqual(len(self.cache.relations), 1) - - def test_rename_identifier(self): - self.cache.rename( - make_relation("DBT", "schema", "foo"), make_relation("DBT", "schema", "bar") - ) - - self.assert_relations_exist("DBT", "schema", "bar") - self.assert_relations_do_not_exist("DBT", "schema", "foo") - self.assertEqual(self.cache.schemas, {("dbt", "schema")}) - - relation = self.cache.relations[("dbt", "schema", "bar")] - self.assertEqual(relation.inner.schema, "schema") - self.assertEqual(relation.inner.identifier, "bar") - self.assertEqual(relation.schema, "schema") - self.assertEqual(relation.identifier, "bar") - - def test_rename_db(self): - self.cache.rename( - make_relation("DBT", "schema", "foo"), make_relation("DBT_2", "schema", "foo") - ) - - self.assertEqual(len(self.cache.get_relations("DBT", "schema")), 0) - self.assertEqual(len(self.cache.get_relations("DBT_2", "schema")), 1) - self.assert_relations_exist("DBT_2", "schema", "foo") - self.assert_relations_do_not_exist("DBT", "schema", "foo") - # we know about both schemas: dbt has nothing, dbt_2 has something. - self.assertEqual(self.cache.schemas, {("dbt_2", "schema"), ("dbt", "schema")}) - - relation = self.cache.relations[("dbt_2", "schema", "foo")] - self.assertEqual(relation.inner.database, "DBT_2") - self.assertEqual(relation.inner.schema, "schema") - self.assertEqual(relation.inner.identifier, "foo") - self.assertEqual(relation.database, "dbt_2") - self.assertEqual(relation.schema, "schema") - self.assertEqual(relation.identifier, "foo") - - def test_rename_schema(self): - self.cache.rename( - make_relation("DBT", "schema", "foo"), make_relation("DBT", "schema_2", "foo") - ) - - self.assertEqual(len(self.cache.get_relations("DBT", "schema")), 0) - self.assertEqual(len(self.cache.get_relations("DBT", "schema_2")), 1) - self.assert_relations_exist("DBT", "schema_2", "foo") - self.assert_relations_do_not_exist("DBT", "schema", "foo") - # we know about both schemas: schema has nothing, schema_2 has something. 
- self.assertEqual(self.cache.schemas, {("dbt", "schema_2"), ("dbt", "schema")}) - - relation = self.cache.relations[("dbt", "schema_2", "foo")] - self.assertEqual(relation.inner.database, "DBT") - self.assertEqual(relation.inner.schema, "schema_2") - self.assertEqual(relation.inner.identifier, "foo") - self.assertEqual(relation.database, "dbt") - self.assertEqual(relation.schema, "schema_2") - self.assertEqual(relation.identifier, "foo") - - -class TestGetRelations(TestCache): - def setUp(self): - super().setUp() - self.relation = make_relation("dbt", "foo", "bar") - self.cache.add(self.relation) - - def test_get_by_name(self): - relations = self.cache.get_relations("dbt", "foo") - self.assertEqual(len(relations), 1) - self.assertIs(relations[0], self.relation) - - def test_get_by_uppercase_schema(self): - relations = self.cache.get_relations("dbt", "FOO") - self.assertEqual(len(relations), 1) - self.assertIs(relations[0], self.relation) - - def test_get_by_uppercase_db(self): - relations = self.cache.get_relations("DBT", "foo") - self.assertEqual(len(relations), 1) - self.assertIs(relations[0], self.relation) - - def test_get_by_uppercase_schema_and_db(self): - relations = self.cache.get_relations("DBT", "FOO") - self.assertEqual(len(relations), 1) - self.assertIs(relations[0], self.relation) - - def test_get_by_wrong_db(self): - relations = self.cache.get_relations("dbt_2", "foo") - self.assertEqual(len(relations), 0) - - def test_get_by_wrong_schema(self): - relations = self.cache.get_relations("dbt", "foo_2") - self.assertEqual(len(relations), 0) - - -class TestAdd(TestCache): - def setUp(self): - super().setUp() - self.relation = make_relation("dbt", "foo", "bar") - self.cache.add(self.relation) - - def test_add(self): - relations = self.cache.get_relations("dbt", "foo") - self.assertEqual(len(relations), 1) - self.assertEqual(len(self.cache.relations), 1) - self.assertIs(relations[0], self.relation) - - def test_add_twice(self): - # add a new relation with same name - self.cache.add(make_relation("dbt", "foo", "bar")) - self.assertEqual(len(self.cache.relations), 1) - self.assertEqual(self.cache.schemas, {("dbt", "foo")}) - self.assert_relations_exist("dbt", "foo", "bar") - - def add_uppercase_schema(self): - self.cache.add(make_relation("dbt", "FOO", "baz")) - - self.assertEqual(len(self.cache.relations), 2) - relations = self.cache.get_relations("dbt", "foo") - self.assertEqual(len(relations), 2) - self.assertEqual(self.cache.schemas, {("dbt", "foo")}) - self.assertIsNot(self.cache.relations[("dbt", "foo", "bar")].inner, None) - self.assertIsNot(self.cache.relations[("dbt", "foo", "baz")].inner, None) - - def add_different_db(self): - self.cache.add(make_relation("dbt_2", "foo", "bar")) - - self.assertEqual(len(self.cache.relations), 2) - self.assertEqual(len(self.cache.get_relations("dbt_2", "foo")), 1) - self.assertEqual(len(self.cache.get_relations("dbt", "foo")), 1) - self.assertEqual(self.cache.schemas, {("dbt", "foo"), ("dbt_2", "foo")}) - self.assertIsNot(self.cache.relations[("dbt", "foo", "bar")].inner, None) - self.assertIsNot(self.cache.relations[("dbt_2", "foo", "bar")].inner, None) - - -class TestLikeDbt(TestCase): - def setUp(self): - self.cache = RelationsCache() - self._sleep = True - - # add a bunch of cache entries - for ident in "abcdef": - self.cache.add(make_relation("dbt", "schema", ident)) - # 'b' references 'a' - self.cache.add_link( - make_relation("dbt", "schema", "a"), make_relation("dbt", "schema", "b") - ) - # and 'c' references 'b' - 
self.cache.add_link( - make_relation("dbt", "schema", "b"), make_relation("dbt", "schema", "c") - ) - # and 'd' references 'b' - self.cache.add_link( - make_relation("dbt", "schema", "b"), make_relation("dbt", "schema", "d") - ) - # and 'e' references 'a' - self.cache.add_link( - make_relation("dbt", "schema", "a"), make_relation("dbt", "schema", "e") - ) - # and 'f' references 'd' - self.cache.add_link( - make_relation("dbt", "schema", "d"), make_relation("dbt", "schema", "f") - ) - # so drop propagation goes (a -> (b -> (c (d -> f))) e) - - def assert_has_relations(self, expected): - current = set(r.identifier for r in self.cache.get_relations("dbt", "schema")) - self.assertEqual(current, expected) - - def test_drop_inner(self): - self.assert_has_relations(set("abcdef")) - self.cache.drop(make_relation("dbt", "schema", "b")) - self.assert_has_relations({"a", "e"}) - - def test_rename_and_drop(self): - self.assert_has_relations(set("abcdef")) - # drop the backup/tmp - self.cache.drop(make_relation("dbt", "schema", "b__backup")) - self.cache.drop(make_relation("dbt", "schema", "b__tmp")) - self.assert_has_relations(set("abcdef")) - # create a new b__tmp - self.cache.add( - make_relation( - "dbt", - "schema", - "b__tmp", - ) - ) - self.assert_has_relations(set("abcdef") | {"b__tmp"}) - # rename b -> b__backup - self.cache.rename( - make_relation("dbt", "schema", "b"), make_relation("dbt", "schema", "b__backup") - ) - self.assert_has_relations(set("acdef") | {"b__tmp", "b__backup"}) - # rename temp to b - self.cache.rename( - make_relation("dbt", "schema", "b__tmp"), make_relation("dbt", "schema", "b") - ) - self.assert_has_relations(set("abcdef") | {"b__backup"}) - - # drop backup, everything that used to depend on b should be gone, but - # b itself should still exist - self.cache.drop(make_relation("dbt", "schema", "b__backup")) - self.assert_has_relations(set("abe")) - relation = self.cache.relations[("dbt", "schema", "a")] - self.assertEqual(len(relation.referenced_by), 1) - - def _rand_sleep(self): - if not self._sleep: - return - time.sleep(random.random() * 0.1) - - def _target(self, ident): - self._rand_sleep() - self.cache.rename( - make_relation("dbt", "schema", ident), - make_relation("dbt", "schema", ident + "__backup"), - ) - self._rand_sleep() - self.cache.add(make_relation("dbt", "schema", ident + "__tmp")) - self._rand_sleep() - self.cache.rename( - make_relation("dbt", "schema", ident + "__tmp"), make_relation("dbt", "schema", ident) - ) - self._rand_sleep() - self.cache.drop(make_relation("dbt", "schema", ident + "__backup")) - return ident, self.cache.get_relations("dbt", "schema") - - def test_threaded(self): - # add three more short subchains for threads to test on - for ident in "ghijklmno": - make_mock_relationship("test_db", "schema", ident) - self.cache.add(make_relation("dbt", "schema", ident)) - - self.cache.add_link( - make_relation("dbt", "schema", "a"), make_relation("dbt", "schema", "g") - ) - self.cache.add_link( - make_relation("dbt", "schema", "g"), make_relation("dbt", "schema", "h") - ) - self.cache.add_link( - make_relation("dbt", "schema", "h"), make_relation("dbt", "schema", "i") - ) - - self.cache.add_link( - make_relation("dbt", "schema", "a"), make_relation("dbt", "schema", "j") - ) - self.cache.add_link( - make_relation("dbt", "schema", "j"), make_relation("dbt", "schema", "k") - ) - self.cache.add_link( - make_relation("dbt", "schema", "k"), make_relation("dbt", "schema", "l") - ) - - self.cache.add_link( - make_relation("dbt", "schema", "a"), 
make_relation("dbt", "schema", "m") - ) - self.cache.add_link( - make_relation("dbt", "schema", "m"), make_relation("dbt", "schema", "n") - ) - self.cache.add_link( - make_relation("dbt", "schema", "n"), make_relation("dbt", "schema", "o") - ) - - pool = ThreadPool(4) - results = list(pool.imap_unordered(self._target, ("b", "g", "j", "m"))) - pool.close() - pool.join() - # at a minimum, we expect each table to "see" itself, its parent ('a'), - # and the unrelated table ('a') - min_expect = { - "b": {"a", "b", "e"}, - "g": {"a", "g", "e"}, - "j": {"a", "j", "e"}, - "m": {"a", "m", "e"}, - } - - for ident, relations in results: - seen = set(r.identifier for r in relations) - self.assertTrue(min_expect[ident].issubset(seen)) - - self.assert_has_relations(set("abgjme")) - - def test_threaded_repeated(self): - for _ in range(10): - self.setUp() - self._sleep = False - self.test_threaded() - - -class TestComplexCache(TestCase): - def setUp(self): - self.cache = RelationsCache() - inputs = [ - ("dbt", "foo", "table1"), - ("dbt", "foo", "table3"), - ("dbt", "foo", "table4"), - ("dbt", "bar", "table2"), - ("dbt", "bar", "table3"), - ("dbt_2", "foo", "table1"), - ("dbt_2", "foo", "table2"), - ] - self.inputs = [make_relation(d, s, i) for d, s, i in inputs] - for relation in self.inputs: - self.cache.add(relation) - - # dbt.foo.table3 references dbt.foo.table1 - # (create view dbt.foo.table3 as (select * from dbt.foo.table1...)) - self.cache.add_link( - make_relation("dbt", "foo", "table1"), make_relation("dbt", "foo", "table3") - ) - # dbt.bar.table3 references dbt.foo.table3 - # (create view dbt.bar.table5 as (select * from dbt.foo.table3...)) - self.cache.add_link( - make_relation("dbt", "foo", "table3"), make_relation("dbt", "bar", "table3") - ) - - # dbt.foo.table4 also references dbt.foo.table1 - self.cache.add_link( - make_relation("dbt", "foo", "table1"), make_relation("dbt", "foo", "table4") - ) - - # and dbt_2.foo.table1 references dbt.foo.table1 - self.cache.add_link( - make_relation("dbt", "foo", "table1"), - make_relation("dbt_2", "foo", "table1"), - ) - - def test_get_relations(self): - self.assertEqual(len(self.cache.get_relations("dbt", "foo")), 3) - self.assertEqual(len(self.cache.get_relations("dbt", "bar")), 2) - self.assertEqual(len(self.cache.get_relations("dbt_2", "foo")), 2) - self.assertEqual(len(self.cache.relations), 7) - - def test_drop_one(self): - # dropping dbt.bar.table2 should only drop itself - self.cache.drop(make_relation("dbt", "bar", "table2")) - self.assertEqual(len(self.cache.get_relations("dbt", "foo")), 3) - self.assertEqual(len(self.cache.get_relations("dbt", "bar")), 1) - self.assertEqual(len(self.cache.get_relations("dbt_2", "foo")), 2) - self.assertEqual(len(self.cache.relations), 6) - - def test_drop_many(self): - # dropping dbt.foo.table1 should drop everything but dbt.bar.table2 and - # dbt_2.foo.table2 - self.cache.drop(make_relation("dbt", "foo", "table1")) - self.assertEqual(len(self.cache.get_relations("dbt", "foo")), 0) - self.assertEqual(len(self.cache.get_relations("dbt", "bar")), 1) - self.assertEqual(len(self.cache.get_relations("dbt_2", "foo")), 1) - self.assertEqual(len(self.cache.relations), 2) - - def test_rename_root(self): - self.cache.rename( - make_relation("dbt", "foo", "table1"), make_relation("dbt", "bar", "table1") - ) - retrieved = self.cache.relations[("dbt", "bar", "table1")].inner - self.assertEqual(retrieved.schema, "bar") - self.assertEqual(retrieved.identifier, "table1") - self.assertEqual(len(self.cache.get_relations("dbt", 
"foo")), 2) - self.assertEqual(len(self.cache.get_relations("dbt", "bar")), 3) - self.assertEqual(len(self.cache.get_relations("dbt_2", "foo")), 2) - self.assertEqual(len(self.cache.relations), 7) - - # make sure drops still cascade from the renamed table - self.cache.drop(make_relation("dbt", "bar", "table1")) - self.assertEqual(len(self.cache.get_relations("dbt", "foo")), 0) - self.assertEqual(len(self.cache.get_relations("dbt", "bar")), 1) - self.assertEqual(len(self.cache.get_relations("dbt_2", "foo")), 1) - self.assertEqual(len(self.cache.relations), 2) - - def test_rename_branch(self): - self.cache.rename( - make_relation("dbt", "foo", "table3"), make_relation("dbt", "foo", "table2") - ) - self.assertEqual(len(self.cache.get_relations("dbt", "foo")), 3) - self.assertEqual(len(self.cache.get_relations("dbt", "bar")), 2) - self.assertEqual(len(self.cache.get_relations("dbt_2", "foo")), 2) - - # make sure drops still cascade through the renamed table - self.cache.drop(make_relation("dbt", "foo", "table1")) - self.assertEqual(len(self.cache.get_relations("dbt", "foo")), 0) - self.assertEqual(len(self.cache.get_relations("dbt", "bar")), 1) - self.assertEqual(len(self.cache.get_relations("dbt_2", "foo")), 1) - self.assertEqual(len(self.cache.relations), 2) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py deleted file mode 100644 index b3e5d166e83..00000000000 --- a/tests/unit/test_config.py +++ /dev/null @@ -1,1365 +0,0 @@ -from copy import deepcopy -from contextlib import contextmanager -import json -import os -import shutil -import tempfile -import unittest -import pytest -from argparse import Namespace - -from unittest import mock -import yaml - -import dbt.config -from dbt.constants import DEPENDENCIES_FILE_NAME, PACKAGES_FILE_NAME -import dbt.exceptions -import dbt.tracking -from dbt import flags -from dbt.adapters.factory import load_plugin -from dbt.adapters.postgres import PostgresCredentials -from dbt.adapters.contracts.connection import QueryComment, DEFAULT_QUERY_COMMENT -from dbt.contracts.project import PackageConfig, LocalPackage, GitPackage -from dbt.node_types import NodeType -from dbt_common.semver import VersionSpecifier -import dbt_common.exceptions -from dbt.task.base import ConfiguredTask - -from dbt.flags import set_from_args -from dbt.tests.util import safe_set_invocation_context - -from .utils import normalize - -INITIAL_ROOT = os.getcwd() - - -@contextmanager -def temp_cd(path): - current_path = os.getcwd() - os.chdir(path) - try: - yield - finally: - os.chdir(current_path) - - -@contextmanager -def raises_nothing(): - yield - - -def empty_profile_renderer(): - return dbt.config.renderer.ProfileRenderer({}) - - -def empty_project_renderer(): - return dbt.config.renderer.DbtProjectYamlRenderer() - - -model_config = { - "my_package_name": { - "enabled": True, - "adwords": { - "adwords_ads": {"materialized": "table", "enabled": True, "schema": "analytics"} - }, - "snowplow": { - "snowplow_sessions": { - "sort": "timestamp", - "materialized": "incremental", - "dist": "user_id", - "unique_key": "id", - }, - "base": { - "snowplow_events": { - "sort": ["timestamp", "userid"], - "materialized": "table", - "sort_type": "interleaved", - "dist": "userid", - } - }, - }, - } -} - -model_fqns = frozenset( - ( - ("my_package_name", "snowplow", "snowplow_sessions"), - ("my_package_name", "snowplow", "base", "snowplow_events"), - ("my_package_name", "adwords", "adwords_ads"), - ) -) - - -class Args: - def __init__( - self, - profiles_dir=None, - threads=None, - 
profile=None, - cli_vars=None, - version_check=None, - project_dir=None, - target=None, - ): - self.profile = profile - self.threads = threads - self.target = target - if profiles_dir is not None: - self.profiles_dir = profiles_dir - flags.PROFILES_DIR = profiles_dir - if cli_vars is not None: - self.vars = cli_vars - if version_check is not None: - self.version_check = version_check - if project_dir is not None: - self.project_dir = project_dir - - -class BaseConfigTest(unittest.TestCase): - """Subclass this, and before calling the superclass setUp, set - self.profiles_dir and self.project_dir. - """ - - def setUp(self): - # Write project - self.project_dir = normalize(tempfile.mkdtemp()) - self.default_project_data = { - "version": "0.0.1", - "name": "my_test_project", - "profile": "default", - } - self.write_project(self.default_project_data) - - # Write profile - self.profiles_dir = normalize(tempfile.mkdtemp()) - self.default_profile_data = { - "default": { - "outputs": { - "postgres": { - "type": "postgres", - "host": "postgres-db-hostname", - "port": 5555, - "user": "db_user", - "pass": "db_pass", - "dbname": "postgres-db-name", - "schema": "postgres-schema", - "threads": 7, - }, - "with-vars": { - "type": "{{ env_var('env_value_type') }}", - "host": "{{ env_var('env_value_host') }}", - "port": "{{ env_var('env_value_port') | as_number }}", - "user": "{{ env_var('env_value_user') }}", - "pass": "{{ env_var('env_value_pass') }}", - "dbname": "{{ env_var('env_value_dbname') }}", - "schema": "{{ env_var('env_value_schema') }}", - }, - "cli-and-env-vars": { - "type": "{{ env_var('env_value_type') }}", - "host": "{{ var('cli_value_host') }}", - "port": "{{ env_var('env_value_port') | as_number }}", - "user": "{{ env_var('env_value_user') }}", - "pass": "{{ env_var('env_value_pass') }}", - "dbname": "{{ env_var('env_value_dbname') }}", - "schema": "{{ env_var('env_value_schema') }}", - }, - }, - "target": "postgres", - }, - "other": { - "outputs": { - "other-postgres": { - "type": "postgres", - "host": "other-postgres-db-hostname", - "port": 4444, - "user": "other_db_user", - "pass": "other_db_pass", - "dbname": "other-postgres-db-name", - "schema": "other-postgres-schema", - "threads": 2, - } - }, - "target": "other-postgres", - }, - "empty_profile_data": {}, - } - self.write_profile(self.default_profile_data) - - self.args = Namespace( - profiles_dir=self.profiles_dir, - cli_vars={}, - version_check=True, - project_dir=self.project_dir, - target=None, - threads=None, - profile=None, - ) - set_from_args(self.args, None) - self.env_override = { - "env_value_type": "postgres", - "env_value_host": "env-postgres-host", - "env_value_port": "6543", - "env_value_user": "env-postgres-user", - "env_value_pass": "env-postgres-pass", - "env_value_dbname": "env-postgres-dbname", - "env_value_schema": "env-postgres-schema", - "env_value_profile": "default", - } - - def assertRaisesOrReturns(self, exc): - if exc is None: - return raises_nothing() - else: - return self.assertRaises(exc) - - def tearDown(self): - try: - shutil.rmtree(self.project_dir) - except EnvironmentError: - pass - try: - shutil.rmtree(self.profiles_dir) - except EnvironmentError: - pass - - def project_path(self, name): - return os.path.join(self.project_dir, name) - - def profile_path(self, name): - return os.path.join(self.profiles_dir, name) - - def write_project(self, project_data=None): - if project_data is None: - project_data = self.project_data - with open(self.project_path("dbt_project.yml"), "w") as fp: - 
yaml.dump(project_data, fp) - - def write_packages(self, package_data): - with open(self.project_path("packages.yml"), "w") as fp: - yaml.dump(package_data, fp) - - def write_profile(self, profile_data=None): - if profile_data is None: - profile_data = self.profile_data - with open(self.profile_path("profiles.yml"), "w") as fp: - yaml.dump(profile_data, fp) - - def write_empty_profile(self): - with open(self.profile_path("profiles.yml"), "w") as fp: - yaml.dump("", fp) - - -class TestProfile(BaseConfigTest): - def from_raw_profiles(self): - renderer = empty_profile_renderer() - return dbt.config.Profile.from_raw_profiles(self.default_profile_data, "default", renderer) - - def test_from_raw_profiles(self): - profile = self.from_raw_profiles() - self.assertEqual(profile.profile_name, "default") - self.assertEqual(profile.target_name, "postgres") - self.assertEqual(profile.threads, 7) - self.assertTrue(isinstance(profile.credentials, PostgresCredentials)) - self.assertEqual(profile.credentials.type, "postgres") - self.assertEqual(profile.credentials.host, "postgres-db-hostname") - self.assertEqual(profile.credentials.port, 5555) - self.assertEqual(profile.credentials.user, "db_user") - self.assertEqual(profile.credentials.password, "db_pass") - self.assertEqual(profile.credentials.schema, "postgres-schema") - self.assertEqual(profile.credentials.database, "postgres-db-name") - - def test_missing_type(self): - del self.default_profile_data["default"]["outputs"]["postgres"]["type"] - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - self.from_raw_profiles() - self.assertIn("type", str(exc.exception)) - self.assertIn("postgres", str(exc.exception)) - self.assertIn("default", str(exc.exception)) - - def test_bad_type(self): - self.default_profile_data["default"]["outputs"]["postgres"]["type"] = "invalid" - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - self.from_raw_profiles() - self.assertIn("Credentials", str(exc.exception)) - self.assertIn("postgres", str(exc.exception)) - self.assertIn("default", str(exc.exception)) - - def test_invalid_credentials(self): - del self.default_profile_data["default"]["outputs"]["postgres"]["host"] - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - self.from_raw_profiles() - self.assertIn("Credentials", str(exc.exception)) - self.assertIn("postgres", str(exc.exception)) - self.assertIn("default", str(exc.exception)) - - def test_missing_target(self): - profile = self.default_profile_data["default"] - del profile["target"] - profile["outputs"]["default"] = profile["outputs"]["postgres"] - profile = self.from_raw_profiles() - self.assertEqual(profile.profile_name, "default") - self.assertEqual(profile.target_name, "default") - self.assertEqual(profile.credentials.type, "postgres") - - def test_extra_path(self): - self.default_project_data.update( - { - "model-paths": ["models"], - "source-paths": ["other-models"], - } - ) - with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: - project_from_config_norender(self.default_project_data, project_root=self.project_dir) - - self.assertIn("source-paths and model-paths", str(exc.exception)) - self.assertIn("cannot both be defined.", str(exc.exception)) - - def test_profile_invalid_project(self): - renderer = empty_profile_renderer() - with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: - dbt.config.Profile.from_raw_profiles( - self.default_profile_data, "invalid-profile", renderer - ) - - self.assertEqual(exc.exception.result_type, "invalid_project") - 
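Aside on the deleted fixture pattern: each of these profile tests builds a plain dict, breaks exactly one field, and then asserts that the resulting DbtProfileError names the offending profile and output. A condensed sketch of test_missing_type, assuming the pre-1.8 module layout (dbt.config.Profile, ProfileRenderer) used throughout this file:

import pytest

import dbt.config
import dbt.exceptions


def test_missing_type_names_the_output():
    raw = {
        "default": {
            "target": "postgres",
            # the "type" key is deliberately absent from this output
            "outputs": {"postgres": {"host": "postgres-db-hostname"}},
        }
    }
    renderer = dbt.config.renderer.ProfileRenderer({})
    with pytest.raises(dbt.exceptions.DbtProfileError) as exc:
        dbt.config.Profile.from_raw_profiles(raw, "default", renderer)
    # assert on the message, not just the type: these errors are
    # user-facing configuration feedback
    assert "postgres" in str(exc.value)
    assert "default" in str(exc.value)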
self.assertIn("Could not find", str(exc.exception)) - self.assertIn("invalid-profile", str(exc.exception)) - - def test_profile_invalid_target(self): - renderer = empty_profile_renderer() - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - dbt.config.Profile.from_raw_profiles( - self.default_profile_data, "default", renderer, target_override="nope" - ) - - self.assertIn("nope", str(exc.exception)) - self.assertIn("- postgres", str(exc.exception)) - self.assertIn("- with-vars", str(exc.exception)) - - def test_no_outputs(self): - renderer = empty_profile_renderer() - - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - dbt.config.Profile.from_raw_profiles( - {"some-profile": {"target": "blah"}}, "some-profile", renderer - ) - self.assertIn("outputs not specified", str(exc.exception)) - self.assertIn("some-profile", str(exc.exception)) - - def test_neq(self): - profile = self.from_raw_profiles() - self.assertNotEqual(profile, object()) - - def test_eq(self): - renderer = empty_profile_renderer() - profile = dbt.config.Profile.from_raw_profiles( - deepcopy(self.default_profile_data), "default", renderer - ) - - other = dbt.config.Profile.from_raw_profiles( - deepcopy(self.default_profile_data), "default", renderer - ) - self.assertEqual(profile, other) - - def test_invalid_env_vars(self): - self.env_override["env_value_port"] = "hello" - with mock.patch.dict(os.environ, self.env_override): - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - safe_set_invocation_context() - renderer = empty_profile_renderer() - dbt.config.Profile.from_raw_profile_info( - self.default_profile_data["default"], - "default", - renderer, - target_override="with-vars", - ) - self.assertIn("Could not convert value 'hello' into type 'number'", str(exc.exception)) - - -class TestProfileFile(BaseConfigTest): - def from_raw_profile_info(self, raw_profile=None, profile_name="default", **kwargs): - if raw_profile is None: - raw_profile = self.default_profile_data["default"] - renderer = empty_profile_renderer() - kw = { - "raw_profile": raw_profile, - "profile_name": profile_name, - "renderer": renderer, - } - kw.update(kwargs) - return dbt.config.Profile.from_raw_profile_info(**kw) - - def from_args(self, project_profile_name="default", **kwargs): - kw = { - "project_profile_name": project_profile_name, - "renderer": empty_profile_renderer(), - "threads_override": self.args.threads, - "target_override": self.args.target, - "profile_name_override": self.args.profile, - } - kw.update(kwargs) - return dbt.config.Profile.render(**kw) - - def test_profile_simple(self): - profile = self.from_args() - from_raw = self.from_raw_profile_info() - - self.assertEqual(profile.profile_name, "default") - self.assertEqual(profile.target_name, "postgres") - self.assertEqual(profile.threads, 7) - self.assertTrue(isinstance(profile.credentials, PostgresCredentials)) - self.assertEqual(profile.credentials.type, "postgres") - self.assertEqual(profile.credentials.host, "postgres-db-hostname") - self.assertEqual(profile.credentials.port, 5555) - self.assertEqual(profile.credentials.user, "db_user") - self.assertEqual(profile.credentials.password, "db_pass") - self.assertEqual(profile.credentials.schema, "postgres-schema") - self.assertEqual(profile.credentials.database, "postgres-db-name") - self.assertEqual(profile, from_raw) - - def test_profile_override(self): - self.args.profile = "other" - self.args.threads = 3 - set_from_args(self.args, None) - profile = self.from_args() - from_raw = 
self.from_raw_profile_info( - self.default_profile_data["other"], - "other", - threads_override=3, - ) - - self.assertEqual(profile.profile_name, "other") - self.assertEqual(profile.target_name, "other-postgres") - self.assertEqual(profile.threads, 3) - self.assertTrue(isinstance(profile.credentials, PostgresCredentials)) - self.assertEqual(profile.credentials.type, "postgres") - self.assertEqual(profile.credentials.host, "other-postgres-db-hostname") - self.assertEqual(profile.credentials.port, 4444) - self.assertEqual(profile.credentials.user, "other_db_user") - self.assertEqual(profile.credentials.password, "other_db_pass") - self.assertEqual(profile.credentials.schema, "other-postgres-schema") - self.assertEqual(profile.credentials.database, "other-postgres-db-name") - self.assertEqual(profile, from_raw) - - def test_env_vars(self): - self.args.target = "with-vars" - with mock.patch.dict(os.environ, self.env_override): - safe_set_invocation_context() # reset invocation context with new env - profile = self.from_args() - from_raw = self.from_raw_profile_info(target_override="with-vars") - - self.assertEqual(profile.profile_name, "default") - self.assertEqual(profile.target_name, "with-vars") - self.assertEqual(profile.threads, 1) - self.assertEqual(profile.credentials.type, "postgres") - self.assertEqual(profile.credentials.host, "env-postgres-host") - self.assertEqual(profile.credentials.port, 6543) - self.assertEqual(profile.credentials.user, "env-postgres-user") - self.assertEqual(profile.credentials.password, "env-postgres-pass") - self.assertEqual(profile, from_raw) - - def test_env_vars_env_target(self): - self.default_profile_data["default"]["target"] = "{{ env_var('env_value_target') }}" - self.write_profile(self.default_profile_data) - self.env_override["env_value_target"] = "with-vars" - with mock.patch.dict(os.environ, self.env_override): - safe_set_invocation_context() # reset invocation context with new env - profile = self.from_args() - from_raw = self.from_raw_profile_info(target_override="with-vars") - - self.assertEqual(profile.profile_name, "default") - self.assertEqual(profile.target_name, "with-vars") - self.assertEqual(profile.threads, 1) - self.assertEqual(profile.credentials.type, "postgres") - self.assertEqual(profile.credentials.host, "env-postgres-host") - self.assertEqual(profile.credentials.port, 6543) - self.assertEqual(profile.credentials.user, "env-postgres-user") - self.assertEqual(profile.credentials.password, "env-postgres-pass") - self.assertEqual(profile, from_raw) - - def test_invalid_env_vars(self): - self.env_override["env_value_port"] = "hello" - self.args.target = "with-vars" - with mock.patch.dict(os.environ, self.env_override): - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - safe_set_invocation_context() # reset invocation context with new env - self.from_args() - - self.assertIn("Could not convert value 'hello' into type 'number'", str(exc.exception)) - - def test_cli_and_env_vars(self): - self.args.target = "cli-and-env-vars" - self.args.vars = {"cli_value_host": "cli-postgres-host"} - renderer = dbt.config.renderer.ProfileRenderer({"cli_value_host": "cli-postgres-host"}) - with mock.patch.dict(os.environ, self.env_override): - safe_set_invocation_context() # reset invocation context with new env - profile = self.from_args(renderer=renderer) - from_raw = self.from_raw_profile_info( - target_override="cli-and-env-vars", - renderer=renderer, - ) - - self.assertEqual(profile.profile_name, "default") - 
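The with-vars targets in these tests resolve entirely from the environment, which is why each one wraps its work in mock.patch.dict(os.environ, ...) and then calls safe_set_invocation_context (per the comments above, dbt appears to snapshot env vars per invocation, so a stale context would hide the patched values). A minimal, dbt-free sketch of the patching idiom itself:

import os
from unittest import mock


def test_env_patch_is_scoped():
    # assuming ENV_VALUE_PORT is not already set in the real environment
    with mock.patch.dict(os.environ, {"ENV_VALUE_PORT": "6543"}):
        assert os.environ["ENV_VALUE_PORT"] == "6543"
    # patch.dict restores the original environment on exit, so tests
    # cannot leak fake credentials into one another
    assert "ENV_VALUE_PORT" not in os.environ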
self.assertEqual(profile.target_name, "cli-and-env-vars") - self.assertEqual(profile.threads, 1) - self.assertEqual(profile.credentials.type, "postgres") - self.assertEqual(profile.credentials.host, "cli-postgres-host") - self.assertEqual(profile.credentials.port, 6543) - self.assertEqual(profile.credentials.user, "env-postgres-user") - self.assertEqual(profile.credentials.password, "env-postgres-pass") - self.assertEqual(profile, from_raw) - - def test_no_profile(self): - with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: - self.from_args(project_profile_name=None) - self.assertIn("no profile was specified", str(exc.exception)) - - def test_empty_profile(self): - self.write_empty_profile() - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - self.from_args() - self.assertIn("profiles.yml is empty", str(exc.exception)) - - def test_profile_with_empty_profile_data(self): - renderer = empty_profile_renderer() - with self.assertRaises(dbt.exceptions.DbtProfileError) as exc: - dbt.config.Profile.from_raw_profiles( - self.default_profile_data, "empty_profile_data", renderer - ) - self.assertIn("Profile empty_profile_data in profiles.yml is empty", str(exc.exception)) - - -def project_from_config_norender( - cfg, packages=None, project_root="/invalid-root-path", verify_version=False -): - if packages is None: - packages = {} - partial = dbt.config.project.PartialProject.from_dicts( - project_root, - project_dict=cfg, - packages_dict=packages, - selectors_dict={}, - verify_version=verify_version, - ) - # no rendering ... Why? - partial.project_dict["project-root"] = project_root - rendered = dbt.config.project.RenderComponents( - project_dict=partial.project_dict, - packages_dict=partial.packages_dict, - selectors_dict=partial.selectors_dict, - ) - return partial.create_project(rendered) - - -def project_from_config_rendered( - cfg, - packages=None, - project_root="/invalid-root-path", - verify_version=False, - packages_specified_path=PACKAGES_FILE_NAME, -): - if packages is None: - packages = {} - partial = dbt.config.project.PartialProject.from_dicts( - project_root, - project_dict=cfg, - packages_dict=packages, - selectors_dict={}, - verify_version=verify_version, - packages_specified_path=packages_specified_path, - ) - return partial.render(empty_project_renderer()) - - -class TestProject(BaseConfigTest): - def test_defaults(self): - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project.project_name, "my_test_project") - self.assertEqual(project.version, "0.0.1") - self.assertEqual(project.profile_name, "default") - self.assertEqual(project.project_root, self.project_dir) - self.assertEqual(project.model_paths, ["models"]) - self.assertEqual(project.macro_paths, ["macros"]) - self.assertEqual(project.seed_paths, ["seeds"]) - self.assertEqual(project.test_paths, ["tests"]) - self.assertEqual(project.analysis_paths, ["analyses"]) - self.assertEqual( - set(project.docs_paths), set(["models", "seeds", "snapshots", "analyses", "macros"]) - ) - self.assertEqual(project.asset_paths, []) - self.assertEqual(project.target_path, "target") - self.assertEqual(project.clean_targets, ["target"]) - self.assertEqual(project.log_path, "logs") - self.assertEqual(project.packages_install_path, "dbt_packages") - self.assertEqual(project.quoting, {}) - self.assertEqual(project.models, {}) - self.assertEqual(project.on_run_start, []) - self.assertEqual(project.on_run_end, []) - self.assertEqual(project.seeds, {}) 
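A note on the two helpers defined above: project_from_config_norender deliberately skips Jinja rendering, presumably so tests can assert on the raw config verbatim, while project_from_config_rendered runs the dicts through an empty DbtProjectYamlRenderer. A sketch of the unrendered path, assuming the PartialProject.from_dicts API shown in those helpers:

import dbt.config

cfg = {
    "name": "my_test_project",
    "version": "0.0.1",
    "profile": "{{ env_var('env_value_profile') }}",
}
partial = dbt.config.project.PartialProject.from_dicts(
    "/invalid-root-path",
    project_dict=cfg,
    packages_dict={},
    selectors_dict={},
    verify_version=False,
)
# no renderer has run, so the Jinja placeholder survives verbatim
assert partial.project_dict["profile"] == "{{ env_var('env_value_profile') }}"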
- self.assertEqual(project.dbt_version, [VersionSpecifier.from_version_string(">=0.0.0")]) - self.assertEqual(project.packages, PackageConfig(packages=[])) - # just make sure str() doesn't crash anything, that's always - # embarrassing - str(project) - - def test_eq(self): - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - other = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project, other) - - def test_neq(self): - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertNotEqual(project, object()) - - def test_implicit_overrides(self): - self.default_project_data.update( - { - "model-paths": ["other-models"], - } - ) - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual( - set(project.docs_paths), - set(["other-models", "seeds", "snapshots", "analyses", "macros"]), - ) - - def test_hashed_name(self): - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project.hashed_name(), "754cd47eac1d6f50a5f7cd399ec43da4") - - def test_all_overrides(self): - # log-path is not tested because it is set exclusively from flags, not cfg - self.default_project_data.update( - { - "model-paths": ["other-models"], - "macro-paths": ["other-macros"], - "seed-paths": ["other-seeds"], - "test-paths": ["other-tests"], - "analysis-paths": ["other-analyses"], - "docs-paths": ["docs"], - "asset-paths": ["other-assets"], - "clean-targets": ["another-target"], - "packages-install-path": "other-dbt_packages", - "quoting": {"identifier": False}, - "models": { - "pre-hook": ["{{ logging.log_model_start_event() }}"], - "post-hook": ["{{ logging.log_model_end_event() }}"], - "my_test_project": { - "first": { - "enabled": False, - "sub": { - "enabled": True, - }, - }, - "second": { - "materialized": "table", - }, - }, - "third_party": { - "third": { - "materialized": "view", - }, - }, - }, - "on-run-start": [ - "{{ logging.log_run_start_event() }}", - ], - "on-run-end": [ - "{{ logging.log_run_end_event() }}", - ], - "seeds": { - "my_test_project": { - "enabled": True, - "schema": "seed_data", - "post-hook": "grant select on {{ this }} to bi_user", - }, - }, - "data_tests": {"my_test_project": {"fail_calc": "sum(failures)"}}, - "require-dbt-version": ">=0.1.0", - } - ) - packages = { - "packages": [ - { - "local": "foo", - }, - {"git": "git@example.com:dbt-labs/dbt-utils.git", "revision": "test-rev"}, - ], - } - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir, packages=packages - ) - self.assertEqual(project.project_name, "my_test_project") - self.assertEqual(project.version, "0.0.1") - self.assertEqual(project.profile_name, "default") - self.assertEqual(project.model_paths, ["other-models"]) - self.assertEqual(project.macro_paths, ["other-macros"]) - self.assertEqual(project.seed_paths, ["other-seeds"]) - self.assertEqual(project.test_paths, ["other-tests"]) - self.assertEqual(project.analysis_paths, ["other-analyses"]) - self.assertEqual(project.docs_paths, ["docs"]) - self.assertEqual(project.asset_paths, ["other-assets"]) - self.assertEqual(project.clean_targets, ["another-target"]) - self.assertEqual(project.packages_install_path, "other-dbt_packages") - self.assertEqual(project.quoting, {"identifier": False}) - self.assertEqual( - project.models, - { - 
"pre-hook": ["{{ logging.log_model_start_event() }}"], - "post-hook": ["{{ logging.log_model_end_event() }}"], - "my_test_project": { - "first": { - "enabled": False, - "sub": { - "enabled": True, - }, - }, - "second": { - "materialized": "table", - }, - }, - "third_party": { - "third": { - "materialized": "view", - }, - }, - }, - ) - self.assertEqual(project.on_run_start, ["{{ logging.log_run_start_event() }}"]) - self.assertEqual(project.on_run_end, ["{{ logging.log_run_end_event() }}"]) - self.assertEqual( - project.seeds, - { - "my_test_project": { - "enabled": True, - "schema": "seed_data", - "post-hook": "grant select on {{ this }} to bi_user", - }, - }, - ) - self.assertEqual( - project.data_tests, - { - "my_test_project": {"fail_calc": "sum(failures)"}, - }, - ) - self.assertEqual(project.dbt_version, [VersionSpecifier.from_version_string(">=0.1.0")]) - self.assertEqual( - project.packages, - PackageConfig( - packages=[ - LocalPackage(local="foo", unrendered={"local": "foo"}), - GitPackage( - git="git@example.com:dbt-labs/dbt-utils.git", - revision="test-rev", - unrendered={ - "git": "git@example.com:dbt-labs/dbt-utils.git", - "revision": "test-rev", - }, - ), - ] - ), - ) - str(project) # this does the equivalent of project.to_project_config(with_packages=True) - json.dumps(project.to_project_config()) - - def test_string_run_hooks(self): - self.default_project_data.update( - { - "on-run-start": "{{ logging.log_run_start_event() }}", - "on-run-end": "{{ logging.log_run_end_event() }}", - } - ) - project = project_from_config_rendered(self.default_project_data) - self.assertEqual(project.on_run_start, ["{{ logging.log_run_start_event() }}"]) - self.assertEqual(project.on_run_end, ["{{ logging.log_run_end_event() }}"]) - - def test_invalid_project_name(self): - self.default_project_data["name"] = "invalid-project-name" - with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: - project_from_config_norender(self.default_project_data, project_root=self.project_dir) - - self.assertIn("invalid-project-name", str(exc.exception)) - - def test_no_project(self): - os.remove(os.path.join(self.project_dir, "dbt_project.yml")) - renderer = empty_project_renderer() - with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: - dbt.config.Project.from_project_root(self.project_dir, renderer) - - self.assertIn("No dbt_project.yml", str(exc.exception)) - - def test_invalid_version(self): - self.default_project_data["require-dbt-version"] = "hello!" 
- with self.assertRaises(dbt.exceptions.DbtProjectError): - project_from_config_norender(self.default_project_data, project_root=self.project_dir) - - def test_unsupported_version(self): - self.default_project_data["require-dbt-version"] = ">99999.0.0" - # allowed, because the RuntimeConfig checks, not the Project itself - project_from_config_norender(self.default_project_data, project_root=self.project_dir) - - def test_none_values(self): - self.default_project_data.update( - { - "models": None, - "seeds": None, - "on-run-end": None, - "on-run-start": None, - } - ) - project = project_from_config_rendered(self.default_project_data) - self.assertEqual(project.models, {}) - self.assertEqual(project.on_run_start, []) - self.assertEqual(project.on_run_end, []) - self.assertEqual(project.seeds, {}) - - def test_nested_none_values(self): - self.default_project_data.update( - { - "models": {"vars": None, "pre-hook": None, "post-hook": None}, - "seeds": {"vars": None, "pre-hook": None, "post-hook": None, "column_types": None}, - } - ) - project = project_from_config_rendered(self.default_project_data) - self.assertEqual(project.models, {"vars": {}, "pre-hook": [], "post-hook": []}) - self.assertEqual( - project.seeds, {"vars": {}, "pre-hook": [], "post-hook": [], "column_types": {}} - ) - - @pytest.mark.skipif(os.name == "nt", reason="crashes CI for Windows") - def test_cycle(self): - models = {} - models["models"] = models - self.default_project_data.update( - { - "models": models, - } - ) - with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: - project_from_config_rendered(self.default_project_data) - - assert "Cycle detected" in str(exc.exception) - - def test_query_comment_disabled(self): - self.default_project_data.update( - { - "query-comment": None, - } - ) - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project.query_comment.comment, "") - self.assertEqual(project.query_comment.append, False) - - self.default_project_data.update( - { - "query-comment": "", - } - ) - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project.query_comment.comment, "") - self.assertEqual(project.query_comment.append, False) - - def test_default_query_comment(self): - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project.query_comment, QueryComment()) - - def test_default_query_comment_append(self): - self.default_project_data.update( - { - "query-comment": {"append": True}, - } - ) - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project.query_comment.comment, DEFAULT_QUERY_COMMENT) - self.assertEqual(project.query_comment.append, True) - - def test_custom_query_comment_append(self): - self.default_project_data.update( - { - "query-comment": {"comment": "run by user test", "append": True}, - } - ) - project = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project.query_comment.comment, "run by user test") - self.assertEqual(project.query_comment.append, True) - - def test_packages_from_dependencies(self): - packages = { - "packages": [ - { - "git": "{{ env_var('some_package') }}", - "warn-unpinned": True, - } - ], - } - - project = project_from_config_rendered( - self.default_project_data, packages, 
packages_specified_path=DEPENDENCIES_FILE_NAME - ) - git_package = project.packages.packages[0] - # packages did not render because packages_specified_path=DEPENDENCIES_FILE_NAME - assert git_package.git == "{{ env_var('some_package') }}" - - -class TestProjectFile(BaseConfigTest): - def test_from_project_root(self): - renderer = empty_project_renderer() - project = dbt.config.Project.from_project_root(self.project_dir, renderer) - from_config = project_from_config_norender( - self.default_project_data, project_root=self.project_dir - ) - self.assertEqual(project, from_config) - self.assertEqual(project.version, "0.0.1") - self.assertEqual(project.project_name, "my_test_project") - - def test_with_invalid_package(self): - renderer = empty_project_renderer() - self.write_packages({"invalid": ["not a package of any kind"]}) - with self.assertRaises(dbt.exceptions.DbtProjectError): - dbt.config.Project.from_project_root(self.project_dir, renderer) - - -class InheritsFromConfiguredTask(ConfiguredTask): - def run(self): - pass - - -class TestConfiguredTask(BaseConfigTest): - def tearDown(self): - super().tearDown() - # These tests will change the directory to the project path, - # so it's necessary to change it back at the end. - os.chdir(INITIAL_ROOT) - - def test_configured_task_dir_change(self): - self.assertEqual(os.getcwd(), INITIAL_ROOT) - self.assertNotEqual(INITIAL_ROOT, self.project_dir) - InheritsFromConfiguredTask.from_args(self.args) - self.assertEqual(os.path.realpath(os.getcwd()), os.path.realpath(self.project_dir)) - - def test_configured_task_dir_change_with_bad_path(self): - self.args.project_dir = "bad_path" - with self.assertRaises(dbt_common.exceptions.DbtRuntimeError): - InheritsFromConfiguredTask.from_args(self.args) - - -class TestVariableProjectFile(BaseConfigTest): - def setUp(self): - super().setUp() - self.default_project_data["version"] = "{{ var('cli_version') }}" - self.default_project_data["name"] = "blah" - self.default_project_data["profile"] = "{{ env_var('env_value_profile') }}" - self.write_project(self.default_project_data) - - def test_cli_and_env_vars(self): - renderer = dbt.config.renderer.DbtProjectYamlRenderer(None, {"cli_version": "0.1.2"}) - with mock.patch.dict(os.environ, self.env_override): - safe_set_invocation_context() # reset invocation context with new env - project = dbt.config.Project.from_project_root( - self.project_dir, - renderer, - ) - - self.assertEqual(renderer.ctx_obj.env_vars, {"env_value_profile": "default"}) - self.assertEqual(project.version, "0.1.2") - self.assertEqual(project.project_name, "blah") - self.assertEqual(project.profile_name, "default") - - -class TestRuntimeConfig(BaseConfigTest): - def get_project(self): - return project_from_config_norender( - self.default_project_data, - project_root=self.project_dir, - verify_version=self.args.version_check, - ) - - def get_profile(self): - renderer = empty_profile_renderer() - return dbt.config.Profile.from_raw_profiles( - self.default_profile_data, self.default_project_data["profile"], renderer - ) - - def from_parts(self, exc=None): - with self.assertRaisesOrReturns(exc) as err: - project = self.get_project() - profile = self.get_profile() - - result = dbt.config.RuntimeConfig.from_parts(project, profile, self.args) - - if exc is None: - return result - else: - return err - - def test_from_parts(self): - project = self.get_project() - profile = self.get_profile() - config = dbt.config.RuntimeConfig.from_parts(project, profile, self.args) - - self.assertEqual(config.cli_vars, 
{}) - self.assertEqual(config.to_profile_info(), profile.to_profile_info()) - # we should have the default quoting set in the full config, but not in - # the project - # TODO(jeb): Adapters must assert that quoting is populated? - expected_project = project.to_project_config() - self.assertEqual(expected_project["quoting"], {}) - - expected_project["quoting"] = { - "database": True, - "identifier": True, - "schema": True, - } - self.assertEqual(config.to_project_config(), expected_project) - - def test_str(self): - project = self.get_project() - profile = self.get_profile() - config = dbt.config.RuntimeConfig.from_parts(project, profile, {}) - - # to make sure nothing terrible happens - str(config) - - def test_supported_version(self): - self.default_project_data["require-dbt-version"] = ">0.0.0" - conf = self.from_parts() - self.assertEqual(set(x.to_version_string() for x in conf.dbt_version), {">0.0.0"}) - - def test_unsupported_version(self): - self.default_project_data["require-dbt-version"] = ">99999.0.0" - raised = self.from_parts(dbt.exceptions.DbtProjectError) - self.assertIn("This version of dbt is not supported", str(raised.exception)) - - def test_unsupported_version_no_check(self): - self.default_project_data["require-dbt-version"] = ">99999.0.0" - self.args.version_check = False - set_from_args(self.args, None) - conf = self.from_parts() - self.assertEqual(set(x.to_version_string() for x in conf.dbt_version), {">99999.0.0"}) - - def test_supported_version_range(self): - self.default_project_data["require-dbt-version"] = [">0.0.0", "<=99999.0.0"] - conf = self.from_parts() - self.assertEqual( - set(x.to_version_string() for x in conf.dbt_version), {">0.0.0", "<=99999.0.0"} - ) - - def test_unsupported_version_range(self): - self.default_project_data["require-dbt-version"] = [">0.0.0", "<=0.0.1"] - raised = self.from_parts(dbt.exceptions.DbtProjectError) - self.assertIn("This version of dbt is not supported", str(raised.exception)) - - def test_unsupported_version_range_bad_config(self): - self.default_project_data["require-dbt-version"] = [">0.0.0", "<=0.0.1"] - self.default_project_data["some-extra-field-not-allowed"] = True - raised = self.from_parts(dbt.exceptions.DbtProjectError) - self.assertIn("This version of dbt is not supported", str(raised.exception)) - - def test_unsupported_version_range_no_check(self): - self.default_project_data["require-dbt-version"] = [">0.0.0", "<=0.0.1"] - self.args.version_check = False - set_from_args(self.args, None) - conf = self.from_parts() - self.assertEqual( - set(x.to_version_string() for x in conf.dbt_version), {">0.0.0", "<=0.0.1"} - ) - - def test_impossible_version_range(self): - self.default_project_data["require-dbt-version"] = [">99999.0.0", "<=0.0.1"] - raised = self.from_parts(dbt.exceptions.DbtProjectError) - self.assertIn( - "The package version requirement can never be satisfied", str(raised.exception) - ) - - def test_unsupported_version_extra_config(self): - self.default_project_data["some-extra-field-not-allowed"] = True - raised = self.from_parts(dbt.exceptions.DbtProjectError) - self.assertIn("Additional properties are not allowed", str(raised.exception)) - - def test_archive_not_allowed(self): - self.default_project_data["archive"] = [ - { - "source_schema": "a", - "target_schema": "b", - "tables": [ - { - "source_table": "seed", - "target_table": "archive_actual", - "updated_at": "updated_at", - "unique_key": """id || '-' || first_name""", - }, - ], - } - ] - with self.assertRaises(dbt.exceptions.DbtProjectError): - 
self.get_project() - - def test__warn_for_unused_resource_config_paths_empty(self): - project = self.from_parts() - dbt.flags.WARN_ERROR = True - try: - project.warn_for_unused_resource_config_paths( - { - "models": frozenset( - ( - ("my_test_project", "foo", "bar"), - ("my_test_project", "foo", "baz"), - ) - ) - }, - [], - ) - finally: - dbt.flags.WARN_ERROR = False - - -class TestRuntimeConfigWithConfigs(BaseConfigTest): - def setUp(self): - self.profiles_dir = "/invalid-profiles-path" - self.project_dir = "/invalid-root-path" - super().setUp() - self.default_project_data["project-root"] = self.project_dir - self.default_project_data["models"] = { - "enabled": True, - "my_test_project": { - "foo": { - "materialized": "view", - "bar": { - "materialized": "table", - }, - }, - "baz": { - "materialized": "table", - }, - }, - } - self.used = { - "models": frozenset( - ( - ("my_test_project", "foo", "bar"), - ("my_test_project", "foo", "baz"), - ) - ) - } - - def get_project(self): - return project_from_config_norender( - self.default_project_data, project_root=self.project_dir, verify_version=True - ) - - def get_profile(self): - renderer = empty_profile_renderer() - return dbt.config.Profile.from_raw_profiles( - self.default_profile_data, self.default_project_data["profile"], renderer - ) - - def from_parts(self, exc=None): - with self.assertRaisesOrReturns(exc) as err: - project = self.get_project() - profile = self.get_profile() - - result = dbt.config.RuntimeConfig.from_parts(project, profile, self.args) - - if exc is None: - return result - else: - return err - - def test__warn_for_unused_resource_config_paths(self): - project = self.from_parts() - with mock.patch("dbt.config.runtime.warn_or_error") as warn_or_error_patch: - project.warn_for_unused_resource_config_paths(self.used, []) - warn_or_error_patch.assert_called_once() - event = warn_or_error_patch.call_args[0][0] - assert type(event).__name__ == "UnusedResourceConfigPath" - msg = event.message() - expected_msg = "- models.my_test_project.baz" - assert expected_msg in msg - - -class TestRuntimeConfigFiles(BaseConfigTest): - def test_from_args(self): - with temp_cd(self.project_dir): - config = dbt.config.RuntimeConfig.from_args(self.args) - self.assertEqual(config.version, "0.0.1") - self.assertEqual(config.profile_name, "default") - # on osx, for example, these are not necessarily equal due to /private - self.assertTrue(os.path.samefile(config.project_root, self.project_dir)) - self.assertEqual(config.model_paths, ["models"]) - self.assertEqual(config.macro_paths, ["macros"]) - self.assertEqual(config.seed_paths, ["seeds"]) - self.assertEqual(config.test_paths, ["tests"]) - self.assertEqual(config.analysis_paths, ["analyses"]) - self.assertEqual( - set(config.docs_paths), set(["models", "seeds", "snapshots", "analyses", "macros"]) - ) - self.assertEqual(config.asset_paths, []) - self.assertEqual(config.target_path, "target") - self.assertEqual(config.clean_targets, ["target"]) - self.assertEqual(config.log_path, "logs") - self.assertEqual(config.packages_install_path, "dbt_packages") - self.assertEqual(config.quoting, {"database": True, "identifier": True, "schema": True}) - self.assertEqual(config.models, {}) - self.assertEqual(config.on_run_start, []) - self.assertEqual(config.on_run_end, []) - self.assertEqual(config.seeds, {}) - self.assertEqual(config.packages, PackageConfig(packages=[])) - self.assertEqual(config.project_name, "my_test_project") - - -class TestVariableRuntimeConfigFiles(BaseConfigTest): - def setUp(self): 
- super().setUp() - self.default_project_data.update( - { - "version": "{{ var('cli_version') }}", - "name": "blah", - "profile": "{{ env_var('env_value_profile') }}", - "on-run-end": [ - "{{ env_var('env_value_profile') }}", - ], - "models": { - "foo": { - "post-hook": "{{ env_var('env_value_profile') }}", - }, - "bar": { - # just gibberish, make sure it gets interpreted - "materialized": "{{ env_var('env_value_profile') }}", - }, - }, - "seeds": { - "foo": { - "post-hook": "{{ env_var('env_value_profile') }}", - }, - "bar": { - # just gibberish, make sure it gets interpreted - "materialized": "{{ env_var('env_value_profile') }}", - }, - }, - } - ) - self.write_project(self.default_project_data) - - def test_cli_and_env_vars(self): - self.args.target = "cli-and-env-vars" - self.args.vars = {"cli_value_host": "cli-postgres-host", "cli_version": "0.1.2"} - self.args.project_dir = self.project_dir - set_from_args(self.args, None) - with mock.patch.dict(os.environ, self.env_override): - safe_set_invocation_context() # reset invocation context with new env - config = dbt.config.RuntimeConfig.from_args(self.args) - - self.assertEqual(config.version, "0.1.2") - self.assertEqual(config.project_name, "blah") - self.assertEqual(config.profile_name, "default") - self.assertEqual(config.credentials.host, "cli-postgres-host") - self.assertEqual(config.credentials.user, "env-postgres-user") - # make sure hooks are not interpreted - self.assertEqual(config.on_run_end, ["{{ env_var('env_value_profile') }}"]) - self.assertEqual(config.models["foo"]["post-hook"], "{{ env_var('env_value_profile') }}") - self.assertEqual(config.models["bar"]["materialized"], "default") # rendered! - self.assertEqual(config.seeds["foo"]["post-hook"], "{{ env_var('env_value_profile') }}") - self.assertEqual(config.seeds["bar"]["materialized"], "default") # rendered! 
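The assertions above pin down an asymmetry worth naming: on-run-end and post-hook values keep their raw Jinja at parse time (they are rendered later, at execution), while ordinary config values such as materialized are rendered immediately. A standalone toy sketch of that rule follows; the helper names are illustrative stand-ins, not dbt APIs (the real logic lives in the project YAML renderer's keypath checks):

HOOK_KEYS = {"pre-hook", "post-hook", "on-run-start", "on-run-end"}


def render_value(value, env):
    # crude stand-in for Jinja-rendering "{{ env_var('env_value_profile') }}"
    return env.get("env_value_profile", value)


def render_config(node, env, key=None):
    if key in HOOK_KEYS:
        return node  # hooks stay as raw Jinja, rendered at run time instead
    if isinstance(node, dict):
        return {k: render_config(v, env, k) for k, v in node.items()}
    if isinstance(node, str) and "{{" in node:
        return render_value(node, env)
    return node


cfg = {
    "on-run-end": ["{{ env_var('env_value_profile') }}"],
    "models": {"bar": {"materialized": "{{ env_var('env_value_profile') }}"}},
}
rendered = render_config(cfg, {"env_value_profile": "default"})
assert rendered["on-run-end"] == ["{{ env_var('env_value_profile') }}"]  # raw
assert rendered["models"]["bar"]["materialized"] == "default"  # rendered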
- - -class TestVarLookups(unittest.TestCase): - def setUp(self): - self.initial_src_vars = { - # globals - "foo": 123, - "bar": "hello", - # project-scoped - "my_project": { - "bar": "goodbye", - "baz": True, - }, - "other_project": { - "foo": 456, - }, - } - self.src_vars = deepcopy(self.initial_src_vars) - self.dst = {"vars": deepcopy(self.initial_src_vars)} - - self.projects = ["my_project", "other_project", "third_project"] - load_plugin("postgres") - self.local_var_search = mock.MagicMock( - fqn=["my_project", "my_model"], resource_type=NodeType.Model, package_name="my_project" - ) - self.other_var_search = mock.MagicMock( - fqn=["other_project", "model"], - resource_type=NodeType.Model, - package_name="other_project", - ) - self.third_var_search = mock.MagicMock( - fqn=["third_project", "third_model"], - resource_type=NodeType.Model, - package_name="third_project", - ) - - def test_lookups(self): - vars_provider = dbt.config.project.VarProvider(self.initial_src_vars) - - expected = [ - (self.local_var_search, "foo", 123), - (self.other_var_search, "foo", 456), - (self.third_var_search, "foo", 123), - (self.local_var_search, "bar", "goodbye"), - (self.other_var_search, "bar", "hello"), - (self.third_var_search, "bar", "hello"), - (self.local_var_search, "baz", True), - (self.other_var_search, "baz", None), - (self.third_var_search, "baz", None), - ] - for node, key, expected_value in expected: - value = vars_provider.vars_for(node, "postgres").get(key) - assert value == expected_value - - -class TestMultipleProjectFlags(BaseConfigTest): - def setUp(self): - super().setUp() - - self.default_project_data.update( - { - "flags": { - "send_anonymous_usage_data": False, - } - } - ) - self.write_project(self.default_project_data) - - self.default_profile_data.update( - { - "config": { - "send_anonymous_usage_data": False, - } - } - ) - self.write_profile(self.default_profile_data) - - def test_setting_multiple_flags(self): - with pytest.raises(dbt.exceptions.DbtProjectError): - set_from_args(self.args, None) diff --git a/tests/unit/test_contracts_graph_compiled.py b/tests/unit/test_contracts_graph_compiled.py index 843793e0939..7a34f826747 100644 --- a/tests/unit/test_contracts_graph_compiled.py +++ b/tests/unit/test_contracts_graph_compiled.py @@ -8,13 +8,11 @@ from dbt.contracts.graph.nodes import ( DependsOn, GenericTestNode, - InjectedCTE, ModelNode, ModelConfig, - TestConfig, - TestMetadata, - Contract, ) +from dbt.artifacts.resources import TestConfig, TestMetadata +from tests.unit.fixtures import generic_test_node, model_node from dbt.node_types import NodeType from .utils import ( @@ -58,35 +56,7 @@ def basic_uncompiled_model(): @pytest.fixture def basic_compiled_model(): - return ModelNode( - package_name="test", - path="/root/models/foo.sql", - original_file_path="models/foo.sql", - language="sql", - raw_code='select * from {{ ref("other") }}', - name="foo", - resource_type=NodeType.Model, - unique_id="model.test.foo", - fqn=["test", "models", "foo"], - refs=[], - sources=[], - metrics=[], - depends_on=DependsOn(), - description="", - database="test_db", - schema="test_schema", - alias="bar", - tags=[], - config=ModelConfig(), - contract=Contract(), - meta={}, - compiled=True, - extra_ctes=[InjectedCTE("whatever", "select * from other")], - extra_ctes_injected=True, - compiled_code="with whatever as (select * from other) select * from whatever", - checksum=FileHash.from_contents(""), - unrendered_config={}, - ) + return model_node() @pytest.fixture @@ -429,39 +399,7 @@ def 
basic_uncompiled_schema_test_node(): @pytest.fixture def basic_compiled_schema_test_node(): - return GenericTestNode( - package_name="test", - path="/root/x/path.sql", - original_file_path="/root/path.sql", - language="sql", - raw_code='select * from {{ ref("other") }}', - name="foo", - resource_type=NodeType.Test, - unique_id="model.test.foo", - fqn=["test", "models", "foo"], - refs=[], - sources=[], - metrics=[], - depends_on=DependsOn(), - description="", - database="test_db", - schema="dbt_test__audit", - alias="bar", - tags=[], - config=TestConfig(severity="warn"), - contract=Contract(), - meta={}, - compiled=True, - extra_ctes=[InjectedCTE("whatever", "select * from other")], - extra_ctes_injected=True, - compiled_code="with whatever as (select * from other) select * from whatever", - column_name="id", - test_metadata=TestMetadata(namespace=None, name="foo", kwargs={}), - checksum=FileHash.from_contents(""), - unrendered_config={ - "severity": "warn", - }, - ) + return generic_test_node() @pytest.fixture diff --git a/tests/unit/test_contracts_graph_parsed.py b/tests/unit/test_contracts_graph_parsed.py index a7d514fd1be..63800a69b7c 100644 --- a/tests/unit/test_contracts_graph_parsed.py +++ b/tests/unit/test_contracts_graph_parsed.py @@ -19,20 +19,22 @@ Owner, Quoting, RefArgs, + MacroDependsOn, + TestMetadata, SourceConfig, Time, + Hook, ) from dbt.artifacts.resources.types import TimePeriod from dbt.node_types import NodeType, AccessType from dbt.contracts.files import FileHash from dbt.contracts.graph.model_config import ( - ModelConfig, NodeConfig, SeedConfig, TestConfig, SnapshotConfig, EmptySnapshotConfig, - Hook, + ModelConfig, ) from dbt.contracts.graph.nodes import ( ModelNode, @@ -45,11 +47,9 @@ Metric, SeedNode, Docs, - MacroDependsOn, SourceDefinition, Documentation, HookNode, - TestMetadata, SemanticModel, ) from dbt.artifacts.resources import SourceDefinition as SourceDefinitionResource @@ -1939,30 +1939,6 @@ def basic_parsed_source_definition_dict(): } -@pytest.fixture -def basic_parsed_source_definition_object(): - return SourceDefinition( - columns={}, - database="some_db", - description="", - fqn=["test", "source", "my_source", "my_source_table"], - identifier="my_source_table", - loader="stitch", - name="my_source_table", - original_file_path="/root/models/sources.yml", - package_name="test", - path="/root/models/sources.yml", - quoting=Quoting(), - resource_type=NodeType.Source, - schema="some_schema", - source_description="my source description", - source_name="my_source", - unique_id="test.source.my_source.my_source_table", - tags=[], - config=SourceConfig(), - ) - - @pytest.fixture def complex_parsed_source_definition_dict(): return { diff --git a/tests/unit/test_docs_blocks.py b/tests/unit/test_docs_blocks.py index 2a277d82030..cdf9933ce50 100644 --- a/tests/unit/test_docs_blocks.py +++ b/tests/unit/test_docs_blocks.py @@ -10,6 +10,10 @@ from .utils import config_from_parts_or_dicts +from dbt.flags import set_from_args +from argparse import Namespace + +set_from_args(Namespace(WARN_ERROR=False), None) SNOWPLOW_SESSIONS_DOCS = r""" This table contains one record for every session recorded by Snowplow. 
diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 000efdb6b18..e59662982e6 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -153,6 +153,13 @@ def test_event_codes(self): adapter_types.CollectFreshnessReturnSignature(), core_types.TestsConfigDeprecation(deprecated_path="", exp_path=""), core_types.ProjectFlagsMovedDeprecation(), + core_types.SpacesInModelNameDeprecation(model_name="", model_version="", level=""), + core_types.TotalModelNamesWithSpacesDeprecation( + count_invalid_names=1, show_debug_hint=True, level="" + ), + core_types.PackageMaterializationOverrideDeprecation( + package_name="my_package", materialization_name="view" + ), # E - DB Adapter ====================== adapter_types.AdapterEventDebug(), adapter_types.AdapterEventInfo(), @@ -359,6 +366,13 @@ def test_event_codes(self): total=0, execution_time=0, ), + core_types.LogNodeNoOpResult( + description="", + status="", + index=0, + total=0, + execution_time=0, + ), core_types.LogCancelLine(conn_name=""), core_types.DefaultSelector(name=""), core_types.NodeStart(), diff --git a/tests/unit/test_graph.py b/tests/unit/test_graph.py index d4282bcff95..b22b5302c14 100644 --- a/tests/unit/test_graph.py +++ b/tests/unit/test_graph.py @@ -1,6 +1,5 @@ import os -from argparse import Namespace import unittest from unittest.mock import MagicMock, patch @@ -8,31 +7,30 @@ from dbt.adapters.factory import reset_adapters, register_adapter import dbt.compilation import dbt.exceptions -import dbt.flags import dbt.parser import dbt.config import dbt.utils import dbt.parser.manifest from dbt import tracking +from dbt.cli.flags import convert_config from dbt.contracts.files import SourceFile, FileHash, FilePath from dbt.contracts.graph.manifest import MacroManifest, ManifestStateCheck from dbt.contracts.project import ProjectFlags +from dbt.flags import get_flags, set_from_args from dbt.graph import NodeSelector, parse_difference from dbt.events.logging import setup_event_logger from dbt.mp_context import get_mp_context +from queue import Empty +from .utils import config_from_parts_or_dicts, generate_name_macros, inject_plugin -try: - from queue import Empty -except ImportError: - from Queue import Empty +from argparse import Namespace -from .utils import config_from_parts_or_dicts, generate_name_macros, inject_plugin +set_from_args(Namespace(WARN_ERROR=False), None) class GraphTest(unittest.TestCase): def tearDown(self): self.mock_filesystem_search.stop() - self.mock_hook_constructor.stop() self.load_state_check.stop() self.load_source_file_patcher.stop() reset_adapters() @@ -74,17 +72,6 @@ def mock_filesystem_search(project, relative_dirs, extension, ignore_spec): self.mock_filesystem_search = self.filesystem_search.start() self.mock_filesystem_search.side_effect = mock_filesystem_search - # Create HookParser patcher - self.hook_patcher = patch.object(dbt.parser.hooks.HookParser, "__new__") - - def create_hook_patcher(cls, project, manifest, root_project): - result = MagicMock(project=project, manifest=manifest, root_project=root_project) - result.__iter__.side_effect = lambda: iter([]) - return result - - self.mock_hook_constructor = self.hook_patcher.start() - self.mock_hook_constructor.side_effect = create_hook_patcher - # Create the Manifest.state_check patcher @patch("dbt.parser.manifest.ManifestLoader.build_manifest_state_check") def _mock_state_check(self): @@ -117,6 +104,15 @@ def mock_load_source_file(path, parse_file_type, project_name, saved_files): self.mock_source_file.side_effect = 
mock_load_source_file + # Create hookparser source file patcher + self.load_source_file_manifest_patcher = patch("dbt.parser.manifest.load_source_file") + self.mock_source_file_manifest = self.load_source_file_manifest_patcher.start() + + def mock_load_source_file_manifest(path, parse_file_type, project_name, saved_files): + return [] + + self.mock_source_file_manifest.side_effect = mock_load_source_file_manifest + def get_config(self, extra_cfg=None): if extra_cfg is None: extra_cfg = {} @@ -131,9 +127,15 @@ def get_config(self, extra_cfg=None): cfg.update(extra_cfg) config = config_from_parts_or_dicts(project=cfg, profile=self.profile) - dbt.flags.set_from_args(Namespace(), ProjectFlags()) - setup_event_logger(dbt.flags.get_flags()) - object.__setattr__(dbt.flags.get_flags(), "PARTIAL_PARSE", False) + set_from_args(Namespace(), ProjectFlags()) + flags = get_flags() + setup_event_logger(flags) + object.__setattr__(flags, "PARTIAL_PARSE", False) + for arg_name, args_param_value in vars(flags).items(): + args_param_value = convert_config(arg_name, args_param_value) + object.__setattr__(config.args, arg_name.upper(), args_param_value) + object.__setattr__(config.args, arg_name.lower(), args_param_value) + return config def get_compiler(self, project): diff --git a/tests/unit/test_graph_selector_methods.py b/tests/unit/test_graph_selector_methods.py index 0fd9c96fbc9..1a3a16fdafc 100644 --- a/tests/unit/test_graph_selector_methods.py +++ b/tests/unit/test_graph_selector_methods.py @@ -1,47 +1,13 @@ import copy -from dataclasses import replace import pytest +from dataclasses import replace from unittest import mock from pathlib import Path +from dbt.artifacts.resources import ColumnInfo, FileHash +from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.files import FileHash -from dbt.contracts.graph.nodes import ( - DependsOn, - MacroDependsOn, - NodeConfig, - Macro, - ModelNode, - Exposure, - Metric, - Group, - SavedQuery, - SeedNode, - SemanticModel, - SingularTestNode, - GenericTestNode, - SourceDefinition, - TestConfig, - TestMetadata, - AccessType, - UnitTestDefinition, -) -from dbt.contracts.graph.manifest import Manifest, ManifestMetadata -from dbt.artifacts.resources import ( - ColumnInfo, - ExposureType, - MetricInputMeasure, - MetricTypeParams, - NodeRelation, - Owner, - QueryParams, -) -from dbt.contracts.graph.unparsed import ( - UnitTestInputFixture, - UnitTestOutputFixture, -) from dbt.contracts.state import PreviousState -from dbt.node_types import NodeType from dbt.graph.selector_methods import ( MethodManager, QualifiedNameSelectorMethod, @@ -63,852 +29,17 @@ SemanticModelSelectorMethod, ) import dbt_common.exceptions -from dbt_semantic_interfaces.type_enums import MetricType from .utils import replace_config - - -def make_model( - pkg, - name, - sql, - refs=None, - sources=None, - tags=None, - path=None, - alias=None, - config_kwargs=None, - fqn_extras=None, - depends_on_macros=None, - version=None, - latest_version=None, - access=None, -): - if refs is None: - refs = [] - if sources is None: - sources = [] - if tags is None: - tags = [] - if path is None: - path = f"{name}.sql" - if alias is None: - alias = name - if config_kwargs is None: - config_kwargs = {} - if depends_on_macros is None: - depends_on_macros = [] - - if fqn_extras is None: - fqn_extras = [] - - fqn = [pkg] + fqn_extras + [name] - if version: - fqn.append(f"v{version}") - - depends_on_nodes = [] - source_values = [] - ref_values = [] - for ref in refs: - ref_values.append([ref.name]) - 
depends_on_nodes.append(ref.unique_id) - for src in sources: - source_values.append([src.source_name, src.name]) - depends_on_nodes.append(src.unique_id) - - return ModelNode( - language="sql", - raw_code=sql, - database="dbt", - schema="dbt_schema", - alias=alias, - name=name, - fqn=fqn, - unique_id=f"model.{pkg}.{name}" if not version else f"model.{pkg}.{name}.v{version}", - package_name=pkg, - path=path, - original_file_path=f"models/{path}", - config=NodeConfig(**config_kwargs), - tags=tags, - refs=ref_values, - sources=source_values, - depends_on=DependsOn( - nodes=depends_on_nodes, - macros=depends_on_macros, - ), - resource_type=NodeType.Model, - checksum=FileHash.from_contents(""), - version=version, - latest_version=latest_version, - access=access or AccessType.Protected, - ) - - -def make_seed( - pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extras=None, checksum=None -): - if alias is None: - alias = name - if tags is None: - tags = [] - if path is None: - path = f"{name}.csv" - - if fqn_extras is None: - fqn_extras = [] - - if checksum is None: - checksum = FileHash.from_contents("") - - fqn = [pkg] + fqn_extras + [name] - return SeedNode( - database="dbt", - schema="dbt_schema", - alias=alias, - name=name, - fqn=fqn, - unique_id=f"seed.{pkg}.{name}", - package_name=pkg, - path=path, - original_file_path=f"data/{path}", - tags=tags, - resource_type=NodeType.Seed, - checksum=FileHash.from_contents(""), - ) - - -def make_source( - pkg, source_name, table_name, path=None, loader=None, identifier=None, fqn_extras=None -): - if path is None: - path = "models/schema.yml" - if loader is None: - loader = "my_loader" - if identifier is None: - identifier = table_name - - if fqn_extras is None: - fqn_extras = [] - - fqn = [pkg] + fqn_extras + [source_name, table_name] - - return SourceDefinition( - fqn=fqn, - database="dbt", - schema="dbt_schema", - unique_id=f"source.{pkg}.{source_name}.{table_name}", - package_name=pkg, - path=path, - original_file_path=path, - name=table_name, - source_name=source_name, - loader="my_loader", - identifier=identifier, - resource_type=NodeType.Source, - loaded_at_field="loaded_at", - tags=[], - source_description="", - ) - - -def make_macro(pkg, name, macro_sql, path=None, depends_on_macros=None): - if path is None: - path = "macros/macros.sql" - - if depends_on_macros is None: - depends_on_macros = [] - - return Macro( - name=name, - macro_sql=macro_sql, - unique_id=f"macro.{pkg}.{name}", - package_name=pkg, - path=path, - original_file_path=path, - resource_type=NodeType.Macro, - depends_on=MacroDependsOn(macros=depends_on_macros), - ) - - -def make_unique_test(pkg, test_model, column_name, path=None, refs=None, sources=None, tags=None): - return make_generic_test(pkg, "unique", test_model, {}, column_name=column_name) - - -def make_not_null_test( - pkg, test_model, column_name, path=None, refs=None, sources=None, tags=None -): - return make_generic_test(pkg, "not_null", test_model, {}, column_name=column_name) - - -def make_generic_test( - pkg, - test_name, - test_model, - test_kwargs, - path=None, - refs=None, - sources=None, - tags=None, - column_name=None, -): - kwargs = test_kwargs.copy() - ref_values = [] - source_values = [] - # this doesn't really have to be correct - if isinstance(test_model, SourceDefinition): - kwargs["model"] = ( - "{{ source('" + test_model.source_name + "', '" + test_model.name + "') }}" - ) - source_values.append([test_model.source_name, test_model.name]) - else: - kwargs["model"] = "{{ ref('" + 
test_model.name + "')}}" - ref_values.append([test_model.name]) - if column_name is not None: - kwargs["column_name"] = column_name - - # whatever - args_name = test_model.search_name.replace(".", "_") - if column_name is not None: - args_name += "_" + column_name - node_name = f"{test_name}_{args_name}" - raw_code = ( - '{{ config(severity="ERROR") }}{{ test_' + test_name + "(**dbt_schema_test_kwargs) }}" - ) - name_parts = test_name.split(".") - - if len(name_parts) == 2: - namespace, test_name = name_parts - macro_depends = f"macro.{namespace}.test_{test_name}" - elif len(name_parts) == 1: - namespace = None - macro_depends = f"macro.dbt.test_{test_name}" - else: - assert False, f"invalid test name: {test_name}" - - if path is None: - path = "schema.yml" - if tags is None: - tags = ["schema"] - - if refs is None: - refs = [] - if sources is None: - sources = [] - - depends_on_nodes = [] - for ref in refs: - ref_values.append([ref.name]) - depends_on_nodes.append(ref.unique_id) - - for source in sources: - source_values.append([source.source_name, source.name]) - depends_on_nodes.append(source.unique_id) - - return GenericTestNode( - language="sql", - raw_code=raw_code, - test_metadata=TestMetadata( - namespace=namespace, - name=test_name, - kwargs=kwargs, - ), - database="dbt", - schema="dbt_postgres", - name=node_name, - alias=node_name, - fqn=["minimal", "schema_test", node_name], - unique_id=f"test.{pkg}.{node_name}", - package_name=pkg, - path=f"schema_test/{node_name}.sql", - original_file_path=f"models/{path}", - resource_type=NodeType.Test, - tags=tags, - refs=ref_values, - sources=[], - depends_on=DependsOn(macros=[macro_depends], nodes=depends_on_nodes), - column_name=column_name, - checksum=FileHash.from_contents(""), - ) - - -def make_unit_test( - pkg, - test_name, - test_model, -): - input_fixture = UnitTestInputFixture( - input="ref('table_model')", - rows=[{"id": 1, "string_a": "a"}], - ) - output_fixture = UnitTestOutputFixture( - rows=[{"id": 1, "string_a": "a"}], - ) - return UnitTestDefinition( - name=test_name, - model=test_model, - package_name=pkg, - resource_type=NodeType.Unit, - path="unit_tests.yml", - original_file_path="models/unit_tests.yml", - unique_id=f"unit.{pkg}.{test_model.name}__{test_name}", - given=[input_fixture], - expect=output_fixture, - fqn=[pkg, test_model.name, test_name], - ) - - -def make_singular_test( - pkg, name, sql, refs=None, sources=None, tags=None, path=None, config_kwargs=None -): - - if refs is None: - refs = [] - if sources is None: - sources = [] - if tags is None: - tags = ["data"] - if path is None: - path = f"{name}.sql" - - if config_kwargs is None: - config_kwargs = {} - - fqn = ["minimal", "data_test", name] - - depends_on_nodes = [] - source_values = [] - ref_values = [] - for ref in refs: - ref_values.append([ref.name]) - depends_on_nodes.append(ref.unique_id) - for src in sources: - source_values.append([src.source_name, src.name]) - depends_on_nodes.append(src.unique_id) - - return SingularTestNode( - language="sql", - raw_code=sql, - database="dbt", - schema="dbt_schema", - name=name, - alias=name, - fqn=fqn, - unique_id=f"test.{pkg}.{name}", - package_name=pkg, - path=path, - original_file_path=f"tests/{path}", - config=TestConfig(**config_kwargs), - tags=tags, - refs=ref_values, - sources=source_values, - depends_on=DependsOn(nodes=depends_on_nodes, macros=[]), - resource_type=NodeType.Test, - checksum=FileHash.from_contents(""), - ) - - -def make_exposure(pkg, name, path=None, fqn_extras=None, owner=None): - if path 
is None: - path = "schema.yml" - - if fqn_extras is None: - fqn_extras = [] - - if owner is None: - owner = Owner(email="test@example.com") - - fqn = [pkg, "exposures"] + fqn_extras + [name] - return Exposure( - name=name, - resource_type=NodeType.Exposure, - type=ExposureType.Notebook, - fqn=fqn, - unique_id=f"exposure.{pkg}.{name}", - package_name=pkg, - path=path, - original_file_path=path, - owner=owner, - ) - - -def make_metric(pkg, name, path=None): - if path is None: - path = "schema.yml" - - return Metric( - name=name, - resource_type=NodeType.Metric, - path=path, - package_name=pkg, - original_file_path=path, - unique_id=f"metric.{pkg}.{name}", - fqn=[pkg, "metrics", name], - label="New Customers", - description="New customers", - type=MetricType.SIMPLE, - type_params=MetricTypeParams(measure=MetricInputMeasure(name="count_cats")), - meta={"is_okr": True}, - tags=["okrs"], - ) - - -def make_group(pkg, name, path=None): - if path is None: - path = "schema.yml" - - return Group( - name=name, - resource_type=NodeType.Group, - path=path, - package_name=pkg, - original_file_path=path, - unique_id=f"group.{pkg}.{name}", - owner="email@gmail.com", - ) - - -def make_semantic_model(pkg: str, name: str, path=None, model=None): - if path is None: - path = "schema.yml" - - if model is None: - model = name - - node_relation = NodeRelation( - alias=model, - schema_name="dbt", - ) - - return SemanticModel( - name=name, - resource_type=NodeType.SemanticModel, - model=model, - node_relation=node_relation, - package_name=pkg, - path=path, - description="Customer entity", - primary_entity="customer", - unique_id=f"semantic_model.{pkg}.{name}", - original_file_path=path, - fqn=[pkg, "semantic_models", name], - ) - - -def make_saved_query(pkg: str, name: str, metric: str, path=None): - if path is None: - path = "schema.yml" - - return SavedQuery( - name=name, - resource_type=NodeType.SavedQuery, - package_name=pkg, - path=path, - description="Test Saved Query", - query_params=QueryParams( - metrics=[metric], - group_by=[], - where=None, - ), - exports=[], - unique_id=f"saved_query.{pkg}.{name}", - original_file_path=path, - fqn=[pkg, "saved_queries", name], - ) - - -@pytest.fixture -def macro_test_unique(): - return make_macro( - "dbt", "test_unique", "blablabla", depends_on_macros=["macro.dbt.default__test_unique"] - ) - - -@pytest.fixture -def macro_default_test_unique(): - return make_macro("dbt", "default__test_unique", "blablabla") - - -@pytest.fixture -def macro_test_not_null(): - return make_macro( - "dbt", "test_not_null", "blablabla", depends_on_macros=["macro.dbt.default__test_not_null"] - ) - - -@pytest.fixture -def macro_default_test_not_null(): - return make_macro("dbt", "default__test_not_null", "blabla") - - -@pytest.fixture -def seed(): - return make_seed("pkg", "seed") - - -@pytest.fixture -def source(): - return make_source("pkg", "raw", "seed", identifier="seed") - - -@pytest.fixture -def ephemeral_model(source): - return make_model( - "pkg", - "ephemeral_model", - 'select * from {{ source("raw", "seed") }}', - config_kwargs={"materialized": "ephemeral"}, - sources=[source], - ) - - -@pytest.fixture -def view_model(ephemeral_model): - return make_model( - "pkg", - "view_model", - 'select * from {{ ref("ephemeral_model") }}', - config_kwargs={"materialized": "view"}, - refs=[ephemeral_model], - tags=["uses_ephemeral"], - ) - - -@pytest.fixture -def table_model(ephemeral_model): - return make_model( - "pkg", - "table_model", - 'select * from {{ ref("ephemeral_model") }}', - 
config_kwargs={ - "materialized": "table", - "meta": { - # Other properties to test in test_select_config_meta - "string_property": "some_string", - "truthy_bool_property": True, - "falsy_bool_property": False, - "list_property": ["some_value", True, False], - }, - }, - refs=[ephemeral_model], - tags=["uses_ephemeral"], - path="subdirectory/table_model.sql", - ) - - -@pytest.fixture -def table_model_py(seed): - return make_model( - "pkg", - "table_model_py", - 'select * from {{ ref("seed") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - tags=[], - path="subdirectory/table_model.py", - ) - - -@pytest.fixture -def table_model_csv(seed): - return make_model( - "pkg", - "table_model_csv", - 'select * from {{ ref("seed") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - tags=[], - path="subdirectory/table_model.csv", - ) - - -@pytest.fixture -def ext_source(): - return make_source( - "ext", - "ext_raw", - "ext_source", - ) - - -@pytest.fixture -def ext_source_2(): - return make_source( - "ext", - "ext_raw", - "ext_source_2", - ) - - -@pytest.fixture -def ext_source_other(): - return make_source( - "ext", - "raw", - "ext_source", - ) - - -@pytest.fixture -def ext_source_other_2(): - return make_source( - "ext", - "raw", - "ext_source_2", - ) - - -@pytest.fixture -def ext_model(ext_source): - return make_model( - "ext", - "ext_model", - 'select * from {{ source("ext_raw", "ext_source") }}', - sources=[ext_source], - ) - - -@pytest.fixture -def union_model(seed, ext_source): - return make_model( - "pkg", - "union_model", - 'select * from {{ ref("seed") }} union all select * from {{ source("ext_raw", "ext_source") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - sources=[ext_source], - fqn_extras=["unions"], - path="subdirectory/union_model.sql", - tags=["unions"], - ) - - -@pytest.fixture -def versioned_model_v1(seed): - return make_model( - "pkg", - "versioned_model", - 'select * from {{ ref("seed") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - sources=[], - path="subdirectory/versioned_model_v1.sql", - version=1, - latest_version=2, - ) - - -@pytest.fixture -def versioned_model_v2(seed): - return make_model( - "pkg", - "versioned_model", - 'select * from {{ ref("seed") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - sources=[], - path="subdirectory/versioned_model_v2.sql", - version=2, - latest_version=2, - ) - - -@pytest.fixture -def versioned_model_v3(seed): - return make_model( - "pkg", - "versioned_model", - 'select * from {{ ref("seed") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - sources=[], - path="subdirectory/versioned_model_v3.sql", - version="3", - latest_version=2, - ) - - -@pytest.fixture -def versioned_model_v12_string(seed): - return make_model( - "pkg", - "versioned_model", - 'select * from {{ ref("seed") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - sources=[], - path="subdirectory/versioned_model_v12.sql", - version="12", - latest_version=2, - ) - - -@pytest.fixture -def versioned_model_v4_nested_dir(seed): - return make_model( - "pkg", - "versioned_model", - 'select * from {{ ref("seed") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - sources=[], - path="subdirectory/nested_dir/versioned_model_v3.sql", - version="4", - latest_version=2, - fqn_extras=["nested_dir"], - ) - - -@pytest.fixture -def table_id_unique(table_model): - return make_unique_test("pkg", table_model, "id") - - -@pytest.fixture -def table_id_not_null(table_model): - 
return make_not_null_test("pkg", table_model, "id") - - -@pytest.fixture -def view_id_unique(view_model): - return make_unique_test("pkg", view_model, "id") - - -@pytest.fixture -def ext_source_id_unique(ext_source): - return make_unique_test("ext", ext_source, "id") - - -@pytest.fixture -def view_test_nothing(view_model): - return make_singular_test( - "pkg", - "view_test_nothing", - 'select * from {{ ref("view_model") }} limit 0', - refs=[view_model], - ) - - -@pytest.fixture -def unit_test_table_model(table_model): - return make_unit_test( - "pkg", - "unit_test_table_model", - table_model, - ) - - -# Support dots as namespace separators -@pytest.fixture -def namespaced_seed(): - return make_seed("pkg", "mynamespace.seed") - - -@pytest.fixture -def namespace_model(source): - return make_model( - "pkg", - "mynamespace.ephemeral_model", - 'select * from {{ source("raw", "seed") }}', - config_kwargs={"materialized": "ephemeral"}, - sources=[source], - ) - - -@pytest.fixture -def namespaced_union_model(seed, ext_source): - return make_model( - "pkg", - "mynamespace.union_model", - 'select * from {{ ref("mynamespace.seed") }} union all select * from {{ ref("mynamespace.ephemeral_model") }}', - config_kwargs={"materialized": "table"}, - refs=[seed], - sources=[ext_source], - fqn_extras=["unions"], - path="subdirectory/union_model.sql", - tags=["unions"], - ) - - -@pytest.fixture -def manifest( - seed, - source, - ephemeral_model, - view_model, - table_model, - table_model_py, - table_model_csv, - ext_source, - ext_model, - union_model, - versioned_model_v1, - versioned_model_v2, - versioned_model_v3, - versioned_model_v4_nested_dir, - versioned_model_v12_string, - ext_source_2, - ext_source_other, - ext_source_other_2, - table_id_unique, - table_id_not_null, - view_id_unique, - ext_source_id_unique, - view_test_nothing, - namespaced_seed, - namespace_model, - namespaced_union_model, - macro_test_unique, - macro_default_test_unique, - macro_test_not_null, - macro_default_test_not_null, - unit_test_table_model, -): - nodes = [ - seed, - ephemeral_model, - view_model, - table_model, - table_model_py, - table_model_csv, - union_model, - versioned_model_v1, - versioned_model_v2, - versioned_model_v3, - versioned_model_v4_nested_dir, - versioned_model_v12_string, - ext_model, - table_id_unique, - table_id_not_null, - view_id_unique, - ext_source_id_unique, - view_test_nothing, - namespaced_seed, - namespace_model, - namespaced_union_model, - ] - sources = [source, ext_source, ext_source_2, ext_source_other, ext_source_other_2] - macros = [ - macro_test_unique, - macro_default_test_unique, - macro_test_not_null, - macro_default_test_not_null, - ] - unit_tests = [unit_test_table_model] - manifest = Manifest( - nodes={n.unique_id: n for n in nodes}, - sources={s.unique_id: s for s in sources}, - macros={m.unique_id: m for m in macros}, - unit_tests={t.unique_id: t for t in unit_tests}, - semantic_models={}, - docs={}, - files={}, - exposures={}, - metrics={}, - disabled=[], - selectors={}, - groups={}, - metadata=ManifestMetadata(adapter_type="postgres"), - ) - return manifest +from tests.unit.utils.manifest import ( + make_model, + make_seed, + make_exposure, + make_metric, + make_saved_query, + make_semantic_model, + make_group, + make_macro, +) def search_manifest_using_method(manifest, method, selection): @@ -1044,6 +175,7 @@ def test_select_group(manifest, view_model): assert method.arguments == [] assert search_manifest_using_method(manifest, method, group_name) == {"view_model"} + assert 
search_manifest_using_method(manifest, method, "my?group") == {"view_model"} assert not search_manifest_using_method(manifest, method, "not_my_group") @@ -1360,11 +492,11 @@ def test_select_metric(manifest): assert search_manifest_using_method(manifest, method, "*_metric") == {"my_metric"} -def test_select_semantic_model(manifest): +def test_select_semantic_model(manifest, table_model): semantic_model = make_semantic_model( "pkg", "customer", - model="customers", + model=table_model, path="_semantic_models.yml", ) manifest.semantic_models[semantic_model.unique_id] = semantic_model @@ -1376,11 +508,11 @@ def test_select_semantic_model(manifest): assert search_manifest_using_method(manifest, method, "*omer") == {"customer"} -def test_select_semantic_model_by_tag(manifest): +def test_select_semantic_model_by_tag(manifest, table_model): semantic_model = make_semantic_model( "pkg", "customer", - model="customers", + model=table_model, path="_semantic_models.yml", ) manifest.semantic_models[semantic_model.unique_id] = semantic_model @@ -1434,7 +566,7 @@ def previous_state(manifest): target_path=Path("/path/does/not/exist"), project_root=Path("/path/does/not/exist"), ) - state.manifest = writable + state.manifest = Manifest.from_writable_manifest(writable) return state diff --git a/tests/unit/test_infer_primary_key.py b/tests/unit/test_infer_primary_key.py new file mode 100644 index 00000000000..4afa2bf4652 --- /dev/null +++ b/tests/unit/test_infer_primary_key.py @@ -0,0 +1,200 @@ +from dbt_common.contracts.constraints import ( + ConstraintType, + ModelLevelConstraint, + ColumnLevelConstraint, +) + +from .fixtures import model_node, generic_test_node + +from dbt.contracts.graph.model_config import ( + TestConfig, +) +from dbt.contracts.graph.nodes import ( + ColumnInfo, +) +from dbt.artifacts.resources import TestMetadata + + +def test_no_primary_key(): + model = model_node() + assert model.infer_primary_key([]) == [] + + +def test_primary_key_model_constraint(): + model = model_node() + model.constraints = [ModelLevelConstraint(type=ConstraintType.primary_key, columns=["pk"])] + assertSameContents(model.infer_primary_key([]), ["pk"]) + + model.constraints = [ + ModelLevelConstraint(type=ConstraintType.primary_key, columns=["pk1", "pk2"]) + ] + assertSameContents(model.infer_primary_key([]), ["pk1", "pk2"]) + + +def test_primary_key_column_constraint(): + model = model_node() + model.columns = { + "column1": ColumnInfo( + "column1", constraints=[ColumnLevelConstraint(type=ConstraintType.primary_key)] + ), + "column2": ColumnInfo("column2"), + } + assertSameContents(model.infer_primary_key([]), ["column1"]) + + +def test_unique_non_null_single(): + model = model_node() + test1 = generic_test_node() + test1.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column1"}) + test2 = generic_test_node() + test2.test_metadata = TestMetadata(name="not_null", kwargs={"column_name": "column1"}) + test3 = generic_test_node() + test3.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column2"}) + tests = [test1, test2] + assertSameContents(model.infer_primary_key(tests), ["column1"]) + + +def test_unique_non_null_multiple(): + model = model_node() + tests = [] + for i in range(2): + for enabled in [True, False]: + test1 = generic_test_node() + test1.test_metadata = TestMetadata( + name="unique", kwargs={"column_name": "column" + str(i) + str(enabled)} + ) + test1.config = TestConfig(enabled=enabled) + test2 = generic_test_node() + test2.test_metadata = TestMetadata( + 
name="not_null", kwargs={"column_name": "column" + str(i) + str(enabled)} + ) + test2.config = TestConfig(enabled=enabled) + tests.extend([test1, test2]) + + assertSameContents( + model.infer_primary_key(tests), + ["column0True", "column1True", "column0False", "column1False"], + ) + + +def test_enabled_unique_single(): + model = model_node() + test1 = generic_test_node() + test1.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column1"}) + test2 = generic_test_node() + test2.config = TestConfig(enabled=False) + test2.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column3"}) + + tests = [test1, test2] + assertSameContents(model.infer_primary_key(tests), ["column1"]) + + +def test_enabled_unique_multiple(): + model = model_node() + test1 = generic_test_node() + test1.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column1"}) + test2 = generic_test_node() + test2.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column2 || column3"}) + + tests = [test1, test2] + assertSameContents(model.infer_primary_key(tests), ["column1", "column2 || column3"]) + + +def test_enabled_unique_combo_single(): + model = model_node() + test1 = generic_test_node() + test1.test_metadata = TestMetadata( + name="unique_combination_of_columns", + kwargs={"combination_of_columns": ["column1", "column2"]}, + ) + test2 = generic_test_node() + test2.config = TestConfig(enabled=False) + test2.test_metadata = TestMetadata( + name="unique_combination_of_columns", + kwargs={"combination_of_columns": ["column3", "column4"]}, + ) + + tests = [test1, test2] + assertSameContents(model.infer_primary_key(tests), ["column1", "column2"]) + + +def test_enabled_unique_combo_multiple(): + model = model_node() + test1 = generic_test_node() + test1.test_metadata = TestMetadata( + name="unique", kwargs={"combination_of_columns": ["column1", "column2"]} + ) + test2 = generic_test_node() + test2.test_metadata = TestMetadata( + name="unique", kwargs={"combination_of_columns": ["column3", "column4"]} + ) + + tests = [test1, test2] + assertSameContents( + model.infer_primary_key(tests), ["column1", "column2", "column3", "column4"] + ) + + +def test_disabled_unique_single(): + model = model_node() + test1 = generic_test_node() + test1.config = TestConfig(enabled=False) + test1.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column1"}) + test2 = generic_test_node() + test2.test_metadata = TestMetadata(name="not_null", kwargs={"column_name": "column2"}) + + tests = [test1, test2] + assertSameContents(model.infer_primary_key(tests), ["column1"]) + + +def test_disabled_unique_multiple(): + model = model_node() + test1 = generic_test_node() + test1.config = TestConfig(enabled=False) + test1.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column1"}) + test2 = generic_test_node() + test2.config = TestConfig(enabled=False) + test2.test_metadata = TestMetadata(name="unique", kwargs={"column_name": "column2 || column3"}) + + tests = [test1, test2] + assertSameContents(model.infer_primary_key(tests), ["column1", "column2 || column3"]) + + +def test_disabled_unique_combo_single(): + model = model_node() + test1 = generic_test_node() + test1.config = TestConfig(enabled=False) + test1.test_metadata = TestMetadata( + name="unique", kwargs={"combination_of_columns": ["column1", "column2"]} + ) + test2 = generic_test_node() + test2.config = TestConfig(enabled=False) + test2.test_metadata = TestMetadata( + name="random", 
kwargs={"combination_of_columns": ["column3", "column4"]} + ) + + tests = [test1, test2] + assertSameContents(model.infer_primary_key(tests), ["column1", "column2"]) + + +def test_disabled_unique_combo_multiple(): + model = model_node() + test1 = generic_test_node() + test1.config = TestConfig(enabled=False) + test1.test_metadata = TestMetadata( + name="unique", kwargs={"combination_of_columns": ["column1", "column2"]} + ) + test2 = generic_test_node() + test2.config = TestConfig(enabled=False) + test2.test_metadata = TestMetadata( + name="unique", kwargs={"combination_of_columns": ["column3", "column4"]} + ) + + tests = [test1, test2] + assertSameContents( + model.infer_primary_key(tests), ["column1", "column2", "column3", "column4"] + ) + + +def assertSameContents(list1, list2): + assert sorted(list1) == sorted(list2) diff --git a/tests/unit/test_linker.py b/tests/unit/test_linker.py index 9c36ae19674..d1d09532e12 100644 --- a/tests/unit/test_linker.py +++ b/tests/unit/test_linker.py @@ -4,14 +4,14 @@ from unittest import mock from dbt import compilation - -try: - from queue import Empty -except ImportError: - from Queue import Empty - from dbt.graph.selector import NodeSelector from dbt.graph.cli import parse_difference +from queue import Empty + +from dbt.flags import set_from_args +from argparse import Namespace + +set_from_args(Namespace(WARN_ERROR=False), None) def _mock_manifest(nodes): diff --git a/tests/unit/test_manifest.py b/tests/unit/test_manifest.py index ba5004b0fe5..892c955d1a8 100644 --- a/tests/unit/test_manifest.py +++ b/tests/unit/test_manifest.py @@ -358,8 +358,9 @@ def tearDown(self): del os.environ["DBT_ENV_CUSTOM_ENV_key"] reset_metadata_vars() + @mock.patch.object(tracking, "active_user") @freezegun.freeze_time("2018-02-14T09:15:13Z") - def test_no_nodes(self): + def test_no_nodes(self, mock_user): manifest = Manifest( nodes={}, sources={}, @@ -376,6 +377,8 @@ def test_no_nodes(self): ) invocation_id = dbt_common.invocation._INVOCATION_ID + mock_user.id = "cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf" + set_from_args(Namespace(SEND_ANONYMOUS_USAGE_STATS=False), None) self.assertEqual( manifest.writable_manifest().to_dict(omit_none=True), { @@ -395,6 +398,8 @@ def test_no_nodes(self): "dbt_version": dbt.version.__version__, "env": {ENV_KEY_NAME: "value"}, "invocation_id": invocation_id, + "send_anonymous_usage_stats": False, + "user_id": "cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf", }, "docs": {}, "disabled": {}, @@ -405,7 +410,10 @@ def test_no_nodes(self): ) @freezegun.freeze_time("2018-02-14T09:15:13Z") - def test_nested_nodes(self): + @mock.patch.object(tracking, "active_user") + def test_nested_nodes(self, mock_user): + set_from_args(Namespace(SEND_ANONYMOUS_USAGE_STATS=False), None) + mock_user.id = "cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf" nodes = deepcopy(self.nested_nodes) manifest = Manifest( nodes=nodes, @@ -421,6 +429,8 @@ def test_nested_nodes(self): ) serialized = manifest.writable_manifest().to_dict(omit_none=True) self.assertEqual(serialized["metadata"]["generated_at"], "2018-02-14T09:15:13Z") + self.assertEqual(serialized["metadata"]["user_id"], mock_user.id) + self.assertFalse(serialized["metadata"]["send_anonymous_usage_stats"]) self.assertEqual(serialized["docs"], {}) self.assertEqual(serialized["disabled"], {}) parent_map = serialized["parent_map"] @@ -510,28 +520,6 @@ def test_build_flat_graph(self): for node in flat_nodes.values(): self.assertEqual(frozenset(node), REQUIRED_PARSED_NODE_KEYS) - @mock.patch.object(tracking, "active_user") - def 
test_metadata(self, mock_user): - mock_user.id = "cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf" - dbt_common.invocation._INVOCATION_ID = "01234567-0123-0123-0123-0123456789ab" - set_from_args(Namespace(SEND_ANONYMOUS_USAGE_STATS=False), None) - now = datetime.utcnow() - self.assertEqual( - ManifestMetadata( - project_id="098f6bcd4621d373cade4e832627b4f6", - adapter_type="postgres", - generated_at=now, - ), - ManifestMetadata( - project_id="098f6bcd4621d373cade4e832627b4f6", - user_id="cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf", - send_anonymous_usage_stats=False, - adapter_type="postgres", - generated_at=now, - invocation_id="01234567-0123-0123-0123-0123456789ab", - ), - ) - @mock.patch.object(tracking, "active_user") @freezegun.freeze_time("2018-02-14T09:15:13Z") def test_no_nodes_with_metadata(self, mock_user): @@ -542,6 +530,8 @@ def test_no_nodes_with_metadata(self, mock_user): project_id="098f6bcd4621d373cade4e832627b4f6", adapter_type="postgres", generated_at=datetime.utcnow(), + user_id="cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf", + send_anonymous_usage_stats=False, ) manifest = Manifest( nodes={}, @@ -883,8 +873,11 @@ def setUp(self): def tearDown(self): del os.environ["DBT_ENV_CUSTOM_ENV_key"] + @mock.patch.object(tracking, "active_user") @freezegun.freeze_time("2018-02-14T09:15:13Z") - def test_no_nodes(self): + def test_no_nodes(self, mock_user): + mock_user.id = "cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf" + set_from_args(Namespace(SEND_ANONYMOUS_USAGE_STATS=False), None) metadata = ManifestMetadata( generated_at=datetime.utcnow(), invocation_id="01234567-0123-0123-0123-0123456789ab" ) @@ -920,6 +913,8 @@ def test_no_nodes(self): "dbt_version": dbt.version.__version__, "invocation_id": "01234567-0123-0123-0123-0123456789ab", "env": {ENV_KEY_NAME: "value"}, + "send_anonymous_usage_stats": False, + "user_id": "cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf", }, "docs": {}, "disabled": {}, @@ -1045,7 +1040,7 @@ def test_merge_from_artifact(self): original_manifest = Manifest(nodes=original_nodes) other_manifest = Manifest(nodes=other_nodes) - original_manifest.merge_from_artifact(other_manifest.writable_manifest()) + original_manifest.merge_from_artifact(other_manifest) # new node added should not be in original manifest assert "model.root.nested2" not in original_manifest.nodes @@ -1243,7 +1238,7 @@ def test_find_generate_macros_by_name(macros, expectations): FindMaterializationSpec = namedtuple("FindMaterializationSpec", "macros,adapter_type,expected") -def _materialization_parameter_sets(): +def _materialization_parameter_sets_legacy(): # inject the plugins used for materialization parameter tests FooPlugin = AdapterPlugin( adapter=mock.MagicMock(), @@ -1389,12 +1384,187 @@ def id_mat(arg): return "_".join(arg) +@pytest.mark.parametrize( + "macros,adapter_type,expected", + _materialization_parameter_sets_legacy(), + ids=id_mat, +) +def test_find_materialization_by_name_legacy(macros, adapter_type, expected): + set_from_args( + Namespace( + SEND_ANONYMOUS_USAGE_STATS=False, + REQUIRE_EXPLICIT_PACKAGE_OVERRIDES_FOR_BUILTIN_MATERIALIZATIONS=False, + ), + None, + ) + + manifest = make_manifest(macros=macros) + result = manifest.find_materialization_macro_by_name( + project_name="root", + materialization_name="my_materialization", + adapter_type=adapter_type, + ) + if expected is None: + assert result is expected + else: + expected_package, expected_adapter_type = expected + assert result.adapter_type == expected_adapter_type + assert result.package_name == expected_package + + +def 
_materialization_parameter_sets(): + # inject the plugins used for materialization parameter tests + FooPlugin = AdapterPlugin( + adapter=mock.MagicMock(), + credentials=mock.MagicMock(), + include_path="/path/to/root/plugin", + project_name="foo", + ) + FooPlugin.adapter.type.return_value = "foo" + inject_plugin(FooPlugin) + + BarPlugin = AdapterPlugin( + adapter=mock.MagicMock(), + credentials=mock.MagicMock(), + include_path="/path/to/root/plugin", + dependencies=["foo"], + project_name="bar", + ) + BarPlugin.adapter.type.return_value = "bar" + inject_plugin(BarPlugin) + + sets = [ + FindMaterializationSpec(macros=[], adapter_type="foo", expected=None), + ] + + # default only, each project + sets.extend( + FindMaterializationSpec( + macros=[MockMaterialization(project, adapter_type=None)], + adapter_type="foo", + expected=(project, "default"), + ) + for project in ["root", "dep", "dbt"] + ) + + # other type only, each project + sets.extend( + FindMaterializationSpec( + macros=[MockMaterialization(project, adapter_type="bar")], + adapter_type="foo", + expected=None, + ) + for project in ["root", "dep", "dbt"] + ) + + # matching type only, each project + sets.extend( + FindMaterializationSpec( + macros=[MockMaterialization(project, adapter_type="foo")], + adapter_type="foo", + expected=(project, "foo"), + ) + for project in ["root", "dep", "dbt"] + ) + + sets.extend( + [ + # matching type and default everywhere + FindMaterializationSpec( + macros=[ + MockMaterialization(project, adapter_type=atype) + for (project, atype) in product(["root", "dep", "dbt"], ["foo", None]) + ], + adapter_type="foo", + expected=("root", "foo"), + ), + # default in core, override is in dep, and root has unrelated override + # should find the dbt default because default materializations cannot be overwritten by packages. + FindMaterializationSpec( + macros=[ + MockMaterialization("root", adapter_type="bar"), + MockMaterialization("dep", adapter_type="foo"), + MockMaterialization("dbt", adapter_type=None), + ], + adapter_type="foo", + expected=("dbt", "default"), + ), + # default in core, unrelated override is in dep, and root has an override + # should find the root override. + FindMaterializationSpec( + macros=[ + MockMaterialization("root", adapter_type="foo"), + MockMaterialization("dep", adapter_type="bar"), + MockMaterialization("dbt", adapter_type=None), + ], + adapter_type="foo", + expected=("root", "foo"), + ), + # default in core, override is in dep, and root has an override too. + # should find the root override. 
+ FindMaterializationSpec( + macros=[ + MockMaterialization("root", adapter_type="foo"), + MockMaterialization("dep", adapter_type="foo"), + MockMaterialization("dbt", adapter_type=None), + ], + adapter_type="foo", + expected=("root", "foo"), + ), + # core has default + adapter, dep has adapter, root has default + # should find the default adapter implementation, because it's the most specific + # and default materializations cannot be overwritten by packages + FindMaterializationSpec( + macros=[ + MockMaterialization("root", adapter_type=None), + MockMaterialization("dep", adapter_type="foo"), + MockMaterialization("dbt", adapter_type=None), + MockMaterialization("dbt", adapter_type="foo"), + ], + adapter_type="foo", + expected=("dbt", "foo"), + ), + ] + ) + + # inherit from parent adapter + sets.extend( + FindMaterializationSpec( + macros=[MockMaterialization(project, adapter_type="foo")], + adapter_type="bar", + expected=(project, "foo"), + ) + for project in ["root", "dep", "dbt"] + ) + sets.extend( + FindMaterializationSpec( + macros=[ + MockMaterialization(project, adapter_type="foo"), + MockMaterialization(project, adapter_type="bar"), + ], + adapter_type="bar", + expected=(project, "bar"), + ) + for project in ["root", "dep", "dbt"] + ) + + return sets + + @pytest.mark.parametrize( "macros,adapter_type,expected", _materialization_parameter_sets(), ids=id_mat, ) def test_find_materialization_by_name(macros, adapter_type, expected): + set_from_args( + Namespace( + SEND_ANONYMOUS_USAGE_STATS=False, + REQUIRE_EXPLICIT_PACKAGE_OVERRIDES_FOR_BUILTIN_MATERIALIZATIONS=True, + ), + None, + ) + manifest = make_manifest(macros=macros) result = manifest.find_materialization_macro_by_name( project_name="root", diff --git a/tests/unit/test_parse_manifest.py b/tests/unit/test_parse_manifest.py deleted file mode 100644 index 5dc39ab74ed..00000000000 --- a/tests/unit/test_parse_manifest.py +++ /dev/null @@ -1,123 +0,0 @@ -import unittest -from unittest import mock -from unittest.mock import patch, MagicMock -from argparse import Namespace - -from .utils import config_from_parts_or_dicts, normalize - -from dbt.contracts.files import SourceFile, FileHash, FilePath -from dbt.contracts.graph.manifest import Manifest, ManifestStateCheck -from dbt.parser import manifest -from dbt.parser.manifest import ManifestLoader -from dbt.config import RuntimeConfig -from dbt.flags import set_from_args - - -class MatchingHash(FileHash): - def __init__(self): - return super().__init__("", "") - - def __eq__(self, other): - return True - - -class MismatchedHash(FileHash): - def __init__(self): - return super().__init__("", "") - - def __eq__(self, other): - return False - - -class TestLoader(unittest.TestCase): - def setUp(self): - profile_data = { - "target": "test", - "quoting": {}, - "outputs": { - "test": { - "type": "postgres", - "host": "localhost", - "schema": "analytics", - "user": "test", - "pass": "test", - "dbname": "test", - "port": 1, - } - }, - } - - root_project = { - "name": "root", - "version": "0.1", - "profile": "test", - "project-root": normalize("/usr/src/app"), - "config-version": 2, - } - - self.root_project_config = config_from_parts_or_dicts( - project=root_project, profile=profile_data, cli_vars='{"test_schema_name": "foo"}' - ) - self.parser = mock.MagicMock() - - # Create the Manifest.state_check patcher - @patch("dbt.parser.manifest.ManifestLoader.build_manifest_state_check") - def _mock_state_check(self): - all_projects = self.all_projects - return ManifestStateCheck( - 
vars_hash=FileHash.from_contents("vars"), - project_hashes={name: FileHash.from_contents(name) for name in all_projects}, - profile_hash=FileHash.from_contents("profile"), - ) - - self.load_state_check = patch( - "dbt.parser.manifest.ManifestLoader.build_manifest_state_check" - ) - self.mock_state_check = self.load_state_check.start() - self.mock_state_check.side_effect = _mock_state_check - - self.loader = manifest.ManifestLoader( - self.root_project_config, {"root": self.root_project_config} - ) - - def _new_manifest(self): - state_check = ManifestStateCheck(MatchingHash(), MatchingHash, []) - manifest = Manifest({}, {}, {}, {}, {}, {}, [], {}) - manifest.state_check = state_check - return manifest - - def _mismatched_file(self, searched, name): - return self._new_file(searched, name, False) - - def _matching_file(self, searched, name): - return self._new_file(searched, name, True) - - def _new_file(self, searched, name, match): - if match: - checksum = MatchingHash() - else: - checksum = MismatchedHash() - path = FilePath( - searched_path=normalize(searched), - relative_path=normalize(name), - project_root=normalize(self.root_project_config.project_root), - ) - return SourceFile(path=path, checksum=checksum) - - -class TestPartialParse(unittest.TestCase): - @patch("dbt.parser.manifest.ManifestLoader.build_manifest_state_check") - @patch("dbt.parser.manifest.os.path.exists") - @patch("dbt.parser.manifest.open") - def test_partial_parse_file_path(self, patched_open, patched_os_exist, patched_state_check): - mock_project = MagicMock(RuntimeConfig) - mock_project.project_target_path = "mock_target_path" - patched_os_exist.return_value = True - set_from_args(Namespace(), {}) - ManifestLoader(mock_project, {}) - # by default we use the project_target_path - patched_open.assert_called_with("mock_target_path/partial_parse.msgpack", "rb") - set_from_args(Namespace(partial_parse_file_path="specified_partial_parse_path"), {}) - ManifestLoader(mock_project, {}) - # if specified in flags, we use the specified path - patched_open.assert_called_with("specified_partial_parse_path", "rb") diff --git a/tests/unit/test_parser.py b/tests/unit/test_parser.py index c949756eba8..420354f585e 100644 --- a/tests/unit/test_parser.py +++ b/tests/unit/test_parser.py @@ -1,5 +1,6 @@ import os import unittest +from argparse import Namespace from copy import deepcopy from unittest import mock @@ -8,11 +9,12 @@ import dbt.flags import dbt.parser from dbt import tracking +from dbt.artifacts.resources import ModelConfig from dbt.artifacts.resources import RefArgs from dbt.context.context_config import ContextConfig from dbt.contracts.files import SourceFile, FileHash, FilePath, SchemaSourceFile from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.model_config import NodeConfig, TestConfig, SnapshotConfig, ModelConfig +from dbt.contracts.graph.model_config import NodeConfig, TestConfig, SnapshotConfig from dbt.contracts.graph.nodes import ( ModelNode, Macro, @@ -22,7 +24,8 @@ AnalysisNode, UnpatchedSourceDefinition, ) -from dbt.exceptions import CompilationError, ParsingError +from dbt.exceptions import CompilationError, ParsingError, SchemaConfigError +from dbt.flags import set_from_args from dbt.node_types import NodeType from dbt.parser import ( ModelParser, @@ -52,8 +55,6 @@ from dbt.parser.search import FileBlock from dbt.parser.sources import SourcePatcher from .utils import config_from_parts_or_dicts, normalize, generate_name_macros, MockNode -from dbt.flags import set_from_args -from 
argparse import Namespace set_from_args(Namespace(WARN_ERROR=False), None) @@ -224,6 +225,23 @@ def assertEqualNodes(node_one, node_two): - name: my_table """ + +MULTIPLE_TABLE_SOURCE_META = """ +sources: + - name: my_source + meta: + source_field: source_value + shared_field: shared_field_default + tables: + - name: my_table_shared_field_default + meta: + table_field: table_value + - name: my_table_shared_field_override + meta: + shared_field: shared_field_table_override + table_field: table_value +""" + SINGLE_TABLE_SOURCE_TESTS = """ sources: - name: my_source @@ -255,6 +273,22 @@ def assertEqualNodes(node_one, node_two): arg: 100 """ +SINGLE_TABLE_MODEL_TESTS_WRONG_SEVERITY = """ +models: + - name: my_model + description: A description of my model + columns: + - name: color + description: The color value + data_tests: + - not_null: + severity: WARNING + - accepted_values: + values: ['red', 'blue', 'green'] + - foreign_package.test_case: + arg: 100 +""" + MULTIPLE_TABLE_VERSIONED_MODEL_TESTS = """ models: @@ -415,6 +449,41 @@ def test__parse_basic_source(self): assert src.resource_type == NodeType.Source assert src.fqn == ["snowplow", "my_source", "my_table"] + @mock.patch("dbt.parser.sources.get_adapter") + def test__parse_basic_source_meta(self, mock_get_adapter): + block = self.file_block_for(MULTIPLE_TABLE_SOURCE_META, "test_one.yml") + dct = yaml_from_file(block.file) + self.parser.parse_file(block, dct) + self.assert_has_manifest_lengths(self.parser.manifest, sources=2) + + unpatched_src_default = self.parser.manifest.sources[ + "source.snowplow.my_source.my_table_shared_field_default" + ] + src_default = self.source_patcher.parse_source(unpatched_src_default) + assert src_default.meta == { + "source_field": "source_value", + "shared_field": "shared_field_default", + "table_field": "table_value", + } + assert src_default.source_meta == { + "source_field": "source_value", + "shared_field": "shared_field_default", + } + + unpatched_src_override = self.parser.manifest.sources[ + "source.snowplow.my_source.my_table_shared_field_override" + ] + src_override = self.source_patcher.parse_source(unpatched_src_override) + assert src_override.meta == { + "source_field": "source_value", + "shared_field": "shared_field_table_override", + "table_field": "table_value", + } + assert src_override.source_meta == { + "source_field": "source_value", + "shared_field": "shared_field_default", + } + def test__read_basic_source_tests(self): block = self.yaml_block_for(SINGLE_TABLE_SOURCE_TESTS, "test_one.yml") analysis_tests = AnalysisPatchParser(self.parser, block, "analyses").parse().test_blocks @@ -524,6 +593,14 @@ def test__read_basic_model_tests(self): self.assertEqual(len(list(self.parser.manifest.sources)), 0) self.assertEqual(len(list(self.parser.manifest.nodes)), 4) + def test__read_basic_model_tests_wrong_severity(self): + block = self.yaml_block_for(SINGLE_TABLE_MODEL_TESTS_WRONG_SEVERITY, "test_one.yml") + dct = yaml_from_file(block.file) + with self.assertRaisesRegex( + SchemaConfigError, "Severity must be either 'warn' or 'error'. 
Got 'WARNING'" + ): + self.parser.parse_file(block, dct) + def test__parse_basic_model_tests(self): block = self.file_block_for(SINGLE_TABLE_MODEL_TESTS, "test_one.yml") self.parser.manifest.files[block.file.file_id] = block.file diff --git a/tests/unit/test_plugin_manager.py b/tests/unit/test_plugin_manager.py index 4cfe01d0dfc..bf25d810729 100644 --- a/tests/unit/test_plugin_manager.py +++ b/tests/unit/test_plugin_manager.py @@ -101,13 +101,25 @@ def test_get_nodes(self, tracking, get_nodes_plugins): nodes = pm.get_nodes() assert len(nodes.models) == 2 - assert tracking.track_plugin_get_nodes.called_once_with( - { - "plugin_name": get_nodes_plugins[0].name, - "num_model_nodes": 2, - "num_model_packages": 1, - } - ) + + expected_calls = [ + mock.call( + { + "plugin_name": get_nodes_plugins[0].name, + "num_model_nodes": 1, + "num_model_packages": 1, + } + ), + mock.call( + { + "plugin_name": get_nodes_plugins[1].name, + "num_model_nodes": 1, + "num_model_packages": 1, + } + ), + ] + + tracking.track_plugin_get_nodes.assert_has_calls(expected_calls) def test_get_manifest_artifact(self, get_artifacts_plugins): pm = PluginManager(plugins=get_artifacts_plugins) diff --git a/tests/unit/test_relation.py b/tests/unit/test_relation.py deleted file mode 100644 index aa9cda258f9..00000000000 --- a/tests/unit/test_relation.py +++ /dev/null @@ -1,68 +0,0 @@ -from dataclasses import replace - -import pytest - -from dbt.adapters.base import BaseRelation -from dbt.adapters.contracts.relation import RelationType - - -@pytest.mark.parametrize( - "relation_type,result", - [ - (RelationType.View, True), - (RelationType.External, False), - ], -) -def test_can_be_renamed(relation_type, result): - my_relation = BaseRelation.create(type=relation_type) - my_relation = replace(my_relation, renameable_relations=frozenset({RelationType.View})) - assert my_relation.can_be_renamed is result - - -def test_can_be_renamed_default(): - my_relation = BaseRelation.create(type=RelationType.View) - assert my_relation.can_be_renamed is False - - -@pytest.mark.parametrize( - "relation_type,result", - [ - (RelationType.View, True), - (RelationType.External, False), - ], -) -def test_can_be_replaced(relation_type, result): - my_relation = BaseRelation.create(type=relation_type) - my_relation = replace(my_relation, replaceable_relations=frozenset({RelationType.View})) - assert my_relation.can_be_replaced is result - - -def test_can_be_replaced_default(): - my_relation = BaseRelation.create(type=RelationType.View) - assert my_relation.can_be_replaced is False - - -@pytest.mark.parametrize( - "limit,expected_result", - [ - (None, '"test_database"."test_schema"."test_identifier"'), - ( - 0, - '(select * from "test_database"."test_schema"."test_identifier" where false limit 0) _dbt_limit_subq', - ), - ( - 1, - '(select * from "test_database"."test_schema"."test_identifier" limit 1) _dbt_limit_subq', - ), - ], -) -def test_render_limited(limit, expected_result): - my_relation = BaseRelation.create( - database="test_database", - schema="test_schema", - identifier="test_identifier", - limit=limit, - ) - actual_result = my_relation.render_limited() - assert actual_result == expected_result - assert str(my_relation) == expected_result diff --git a/tests/unit/test_unit_test_parser.py b/tests/unit/test_unit_test_parser.py index a2039e83784..9f6fb0ced55 100644 --- a/tests/unit/test_unit_test_parser.py +++ b/tests/unit/test_unit_test_parser.py @@ -1,4 +1,5 @@ -from dbt.contracts.graph.nodes import UnitTestDefinition, UnitTestConfig, DependsOn, 
NodeType +from dbt.contracts.graph.nodes import UnitTestDefinition, NodeType +from dbt.artifacts.resources import UnitTestConfig, DependsOn from dbt.parser import SchemaParser from dbt.parser.unit_tests import UnitTestParser diff --git a/tests/unit/utils.py b/tests/unit/utils/__init__.py similarity index 100% rename from tests/unit/utils.py rename to tests/unit/utils/__init__.py diff --git a/tests/unit/utils/manifest.py b/tests/unit/utils/manifest.py new file mode 100644 index 00000000000..2f56570df41 --- /dev/null +++ b/tests/unit/utils/manifest.py @@ -0,0 +1,1006 @@ +from argparse import Namespace +import pytest + +from dbt.artifacts.resources.v1.model import ModelConfig +from dbt.contracts.files import FileHash +from dbt.contracts.graph.nodes import ( + DependsOn, + NodeConfig, + Macro, + ModelNode, + Exposure, + Metric, + Group, + SavedQuery, + SeedNode, + SemanticModel, + SingularTestNode, + GenericTestNode, + SourceDefinition, + AccessType, + UnitTestDefinition, +) +from dbt.contracts.graph.manifest import Manifest, ManifestMetadata +from dbt.artifacts.resources import ( + ExposureType, + MetricInputMeasure, + MetricTypeParams, + NodeRelation, + Owner, + QueryParams, + MacroDependsOn, + TestConfig, + TestMetadata, + RefArgs, +) +from dbt.contracts.graph.unparsed import ( + UnitTestInputFixture, + UnitTestOutputFixture, +) +from dbt.node_types import NodeType + +from dbt_semantic_interfaces.type_enums import MetricType +from dbt.flags import set_from_args + +set_from_args(Namespace(WARN_ERROR=False), None) + + +def make_model( + pkg, + name, + sql, + refs=None, + sources=None, + tags=None, + path=None, + alias=None, + config_kwargs=None, + fqn_extras=None, + depends_on_macros=None, + version=None, + latest_version=None, + access=None, +): + if refs is None: + refs = [] + if sources is None: + sources = [] + if tags is None: + tags = [] + if path is None: + path = f"{name}.sql" + if alias is None: + alias = name + if config_kwargs is None: + config_kwargs = {} + if depends_on_macros is None: + depends_on_macros = [] + + if fqn_extras is None: + fqn_extras = [] + + fqn = [pkg] + fqn_extras + [name] + if version: + fqn.append(f"v{version}") + + depends_on_nodes = [] + source_values = [] + ref_values = [] + for ref in refs: + ref_version = ref.version if hasattr(ref, "version") else None + ref_values.append(RefArgs(name=ref.name, package=ref.package_name, version=ref_version)) + depends_on_nodes.append(ref.unique_id) + for src in sources: + source_values.append([src.source_name, src.name]) + depends_on_nodes.append(src.unique_id) + + return ModelNode( + language="sql", + raw_code=sql, + database="dbt", + schema="dbt_schema", + alias=alias, + name=name, + fqn=fqn, + unique_id=f"model.{pkg}.{name}" if not version else f"model.{pkg}.{name}.v{version}", + package_name=pkg, + path=path, + original_file_path=f"models/{path}", + config=NodeConfig(**config_kwargs), + tags=tags, + refs=ref_values, + sources=source_values, + depends_on=DependsOn( + nodes=depends_on_nodes, + macros=depends_on_macros, + ), + resource_type=NodeType.Model, + checksum=FileHash.from_contents(""), + version=version, + latest_version=latest_version, + access=access or AccessType.Protected, + ) + + +def make_seed( + pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extras=None, checksum=None +): + if alias is None: + alias = name + if tags is None: + tags = [] + if path is None: + path = f"{name}.csv" + + if fqn_extras is None: + fqn_extras = [] + + if checksum is None: + checksum = FileHash.from_contents("") + 
+ fqn = [pkg] + fqn_extras + [name] + return SeedNode( + database="dbt", + schema="dbt_schema", + alias=alias, + name=name, + fqn=fqn, + unique_id=f"seed.{pkg}.{name}", + package_name=pkg, + path=path, + original_file_path=f"data/{path}", + tags=tags, + resource_type=NodeType.Seed, + checksum=FileHash.from_contents(""), + ) + + +def make_source( + pkg, source_name, table_name, path=None, loader=None, identifier=None, fqn_extras=None +): + if path is None: + path = "models/schema.yml" + if loader is None: + loader = "my_loader" + if identifier is None: + identifier = table_name + + if fqn_extras is None: + fqn_extras = [] + + fqn = [pkg] + fqn_extras + [source_name, table_name] + + return SourceDefinition( + fqn=fqn, + database="dbt", + schema="dbt_schema", + unique_id=f"source.{pkg}.{source_name}.{table_name}", + package_name=pkg, + path=path, + original_file_path=path, + name=table_name, + source_name=source_name, + loader="my_loader", + identifier=identifier, + resource_type=NodeType.Source, + loaded_at_field="loaded_at", + tags=[], + source_description="", + ) + + +def make_macro(pkg, name, macro_sql, path=None, depends_on_macros=None): + if path is None: + path = "macros/macros.sql" + + if depends_on_macros is None: + depends_on_macros = [] + + return Macro( + name=name, + macro_sql=macro_sql, + unique_id=f"macro.{pkg}.{name}", + package_name=pkg, + path=path, + original_file_path=path, + resource_type=NodeType.Macro, + depends_on=MacroDependsOn(macros=depends_on_macros), + ) + + +def make_unique_test(pkg, test_model, column_name, path=None, refs=None, sources=None, tags=None): + return make_generic_test(pkg, "unique", test_model, {}, column_name=column_name) + + +def make_not_null_test( + pkg, test_model, column_name, path=None, refs=None, sources=None, tags=None +): + return make_generic_test(pkg, "not_null", test_model, {}, column_name=column_name) + + +def make_generic_test( + pkg, + test_name, + test_model, + test_kwargs, + path=None, + refs=None, + sources=None, + tags=None, + column_name=None, +): + kwargs = test_kwargs.copy() + ref_values = [] + source_values = [] + # this doesn't really have to be correct + if isinstance(test_model, SourceDefinition): + kwargs["model"] = ( + "{{ source('" + test_model.source_name + "', '" + test_model.name + "') }}" + ) + source_values.append([test_model.source_name, test_model.name]) + else: + kwargs["model"] = "{{ ref('" + test_model.name + "')}}" + ref_values.append( + RefArgs( + name=test_model.name, package=test_model.package_name, version=test_model.version + ) + ) + if column_name is not None: + kwargs["column_name"] = column_name + + # whatever + args_name = test_model.search_name.replace(".", "_") + if column_name is not None: + args_name += "_" + column_name + node_name = f"{test_name}_{args_name}" + raw_code = ( + '{{ config(severity="ERROR") }}{{ test_' + test_name + "(**dbt_schema_test_kwargs) }}" + ) + name_parts = test_name.split(".") + + if len(name_parts) == 2: + namespace, test_name = name_parts + macro_depends = f"macro.{namespace}.test_{test_name}" + elif len(name_parts) == 1: + namespace = None + macro_depends = f"macro.dbt.test_{test_name}" + else: + assert False, f"invalid test name: {test_name}" + + if path is None: + path = "schema.yml" + if tags is None: + tags = ["schema"] + + if refs is None: + refs = [] + if sources is None: + sources = [] + + depends_on_nodes = [] + for ref in refs: + ref_version = ref.version if hasattr(ref, "version") else None + ref_values.append(RefArgs(name=ref.name, package=ref.package_name, 
version=ref_version)) + depends_on_nodes.append(ref.unique_id) + + for source in sources: + source_values.append([source.source_name, source.name]) + depends_on_nodes.append(source.unique_id) + + return GenericTestNode( + language="sql", + raw_code=raw_code, + test_metadata=TestMetadata( + namespace=namespace, + name=test_name, + kwargs=kwargs, + ), + database="dbt", + schema="dbt_postgres", + name=node_name, + alias=node_name, + fqn=["minimal", "schema_test", node_name], + unique_id=f"test.{pkg}.{node_name}", + package_name=pkg, + path=f"schema_test/{node_name}.sql", + original_file_path=f"models/{path}", + resource_type=NodeType.Test, + tags=tags, + refs=ref_values, + sources=[], + depends_on=DependsOn(macros=[macro_depends], nodes=depends_on_nodes), + column_name=column_name, + checksum=FileHash.from_contents(""), + ) + + +def make_unit_test( + pkg, + test_name, + test_model, +): + input_fixture = UnitTestInputFixture( + input="ref('table_model')", + rows=[{"id": 1, "string_a": "a"}], + ) + output_fixture = UnitTestOutputFixture( + rows=[{"id": 1, "string_a": "a"}], + ) + return UnitTestDefinition( + name=test_name, + model=test_model, + package_name=pkg, + resource_type=NodeType.Unit, + path="unit_tests.yml", + original_file_path="models/unit_tests.yml", + unique_id=f"unit.{pkg}.{test_model.name}__{test_name}", + given=[input_fixture], + expect=output_fixture, + fqn=[pkg, test_model.name, test_name], + ) + + +def make_singular_test( + pkg, name, sql, refs=None, sources=None, tags=None, path=None, config_kwargs=None +): + if refs is None: + refs = [] + if sources is None: + sources = [] + if tags is None: + tags = ["data"] + if path is None: + path = f"{name}.sql" + + if config_kwargs is None: + config_kwargs = {} + + fqn = ["minimal", "data_test", name] + + depends_on_nodes = [] + source_values = [] + ref_values = [] + for ref in refs: + ref_version = ref.version if hasattr(ref, "version") else None + ref_values.append(RefArgs(name=ref.name, package=ref.package_name, version=ref_version)) + depends_on_nodes.append(ref.unique_id) + for src in sources: + source_values.append([src.source_name, src.name]) + depends_on_nodes.append(src.unique_id) + + return SingularTestNode( + language="sql", + raw_code=sql, + database="dbt", + schema="dbt_schema", + name=name, + alias=name, + fqn=fqn, + unique_id=f"test.{pkg}.{name}", + package_name=pkg, + path=path, + original_file_path=f"tests/{path}", + config=TestConfig(**config_kwargs), + tags=tags, + refs=ref_values, + sources=source_values, + depends_on=DependsOn(nodes=depends_on_nodes, macros=[]), + resource_type=NodeType.Test, + checksum=FileHash.from_contents(""), + ) + + +def make_exposure(pkg, name, path=None, fqn_extras=None, owner=None): + if path is None: + path = "schema.yml" + + if fqn_extras is None: + fqn_extras = [] + + if owner is None: + owner = Owner(email="test@example.com") + + fqn = [pkg, "exposures"] + fqn_extras + [name] + return Exposure( + name=name, + resource_type=NodeType.Exposure, + type=ExposureType.Notebook, + fqn=fqn, + unique_id=f"exposure.{pkg}.{name}", + package_name=pkg, + path=path, + original_file_path=path, + owner=owner, + ) + + +def make_metric(pkg, name, path=None): + if path is None: + path = "schema.yml" + + return Metric( + name=name, + resource_type=NodeType.Metric, + path=path, + package_name=pkg, + original_file_path=path, + unique_id=f"metric.{pkg}.{name}", + fqn=[pkg, "metrics", name], + label="New Customers", + description="New customers", + type=MetricType.SIMPLE, + 
type_params=MetricTypeParams(measure=MetricInputMeasure(name="count_cats")), + meta={"is_okr": True}, + tags=["okrs"], + ) + + +def make_group(pkg, name, path=None): + if path is None: + path = "schema.yml" + + return Group( + name=name, + resource_type=NodeType.Group, + path=path, + package_name=pkg, + original_file_path=path, + unique_id=f"group.{pkg}.{name}", + owner="email@gmail.com", + ) + + +def make_semantic_model( + pkg: str, + name: str, + model, + path=None, +): + if path is None: + path = "schema.yml" + + return SemanticModel( + name=name, + resource_type=NodeType.SemanticModel, + model=model, + node_relation=NodeRelation( + alias=model.alias, + schema_name="dbt", + relation_name=model.name, + ), + package_name=pkg, + path=path, + description="Customer entity", + primary_entity="customer", + unique_id=f"semantic_model.{pkg}.{name}", + original_file_path=path, + fqn=[pkg, "semantic_models", name], + ) + + +def make_saved_query(pkg: str, name: str, metric: str, path=None): + if path is None: + path = "schema.yml" + + return SavedQuery( + name=name, + resource_type=NodeType.SavedQuery, + package_name=pkg, + path=path, + description="Test Saved Query", + query_params=QueryParams( + metrics=[metric], + group_by=[], + where=None, + ), + exports=[], + unique_id=f"saved_query.{pkg}.{name}", + original_file_path=path, + fqn=[pkg, "saved_queries", name], + ) + + +@pytest.fixture +def macro_test_unique(): + return make_macro( + "dbt", "test_unique", "blablabla", depends_on_macros=["macro.dbt.default__test_unique"] + ) + + +@pytest.fixture +def macro_default_test_unique(): + return make_macro("dbt", "default__test_unique", "blablabla") + + +@pytest.fixture +def macro_test_not_null(): + return make_macro( + "dbt", "test_not_null", "blablabla", depends_on_macros=["macro.dbt.default__test_not_null"] + ) + + +@pytest.fixture +def macro_default_test_not_null(): + return make_macro("dbt", "default__test_not_null", "blabla") + + +@pytest.fixture +def seed(): + return make_seed("pkg", "seed") + + +@pytest.fixture +def source(): + return make_source("pkg", "raw", "seed", identifier="seed") + + +@pytest.fixture +def ephemeral_model(source): + return make_model( + "pkg", + "ephemeral_model", + 'select * from {{ source("raw", "seed") }}', + config_kwargs={"materialized": "ephemeral"}, + sources=[source], + ) + + +@pytest.fixture +def view_model(ephemeral_model): + return make_model( + "pkg", + "view_model", + 'select * from {{ ref("ephemeral_model") }}', + config_kwargs={"materialized": "view"}, + refs=[ephemeral_model], + tags=["uses_ephemeral"], + ) + + +@pytest.fixture +def table_model(ephemeral_model): + return make_model( + "pkg", + "table_model", + 'select * from {{ ref("ephemeral_model") }}', + config_kwargs={ + "materialized": "table", + "meta": { + # Other properties to test in test_select_config_meta + "string_property": "some_string", + "truthy_bool_property": True, + "falsy_bool_property": False, + "list_property": ["some_value", True, False], + }, + }, + refs=[ephemeral_model], + tags=["uses_ephemeral"], + path="subdirectory/table_model.sql", + ) + + +@pytest.fixture +def table_model_py(seed): + return make_model( + "pkg", + "table_model_py", + 'select * from {{ ref("seed") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + tags=[], + path="subdirectory/table_model.py", + ) + + +@pytest.fixture +def table_model_csv(seed): + return make_model( + "pkg", + "table_model_csv", + 'select * from {{ ref("seed") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + tags=[], 
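+ # note: a .csv path is unusual for a model; presumably here to exercise
+ # path/extension-based node selection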
+ path="subdirectory/table_model.csv", + ) + + +@pytest.fixture +def ext_source(): + return make_source( + "ext", + "ext_raw", + "ext_source", + ) + + +@pytest.fixture +def ext_source_2(): + return make_source( + "ext", + "ext_raw", + "ext_source_2", + ) + + +@pytest.fixture +def ext_source_other(): + return make_source( + "ext", + "raw", + "ext_source", + ) + + +@pytest.fixture +def ext_source_other_2(): + return make_source( + "ext", + "raw", + "ext_source_2", + ) + + +@pytest.fixture +def ext_model(ext_source): + return make_model( + "ext", + "ext_model", + 'select * from {{ source("ext_raw", "ext_source") }}', + sources=[ext_source], + ) + + +@pytest.fixture +def union_model(seed, ext_source): + return make_model( + "pkg", + "union_model", + 'select * from {{ ref("seed") }} union all select * from {{ source("ext_raw", "ext_source") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + sources=[ext_source], + fqn_extras=["unions"], + path="subdirectory/union_model.sql", + tags=["unions"], + ) + + +@pytest.fixture +def versioned_model_v1(seed): + return make_model( + "pkg", + "versioned_model", + 'select * from {{ ref("seed") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + sources=[], + path="subdirectory/versioned_model_v1.sql", + version=1, + latest_version=2, + ) + + +@pytest.fixture +def versioned_model_v2(seed): + return make_model( + "pkg", + "versioned_model", + 'select * from {{ ref("seed") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + sources=[], + path="subdirectory/versioned_model_v2.sql", + version=2, + latest_version=2, + ) + + +@pytest.fixture +def versioned_model_v3(seed): + return make_model( + "pkg", + "versioned_model", + 'select * from {{ ref("seed") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + sources=[], + path="subdirectory/versioned_model_v3.sql", + version="3", + latest_version=2, + ) + + +@pytest.fixture +def versioned_model_v12_string(seed): + return make_model( + "pkg", + "versioned_model", + 'select * from {{ ref("seed") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + sources=[], + path="subdirectory/versioned_model_v12.sql", + version="12", + latest_version=2, + ) + + +@pytest.fixture +def versioned_model_v4_nested_dir(seed): + return make_model( + "pkg", + "versioned_model", + 'select * from {{ ref("seed") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + sources=[], + path="subdirectory/nested_dir/versioned_model_v3.sql", + version="4", + latest_version=2, + fqn_extras=["nested_dir"], + ) + + +@pytest.fixture +def table_id_unique(table_model): + return make_unique_test("pkg", table_model, "id") + + +@pytest.fixture +def table_id_not_null(table_model): + return make_not_null_test("pkg", table_model, "id") + + +@pytest.fixture +def view_id_unique(view_model): + return make_unique_test("pkg", view_model, "id") + + +@pytest.fixture +def ext_source_id_unique(ext_source): + return make_unique_test("ext", ext_source, "id") + + +@pytest.fixture +def view_test_nothing(view_model): + return make_singular_test( + "pkg", + "view_test_nothing", + 'select * from {{ ref("view_model") }} limit 0', + refs=[view_model], + ) + + +@pytest.fixture +def unit_test_table_model(table_model): + return make_unit_test( + "pkg", + "unit_test_table_model", + table_model, + ) + + +# Support dots as namespace separators +@pytest.fixture +def namespaced_seed(): + return make_seed("pkg", "mynamespace.seed") + + +@pytest.fixture +def namespace_model(source): + return make_model( + "pkg", + 
"mynamespace.ephemeral_model", + 'select * from {{ source("raw", "seed") }}', + config_kwargs={"materialized": "ephemeral"}, + sources=[source], + ) + + +@pytest.fixture +def namespaced_union_model(seed, ext_source): + return make_model( + "pkg", + "mynamespace.union_model", + 'select * from {{ ref("mynamespace.seed") }} union all select * from {{ ref("mynamespace.ephemeral_model") }}', + config_kwargs={"materialized": "table"}, + refs=[seed], + sources=[ext_source], + fqn_extras=["unions"], + path="subdirectory/union_model.sql", + tags=["unions"], + ) + + +@pytest.fixture +def metric() -> Metric: + return Metric( + name="my_metric", + resource_type=NodeType.Metric, + type=MetricType.SIMPLE, + type_params=MetricTypeParams(measure=MetricInputMeasure(name="a_measure")), + fqn=["test", "metrics", "myq_metric"], + unique_id="metric.test.my_metric", + package_name="test", + path="models/metric.yml", + original_file_path="models/metric.yml", + description="", + meta={}, + tags=[], + label="test_label", + ) + + +@pytest.fixture +def saved_query() -> SavedQuery: + pkg = "test" + name = "test_saved_query" + path = "test_path" + return SavedQuery( + name=name, + resource_type=NodeType.SavedQuery, + package_name=pkg, + path=path, + description="Test Saved Query", + query_params=QueryParams( + metrics=["my_metric"], + group_by=[], + where=None, + ), + exports=[], + unique_id=f"saved_query.{pkg}.{name}", + original_file_path=path, + fqn=[pkg, "saved_queries", name], + ) + + +@pytest.fixture +def semantic_model(table_model) -> SemanticModel: + return make_semantic_model("test", "test_semantic_model", model=table_model) + + +@pytest.fixture +def metricflow_time_spine_model() -> ModelNode: + return ModelNode( + name="metricflow_time_spine", + database="dbt", + schema="analytics", + alias="events", + resource_type=NodeType.Model, + unique_id="model.test.metricflow_time_spine", + fqn=["snowplow", "events"], + package_name="snowplow", + refs=[], + sources=[], + metrics=[], + depends_on=DependsOn(), + config=ModelConfig(), + tags=[], + path="events.sql", + original_file_path="events.sql", + meta={}, + language="sql", + raw_code="does not matter", + checksum=FileHash.empty(), + relation_name="events", + ) + + +@pytest.fixture +def nodes( + seed, + ephemeral_model, + view_model, + table_model, + table_model_py, + table_model_csv, + union_model, + versioned_model_v1, + versioned_model_v2, + versioned_model_v3, + versioned_model_v4_nested_dir, + versioned_model_v12_string, + ext_model, + table_id_unique, + table_id_not_null, + view_id_unique, + ext_source_id_unique, + view_test_nothing, + namespaced_seed, + namespace_model, + namespaced_union_model, +) -> list: + return [ + seed, + ephemeral_model, + view_model, + table_model, + table_model_py, + table_model_csv, + union_model, + versioned_model_v1, + versioned_model_v2, + versioned_model_v3, + versioned_model_v4_nested_dir, + versioned_model_v12_string, + ext_model, + table_id_unique, + table_id_not_null, + view_id_unique, + ext_source_id_unique, + view_test_nothing, + namespaced_seed, + namespace_model, + namespaced_union_model, + ] + + +@pytest.fixture +def sources( + source, + ext_source, + ext_source_2, + ext_source_other, + ext_source_other_2, +) -> list: + return [source, ext_source, ext_source_2, ext_source_other, ext_source_other_2] + + +@pytest.fixture +def macros( + macro_test_unique, + macro_default_test_unique, + macro_test_not_null, + macro_default_test_not_null, +) -> list: + return [ + macro_test_unique, + macro_default_test_unique, + 
macro_test_not_null, + macro_default_test_not_null, + ] + + +@pytest.fixture +def unit_tests(unit_test_table_model) -> list: + return [unit_test_table_model] + + +@pytest.fixture +def metrics() -> list: + return [] + + +@pytest.fixture +def semantic_models() -> list: + return [] + + +@pytest.fixture +def manifest( + metric, + semantic_model, + nodes, + sources, + macros, + unit_tests, + metrics, + semantic_models, +) -> Manifest: + manifest = Manifest( + nodes={n.unique_id: n for n in nodes}, + sources={s.unique_id: s for s in sources}, + macros={m.unique_id: m for m in macros}, + unit_tests={t.unique_id: t for t in unit_tests}, + semantic_models={s.unique_id: s for s in semantic_models}, + docs={}, + files={}, + exposures={}, + metrics={m.unique_id: m for m in metrics}, + disabled={}, + selectors={}, + groups={}, + metadata=ManifestMetadata(adapter_type="postgres"), + ) + return manifest
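+
+
+# Illustrative usage sketch (an assumption for documentation purposes, not part
+# of the fixtures above): a test can request the `manifest` fixture and receive
+# a fully wired Manifest whose keys follow the unique_id conventions of the
+# factory helpers in this module.
+def test_manifest_fixture_wiring(manifest: Manifest) -> None:
+ # make_seed builds unique_ids as f"seed.{pkg}.{name}"
+ assert "seed.pkg.seed" in manifest.nodes
+ # make_unit_test builds unique_ids as f"unit.{pkg}.{model_name}__{test_name}"
+ assert "unit.pkg.table_model__unit_test_table_model" in manifest.unit_tests
+ # set via ManifestMetadata(adapter_type="postgres") in the manifest fixture
+ assert manifest.metadata.adapter_type == "postgres"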