diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 3cdca1ad352..e55d8b13ece 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.4.0b1 +current_version = 1.5.0a1 parse = (?P<major>\d+) \.(?P<minor>\d+) \.(?P<patch>\d+) diff --git a/.changes/0.0.0.md b/.changes/0.0.0.md index 5359cd07bf2..f3a5e03d1a1 100644 --- a/.changes/0.0.0.md +++ b/.changes/0.0.0.md @@ -3,6 +3,7 @@ For information on prior major and minor releases, see their changelogs: +* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md) * [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md) * [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md) * [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md) diff --git a/.changes/1.4.0-b1.md b/.changes/1.4.0-b1.md deleted file mode 100644 index b2a0e96827c..00000000000 --- a/.changes/1.4.0-b1.md +++ /dev/null @@ -1,89 +0,0 @@ -## dbt-core 1.4.0-b1 - December 15, 2022 - -### Features - -- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) -- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521)) -- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) -- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) -- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) -- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) -- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) -- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) -- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) -- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) - -### Fixes - -- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992)) -- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030)) -- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) -- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params.
([#5436](https://github.com/dbt-labs/dbt-core/issues/5436)) -- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625)) -- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) -- After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) -- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) -- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345)) -- Repair a regression which prevented basic logging before the logging subsystem is completely configured. ([#6434](https://github.com/dbt-labs/dbt-core/issues/6434)) - -### Docs - -- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) -- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528)) -- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) -- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323)) -- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368)) - -### Under the Hood - -- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946)) -- Added flat_graph attribute the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809)) -- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983)) -- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023)) -- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) -- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229)) -- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) -- Fixed extra whitespace in strings introduced by black. ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) -- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171)) -- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173)) -- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) -- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770)) -- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771)) -- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) -- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. 
([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) -- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) -- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) -- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) -- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) -- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) - -### Dependencies - -- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) -- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) -- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) -- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) -- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) - -### Contributors -- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) -- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) -- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) -- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) -- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) -- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) -- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) -- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) -- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) -- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) -- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) -- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) -- [@pgoslatara](https://github.com/pgoslatara) 
([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) -- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) -- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) diff --git a/.changes/1.4.0/Dependency-20220923-000646.yaml b/.changes/1.4.0/Dependency-20220923-000646.yaml deleted file mode 100644 index 0375eeb125f..00000000000 --- a/.changes/1.4.0/Dependency-20220923-000646.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core" -time: 2022-09-23T00:06:46.00000Z -custom: - Author: dependabot[bot] - PR: "5917" diff --git a/.changes/1.4.0/Dependency-20221007-000848.yaml b/.changes/1.4.0/Dependency-20221007-000848.yaml deleted file mode 100644 index 7e36733d14e..00000000000 --- a/.changes/1.4.0/Dependency-20221007-000848.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Bump black from 22.8.0 to 22.10.0" -time: 2022-10-07T00:08:48.00000Z -custom: - Author: dependabot[bot] - PR: "6019" diff --git a/.changes/1.4.0/Dependency-20221020-000753.yaml b/.changes/1.4.0/Dependency-20221020-000753.yaml deleted file mode 100644 index ce0f122826b..00000000000 --- a/.changes/1.4.0/Dependency-20221020-000753.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core" -time: 2022-10-20T00:07:53.00000Z -custom: - Author: dependabot[bot] - PR: "6108" diff --git a/.changes/1.4.0/Dependency-20221026-000910.yaml b/.changes/1.4.0/Dependency-20221026-000910.yaml deleted file mode 100644 index d68fa8a11ef..00000000000 --- a/.changes/1.4.0/Dependency-20221026-000910.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core" -time: 2022-10-26T00:09:10.00000Z -custom: - Author: dependabot[bot] - PR: "6144" diff --git a/.changes/1.4.0/Dependency-20221205-002118.yaml b/.changes/1.4.0/Dependency-20221205-002118.yaml deleted file mode 100644 index f4203a5285c..00000000000 --- a/.changes/1.4.0/Dependency-20221205-002118.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: "Dependencies" -body: "Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core" -time: 2022-12-05T00:21:18.00000Z -custom: - Author: dependabot[bot] - Issue: 4904 - PR: 6375 diff --git a/.changes/1.4.0/Docs-20220908-154157.yaml b/.changes/1.4.0/Docs-20220908-154157.yaml deleted file mode 100644 index e307f3bd5e0..00000000000 --- a/.changes/1.4.0/Docs-20220908-154157.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: minor doc correction -time: 2022-09-08T15:41:57.689162-04:00 -custom: - Author: andy-clapson - Issue: "5791" diff --git a/.changes/1.4.0/Docs-20221007-090656.yaml b/.changes/1.4.0/Docs-20221007-090656.yaml deleted file mode 100644 index 070ecd48944..00000000000 --- a/.changes/1.4.0/Docs-20221007-090656.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: Generate API docs for new CLI interface -time: 2022-10-07T09:06:56.446078-05:00 -custom: - Author: stu-k - Issue: "5528" diff --git a/.changes/1.4.0/Docs-20221017-171411.yaml b/.changes/1.4.0/Docs-20221017-171411.yaml deleted file mode 100644 index 487362c1d5c..00000000000 --- a/.changes/1.4.0/Docs-20221017-171411.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: Docs -time: 2022-10-17T17:14:11.715348-05:00 -custom: - 
Author: paulbenschmidt - Issue: "5880" diff --git a/.changes/1.4.0/Docs-20221116-155743.yaml b/.changes/1.4.0/Docs-20221116-155743.yaml deleted file mode 100644 index 84d90a67b99..00000000000 --- a/.changes/1.4.0/Docs-20221116-155743.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: Fix rendering of sample code for metrics -time: 2022-11-16T15:57:43.204201+01:00 -custom: - Author: jtcohen6 - Issue: "323" diff --git a/.changes/1.4.0/Docs-20221202-150523.yaml b/.changes/1.4.0/Docs-20221202-150523.yaml deleted file mode 100644 index b08a32cddf6..00000000000 --- a/.changes/1.4.0/Docs-20221202-150523.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: Alphabetize `core/dbt/README.md` -time: 2022-12-02T15:05:23.695333-07:00 -custom: - Author: dbeatty10 - Issue: "6368" diff --git a/.changes/1.4.0/Features-20220408-165459.yaml b/.changes/1.4.0/Features-20220408-165459.yaml deleted file mode 100644 index 18675c7244a..00000000000 --- a/.changes/1.4.0/Features-20220408-165459.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: Added favor-state flag to optionally favor state nodes even if unselected node - exists -time: 2022-04-08T16:54:59.696564+01:00 -custom: - Author: daniel-murray josephberni - Issue: "5016" diff --git a/.changes/1.4.0/Features-20220817-154857.yaml b/.changes/1.4.0/Features-20220817-154857.yaml deleted file mode 100644 index ad53df05a3f..00000000000 --- a/.changes/1.4.0/Features-20220817-154857.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. -time: 2022-08-17T15:48:57.225267-04:00 -custom: - Author: gshank - Issue: "5610" diff --git a/.changes/1.4.0/Features-20220823-085727.yaml b/.changes/1.4.0/Features-20220823-085727.yaml deleted file mode 100644 index 4d8daebbf5e..00000000000 --- a/.changes/1.4.0/Features-20220823-085727.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: incremental predicates -time: 2022-08-23T08:57:27.640804-05:00 -custom: - Author: dave-connors-3 - Issue: "5680" - PR: "5702" diff --git a/.changes/1.4.0/Features-20220912-125935.yaml b/.changes/1.4.0/Features-20220912-125935.yaml deleted file mode 100644 index d49f35fd0af..00000000000 --- a/.changes/1.4.0/Features-20220912-125935.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Friendlier error messages when packages.yml is malformed -time: 2022-09-12T12:59:35.121188+01:00 -custom: - Author: jared-rimmer - Issue: "5486" diff --git a/.changes/1.4.0/Features-20220914-095625.yaml b/.changes/1.4.0/Features-20220914-095625.yaml deleted file mode 100644 index d46b1bfa8d8..00000000000 --- a/.changes/1.4.0/Features-20220914-095625.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Migrate dbt-utils current_timestamp macros into core + adapters -time: 2022-09-14T09:56:25.97818-07:00 -custom: - Author: colin-rogers-dbt - Issue: "5521" diff --git a/.changes/1.4.0/Features-20220925-211651.yaml b/.changes/1.4.0/Features-20220925-211651.yaml deleted file mode 100644 index d2c1911c720..00000000000 --- a/.changes/1.4.0/Features-20220925-211651.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Allow partitions in external tables to be supplied as a list -time: 2022-09-25T21:16:51.051239654+02:00 -custom: - Author: pgoslatara - Issue: "5929" diff --git a/.changes/1.4.0/Features-20221003-110705.yaml b/.changes/1.4.0/Features-20221003-110705.yaml deleted file mode 100644 index 637d8be58c6..00000000000 --- a/.changes/1.4.0/Features-20221003-110705.yaml +++ /dev/null @@ 
-1,6 +0,0 @@ -kind: Features -body: extend -f flag shorthand for seed command -time: 2022-10-03T11:07:05.381632-05:00 -custom: - Author: dave-connors-3 - Issue: "5990" diff --git a/.changes/1.4.0/Features-20221102-150003.yaml b/.changes/1.4.0/Features-20221102-150003.yaml deleted file mode 100644 index 9d8ba192687..00000000000 --- a/.changes/1.4.0/Features-20221102-150003.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: This pulls the profile name from args when constructing a RuntimeConfig in lib.py, - enabling the dbt-server to override the value that's in the dbt_project.yml -time: 2022-11-02T15:00:03.000805-05:00 -custom: - Author: racheldaniel - Issue: "6201" diff --git a/.changes/1.4.0/Features-20221107-105018.yaml b/.changes/1.4.0/Features-20221107-105018.yaml deleted file mode 100644 index db6a0ab753a..00000000000 --- a/.changes/1.4.0/Features-20221107-105018.yaml +++ /dev/null @@ -1,8 +0,0 @@ -kind: Features -body: Adding tarball install method for packages. Allowing package tarball to be specified - via url in the packages.yaml. -time: 2022-11-07T10:50:18.464545-05:00 -custom: - Author: timle2 - Issue: "4205" - PR: "4689" diff --git a/.changes/1.4.0/Features-20221114-185207.yaml b/.changes/1.4.0/Features-20221114-185207.yaml deleted file mode 100644 index 459bc8ce234..00000000000 --- a/.changes/1.4.0/Features-20221114-185207.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Added an md5 function to the base context -time: 2022-11-14T18:52:07.788593+02:00 -custom: - Author: haritamar - Issue: "6246" diff --git a/.changes/1.4.0/Features-20221130-112913.yaml b/.changes/1.4.0/Features-20221130-112913.yaml deleted file mode 100644 index 64832de2f68..00000000000 --- a/.changes/1.4.0/Features-20221130-112913.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Exposures support metrics in lineage -time: 2022-11-30T11:29:13.256034-05:00 -custom: - Author: michelleark - Issue: "6057" diff --git a/.changes/1.4.0/Features-20221206-150704.yaml b/.changes/1.4.0/Features-20221206-150704.yaml deleted file mode 100644 index 47939ea5a79..00000000000 --- a/.changes/1.4.0/Features-20221206-150704.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: Add support for Python 3.11 -time: 2022-12-06T15:07:04.753127+01:00 -custom: - Author: joshuataylor MichelleArk jtcohen6 - Issue: "6147" - PR: "6326" diff --git a/.changes/1.4.0/Fixes-20220916-104854.yaml b/.changes/1.4.0/Fixes-20220916-104854.yaml deleted file mode 100644 index bd9af0469a7..00000000000 --- a/.changes/1.4.0/Fixes-20220916-104854.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Account for disabled flags on models in schema files more completely -time: 2022-09-16T10:48:54.162273-05:00 -custom: - Author: emmyoop - Issue: "3992" diff --git a/.changes/1.4.0/Fixes-20221010-113218.yaml b/.changes/1.4.0/Fixes-20221010-113218.yaml deleted file mode 100644 index 5b73b8d9ccd..00000000000 --- a/.changes/1.4.0/Fixes-20221010-113218.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Add validation of enabled config for metrics, exposures and sources -time: 2022-10-10T11:32:18.752322-05:00 -custom: - Author: emmyoop - Issue: "6030" diff --git a/.changes/1.4.0/Fixes-20221011-160715.yaml b/.changes/1.4.0/Fixes-20221011-160715.yaml deleted file mode 100644 index 936546a5232..00000000000 --- a/.changes/1.4.0/Fixes-20221011-160715.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: check length of args of python model function before accessing it -time: 2022-10-11T16:07:15.464093-04:00 -custom: - Author: chamini2 - 
Issue: "6041" diff --git a/.changes/1.4.0/Fixes-20221016-173742.yaml b/.changes/1.4.0/Fixes-20221016-173742.yaml deleted file mode 100644 index c7b00dddba8..00000000000 --- a/.changes/1.4.0/Fixes-20221016-173742.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Add functors to ensure event types with str-type attributes are initialized - to spec, even when provided non-str type params. -time: 2022-10-16T17:37:42.846683-07:00 -custom: - Author: versusfacit - Issue: "5436" diff --git a/.changes/1.4.0/Fixes-20221107-095314.yaml b/.changes/1.4.0/Fixes-20221107-095314.yaml deleted file mode 100644 index 99da9c44522..00000000000 --- a/.changes/1.4.0/Fixes-20221107-095314.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Allow hooks to fail without halting execution flow -time: 2022-11-07T09:53:14.340257-06:00 -custom: - Author: ChenyuLInx - Issue: "5625" diff --git a/.changes/1.4.0/Fixes-20221115-081021.yaml b/.changes/1.4.0/Fixes-20221115-081021.yaml deleted file mode 100644 index 40c81fabacb..00000000000 --- a/.changes/1.4.0/Fixes-20221115-081021.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Clarify Error Message for how many models are allowed in a Python file -time: 2022-11-15T08:10:21.527884-05:00 -custom: - Author: justbldwn - Issue: "6245" diff --git a/.changes/1.4.0/Fixes-20221124-163419.yaml b/.changes/1.4.0/Fixes-20221124-163419.yaml deleted file mode 100644 index 010a073269a..00000000000 --- a/.changes/1.4.0/Fixes-20221124-163419.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: After this, will be possible to use default values for dbt.config.get -time: 2022-11-24T16:34:19.039512764-03:00 -custom: - Author: devmessias - Issue: "6309" - PR: "6317" diff --git a/.changes/1.4.0/Fixes-20221202-164859.yaml b/.changes/1.4.0/Fixes-20221202-164859.yaml deleted file mode 100644 index 6aad4ced192..00000000000 --- a/.changes/1.4.0/Fixes-20221202-164859.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Use full path for writing manifest -time: 2022-12-02T16:48:59.029519-05:00 -custom: - Author: gshank - Issue: "6055" diff --git a/.changes/1.4.0/Fixes-20221213-112620.yaml b/.changes/1.4.0/Fixes-20221213-112620.yaml deleted file mode 100644 index a2220f9a920..00000000000 --- a/.changes/1.4.0/Fixes-20221213-112620.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: '[CT-1284] Change Python model default materialization to table' -time: 2022-12-13T11:26:20.550017-08:00 -custom: - Author: aranke - Issue: "6345" diff --git a/.changes/1.4.0/Fixes-20221214-155307.yaml b/.changes/1.4.0/Fixes-20221214-155307.yaml deleted file mode 100644 index cb37e0a809c..00000000000 --- a/.changes/1.4.0/Fixes-20221214-155307.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Repair a regression which prevented basic logging before the logging subsystem - is completely configured. 
-time: 2022-12-14T15:53:07.396512-05:00 -custom: - Author: peterallenwebb - Issue: "6434" diff --git a/.changes/1.4.0/Under the Hood-20220927-194259.yaml b/.changes/1.4.0/Under the Hood-20220927-194259.yaml deleted file mode 100644 index b6cb64b0155..00000000000 --- a/.changes/1.4.0/Under the Hood-20220927-194259.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Put black config in explicit config -time: 2022-09-27T19:42:59.241433-07:00 -custom: - Author: max-sixty - Issue: "5946" diff --git a/.changes/1.4.0/Under the Hood-20220929-134406.yaml b/.changes/1.4.0/Under the Hood-20220929-134406.yaml deleted file mode 100644 index b0175190747..00000000000 --- a/.changes/1.4.0/Under the Hood-20220929-134406.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Added flat_graph attribute the Manifest class's deepcopy() coverage -time: 2022-09-29T13:44:06.275941-04:00 -custom: - Author: peterallenwebb - Issue: "5809" diff --git a/.changes/1.4.0/Under the Hood-20221005-120310.yaml b/.changes/1.4.0/Under the Hood-20221005-120310.yaml deleted file mode 100644 index 797be31c319..00000000000 --- a/.changes/1.4.0/Under the Hood-20221005-120310.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Add mypy configs so `mypy` passes from CLI -time: 2022-10-05T12:03:10.061263-07:00 -custom: - Author: max-sixty - Issue: "5983" diff --git a/.changes/1.4.0/Under the Hood-20221007-094627.yaml b/.changes/1.4.0/Under the Hood-20221007-094627.yaml deleted file mode 100644 index d3a5da61566..00000000000 --- a/.changes/1.4.0/Under the Hood-20221007-094627.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Exception message cleanup. -time: 2022-10-07T09:46:27.682872-05:00 -custom: - Author: emmyoop - Issue: "6023" diff --git a/.changes/1.4.0/Under the Hood-20221007-140044.yaml b/.changes/1.4.0/Under the Hood-20221007-140044.yaml deleted file mode 100644 index 971d5a40ce8..00000000000 --- a/.changes/1.4.0/Under the Hood-20221007-140044.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Add dmypy cache to gitignore -time: 2022-10-07T14:00:44.227644-07:00 -custom: - Author: max-sixty - Issue: "6028" diff --git a/.changes/1.4.0/Under the Hood-20221013-181912.yaml b/.changes/1.4.0/Under the Hood-20221013-181912.yaml deleted file mode 100644 index 4f5218891b4..00000000000 --- a/.changes/1.4.0/Under the Hood-20221013-181912.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Provide useful errors when the value of 'materialized' is invalid -time: 2022-10-13T18:19:12.167548-04:00 -custom: - Author: peterallenwebb - Issue: "5229" diff --git a/.changes/1.4.0/Under the Hood-20221017-151511.yaml b/.changes/1.4.0/Under the Hood-20221017-151511.yaml deleted file mode 100644 index 94f4d27d6de..00000000000 --- a/.changes/1.4.0/Under the Hood-20221017-151511.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Fixed extra whitespace in strings introduced by black. 
-time: 2022-10-17T15:15:11.499246-05:00 -custom: - Author: luke-bassett - Issue: "1350" diff --git a/.changes/1.4.0/Under the Hood-20221017-155844.yaml b/.changes/1.4.0/Under the Hood-20221017-155844.yaml deleted file mode 100644 index c46ef040410..00000000000 --- a/.changes/1.4.0/Under the Hood-20221017-155844.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Clean up string formatting -time: 2022-10-17T15:58:44.676549-04:00 -custom: - Author: eve-johns - Issue: "6068" diff --git a/.changes/1.4.0/Under the Hood-20221028-104837.yaml b/.changes/1.4.0/Under the Hood-20221028-104837.yaml deleted file mode 100644 index 446d4898920..00000000000 --- a/.changes/1.4.0/Under the Hood-20221028-104837.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Remove the 'root_path' field from most nodes -time: 2022-10-28T10:48:37.687886-04:00 -custom: - Author: gshank - Issue: "6171" diff --git a/.changes/1.4.0/Under the Hood-20221028-110344.yaml b/.changes/1.4.0/Under the Hood-20221028-110344.yaml deleted file mode 100644 index cbe8dacb3d5..00000000000 --- a/.changes/1.4.0/Under the Hood-20221028-110344.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Combine certain logging events with different levels -time: 2022-10-28T11:03:44.887836-04:00 -custom: - Author: gshank - Issue: "6173" diff --git a/.changes/1.4.0/Under the Hood-20221108-074550.yaml b/.changes/1.4.0/Under the Hood-20221108-074550.yaml deleted file mode 100644 index a8fbc7e208b..00000000000 --- a/.changes/1.4.0/Under the Hood-20221108-074550.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Convert threading tests to pytest -time: 2022-11-08T07:45:50.589147-06:00 -custom: - Author: stu-k - Issue: "5942" diff --git a/.changes/1.4.0/Under the Hood-20221108-115633.yaml b/.changes/1.4.0/Under the Hood-20221108-115633.yaml deleted file mode 100644 index ea073719cda..00000000000 --- a/.changes/1.4.0/Under the Hood-20221108-115633.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Convert postgres index tests to pytest -time: 2022-11-08T11:56:33.743042-06:00 -custom: - Author: stu-k - Issue: "5770" diff --git a/.changes/1.4.0/Under the Hood-20221108-133104.yaml b/.changes/1.4.0/Under the Hood-20221108-133104.yaml deleted file mode 100644 index 6829dc097eb..00000000000 --- a/.changes/1.4.0/Under the Hood-20221108-133104.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Convert use color tests to pytest -time: 2022-11-08T13:31:04.788547-06:00 -custom: - Author: stu-k - Issue: "5771" diff --git a/.changes/1.4.0/Under the Hood-20221116-130037.yaml b/.changes/1.4.0/Under the Hood-20221116-130037.yaml deleted file mode 100644 index ecdedd6bd2d..00000000000 --- a/.changes/1.4.0/Under the Hood-20221116-130037.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Add github actions workflow to generate high level CLI API docs -time: 2022-11-16T13:00:37.916202-06:00 -custom: - Author: stu-k - Issue: "5942" diff --git a/.changes/1.4.0/Under the Hood-20221118-145717.yaml b/.changes/1.4.0/Under the Hood-20221118-145717.yaml deleted file mode 100644 index 934cd9dd5cb..00000000000 --- a/.changes/1.4.0/Under the Hood-20221118-145717.yaml +++ /dev/null @@ -1,8 +0,0 @@ -kind: Under the Hood -body: Functionality-neutral refactor of event logging system to improve encapsulation - and modularity. 
-time: 2022-11-18T14:57:17.792622-05:00 -custom: - Author: peterallenwebb - Issue: "6139" - PR: "6291" diff --git a/.changes/1.4.0/Under the Hood-20221205-164948.yaml b/.changes/1.4.0/Under the Hood-20221205-164948.yaml deleted file mode 100644 index 579f973955b..00000000000 --- a/.changes/1.4.0/Under the Hood-20221205-164948.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Consolidate ParsedNode and CompiledNode classes -time: 2022-12-05T16:49:48.563583-05:00 -custom: - Author: gshank - Issue: "6383" - PR: "6384" diff --git a/.changes/1.4.0/Under the Hood-20221206-094015.yaml b/.changes/1.4.0/Under the Hood-20221206-094015.yaml deleted file mode 100644 index ebcb9999430..00000000000 --- a/.changes/1.4.0/Under the Hood-20221206-094015.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Prevent doc gen workflow from running on forks -time: 2022-12-06T09:40:15.301984-06:00 -custom: - Author: stu-k - Issue: "6386" - PR: "6390" diff --git a/.changes/1.4.0/Under the Hood-20221206-113053.yaml b/.changes/1.4.0/Under the Hood-20221206-113053.yaml deleted file mode 100644 index a1f94f68f43..00000000000 --- a/.changes/1.4.0/Under the Hood-20221206-113053.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Fix intermittent database connection failure in Windows CI test -time: 2022-12-06T11:30:53.166009-07:00 -custom: - Author: MichelleArk dbeatty10 - Issue: "6394" - PR: "6395" diff --git a/.changes/1.4.0/Under the Hood-20221211-214240.yaml b/.changes/1.4.0/Under the Hood-20221211-214240.yaml deleted file mode 100644 index adeaefba257..00000000000 --- a/.changes/1.4.0/Under the Hood-20221211-214240.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Refactor and clean up manifest nodes -time: 2022-12-11T21:42:40.560074-05:00 -custom: - Author: gshank - Issue: "6426" - PR: "6427" diff --git a/.changes/1.4.0/Under the Hood-20221213-214106.yaml b/.changes/1.4.0/Under the Hood-20221213-214106.yaml deleted file mode 100644 index 708c84661d6..00000000000 --- a/.changes/1.4.0/Under the Hood-20221213-214106.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Restore important legacy logging behaviors, following refactor which removed - them -time: 2022-12-13T21:41:06.815133-05:00 -custom: - Author: peterallenwebb - Issue: "6437" diff --git a/.changes/unreleased/Breaking Changes-20221205-141937.yaml b/.changes/unreleased/Breaking Changes-20221205-141937.yaml deleted file mode 100644 index be840b20a99..00000000000 --- a/.changes/unreleased/Breaking Changes-20221205-141937.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: Breaking Changes -body: Cleaned up exceptions to directly raise in code. Removed use of all exception - functions in the code base and marked them all as deprecated to be removed next - minor release. 
-time: 2022-12-05T14:19:37.863032-06:00 -custom: - Author: emmyoop - Issue: "6339" - PR: "6347" diff --git a/.changes/unreleased/Dependencies-20230104-000306.yaml b/.changes/unreleased/Dependencies-20230104-000306.yaml deleted file mode 100644 index 9da884ff595..00000000000 --- a/.changes/unreleased/Dependencies-20230104-000306.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Update agate requirement from <1.6.4,>=1.6 to >=1.6,<1.7.1 in /core" -time: 2023-01-04T00:03:06.00000Z -custom: - Author: dependabot[bot] - PR: 6506 diff --git a/.changes/unreleased/Features-20221207-091722.yaml b/.changes/unreleased/Features-20221207-091722.yaml deleted file mode 100644 index 16845f3663e..00000000000 --- a/.changes/unreleased/Features-20221207-091722.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: Making timestamp optional for metrics -time: 2022-12-07T09:17:22.571877-06:00 -custom: - Author: callum-mcdata - Issue: "6398" - PR: "9400" diff --git a/.changes/unreleased/Fixes-20221113-104150.yaml b/.changes/unreleased/Fixes-20221113-104150.yaml deleted file mode 100644 index 75c34bda436..00000000000 --- a/.changes/unreleased/Fixes-20221113-104150.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: fix missing f-strings, convert old .format() messages to f-strings for consistency -time: 2022-11-13T10:41:50.009727-05:00 -custom: - Author: justbldwn - Issue: "6241" - PR: "6243" diff --git a/.changes/unreleased/Fixes-20221117-220320.yaml b/.changes/unreleased/Fixes-20221117-220320.yaml deleted file mode 100644 index 2f71fe213fc..00000000000 --- a/.changes/unreleased/Fixes-20221117-220320.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Fix typo in util.py -time: 2022-11-17T22:03:20.4836855+09:00 -custom: - Author: eltociear - Issue: "4904" - PR: "6037" diff --git a/.changes/unreleased/Fixes-20221212-115912.yaml b/.changes/unreleased/Fixes-20221212-115912.yaml deleted file mode 100644 index 1dc428830eb..00000000000 --- a/.changes/unreleased/Fixes-20221212-115912.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: add pre-commit install to make dev script in Makefile -time: 2022-12-12T11:59:12.175136-05:00 -custom: - Author: justbldwn - Issue: "6269" - PR: "6417" diff --git a/.changes/unreleased/Fixes-20221213-113915.yaml b/.changes/unreleased/Fixes-20221213-113915.yaml deleted file mode 100644 index b92a2d6cbc9..00000000000 --- a/.changes/unreleased/Fixes-20221213-113915.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: '[CT-1591] Don''t parse empty Python files' -time: 2022-12-13T11:39:15.818464-08:00 -custom: - Author: aranke - Issue: "6345" diff --git a/.changes/unreleased/Fixes-20230101-223405.yaml b/.changes/unreleased/Fixes-20230101-223405.yaml deleted file mode 100644 index d90e24aaa56..00000000000 --- a/.changes/unreleased/Fixes-20230101-223405.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Bug when partial parsing with an empty schema file -time: 2023-01-01T22:34:05.97322-05:00 -custom: - Author: gshank - Issue: "4850" diff --git a/.changes/unreleased/Fixes-20230104-141047.yaml b/.changes/unreleased/Fixes-20230104-141047.yaml deleted file mode 100644 index 9d5466fbe68..00000000000 --- a/.changes/unreleased/Fixes-20230104-141047.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Fix DBT_FAVOR_STATE env var -time: 2023-01-04T14:10:47.637495-08:00 -custom: - Author: NiallRees - Issue: "5859" - PR: "6392" diff --git a/.changes/unreleased/Fixes-20230116-123645.yaml b/.changes/unreleased/Fixes-20230116-123645.yaml new file mode 100644 index 
00000000000..ee15803a297 --- /dev/null +++ b/.changes/unreleased/Fixes-20230116-123645.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Respect quoting config for dbt.ref() + dbt.source() in dbt-py models +time: 2023-01-16T12:36:45.63092+01:00 +custom: + Author: jtcohen6 + Issue: "6103" diff --git a/.changes/unreleased/Fixes-20230116-123709.yaml b/.changes/unreleased/Fixes-20230116-123709.yaml new file mode 100644 index 00000000000..56788519d0a --- /dev/null +++ b/.changes/unreleased/Fixes-20230116-123709.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Respect quoting config for dbt.this() in dbt-py models +time: 2023-01-16T12:37:09.000659+01:00 +custom: + Author: jtcohen6 + Issue: "6619" diff --git a/.changes/unreleased/Fixes-20230117-101342.yaml b/.changes/unreleased/Fixes-20230117-101342.yaml new file mode 100644 index 00000000000..9a879e60a89 --- /dev/null +++ b/.changes/unreleased/Fixes-20230117-101342.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Provide backward compatibility for `get_merge_sql` arguments +time: 2023-01-17T10:13:42.118336-06:00 +custom: + Author: dave-connors-3 + Issue: "6625" diff --git a/.changes/unreleased/Under the Hood-20221219-193435.yaml b/.changes/unreleased/Under the Hood-20221219-193435.yaml deleted file mode 100644 index 82388dbb759..00000000000 --- a/.changes/unreleased/Under the Hood-20221219-193435.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Treat dense text blobs as binary for `git grep` -time: 2022-12-19T19:34:35.890275-07:00 -custom: - Author: dbeatty10 - Issue: "6294" diff --git a/.changes/unreleased/Under the Hood-20221221-121904.yaml b/.changes/unreleased/Under the Hood-20221221-121904.yaml deleted file mode 100644 index d1f2f03bef7..00000000000 --- a/.changes/unreleased/Under the Hood-20221221-121904.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Prune partial parsing logging events -time: 2022-12-21T12:19:04.7402-05:00 -custom: - Author: gshank - Issue: "6313" diff --git a/.changes/unreleased/Under the Hood-20230104-155257.yaml b/.changes/unreleased/Under the Hood-20230104-155257.yaml deleted file mode 100644 index 2d10f09d857..00000000000 --- a/.changes/unreleased/Under the Hood-20230104-155257.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Updating the deprecation warning in the metric attributes renamed event -time: 2023-01-04T15:52:57.916398-06:00 -custom: - Author: callum-mcdata - Issue: "6507" diff --git a/.changes/unreleased/Under the Hood-20230106-112855.yaml b/.changes/unreleased/Under the Hood-20230106-112855.yaml deleted file mode 100644 index 1344b3397c0..00000000000 --- a/.changes/unreleased/Under the Hood-20230106-112855.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: '[CT-1693] Port severity test to Pytest' -time: 2023-01-06T11:28:55.800547-08:00 -custom: - Author: aranke - Issue: "6466" diff --git a/.changes/unreleased/Under the Hood-20230113-132513.yaml b/.changes/unreleased/Under the Hood-20230113-132513.yaml new file mode 100644 index 00000000000..2274fbc01a7 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230113-132513.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Fix use of ConnectionReused logging event +time: 2023-01-13T13:25:13.023168-05:00 +custom: + Author: gshank + Issue: "6168" diff --git a/.changes/unreleased/Under the Hood-20230113-150700.yaml b/.changes/unreleased/Under the Hood-20230113-150700.yaml new file mode 100644 index 00000000000..178603104e9 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230113-150700.yaml @@ -0,0 +1,6 @@ +kind: 
Under the Hood +body: Port docs tests to pytest +time: 2023-01-13T15:07:00.477038-05:00 +custom: + Author: peterallenwebb + Issue: "6573" diff --git a/.changes/unreleased/Under the Hood-20230117-111737.yaml b/.changes/unreleased/Under the Hood-20230117-111737.yaml new file mode 100644 index 00000000000..126a25ea28a --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230117-111737.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Update deprecated github action command +time: 2023-01-17T11:17:37.046095-06:00 +custom: + Author: davidbloss + Issue: "6153" diff --git a/.flake8 b/.flake8 index 38b207c6e9b..e39b2fa4646 100644 --- a/.flake8 +++ b/.flake8 @@ -9,4 +9,4 @@ ignore = E203 # makes Flake8 work like black E741 E501 # long line checking is done in black -exclude = test +exclude = test/ diff --git a/.github/_README.md b/.github/_README.md index 4da081fe2b6..f624fc5fec6 100644 --- a/.github/_README.md +++ b/.github/_README.md @@ -63,12 +63,12 @@ permissions: contents: read pull-requests: write ``` - + ### Secrets - When to use a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) vs the [GITHUB_TOKEN](https://docs.github.com/en/actions/security-guides/automatic-token-authentication) generated for the action? The `GITHUB_TOKEN` is used by default. In most cases it is sufficient for what you need. - + If you expect the workflow to result in a commit to that should retrigger workflows, you will need to use a Personal Access Token for the bot to commit the file. When using the GITHUB_TOKEN, the resulting commit will not trigger another GitHub Actions Workflow run. This is due to limitations set by GitHub. See [the docs](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow) for a more detailed explanation. For example, we must use a PAT in our workflow to commit a new changelog yaml file for bot PRs. Once the file has been committed to the branch, it should retrigger the check to validate that a changelog exists on the PR. Otherwise, it would stay in a failed state since the check would never retrigger. @@ -105,7 +105,7 @@ Some triggers of note that we use: ``` # **what?** - # Describe what the action does. + # Describe what the action does. # **why?** # Why does this action exist? @@ -138,7 +138,7 @@ Some triggers of note that we use: id: fp run: | FILEPATH=.changes/unreleased/Dependencies-${{ steps.filename_time.outputs.time }}.yaml - echo "::set-output name=FILEPATH::$FILEPATH" + echo "FILEPATH=$FILEPATH" >> $GITHUB_OUTPUT ``` - Print out all variables you will reference as the first step of a job. This allows for easier debugging. The first job should log all inputs. Subsequent jobs should reference outputs of other jobs, if present. 
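Throughout this diff, the deprecated `::set-output` workflow command is replaced by appending `name=value` lines to the file named by `$GITHUB_OUTPUT`. A minimal Python sketch of that pattern, in the spirit of `latest-wrangler/main.py` below (the helper name and the missing-variable check are illustrative assumptions, not code from this PR):

```python
import os


def set_github_output(name: str, value: str) -> None:
    # The Actions runner exports GITHUB_OUTPUT as the path of a file that
    # collects step outputs; each entry must be a newline-terminated
    # name=value pair, or consecutive entries run together.
    output_path = os.environ.get("GITHUB_OUTPUT")
    if output_path is None:
        raise RuntimeError("GITHUB_OUTPUT is unset; not running under GitHub Actions")
    with open(output_path, "at", encoding="utf-8") as gh_output:
        gh_output.write(f"{name}={value}\n")


# Equivalent to the shell form used in the workflows:
#   echo "latest=True" >> $GITHUB_OUTPUT
set_github_output("latest", "True")
```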
@@ -158,14 +158,14 @@ Some triggers of note that we use: echo "The build_script_path: ${{ inputs.build_script_path }}" echo "The s3_bucket_name: ${{ inputs.s3_bucket_name }}" echo "The package_test_command: ${{ inputs.package_test_command }}" - + # collect all the variables that need to be used in subsequent jobs - name: Set Variables id: variables run: | - echo "::set-output name=important_path::'performance/runner/Cargo.toml'" - echo "::set-output name=release_id::${{github.event.inputs.release_id}}" - echo "::set-output name=open_prs::${{github.event.inputs.open_prs}}" + echo "important_path='performance/runner/Cargo.toml'" >> $GITHUB_OUTPUT + echo "release_id=${{github.event.inputs.release_id}}" >> $GITHUB_OUTPUT + echo "open_prs=${{github.event.inputs.open_prs}}" >> $GITHUB_OUTPUT job2: needs: [job1] @@ -190,7 +190,7 @@ ___ ### Actions from the Marketplace - Don’t use external actions for things that can easily be accomplished manually. - Always read through what an external action does before using it! Often an action in the GitHub Actions Marketplace can be replaced with a few lines in bash. This is much more maintainable (and won’t change under us) and clear as to what’s actually happening. It also prevents any -- Pin actions _we don't control_ to tags. +- Pin actions _we don't control_ to tags. ### Connecting to AWS - Authenticate with the aws managed workflow @@ -208,7 +208,7 @@ ___ ```yaml - name: Copy Artifacts from S3 via CLI - run: aws s3 cp ${{ env.s3_bucket }} . --recursive + run: aws s3 cp ${{ env.s3_bucket }} . --recursive ``` ### Testing diff --git a/.github/actions/latest-wrangler/main.py b/.github/actions/latest-wrangler/main.py index 23e14cf5abe..db91cf8354b 100644 --- a/.github/actions/latest-wrangler/main.py +++ b/.github/actions/latest-wrangler/main.py @@ -28,11 +28,12 @@ if package_request.status_code == 404: if halt_on_missing: sys.exit(1) - else: - # everything is the latest if the package doesn't exist - print(f"::set-output name=latest::{True}") - print(f"::set-output name=minor_latest::{True}") - sys.exit(0) + # everything is the latest if the package doesn't exist + github_output = os.environ.get("GITHUB_OUTPUT") + with open(github_output, "at", encoding="utf-8") as gh_output: + gh_output.write("latest=True\n") + gh_output.write("minor_latest=True\n") + sys.exit(0) # TODO: verify package meta is "correct" # https://github.com/dbt-labs/dbt-core/issues/4640 @@ -91,5 +92,7 @@ def is_latest( latest = is_latest(pre_rel, new_version, current_latest) minor_latest = is_latest(pre_rel, new_version, current_minor_latest) - print(f"::set-output name=latest::{latest}") - print(f"::set-output name=minor_latest::{minor_latest}") + github_output = os.environ.get("GITHUB_OUTPUT") + with open(github_output, "at", encoding="utf-8") as gh_output: + gh_output.write(f"latest={latest}\n") + gh_output.write(f"minor_latest={minor_latest}\n") diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 8138b730d34..c8347f6b069 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -101,7 +101,9 @@ jobs: - name: Get current date if: always() id: date - run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts + run: | + CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts + echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT - uses: actions/upload-artifact@v2 if: always() @@ -168,7 +170,9 @@ jobs: - name: Get current date if: always() id: date - run: echo "::set-output name=date::$(date 
+'%Y_%m_%dT%H_%M_%S')" #no colons allowed for artifacts + run: | + CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts + echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT - uses: actions/upload-artifact@v2 if: always() diff --git a/.github/workflows/release-branch-tests.yml b/.github/workflows/release-branch-tests.yml index 3b329f17b6c..bdd01aa495a 100644 --- a/.github/workflows/release-branch-tests.yml +++ b/.github/workflows/release-branch-tests.yml @@ -39,7 +39,7 @@ jobs: max-parallel: 1 fail-fast: false matrix: - branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, main] + branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, 1.4.latest, main] steps: - name: Call CI workflow for ${{ matrix.branch }} branch diff --git a/.github/workflows/release-docker.yml b/.github/workflows/release-docker.yml index f47f110aeb1..f7b8dc29543 100644 --- a/.github/workflows/release-docker.yml +++ b/.github/workflows/release-docker.yml @@ -41,9 +41,9 @@ jobs: id: version run: | IFS="." read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }} - echo "::set-output name=major::$MAJOR" - echo "::set-output name=minor::$MINOR" - echo "::set-output name=patch::$PATCH" + echo "major=$MAJOR" >> $GITHUB_OUTPUT + echo "minor=$MINOR" >> $GITHUB_OUTPUT + echo "patch=$PATCH" >> $GITHUB_OUTPUT - name: Is pkg 'latest' id: latest @@ -70,8 +70,10 @@ jobs: - name: Get docker build arg id: build_arg run: | - echo "::set-output name=build_arg_name::"$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g') - echo "::set-output name=build_arg_value::"$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g') + BUILD_ARG_NAME=$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g') + BUILD_ARG_VALUE=$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g') + echo "build_arg_name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT + echo "build_arg_value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT - name: Log in to the GHCR uses: docker/login-action@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1abab3e5013..ade939b6ee3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -165,7 +165,7 @@ jobs: env: IS_PRERELEASE: ${{ contains(github.event.inputs.version_number, 'rc') || contains(github.event.inputs.version_number, 'b') }} run: | - echo ::set-output name=isPrerelease::$IS_PRERELEASE + echo "isPrerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT - name: Creating GitHub Release uses: softprops/action-gh-release@v1 diff --git a/.github/workflows/version-bump.yml b/.github/workflows/version-bump.yml index 1a5be6aefb1..2bbaf1cef82 100644 --- a/.github/workflows/version-bump.yml +++ b/.github/workflows/version-bump.yml @@ -65,7 +65,7 @@ jobs: - name: Set branch value id: variables run: | - echo "::set-output name=BRANCH_NAME::prep-release/${{ github.event.inputs.version_number }}_$GITHUB_RUN_ID" + echo "BRANCH_NAME=prep-release/${{ github.event.inputs.version_number }}_$GITHUB_RUN_ID" >> $GITHUB_OUTPUT - name: Create PR branch run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a91696f68b..45347e50b1f 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,102 +5,12 @@ - "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version. - Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). 
For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry) -## dbt-core 1.4.0-b1 - December 15, 2022 - -### Features - -- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) -- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521)) -- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) -- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) -- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) -- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) -- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) -- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) -- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) -- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) - -### Fixes - -- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992)) -- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030)) -- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) -- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. ([#5436](https://github.com/dbt-labs/dbt-core/issues/5436)) -- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625)) -- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) -- After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) -- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) -- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345)) -- Repair a regression which prevented basic logging before the logging subsystem is completely configured. 
([#6434](https://github.com/dbt-labs/dbt-core/issues/6434)) - -### Docs - -- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) -- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528)) -- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) -- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323)) -- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368)) - -### Under the Hood - -- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946)) -- Added flat_graph attribute the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809)) -- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983)) -- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023)) -- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) -- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229)) -- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) -- Fixed extra whitespace in strings introduced by black. ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) -- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171)) -- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173)) -- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) -- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770)) -- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771)) -- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) -- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. 
([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) -- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) -- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) -- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) -- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) -- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) - -### Dependencies - -- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) -- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) -- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) -- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) -- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) - -### Contributors -- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) -- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) -- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) -- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) -- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) -- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) -- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) -- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) -- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) -- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) -- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) -- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) -- [@pgoslatara](https://github.com/pgoslatara) 
([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) -- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) -- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) - - ## Previous Releases For information on prior major and minor releases, see their changelogs: +* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md) * [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md) * [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md) * [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md) diff --git a/core/dbt/adapters/base/column.py b/core/dbt/adapters/base/column.py index b47aac64062..3c6246b33a6 100644 --- a/core/dbt/adapters/base/column.py +++ b/core/dbt/adapters/base/column.py @@ -2,7 +2,7 @@ import re from typing import Dict, ClassVar, Any, Optional -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError @dataclass @@ -85,7 +85,7 @@ def is_numeric(self) -> bool: def string_size(self) -> int: if not self.is_string(): - raise RuntimeException("Called string_size() on non-string field!") + raise DbtRuntimeError("Called string_size() on non-string field!") if self.dtype == "text" or self.char_size is None: # char_size should never be None. Handle it reasonably just in case @@ -124,7 +124,7 @@ def __repr__(self) -> str: def from_description(cls, name: str, raw_data_type: str) -> "Column": match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type) if match is None: - raise RuntimeException(f'Could not interpret data type "{raw_data_type}"') + raise DbtRuntimeError(f'Could not interpret data type "{raw_data_type}"') data_type, size_info = match.groups() char_size = None numeric_precision = None @@ -137,7 +137,7 @@ def from_description(cls, name: str, raw_data_type: str) -> "Column": try: char_size = int(parts[0]) except ValueError: - raise RuntimeException( + raise DbtRuntimeError( f'Could not interpret data_type "{raw_data_type}": ' f'could not convert "{parts[0]}" to an integer' ) @@ -145,14 +145,14 @@ def from_description(cls, name: str, raw_data_type: str) -> "Column": try: numeric_precision = int(parts[0]) except ValueError: - raise RuntimeException( + raise DbtRuntimeError( f'Could not interpret data_type "{raw_data_type}": ' f'could not convert "{parts[0]}" to an integer' ) try: numeric_scale = int(parts[1]) except ValueError: - raise RuntimeException( + raise DbtRuntimeError( f'Could not interpret data_type "{raw_data_type}": ' f'could not convert "{parts[1]}" to an integer' ) diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py index 577cdf6d9a6..d449b27e5e6 100644 --- a/core/dbt/adapters/base/connections.py +++ b/core/dbt/adapters/base/connections.py @@ -91,13 +91,13 @@ def get_thread_connection(self) -> Connection: key = self.get_thread_identifier() with self.lock: if key not in self.thread_connections: - raise dbt.exceptions.InvalidConnectionException(key, list(self.thread_connections)) + raise dbt.exceptions.InvalidConnectionError(key, list(self.thread_connections)) return self.thread_connections[key] def set_thread_connection(self, conn: Connection) -> None: key = self.get_thread_identifier() if key in self.thread_connections: - raise 
dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( "In set_thread_connection, existing connection exists for {}" ) self.thread_connections[key] = conn @@ -137,49 +137,49 @@ def exception_handler(self, sql: str) -> ContextManager: :return: A context manager that handles exceptions raised by the underlying database. """ - raise dbt.exceptions.NotImplementedException( + raise dbt.exceptions.NotImplementedError( "`exception_handler` is not implemented for this adapter!" ) def set_connection_name(self, name: Optional[str] = None) -> Connection: - conn_name: str - if name is None: - # if a name isn't specified, we'll re-use a single handle - # named 'master' - conn_name = "master" - else: - if not isinstance(name, str): - raise dbt.exceptions.CompilerException( - f"For connection name, got {name} - not a string!" - ) - assert isinstance(name, str) - conn_name = name + """Called by 'acquire_connection' in BaseAdapter, which is called by + 'connection_named', called by 'connection_for(node)'. + Creates a connection for this thread if one doesn't already + exist, and will rename an existing connection.""" + + conn_name: str = "master" if name is None else name + # Get a connection for this thread conn = self.get_if_exists() + + if conn and conn.name == conn_name and conn.state == "open": + # Found a connection and nothing to do, so just return it + return conn + if conn is None: + # Create a new connection conn = Connection( type=Identifier(self.TYPE), - name=None, + name=conn_name, state=ConnectionState.INIT, transaction_open=False, handle=None, credentials=self.profile.credentials, ) - self.set_thread_connection(conn) - - if conn.name == conn_name and conn.state == "open": - return conn - - fire_event( - NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info()) - ) - - if conn.state == "open": - fire_event(ConnectionReused(conn_name=conn_name)) - else: conn.handle = LazyHandle(self.open) + # Add the connection to thread_connections for this thread + self.set_thread_connection(conn) + fire_event( + NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info()) + ) + else: # existing connection either wasn't open or didn't have the right name + if conn.state != "open": + conn.handle = LazyHandle(self.open) + if conn.name != conn_name: + orig_conn_name: str = conn.name or "" + conn.name = conn_name + fire_event(ConnectionReused(orig_conn_name=orig_conn_name, conn_name=conn_name)) - conn.name = conn_name return conn @classmethod @@ -211,7 +211,7 @@ def retry_connection( connect should trigger a retry. :type retryable_exceptions: Iterable[Type[Exception]] :param int retry_limit: How many times to retry the call to connect. If this limit - is exceeded before a successful call, a FailedToConnectException will be raised. + is exceeded before a successful call, a FailedToConnectError will be raised. Must be non-negative. :param retry_timeout: Time to wait between attempts to connect. Can also take a Callable that takes the number of attempts so far, beginning at 0, and returns an int @@ -220,14 +220,14 @@ def retry_connection( :param int _attempts: Parameter used to keep track of the number of attempts in calling the connect function across recursive calls. Passed as an argument to retry_timeout if it is a Callable. This parameter should not be set by the initial caller. 
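The rewritten `set_connection_name` above replaces the tangled early-return flow with three explicit branches: reuse an open, correctly named connection as-is; lazily create a new one; or re-arm and rename an existing one. A minimal standalone sketch of that branching, with plain Python stand-ins for dbt's `Connection` and `LazyHandle` (all names here are illustrative, not dbt's API):

```python
from dataclasses import dataclass
from typing import Callable, Optional

@dataclass
class Conn:
    """Stand-in for dbt's Connection."""
    name: Optional[str]
    state: str                      # "init" | "open" | "closed"
    handle: Optional[Callable[[], object]] = None

def set_connection_name(existing: Optional[Conn], name: Optional[str],
                        open_fn: Callable[[], object]) -> Conn:
    conn_name = "master" if name is None else name

    # Branch 1: an open connection with the right name -- nothing to do.
    if existing and existing.name == conn_name and existing.state == "open":
        return existing

    # Branch 2: no connection for this thread yet -- create one named up
    # front, deferring the real open (dbt wraps open_fn in LazyHandle).
    if existing is None:
        return Conn(name=conn_name, state="init", handle=open_fn)

    # Branch 3: existing connection is closed and/or misnamed -- fix it up.
    if existing.state != "open":
        existing.handle = open_fn   # re-arm the deferred open
    if existing.name != conn_name:
        existing.name = conn_name   # dbt fires ConnectionReused here
    return existing
```

The detail worth noting from the diff: new connections are now created with `name=conn_name` directly, rather than with `name=None` and patched after the fact.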
- :raises dbt.exceptions.FailedToConnectException: Upon exhausting all retry attempts without + :raises dbt.exceptions.FailedToConnectError: Upon exhausting all retry attempts without successfully acquiring a handle. :return: The given connection with its appropriate state and handle attributes set depending on whether we successfully acquired a handle or not. """ timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout if timeout < 0: - raise dbt.exceptions.FailedToConnectException( + raise dbt.exceptions.FailedToConnectError( "retry_timeout cannot be negative or return a negative time." ) @@ -235,7 +235,7 @@ def retry_connection( # This guard is not perfect others may add to the recursion limit (e.g. built-ins). connection.handle = None connection.state = ConnectionState.FAIL - raise dbt.exceptions.FailedToConnectException("retry_limit cannot be negative") + raise dbt.exceptions.FailedToConnectError("retry_limit cannot be negative") try: connection.handle = connect() @@ -246,7 +246,7 @@ def retry_connection( if retry_limit <= 0: connection.handle = None connection.state = ConnectionState.FAIL - raise dbt.exceptions.FailedToConnectException(str(e)) + raise dbt.exceptions.FailedToConnectError(str(e)) logger.debug( f"Got a retryable error when attempting to open a {cls.TYPE} connection.\n" @@ -268,12 +268,12 @@ def retry_connection( except Exception as e: connection.handle = None connection.state = ConnectionState.FAIL - raise dbt.exceptions.FailedToConnectException(str(e)) + raise dbt.exceptions.FailedToConnectError(str(e)) @abc.abstractmethod def cancel_open(self) -> Optional[List[str]]: """Cancel all open connections on the adapter. (passable)""" - raise dbt.exceptions.NotImplementedException( + raise dbt.exceptions.NotImplementedError( "`cancel_open` is not implemented for this adapter!" ) @@ -288,7 +288,7 @@ def open(cls, connection: Connection) -> Connection: This should be thread-safe, or hold the lock if necessary. The given connection should not be in either in_use or available. """ - raise dbt.exceptions.NotImplementedException("`open` is not implemented for this adapter!") + raise dbt.exceptions.NotImplementedError("`open` is not implemented for this adapter!") def release(self) -> None: with self.lock: @@ -320,16 +320,12 @@ def cleanup_all(self) -> None: @abc.abstractmethod def begin(self) -> None: """Begin a transaction. (passable)""" - raise dbt.exceptions.NotImplementedException( - "`begin` is not implemented for this adapter!" - ) + raise dbt.exceptions.NotImplementedError("`begin` is not implemented for this adapter!") @abc.abstractmethod def commit(self) -> None: """Commit a transaction. (passable)""" - raise dbt.exceptions.NotImplementedException( - "`commit` is not implemented for this adapter!" - ) + raise dbt.exceptions.NotImplementedError("`commit` is not implemented for this adapter!") @classmethod def _rollback_handle(cls, connection: Connection) -> None: @@ -365,7 +361,7 @@ def _close_handle(cls, connection: Connection) -> None: def _rollback(cls, connection: Connection) -> None: """Roll back the given connection.""" if connection.transaction_open is False: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Tried to rollback transaction on connection " f'"{connection.name}", but it does not have one open!' ) @@ -415,6 +411,4 @@ def execute( :return: A tuple of the query status and results (empty if fetch=False). 
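Per the updated `retry_connection` docstring above, `retry_timeout` may be a number or a callable taking the zero-based attempt count, negative values raise immediately, and exhausting `retry_limit` surfaces as `FailedToConnectError`. A standalone sketch of that contract, written iteratively rather than with dbt's recursive `_attempts` parameter (the exception class is a local stand-in):

```python
import time
from typing import Callable, Iterable, Type, Union

class FailedToConnectError(Exception):
    """Stand-in for dbt.exceptions.FailedToConnectError."""

def retry_connection(connect: Callable[[], object],
                     retryable_exceptions: Iterable[Type[Exception]],
                     retry_limit: int = 1,
                     retry_timeout: Union[float, Callable[[int], float]] = 1.0) -> object:
    if retry_limit < 0:
        raise FailedToConnectError("retry_limit cannot be negative")
    retryable = tuple(retryable_exceptions)
    for attempt in range(retry_limit + 1):
        timeout = retry_timeout(attempt) if callable(retry_timeout) else retry_timeout
        if timeout < 0:
            raise FailedToConnectError("retry_timeout cannot be negative or return a negative time.")
        try:
            return connect()                   # success: hand back the handle
        except retryable as e:
            if attempt >= retry_limit:         # retries exhausted
                raise FailedToConnectError(str(e))
            time.sleep(timeout)                # wait, then go around again
        except Exception as e:                 # non-retryable: fail immediately
            raise FailedToConnectError(str(e))
    raise FailedToConnectError("unreachable")  # satisfies type checkers

# Exponential backoff falls out of the callable form:
#   retry_connection(connect, (TimeoutError,), retry_limit=5,
#                    retry_timeout=lambda attempt: 2 ** attempt)
```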
:rtype: Tuple[AdapterResponse, agate.Table] """ - raise dbt.exceptions.NotImplementedException( - "`execute` is not implemented for this adapter!" - ) + raise dbt.exceptions.NotImplementedError("`execute` is not implemented for this adapter!") diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 64ebbeac5dd..98b78217c14 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -22,20 +22,20 @@ import pytz from dbt.exceptions import ( - InternalException, - InvalidMacroArgType, - InvalidMacroResult, - InvalidQuoteConfigType, - NotImplementedException, - NullRelationCacheAttempted, - NullRelationDropAttempted, - RelationReturnedMultipleResults, - RenameToNoneAttempted, - RuntimeException, - SnapshotTargetIncomplete, - SnapshotTargetNotSnapshotTable, - UnexpectedNull, - UnexpectedNonTimestamp, + DbtInternalError, + MacroArgTypeError, + MacroResultError, + QuoteConfigTypeError, + NotImplementedError, + NullRelationCacheAttemptedError, + NullRelationDropAttemptedError, + RelationReturnedMultipleResultsError, + RenameToNoneAttemptedError, + DbtRuntimeError, + SnapshotTargetIncompleteError, + SnapshotTargetNotSnapshotTableError, + UnexpectedNullError, + UnexpectedNonTimestampError, ) from dbt.adapters.protocol import ( @@ -75,7 +75,7 @@ def _expect_row_value(key: str, row: agate.Row): if key not in row.keys(): - raise InternalException( + raise DbtInternalError( 'Got a row without "{}" column, columns: {}'.format(key, row.keys()) ) return row[key] @@ -104,10 +104,10 @@ def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datet assume the datetime is already for UTC and add the timezone. """ if dt is None: - raise UnexpectedNull(field_name, source) + raise UnexpectedNullError(field_name, source) elif not hasattr(dt, "tzinfo"): - raise UnexpectedNonTimestamp(field_name, source, dt) + raise UnexpectedNonTimestampError(field_name, source, dt) elif dt.tzinfo: return dt.astimezone(pytz.UTC) @@ -433,7 +433,7 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str: """Cache a new relation in dbt. 
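The rewritten import block above shows the breadth of the renames: `InternalException` becomes `DbtInternalError`, `RuntimeException` becomes `DbtRuntimeError`, `InvalidMacroArgType` becomes `MacroArgTypeError`, and so on. A plugin that must run against dbt-core releases on both sides of the rename can alias at import time; a hedged sketch of that pattern (not an officially documented shim):

```python
# Aliasing pattern for adapter code that must import dbt exceptions on
# both sides of the 1.5 renames (illustrative; verify against the dbt-core
# versions you actually support).
try:
    from dbt.exceptions import DbtRuntimeError, DbtInternalError
except ImportError:
    # Older dbt-core: fall back to the pre-rename names.
    from dbt.exceptions import RuntimeException as DbtRuntimeError      # type: ignore
    from dbt.exceptions import InternalException as DbtInternalError   # type: ignore

def require_relation_name(value) -> str:
    # Hypothetical helper: downstream code can now raise the new-style
    # name unconditionally.
    if not isinstance(value, str):
        raise DbtRuntimeError(f"expected a relation name string, got {type(value).__name__}")
    return value
```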
It will show up in `list relations`.""" if relation is None: name = self.nice_connection_name() - raise NullRelationCacheAttempted(name) + raise NullRelationCacheAttemptedError(name) self.cache.add(relation) # so jinja doesn't render things return "" @@ -445,7 +445,7 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str: """ if relation is None: name = self.nice_connection_name() - raise NullRelationDropAttempted(name) + raise NullRelationDropAttemptedError(name) self.cache.drop(relation) return "" @@ -462,7 +462,7 @@ def cache_renamed( name = self.nice_connection_name() src_name = _relation_name(from_relation) dst_name = _relation_name(to_relation) - raise RenameToNoneAttempted(src_name, dst_name, name) + raise RenameToNoneAttemptedError(src_name, dst_name, name) self.cache.rename(from_relation, to_relation) return "" @@ -474,12 +474,12 @@ def cache_renamed( @abc.abstractmethod def date_function(cls) -> str: """Get the date function used by this adapter's database.""" - raise NotImplementedException("`date_function` is not implemented for this adapter!") + raise NotImplementedError("`date_function` is not implemented for this adapter!") @classmethod @abc.abstractmethod def is_cancelable(cls) -> bool: - raise NotImplementedException("`is_cancelable` is not implemented for this adapter!") + raise NotImplementedError("`is_cancelable` is not implemented for this adapter!") ### # Abstract methods about schemas @@ -487,7 +487,7 @@ def is_cancelable(cls) -> bool: @abc.abstractmethod def list_schemas(self, database: str) -> List[str]: """Get a list of existing schemas in database""" - raise NotImplementedException("`list_schemas` is not implemented for this adapter!") + raise NotImplementedError("`list_schemas` is not implemented for this adapter!") @available.parse(lambda *a, **k: False) def check_schema_exists(self, database: str, schema: str) -> bool: @@ -510,13 +510,13 @@ def drop_relation(self, relation: BaseRelation) -> None: *Implementors must call self.cache.drop() to preserve cache state!* """ - raise NotImplementedException("`drop_relation` is not implemented for this adapter!") + raise NotImplementedError("`drop_relation` is not implemented for this adapter!") @abc.abstractmethod @available.parse_none def truncate_relation(self, relation: BaseRelation) -> None: """Truncate the given relation.""" - raise NotImplementedException("`truncate_relation` is not implemented for this adapter!") + raise NotImplementedError("`truncate_relation` is not implemented for this adapter!") @abc.abstractmethod @available.parse_none @@ -525,15 +525,13 @@ def rename_relation(self, from_relation: BaseRelation, to_relation: BaseRelation Implementors must call self.cache.rename() to preserve cache state. """ - raise NotImplementedException("`rename_relation` is not implemented for this adapter!") + raise NotImplementedError("`rename_relation` is not implemented for this adapter!") @abc.abstractmethod @available.parse_list def get_columns_in_relation(self, relation: BaseRelation) -> List[BaseColumn]: """Get a list of the columns in the given Relation.""" - raise NotImplementedException( - "`get_columns_in_relation` is not implemented for this adapter!" 
- ) + raise NotImplementedError("`get_columns_in_relation` is not implemented for this adapter!") @available.deprecated("get_columns_in_relation", lambda *a, **k: []) def get_columns_in_table(self, schema: str, identifier: str) -> List[BaseColumn]: @@ -555,7 +553,7 @@ def expand_column_types(self, goal: BaseRelation, current: BaseRelation) -> None :param self.Relation current: A relation that currently exists in the database with columns of unspecified types. """ - raise NotImplementedException( + raise NotImplementedError( "`expand_target_column_types` is not implemented for this adapter!" ) @@ -570,7 +568,7 @@ def list_relations_without_caching(self, schema_relation: BaseRelation) -> List[ :return: The relations in schema :rtype: List[self.Relation] """ - raise NotImplementedException( + raise NotImplementedError( "`list_relations_without_caching` is not implemented for this adapter!" ) @@ -612,7 +610,7 @@ def get_missing_columns( to_relation. """ if not isinstance(from_relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="get_missing_columns", arg_name="from_relation", got_value=from_relation, @@ -620,7 +618,7 @@ def get_missing_columns( ) if not isinstance(to_relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="get_missing_columns", arg_name="to_relation", got_value=to_relation, @@ -641,11 +639,11 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: expected columns. :param Relation relation: The relation to check - :raises CompilationException: If the columns are + :raises InvalidMacroArgType: If the columns are incorrect. """ if not isinstance(relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="valid_snapshot_target", arg_name="relation", got_value=relation, @@ -666,16 +664,16 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: if missing: if extra: - raise SnapshotTargetIncomplete(extra, missing) + raise SnapshotTargetIncompleteError(extra, missing) else: - raise SnapshotTargetNotSnapshotTable(missing) + raise SnapshotTargetNotSnapshotTableError(missing) @available.parse_none def expand_target_column_types( self, from_relation: BaseRelation, to_relation: BaseRelation ) -> None: if not isinstance(from_relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="expand_target_column_types", arg_name="from_relation", got_value=from_relation, @@ -683,7 +681,7 @@ def expand_target_column_types( ) if not isinstance(to_relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="expand_target_column_types", arg_name="to_relation", got_value=to_relation, @@ -765,7 +763,7 @@ def get_relation(self, database: str, schema: str, identifier: str) -> Optional[ "schema": schema, "database": database, } - raise RelationReturnedMultipleResults(kwargs, matches) + raise RelationReturnedMultipleResultsError(kwargs, matches) elif matches: return matches[0] @@ -787,20 +785,20 @@ def already_exists(self, schema: str, name: str) -> bool: @available.parse_none def create_schema(self, relation: BaseRelation): """Create the given schema if it does not exist.""" - raise NotImplementedException("`create_schema` is not implemented for this adapter!") + raise NotImplementedError("`create_schema` is not implemented for this adapter!") @abc.abstractmethod @available.parse_none def drop_schema(self, relation: BaseRelation): """Drop the given schema (and everything in it) if it exists.""" - raise 
NotImplementedException("`drop_schema` is not implemented for this adapter!") + raise NotImplementedError("`drop_schema` is not implemented for this adapter!") @available @classmethod @abc.abstractmethod def quote(cls, identifier: str) -> str: """Quote the given identifier, as appropriate for the database.""" - raise NotImplementedException("`quote` is not implemented for this adapter!") + raise NotImplementedError("`quote` is not implemented for this adapter!") @available def quote_as_configured(self, identifier: str, quote_key: str) -> str: @@ -829,7 +827,7 @@ def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str: elif quote_config is None: pass else: - raise InvalidQuoteConfigType(quote_config) + raise QuoteConfigTypeError(quote_config) if quote_columns: return self.quote(column) @@ -850,7 +848,7 @@ def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException("`convert_text_type` is not implemented for this adapter!") + raise NotImplementedError("`convert_text_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -862,7 +860,7 @@ def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException("`convert_number_type` is not implemented for this adapter!") + raise NotImplementedError("`convert_number_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -874,9 +872,7 @@ def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException( - "`convert_boolean_type` is not implemented for this adapter!" - ) + raise NotImplementedError("`convert_boolean_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -888,9 +884,7 @@ def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException( - "`convert_datetime_type` is not implemented for this adapter!" - ) + raise NotImplementedError("`convert_datetime_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -902,7 +896,7 @@ def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException("`convert_date_type` is not implemented for this adapter!") + raise NotImplementedError("`convert_date_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -914,7 +908,7 @@ def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. 
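Each abstract `convert_*_type` hook above maps one agate column to a database type name for seed loading. A rough sketch of how a concrete adapter might fill a few of them in (the returned type names are illustrative, and the integer-vs-float scan is a simplification of dbt's agate helpers):

```python
import agate

class MinimalTypeMapper:
    """Illustrative implementations; real adapters return dialect-specific names."""

    @classmethod
    def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        return "text"

    @classmethod
    def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        # Crude integer-vs-float check: any non-null value with a fractional
        # part forces a float column.
        has_decimals = any(
            row[col_idx] is not None and row[col_idx] % 1 != 0
            for row in agate_table.rows
        )
        return "float8" if has_decimals else "integer"

    @classmethod
    def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        return "boolean"

    @classmethod
    def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        return "timestamp without time zone"

    @classmethod
    def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        return "date"

    @classmethod
    def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        return "time"
```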
:return: The name of the type in the database """ - raise NotImplementedException("`convert_time_type` is not implemented for this adapter!") + raise NotImplementedError("`convert_time_type` is not implemented for this adapter!") @available @classmethod @@ -981,7 +975,7 @@ def execute_macro( else: package_name = 'the "{}" package'.format(project) - raise RuntimeException( + raise DbtRuntimeError( 'dbt could not find a macro with the name "{}" in {}'.format( macro_name, package_name ) @@ -1079,7 +1073,7 @@ def calculate_freshness( # now we have a 1-row table of the maximum `loaded_at_field` value and # the current time according to the db. if len(table) != 1 or len(table[0]) != 2: - raise InvalidMacroResult(FRESHNESS_MACRO_NAME, table) + raise MacroResultError(FRESHNESS_MACRO_NAME, table) if table[0][0] is None: # no records in the table, so really the max_loaded_at was # infinitely long ago. Just call it 0:00 January 1 year UTC @@ -1156,7 +1150,7 @@ def string_add_sql( elif location == "prepend": return f"'{value}' || {add_to}" else: - raise RuntimeException(f'Got an unexpected location value of "{location}"') + raise DbtRuntimeError(f'Got an unexpected location value of "{location}"') def get_rows_different_sql( self, @@ -1214,7 +1208,7 @@ def submit_python_job(self, parsed_model: dict, compiled_code: str) -> AdapterRe return self.generate_python_submission_response(submission_result) def generate_python_submission_response(self, submission_result: Any) -> AdapterResponse: - raise NotImplementedException( + raise NotImplementedError( "Your adapter need to implement generate_python_submission_response" ) @@ -1238,7 +1232,7 @@ def get_incremental_strategy_macro(self, model_context, strategy: str): valid_strategies.append("default") builtin_strategies = self.builtin_incremental_strategies() if strategy in builtin_strategies and strategy not in valid_strategies: - raise RuntimeException( + raise DbtRuntimeError( f"The incremental strategy '{strategy}' is not valid for this adapter" ) @@ -1246,7 +1240,7 @@ def get_incremental_strategy_macro(self, model_context, strategy: str): macro_name = f"get_incremental_{strategy}_sql" # The model_context should have MacroGenerator callable objects for all macros if macro_name not in model_context: - raise RuntimeException( + raise DbtRuntimeError( 'dbt could not find an incremental strategy macro with the name "{}" in {}'.format( macro_name, self.config.project_name ) diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py index f0d348d8f57..f1a77f89b9d 100644 --- a/core/dbt/adapters/base/plugin.py +++ b/core/dbt/adapters/base/plugin.py @@ -1,7 +1,7 @@ from typing import List, Optional, Type from dbt.adapters.base import Credentials -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.adapters.protocol import AdapterProtocol @@ -11,7 +11,7 @@ def project_name_from_path(include_path: str) -> str: partial = Project.partial_load(include_path) if partial.project_name is None: - raise CompilationException(f"Invalid project at {include_path}: name not set!") + raise CompilationError(f"Invalid project at {include_path}: name not set!") return partial.project_name diff --git a/core/dbt/adapters/base/query_headers.py b/core/dbt/adapters/base/query_headers.py index dd88fdb2d41..bfacd2aee8c 100644 --- a/core/dbt/adapters/base/query_headers.py +++ b/core/dbt/adapters/base/query_headers.py @@ -7,7 +7,7 @@ from dbt.contracts.connection import AdapterRequiredConfig, QueryComment from 
dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError class NodeWrapper: @@ -48,7 +48,7 @@ def set(self, comment: Optional[str], append: bool): if isinstance(comment, str) and "*/" in comment: # tell the user "no" so they don't hurt themselves by writing # garbage - raise RuntimeException(f'query comment contains illegal value "*/": {comment}') + raise DbtRuntimeError(f'query comment contains illegal value "*/": {comment}') self.query_comment = comment self.append = append diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index 5bc0c56b264..13f64c01742 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -11,7 +11,11 @@ Policy, Path, ) -from dbt.exceptions import ApproximateMatch, InternalException, MultipleDatabasesNotAllowed +from dbt.exceptions import ( + ApproximateMatchError, + DbtInternalError, + MultipleDatabasesNotAllowedError, +) from dbt.node_types import NodeType from dbt.utils import filter_null_values, deep_merge, classproperty @@ -83,7 +87,7 @@ def matches( if not search: # nothing was passed in - raise dbt.exceptions.RuntimeException( + raise dbt.exceptions.DbtRuntimeError( "Tried to match relation, but no search path was passed!" ) @@ -100,7 +104,7 @@ def matches( if approximate_match and not exact_match: target = self.create(database=database, schema=schema, identifier=identifier) - raise ApproximateMatch(target, self) + raise ApproximateMatchError(target, self) return exact_match @@ -249,14 +253,14 @@ def create_from( ) -> Self: if node.resource_type == NodeType.Source: if not isinstance(node, SourceDefinition): - raise InternalException( + raise DbtInternalError( "type mismatch, expected SourceDefinition but got {}".format(type(node)) ) return cls.create_from_source(node, **kwargs) else: # Can't use ManifestNode here because of parameterized generics if not isinstance(node, (ParsedNode)): - raise InternalException( + raise DbtInternalError( f"type mismatch, expected ManifestNode but got {type(node)}" ) return cls.create_from_node(config, node, **kwargs) @@ -354,7 +358,7 @@ class InformationSchema(BaseRelation): def __post_init__(self): if not isinstance(self.information_schema_view, (type(None), str)): - raise dbt.exceptions.CompilationException( + raise dbt.exceptions.CompilationError( "Got an invalid name: {}".format(self.information_schema_view) ) @@ -438,7 +442,7 @@ def flatten(self, allow_multiple_databases: bool = False): if not allow_multiple_databases: seen = {r.database.lower() for r in self if r.database} if len(seen) > 1: - raise MultipleDatabasesNotAllowed(seen) + raise MultipleDatabasesNotAllowedError(seen) for information_schema_name, schema in self.search(): path = {"database": information_schema_name.database, "schema": schema} diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py index 90c4cab27fb..24a0e469df1 100644 --- a/core/dbt/adapters/cache.py +++ b/core/dbt/adapters/cache.py @@ -9,28 +9,14 @@ _ReferenceKey, ) from dbt.exceptions import ( - DependentLinkNotCached, - NewNameAlreadyInCache, - NoneRelationFound, - ReferencedLinkNotCached, - TruncatedModelNameCausedCollision, + DependentLinkNotCachedError, + NewNameAlreadyInCacheError, + NoneRelationFoundError, + ReferencedLinkNotCachedError, + TruncatedModelNameCausedCollisionError, ) from dbt.events.functions import fire_event, fire_event_if -from dbt.events.types import ( - AddLink, - 
AddRelation, - DropCascade, - DropMissingRelation, - DropRelation, - DumpAfterAddGraph, - DumpAfterRenameSchema, - DumpBeforeAddGraph, - DumpBeforeRenameSchema, - RenameSchema, - TemporaryRelation, - UncachedRelation, - UpdateReference, -) +from dbt.events.types import CacheAction, CacheDumpGraph import dbt.flags as flags from dbt.utils import lowercase @@ -155,7 +141,7 @@ def rename_key(self, old_key, new_key): :raises InternalError: If the new key already exists. """ if new_key in self.referenced_by: - raise NewNameAlreadyInCache(old_key, new_key) + raise NewNameAlreadyInCacheError(old_key, new_key) if old_key not in self.referenced_by: return @@ -271,17 +257,17 @@ def _add_link(self, referenced_key, dependent_key): if referenced is None: return if referenced is None: - raise ReferencedLinkNotCached(referenced_key) + raise ReferencedLinkNotCachedError(referenced_key) dependent = self.relations.get(dependent_key) if dependent is None: - raise DependentLinkNotCached(dependent_key) + raise DependentLinkNotCachedError(dependent_key) assert dependent is not None # we just raised! referenced.add_reference(dependent) - # TODO: Is this dead code? I can't seem to find it grepping the codebase. + # This is called in plugins/postgres/dbt/adapters/postgres/impl.py def add_link(self, referenced, dependent): """Add a link between two relations to the database. If either relation does not exist, it will be added as an "external" relation. @@ -303,9 +289,9 @@ def add_link(self, referenced, dependent): # referring to a table outside our control. There's no need to make # a link - we will never drop the referenced relation during a run. fire_event( - UncachedRelation( - dep_key=_make_msg_from_ref_key(dep_key), + CacheAction( ref_key=_make_msg_from_ref_key(ref_key), + ref_key_2=_make_msg_from_ref_key(dep_key), ) ) return @@ -318,8 +304,10 @@ def add_link(self, referenced, dependent): dependent = dependent.replace(type=referenced.External) self.add(dependent) fire_event( - AddLink( - dep_key=_make_msg_from_ref_key(dep_key), ref_key=_make_msg_from_ref_key(ref_key) + CacheAction( + action="add_link", + ref_key=_make_msg_from_ref_key(dep_key), + ref_key_2=_make_msg_from_ref_key(ref_key), ) ) with self.lock: @@ -332,12 +320,18 @@ def add(self, relation): :param BaseRelation relation: The underlying relation. """ cached = _CachedRelation(relation) - fire_event(AddRelation(relation=_make_ref_key_msg(cached))) - fire_event_if(flags.LOG_CACHE_EVENTS, lambda: DumpBeforeAddGraph(dump=self.dump_graph())) + fire_event_if( + flags.LOG_CACHE_EVENTS, + lambda: CacheDumpGraph(before_after="before", action="adding", dump=self.dump_graph()), + ) + fire_event(CacheAction(action="add_relation", ref_key=_make_ref_key_msg(cached))) with self.lock: self._setdefault(cached) - fire_event_if(flags.LOG_CACHE_EVENTS, lambda: DumpAfterAddGraph(dump=self.dump_graph())) + fire_event_if( + flags.LOG_CACHE_EVENTS, + lambda: CacheDumpGraph(before_after="after", action="adding", dump=self.dump_graph()), + ) def _remove_refs(self, keys): """Removes all references to all entries in keys. 
This does not @@ -365,16 +359,19 @@ def drop(self, relation): """ dropped_key = _make_ref_key(relation) dropped_key_msg = _make_ref_key_msg(relation) - fire_event(DropRelation(dropped=dropped_key_msg)) + fire_event(CacheAction(action="drop_relation", ref_key=dropped_key_msg)) with self.lock: if dropped_key not in self.relations: - fire_event(DropMissingRelation(relation=dropped_key_msg)) + fire_event(CacheAction(action="drop_missing_relation", ref_key=dropped_key_msg)) return consequences = self.relations[dropped_key].collect_consequences() # convert from a list of _ReferenceKeys to a list of ReferenceKeyMsgs consequence_msgs = [_make_msg_from_ref_key(key) for key in consequences] - - fire_event(DropCascade(dropped=dropped_key_msg, consequences=consequence_msgs)) + fire_event( + CacheAction( + action="drop_cascade", ref_key=dropped_key_msg, ref_list=consequence_msgs + ) + ) self._remove_refs(consequences) def _rename_relation(self, old_key, new_relation): @@ -397,12 +394,14 @@ def _rename_relation(self, old_key, new_relation): for cached in self.relations.values(): if cached.is_referenced_by(old_key): fire_event( - UpdateReference( - old_key=_make_ref_key_msg(old_key), - new_key=_make_ref_key_msg(new_key), - cached_key=_make_ref_key_msg(cached.key()), + CacheAction( + action="update_reference", + ref_key=_make_ref_key_msg(old_key), + ref_key_2=_make_ref_key_msg(new_key), + ref_key_3=_make_ref_key_msg(cached.key()), ) ) + cached.rename_key(old_key, new_key) self.relations[new_key] = relation @@ -427,10 +426,12 @@ def _check_rename_constraints(self, old_key, new_key): if new_key in self.relations: # Tell user when collision caused by model names truncated during # materialization. - raise TruncatedModelNameCausedCollision(new_key, self.relations) + raise TruncatedModelNameCausedCollisionError(new_key, self.relations) if old_key not in self.relations: - fire_event(TemporaryRelation(key=_make_msg_from_ref_key(old_key))) + fire_event( + CacheAction(action="temporary_relation", ref_key=_make_msg_from_ref_key(old_key)) + ) return False return True @@ -449,13 +450,16 @@ def rename(self, old, new): old_key = _make_ref_key(old) new_key = _make_ref_key(new) fire_event( - RenameSchema( - old_key=_make_msg_from_ref_key(old_key), new_key=_make_msg_from_ref_key(new) + CacheAction( + action="rename_relation", + ref_key=_make_msg_from_ref_key(old_key), + ref_key_2=_make_msg_from_ref_key(new), ) ) fire_event_if( - flags.LOG_CACHE_EVENTS, lambda: DumpBeforeRenameSchema(dump=self.dump_graph()) + flags.LOG_CACHE_EVENTS, + lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()), ) with self.lock: @@ -465,7 +469,8 @@ def rename(self, old, new): self._setdefault(_CachedRelation(new)) fire_event_if( - flags.LOG_CACHE_EVENTS, lambda: DumpAfterRenameSchema(dump=self.dump_graph()) + flags.LOG_CACHE_EVENTS, + lambda: CacheDumpGraph(before_after="after", action="rename", dump=self.dump_graph()), ) def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]: @@ -485,7 +490,7 @@ def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[ ] if None in results: - raise NoneRelationFound() + raise NoneRelationFoundError() return results def clear(self): diff --git a/core/dbt/adapters/factory.py b/core/dbt/adapters/factory.py index 16a0a3ffcd1..38c6bcb7894 100644 --- a/core/dbt/adapters/factory.py +++ b/core/dbt/adapters/factory.py @@ -10,7 +10,7 @@ from dbt.contracts.connection import AdapterRequiredConfig, Credentials from dbt.events.functions 
import fire_event from dbt.events.types import AdapterImportError, PluginLoadError -from dbt.exceptions import InternalException, RuntimeException +from dbt.exceptions import DbtInternalError, DbtRuntimeError from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME @@ -34,7 +34,7 @@ def get_plugin_by_name(self, name: str) -> AdapterPlugin: names = ", ".join(self.plugins.keys()) message = f"Invalid adapter type {name}! Must be one of {names}" - raise RuntimeException(message) + raise DbtRuntimeError(message) def get_adapter_class_by_name(self, name: str) -> Type[Adapter]: plugin = self.get_plugin_by_name(name) @@ -60,7 +60,7 @@ def load_plugin(self, name: str) -> Type[Credentials]: # the user about it via a runtime error if exc.name == "dbt.adapters." + name: fire_event(AdapterImportError(exc=str(exc))) - raise RuntimeException(f"Could not find adapter type {name}!") + raise DbtRuntimeError(f"Could not find adapter type {name}!") # otherwise, the error had to have come from some underlying # library. Log the stack trace. @@ -70,7 +70,7 @@ def load_plugin(self, name: str) -> Type[Credentials]: plugin_type = plugin.adapter.type() if plugin_type != name: - raise RuntimeException( + raise DbtRuntimeError( f"Expected to find adapter with type named {name}, got " f"adapter with type {plugin_type}" ) @@ -132,7 +132,7 @@ def get_adapter_plugins(self, name: Optional[str]) -> List[AdapterPlugin]: try: plugin = self.plugins[plugin_name] except KeyError: - raise InternalException(f"No plugin found for {plugin_name}") from None + raise DbtInternalError(f"No plugin found for {plugin_name}") from None plugins.append(plugin) seen.add(plugin_name) for dep in plugin.dependencies: @@ -151,7 +151,7 @@ def get_include_paths(self, name: Optional[str]) -> List[Path]: try: path = self.packages[package_name] except KeyError: - raise InternalException(f"No internal package listing found for {package_name}") + raise DbtInternalError(f"No internal package listing found for {package_name}") paths.append(path) return paths diff --git a/core/dbt/adapters/sql/connections.py b/core/dbt/adapters/sql/connections.py index bc1a562ad86..e13cf12e319 100644 --- a/core/dbt/adapters/sql/connections.py +++ b/core/dbt/adapters/sql/connections.py @@ -27,9 +27,7 @@ class SQLConnectionManager(BaseConnectionManager): @abc.abstractmethod def cancel(self, connection: Connection): """Cancel the given connection.""" - raise dbt.exceptions.NotImplementedException( - "`cancel` is not implemented for this adapter!" - ) + raise dbt.exceptions.NotImplementedError("`cancel` is not implemented for this adapter!") def cancel_open(self) -> List[str]: names = [] @@ -95,7 +93,7 @@ def add_query( @abc.abstractmethod def get_response(cls, cursor: Any) -> AdapterResponse: """Get the status of the cursor.""" - raise dbt.exceptions.NotImplementedException( + raise dbt.exceptions.NotImplementedError( "`get_response` is not implemented for this adapter!" 
) @@ -151,7 +149,7 @@ def add_commit_query(self): def begin(self): connection = self.get_thread_connection() if connection.transaction_open is True: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( 'Tried to begin a new transaction on connection "{}", but ' "it already had one open!".format(connection.name) ) @@ -164,7 +162,7 @@ def begin(self): def commit(self): connection = self.get_thread_connection() if connection.transaction_open is False: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( 'Tried to commit transaction on connection "{}", but ' "it does not have one open!".format(connection.name) ) diff --git a/core/dbt/adapters/sql/impl.py b/core/dbt/adapters/sql/impl.py index 4606b046f54..fc787f0c834 100644 --- a/core/dbt/adapters/sql/impl.py +++ b/core/dbt/adapters/sql/impl.py @@ -2,7 +2,7 @@ from typing import Any, Optional, Tuple, Type, List from dbt.contracts.connection import Connection -from dbt.exceptions import RelationTypeNull +from dbt.exceptions import RelationTypeNullError from dbt.adapters.base import BaseAdapter, available from dbt.adapters.cache import _make_ref_key_msg from dbt.adapters.sql import SQLConnectionManager @@ -131,7 +131,7 @@ def alter_column_type(self, relation, column_name, new_column_type) -> None: def drop_relation(self, relation): if relation.type is None: - raise RelationTypeNull(relation) + raise RelationTypeNullError(relation) self.cache_dropped(relation) self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation}) diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 6f0a153c923..9942db702ca 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -46,6 +46,7 @@ def cli_runner(): @p.version @p.version_check @p.warn_error +@p.warn_error_options @p.write_json def cli(ctx, **kwargs): """An ELT tool for managing your SQL transformations and data models. diff --git a/core/dbt/cli/option_types.py b/core/dbt/cli/option_types.py index 523df651775..f0c497b5bec 100644 --- a/core/dbt/cli/option_types.py +++ b/core/dbt/cli/option_types.py @@ -1,6 +1,8 @@ from click import ParamType import yaml +from dbt.helper_types import WarnErrorOptions + class YAML(ParamType): """The Click YAML type. Converts YAML strings into objects.""" @@ -17,6 +19,19 @@ def convert(self, value, param, ctx): self.fail(f"String '{value}' is not valid YAML", param, ctx) +class WarnErrorOptionsType(YAML): + """The Click WarnErrorOptions type. Converts YAML strings into objects.""" + + name = "WarnErrorOptionsType" + + def convert(self, value, param, ctx): + include_exclude = super().convert(value, param, ctx) + + return WarnErrorOptions( + include=include_exclude.get("include", []), exclude=include_exclude.get("exclude", []) + ) + + class Truthy(ParamType): """The Click Truthy type. 
Converts strings into a "truthy" type""" diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index 5045d04cc18..3ad3747e962 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -1,7 +1,7 @@ from pathlib import Path, PurePath import click -from dbt.cli.option_types import YAML +from dbt.cli.option_types import YAML, WarnErrorOptionsType from dbt.cli.resolvers import default_project_dir, default_profiles_dir @@ -270,7 +270,7 @@ ) skip_profile_setup = click.option( - "--skip-profile-setup", "-s", envvar=None, help="Skip interative profile setup.", is_flag=True + "--skip-profile-setup", "-s", envvar=None, help="Skip interactive profile setup.", is_flag=True ) # TODO: The env var and name (reflected in flags) are corrections! @@ -358,9 +358,20 @@ ) warn_error = click.option( - "--warn-error/--no-warn-error", + "--warn-error", envvar="DBT_WARN_ERROR", - help="If dbt would normally warn, instead raise an exception. Examples include --models that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.", + help="If dbt would normally warn, instead raise an exception. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.", + default=None, + flag_value=True, +) + +warn_error_options = click.option( + "--warn-error-options", + envvar="DBT_WARN_ERROR_OPTIONS", + default=None, + help="""If dbt would normally warn, instead raise an exception based on include/exclude configuration. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, + and missing sources/refs in tests. This argument should be a YAML string, with keys 'include' or 'exclude'. eg. 
'{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'""", + type=WarnErrorOptionsType(), ) write_json = click.option( diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py index fa74a317649..1ada0a6234d 100644 --- a/core/dbt/clients/_jinja_blocks.py +++ b/core/dbt/clients/_jinja_blocks.py @@ -2,13 +2,13 @@ from collections import namedtuple from dbt.exceptions import ( - BlockDefinitionNotAtTop, - InternalException, - MissingCloseTag, - MissingControlFlowStartTag, - NestedTags, - UnexpectedControlFlowEndTag, - UnexpectedMacroEOF, + BlockDefinitionNotAtTopError, + DbtInternalError, + MissingCloseTagError, + MissingControlFlowStartTagError, + NestedTagsError, + UnexpectedControlFlowEndTagError, + UnexpectedMacroEOFError, ) @@ -147,7 +147,7 @@ def _first_match(self, *patterns, **kwargs): def _expect_match(self, expected_name, *patterns, **kwargs): match = self._first_match(*patterns, **kwargs) if match is None: - raise UnexpectedMacroEOF(expected_name, self.data[self.pos :]) + raise UnexpectedMacroEOFError(expected_name, self.data[self.pos :]) return match def handle_expr(self, match): @@ -261,7 +261,7 @@ def find_tags(self): elif block_type_name is not None: yield self.handle_tag(match) else: - raise InternalException( + raise DbtInternalError( "Invalid regex match in next_block, expected block start, " "expr start, or comment start" ) @@ -317,16 +317,16 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): found = self.stack.pop() else: expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name] - raise UnexpectedControlFlowEndTag(tag, expected, self.tag_parser) + raise UnexpectedControlFlowEndTagError(tag, expected, self.tag_parser) expected = _CONTROL_FLOW_TAGS[found] if expected != tag.block_type_name: - raise MissingControlFlowStartTag(tag, expected, self.tag_parser) + raise MissingControlFlowStartTagError(tag, expected, self.tag_parser) if tag.block_type_name in allowed_blocks: if self.stack: - raise BlockDefinitionNotAtTop(self.tag_parser, tag.start) + raise BlockDefinitionNotAtTopError(self.tag_parser, tag.start) if self.current is not None: - raise NestedTags(outer=self.current, inner=tag) + raise NestedTagsError(outer=self.current, inner=tag) if collect_raw_data: raw_data = self.data[self.last_position : tag.start] self.last_position = tag.start @@ -347,7 +347,7 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): if self.current: linecount = self.data[: self.current.end].count("\n") + 1 - raise MissingCloseTag(self.current.block_type_name, linecount) + raise MissingCloseTagError(self.current.block_type_name, linecount) if collect_raw_data: raw_data = self.data[self.last_position :] diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py index 11492a9faef..1d69a2bd17f 100644 --- a/core/dbt/clients/agate_helper.py +++ b/core/dbt/clients/agate_helper.py @@ -7,7 +7,7 @@ import dbt.utils from typing import Iterable, List, Dict, Union, Optional, Any -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError BOM = BOM_UTF8.decode("utf-8") # '\ufeff' @@ -168,7 +168,7 @@ def __setitem__(self, key, value): return elif not isinstance(value, type(existing_type)): # actual type mismatch! 
- raise RuntimeException( + raise DbtRuntimeError( f"Tables contain columns with the same names ({key}), " f"but different types ({value} vs {existing_type})" ) diff --git a/core/dbt/clients/git.py b/core/dbt/clients/git.py index 4ddbb1969ee..d6cb3f3870c 100644 --- a/core/dbt/clients/git.py +++ b/core/dbt/clients/git.py @@ -16,8 +16,8 @@ CommandResultError, GitCheckoutError, GitCloningError, - GitCloningProblem, - RuntimeException, + UnknownGitCloningProblemError, + DbtRuntimeError, ) from packaging import version @@ -134,7 +134,7 @@ def clone_and_checkout( err = exc.stderr exists = re.match("fatal: destination path '(.+)' already exists", err) if not exists: - raise GitCloningProblem(repo) + raise UnknownGitCloningProblemError(repo) directory = None start_sha = None @@ -144,7 +144,7 @@ def clone_and_checkout( else: matches = re.match("Cloning into '(.+)'", err.decode("utf-8")) if matches is None: - raise RuntimeException(f'Error cloning {repo} - never saw "Cloning into ..." from git') + raise DbtRuntimeError(f'Error cloning {repo} - never saw "Cloning into ..." from git') directory = matches.group(1) fire_event(GitProgressPullingNewDependency(dir=directory)) full_path = os.path.join(cwd, directory) diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py index c1b8865e33e..e9dcb45017b 100644 --- a/core/dbt/clients/jinja.py +++ b/core/dbt/clients/jinja.py @@ -28,17 +28,17 @@ from dbt.contracts.graph.nodes import GenericTestNode from dbt.exceptions import ( - CaughtMacroException, - CaughtMacroExceptionWithNode, - CompilationException, - InternalException, - InvalidMaterializationArg, - JinjaRenderingException, + CaughtMacroError, + CaughtMacroErrorWithNodeError, + CompilationError, + DbtInternalError, + MaterializationArgError, + JinjaRenderingError, MacroReturn, - MaterializtionMacroNotUsed, - NoSupportedLanguagesFound, - UndefinedCompilation, - UndefinedMacroException, + MaterializtionMacroNotUsedError, + NoSupportedLanguagesFoundError, + UndefinedCompilationError, + UndefinedMacroError, ) from dbt import flags from dbt.node_types import ModelLanguage @@ -161,9 +161,9 @@ def quoted_native_concat(nodes): except (ValueError, SyntaxError, MemoryError): result = raw if isinstance(raw, BoolMarker) and not isinstance(result, bool): - raise JinjaRenderingException(f"Could not convert value '{raw!s}' into type 'bool'") + raise JinjaRenderingError(f"Could not convert value '{raw!s}' into type 'bool'") if isinstance(raw, NumberMarker) and not _is_number(result): - raise JinjaRenderingException(f"Could not convert value '{raw!s}' into type 'number'") + raise JinjaRenderingError(f"Could not convert value '{raw!s}' into type 'number'") return result @@ -241,12 +241,12 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise CaughtMacroException(e) + raise CaughtMacroError(e) def call_macro(self, *args, **kwargs): # called from __call__ methods if self.context is None: - raise InternalException("Context is still None in call_macro!") + raise DbtInternalError("Context is still None in call_macro!") assert self.context is not None macro = self.get_macro() @@ -273,7 +273,7 @@ def push(self, name): def pop(self, name): got = self.call_stack.pop() if got != name: - raise InternalException(f"popped {got}, expected {name}") + raise DbtInternalError(f"popped {got}, expected {name}") class MacroGenerator(BaseMacroGenerator): @@ -300,8 +300,8 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, 
jinja2.exceptions.TemplateRuntimeError) as e: - raise CaughtMacroExceptionWithNode(exc=e, node=self.macro) - except CompilationException as e: + raise CaughtMacroErrorWithNodeError(exc=e, node=self.macro) + except CompilationError as e: e.stack.append(self.macro) raise e @@ -380,7 +380,7 @@ def parse(self, parser): node.defaults.append(languages) else: - raise InvalidMaterializationArg(materialization_name, target.name) + raise MaterializationArgError(materialization_name, target.name) if SUPPORTED_LANG_ARG not in node.args: node.args.append(SUPPORTED_LANG_ARG) @@ -455,7 +455,7 @@ def __call__(self, *args, **kwargs): return self def __reduce__(self): - raise UndefinedCompilation(name=self.name, node=node) + raise UndefinedCompilationError(name=self.name, node=node) return Undefined @@ -513,10 +513,10 @@ def catch_jinja(node=None) -> Iterator[None]: yield except jinja2.exceptions.TemplateSyntaxError as e: e.translated = False - raise CompilationException(str(e), node) from e + raise CompilationError(str(e), node) from e except jinja2.exceptions.UndefinedError as e: - raise UndefinedMacroException(str(e), node) from e - except CompilationException as exc: + raise UndefinedMacroError(str(e), node) from e + except CompilationError as exc: exc.add_node(node) raise @@ -655,13 +655,13 @@ def _convert_function(value: Any, keypath: Tuple[Union[str, int], ...]) -> Any: def get_supported_languages(node: jinja2.nodes.Macro) -> List[ModelLanguage]: if "materialization" not in node.name: - raise MaterializtionMacroNotUsed(node=node) + raise MaterializtionMacroNotUsedError(node=node) no_kwargs = not node.defaults no_langs_found = SUPPORTED_LANG_ARG not in node.args if no_kwargs or no_langs_found: - raise NoSupportedLanguagesFound(node=node) + raise NoSupportedLanguagesFoundError(node=node) lang_idx = node.args.index(SUPPORTED_LANG_ARG) # indexing defaults from the end diff --git a/core/dbt/clients/jinja_static.py b/core/dbt/clients/jinja_static.py index d71211cea6e..47790166ae5 100644 --- a/core/dbt/clients/jinja_static.py +++ b/core/dbt/clients/jinja_static.py @@ -1,6 +1,6 @@ import jinja2 from dbt.clients.jinja import get_environment -from dbt.exceptions import MacroNamespaceNotString, MacroNameNotString +from dbt.exceptions import MacroNamespaceNotStringError, MacroNameNotStringError def statically_extract_macro_calls(string, ctx, db_wrapper=None): @@ -117,14 +117,14 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper): func_name = kwarg.value.value possible_macro_calls.append(func_name) else: - raise MacroNameNotString(kwarg_value=kwarg.value.value) + raise MacroNameNotStringError(kwarg_value=kwarg.value.value) elif kwarg.key == "macro_namespace": # This will remain to enable static resolution kwarg_type = type(kwarg.value).__name__ if kwarg_type == "Const": macro_namespace = kwarg.value.value else: - raise MacroNamespaceNotString(kwarg_type) + raise MacroNamespaceNotStringError(kwarg_type) # positional arguments if packages_arg: diff --git a/core/dbt/clients/system.py b/core/dbt/clients/system.py index b776e91b1d0..6c72fadea52 100644 --- a/core/dbt/clients/system.py +++ b/core/dbt/clients/system.py @@ -19,8 +19,8 @@ SystemErrorRetrievingModTime, SystemCouldNotWrite, SystemExecutingCmd, - SystemStdOutMsg, - SystemStdErrMsg, + SystemStdOut, + SystemStdErr, SystemReportReturnCode, ) import dbt.exceptions @@ -412,7 +412,7 @@ def _interpret_oserror(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn: _handle_posix_error(exc, cwd, cmd) # this should not be reachable, raise _something_ at 
least! - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( "Unhandled exception in _interpret_oserror: {}".format(exc) ) @@ -441,8 +441,8 @@ def run_cmd(cwd: str, cmd: List[str], env: Optional[Dict[str, Any]] = None) -> T except OSError as exc: _interpret_oserror(exc, cwd, cmd) - fire_event(SystemStdOutMsg(bmsg=out)) - fire_event(SystemStdErrMsg(bmsg=err)) + fire_event(SystemStdOut(bmsg=out)) + fire_event(SystemStdErr(bmsg=err)) if proc.returncode != 0: fire_event(SystemReportReturnCode(returncode=proc.returncode)) diff --git a/core/dbt/clients/yaml_helper.py b/core/dbt/clients/yaml_helper.py index bc0ada41ebb..d5a29b0309f 100644 --- a/core/dbt/clients/yaml_helper.py +++ b/core/dbt/clients/yaml_helper.py @@ -60,4 +60,4 @@ def load_yaml_text(contents, path=None): else: error = str(e) - raise dbt.exceptions.ValidationException(error) + raise dbt.exceptions.DbtValidationError(error) diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 563b4bb1171..961ef03cf85 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -21,9 +21,9 @@ SeedNode, ) from dbt.exceptions import ( - GraphDependencyNotFound, - InternalException, - RuntimeException, + GraphDependencyNotFoundError, + DbtInternalError, + DbtRuntimeError, ) from dbt.graph import Graph from dbt.events.functions import fire_event @@ -258,7 +258,7 @@ def _recursively_prepend_ctes( inserting CTEs into the SQL. """ if model.compiled_code is None: - raise RuntimeException("Cannot inject ctes into an unparsed node", model) + raise DbtRuntimeError("Cannot inject ctes into an unparsed node", model) if model.extra_ctes_injected: return (model, model.extra_ctes) @@ -279,7 +279,7 @@ def _recursively_prepend_ctes( # ephemeral model. for cte in model.extra_ctes: if cte.id not in manifest.nodes: - raise InternalException( + raise DbtInternalError( f"During compilation, found a cte reference that " f"could not be resolved: {cte.id}" ) @@ -287,7 +287,7 @@ def _recursively_prepend_ctes( assert not isinstance(cte_model, SeedNode) if not cte_model.is_ephemeral_model: - raise InternalException(f"{cte.id} is not ephemeral") + raise DbtInternalError(f"{cte.id} is not ephemeral") # This model has already been compiled, so it's been # through here before @@ -352,13 +352,6 @@ def _compile_node( ) if node.language == ModelLanguage.python: - # TODO could we also 'minify' this code at all? just aesthetic, not functional - - # quoating seems like something very specific to sql so far - # for all python implementations we are seeing there's no quating. 
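`_recursively_prepend_ctes` above resolves each entry in a model's `extra_ctes` against the manifest, requires the referenced node to be a compiled ephemeral model, and splices its SQL in as a named CTE. A simplified sketch of just the injection step, ignoring recursion, caching, and queries that already open with their own `with` clause:

```python
from typing import List, Tuple

def prepend_ctes(compiled_sql: str, ctes: List[Tuple[str, str]]) -> str:
    """Wrap compiled_sql so each (name, sql) pair becomes a leading CTE."""
    if not ctes:
        return compiled_sql
    cte_sql = ",\n".join(f"{name} as (\n{sql}\n)" for name, sql in ctes)
    return f"with {cte_sql}\n{compiled_sql}"

print(prepend_ctes(
    "select * from __dbt__cte__orders",
    [("__dbt__cte__orders", "select 1 as order_id")],
))
```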
- # TODO try to find better way to do this, given that - original_quoting = self.config.quoting - self.config.quoting = {key: False for key in original_quoting.keys()} context = self._create_node_context(node, manifest, extra_context) postfix = jinja.get_rendered( @@ -368,8 +361,6 @@ def _compile_node( ) # we should NOT jinja render the python model's 'raw code' node.compiled_code = f"{node.raw_code}\n\n{postfix}" - # restore quoting settings in the end since context is lazy evaluated - self.config.quoting = original_quoting else: context = self._create_node_context(node, manifest, extra_context) @@ -400,7 +391,7 @@ def link_node(self, linker: Linker, node: GraphMemberNode, manifest: Manifest): elif dependency in manifest.metrics: linker.dependency(node.unique_id, (manifest.metrics[dependency].unique_id)) else: - raise GraphDependencyNotFound(node, dependency) + raise GraphDependencyNotFoundError(node, dependency) def link_graph(self, linker: Linker, manifest: Manifest, add_test_edges: bool = False): for source in manifest.sources.values(): diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py index e8bf85dbd27..156c41445e9 100644 --- a/core/dbt/config/profile.py +++ b/core/dbt/config/profile.py @@ -10,12 +10,12 @@ from dbt.contracts.connection import Credentials, HasCredentials from dbt.contracts.project import ProfileConfig, UserConfig from dbt.exceptions import ( - CompilationException, + CompilationError, DbtProfileError, DbtProjectError, - ValidationException, - RuntimeException, - ProfileConfigInvalid, + DbtValidationError, + DbtRuntimeError, + ProfileConfigError, ) from dbt.events.types import MissingProfileTarget from dbt.events.functions import fire_event @@ -60,9 +60,9 @@ def read_profile(profiles_dir: str) -> Dict[str, Any]: msg = f"The profiles.yml file at {path} is empty" raise DbtProfileError(INVALID_PROFILE_MESSAGE.format(error_string=msg)) return yaml_content - except ValidationException as e: + except DbtValidationError as e: msg = INVALID_PROFILE_MESSAGE.format(error_string=e) - raise ValidationException(msg) from e + raise DbtValidationError(msg) from e return {} @@ -75,7 +75,7 @@ def read_user_config(directory: str) -> UserConfig: if user_config is not None: UserConfig.validate(user_config) return UserConfig.from_dict(user_config) - except (RuntimeException, ValidationError): + except (DbtRuntimeError, ValidationError): pass return UserConfig() @@ -158,7 +158,7 @@ def validate(self): dct = self.to_profile_info(serialize_credentials=True) ProfileConfig.validate(dct) except ValidationError as exc: - raise ProfileConfigInvalid(exc) from exc + raise ProfileConfigError(exc) from exc @staticmethod def _credentials_from_profile( @@ -182,8 +182,8 @@ def _credentials_from_profile( data = cls.translate_aliases(profile) cls.validate(data) credentials = cls.from_dict(data) - except (RuntimeException, ValidationError) as e: - msg = str(e) if isinstance(e, RuntimeException) else e.message + except (DbtRuntimeError, ValidationError) as e: + msg = str(e) if isinstance(e, DbtRuntimeError) else e.message raise DbtProfileError( 'Credentials in profile "{}", target "{}" invalid: {}'.format( profile_name, target_name, msg @@ -299,7 +299,7 @@ def render_profile( try: profile_data = renderer.render_data(raw_profile_data) - except CompilationException as exc: + except CompilationError as exc: raise DbtProfileError(str(exc)) from exc return target_name, profile_data diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 69c6b79866c..7f0398f53c6 100644 --- 
a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -21,10 +21,10 @@ from dbt.contracts.connection import QueryComment from dbt.exceptions import ( DbtProjectError, - SemverException, - ProjectContractBroken, - ProjectContractInvalid, - RuntimeException, + SemverError, + ProjectContractBrokenError, + ProjectContractError, + DbtRuntimeError, ) from dbt.graph import SelectionSpec from dbt.helper_types import NoValue @@ -219,7 +219,7 @@ def _get_required_version( try: dbt_version = _parse_versions(dbt_raw_version) - except SemverException as e: + except SemverError as e: raise DbtProjectError(str(e)) from e if verify_version: @@ -325,7 +325,7 @@ def create_project(self, rendered: RenderComponents) -> "Project": ProjectContract.validate(rendered.project_dict) cfg = ProjectContract.from_dict(rendered.project_dict) except ValidationError as e: - raise ProjectContractInvalid(e) from e + raise ProjectContractError(e) from e # name/version are required in the Project definition, so we can assume # they are present name = cfg.name @@ -642,7 +642,7 @@ def validate(self): try: ProjectContract.validate(self.to_project_config()) except ValidationError as e: - raise ProjectContractBroken(e) from e + raise ProjectContractBrokenError(e) from e @classmethod def partial_load(cls, project_root: str, *, verify_version: bool = False) -> PartialProject: @@ -667,7 +667,7 @@ def hashed_name(self): def get_selector(self, name: str) -> Union[SelectionSpec, bool]: if name not in self.selectors: - raise RuntimeException( + raise DbtRuntimeError( f"Could not find selector named {name}, expected one of {list(self.selectors)}" ) return self.selectors[name]["definition"] diff --git a/core/dbt/config/renderer.py b/core/dbt/config/renderer.py index 8fc4211754e..68958dbbce5 100644 --- a/core/dbt/config/renderer.py +++ b/core/dbt/config/renderer.py @@ -8,7 +8,7 @@ from dbt.context.secret import SecretContext, SECRET_PLACEHOLDER from dbt.context.base import BaseContext from dbt.contracts.connection import HasCredentials -from dbt.exceptions import DbtProjectError, CompilationException, RecursionException +from dbt.exceptions import DbtProjectError, CompilationError, RecursionError from dbt.utils import deep_map_render @@ -40,14 +40,14 @@ def render_value(self, value: Any, keypath: Optional[Keypath] = None) -> Any: try: with catch_jinja(): return get_rendered(value, self.context, native=True) - except CompilationException as exc: + except CompilationError as exc: msg = f"Could not render {value}: {exc.msg}" - raise CompilationException(msg) from exc + raise CompilationError(msg) from exc def render_data(self, data: Dict[str, Any]) -> Dict[str, Any]: try: return deep_map_render(self.render_entry, data) - except RecursionException: + except RecursionError: raise DbtProjectError( f"Cycle detected: {self.name} input has a reference to itself", project=data ) @@ -159,7 +159,8 @@ def should_render_keypath(self, keypath: Keypath) -> bool: if first in {"seeds", "models", "snapshots", "tests"}: keypath_parts = {(k.lstrip("+ ") if isinstance(k, str) else k) for k in keypath} # model-level hooks - if "pre-hook" in keypath_parts or "post-hook" in keypath_parts: + late_rendered_hooks = {"pre-hook", "post-hook", "pre_hook", "post_hook"} + if keypath_parts.intersection(late_rendered_hooks): return False return True diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index 8b1b30f383b..b0b74b9a222 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -25,11 +25,11 @@ from 
dbt.contracts.relation import ComponentName from dbt.dataclass_schema import ValidationError from dbt.exceptions import ( - ConfigContractBroken, + ConfigContractBrokenError, DbtProjectError, - NonUniquePackageName, - RuntimeException, - UninstalledPackagesFound, + NonUniquePackageNameError, + DbtRuntimeError, + UninstalledPackagesFoundError, ) from dbt.events.functions import warn_or_error from dbt.events.types import UnusedResourceConfigPath @@ -187,7 +187,7 @@ def validate(self): try: Configuration.validate(self.serialize()) except ValidationError as e: - raise ConfigContractBroken(e) from e + raise ConfigContractBrokenError(e) from e @classmethod def _get_rendered_profile( @@ -258,7 +258,7 @@ def from_args(cls, args: Any) -> "RuntimeConfig": :param args: The arguments as parsed from the cli. :raises DbtProjectError: If the project is invalid or missing. :raises DbtProfileError: If the profile is invalid or missing. - :raises ValidationException: If the cli variables are invalid. + :raises DbtValidationError: If the cli variables are invalid. """ project, profile = cls.collect_parts(args) @@ -353,7 +353,7 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: count_packages_specified = len(self.packages.packages) # type: ignore count_packages_installed = len(tuple(self._get_project_directories())) if count_packages_specified > count_packages_installed: - raise UninstalledPackagesFound( + raise UninstalledPackagesFoundError( count_packages_specified, count_packages_installed, self.packages_install_path, @@ -361,7 +361,7 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: project_paths = itertools.chain(internal_packages, self._get_project_directories()) for project_name, project in self.load_projects(project_paths): if project_name in all_projects: - raise NonUniquePackageName(project_name) + raise NonUniquePackageNameError(project_name) all_projects[project_name] = project self.dependencies = all_projects return self.dependencies @@ -426,7 +426,7 @@ def to_target_dict(self): def __getattribute__(self, name): if name in {"profile_name", "target_name", "threads"}: - raise RuntimeException(f'Error: disallowed attribute "{name}" - no profile!') + raise DbtRuntimeError(f'Error: disallowed attribute "{name}" - no profile!') return Profile.__getattribute__(self, name) @@ -453,7 +453,7 @@ def __post_init__(self): def __getattribute__(self, name): # Override __getattribute__ to check that the attribute isn't 'banned'. if name in {"profile_name", "target_name"}: - raise RuntimeException(f'Error: disallowed attribute "{name}" - no profile!') + raise DbtRuntimeError(f'Error: disallowed attribute "{name}" - no profile!') # avoid every attribute access triggering infinite recursion return RuntimeConfig.__getattribute__(self, name) @@ -602,7 +602,7 @@ def from_args(cls: Type[RuntimeConfig], args: Any) -> "RuntimeConfig": :param args: The arguments as parsed from the cli. :raises DbtProjectError: If the project is invalid or missing. :raises DbtProfileError: If the profile is invalid or missing. - :raises ValidationException: If the cli variables are invalid. + :raises DbtValidationError: If the cli variables are invalid. 
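+        Vars passed on the cli are parsed as YAML and must form a mapping; a non-dict value is rejected up front (see parse_cli_vars in config/utils.py).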
""" project, profile = cls.collect_parts(args) diff --git a/core/dbt/config/selectors.py b/core/dbt/config/selectors.py index 193a1bb70a8..e26ee01d316 100644 --- a/core/dbt/config/selectors.py +++ b/core/dbt/config/selectors.py @@ -12,7 +12,7 @@ resolve_path_from_base, ) from dbt.contracts.selection import SelectorFile -from dbt.exceptions import DbtSelectorsError, RuntimeException +from dbt.exceptions import DbtSelectorsError, DbtRuntimeError from dbt.graph import parse_from_selectors_definition, SelectionSpec from dbt.graph.selector_spec import SelectionCriteria @@ -46,7 +46,7 @@ def selectors_from_dict(cls, data: Dict[str, Any]) -> "SelectorConfig": f"yaml-selectors", result_type="invalid_selector", ) from exc - except RuntimeException as exc: + except DbtRuntimeError as exc: raise DbtSelectorsError( f"Could not read selector file data: {exc}", result_type="invalid_selector", @@ -62,7 +62,7 @@ def render_from_dict( ) -> "SelectorConfig": try: rendered = renderer.render_data(data) - except (ValidationError, RuntimeException) as exc: + except (ValidationError, DbtRuntimeError) as exc: raise DbtSelectorsError( f"Could not render selector data: {exc}", result_type="invalid_selector", @@ -77,7 +77,7 @@ def from_path( ) -> "SelectorConfig": try: data = load_yaml_text(load_file_contents(str(path))) - except (ValidationError, RuntimeException) as exc: + except (ValidationError, DbtRuntimeError) as exc: raise DbtSelectorsError( f"Could not read selector file: {exc}", result_type="invalid_selector", diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py index 921626ba088..c69f0d5c79c 100644 --- a/core/dbt/config/utils.py +++ b/core/dbt/config/utils.py @@ -8,20 +8,24 @@ from dbt.config import Profile, Project, read_user_config from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.events.functions import fire_event -from dbt.events.types import InvalidVarsYAML -from dbt.exceptions import ValidationException, VarsArgNotYamlDict +from dbt.events.types import InvalidOptionYAML +from dbt.exceptions import DbtValidationError, OptionNotYamlDictError def parse_cli_vars(var_string: str) -> Dict[str, Any]: + return parse_cli_yaml_string(var_string, "vars") + + +def parse_cli_yaml_string(var_string: str, cli_option_name: str) -> Dict[str, Any]: try: cli_vars = yaml_helper.load_yaml_text(var_string) var_type = type(cli_vars) if var_type is dict: return cli_vars else: - raise VarsArgNotYamlDict(var_type) - except ValidationException: - fire_event(InvalidVarsYAML()) + raise OptionNotYamlDictError(var_type, cli_option_name) + except DbtValidationError: + fire_event(InvalidOptionYAML(option_name=cli_option_name)) raise diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index fc218538bac..edf0895fe31 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -10,12 +10,12 @@ from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER from dbt.contracts.graph.nodes import Resource from dbt.exceptions import ( - DisallowSecretEnvVar, - EnvVarMissing, + SecretEnvVarLocationError, + EnvVarMissingError, MacroReturn, - RequiredVarNotFound, - SetStrictWrongType, - ZipStrictWrongType, + RequiredVarNotFoundError, + SetStrictWrongTypeError, + ZipStrictWrongTypeError, ) from dbt.events.functions import fire_event, get_invocation_id from dbt.events.types import JinjaLogInfo, JinjaLogDebug @@ -153,7 +153,7 @@ def node_name(self): return "" def get_missing_var(self, var_name): - raise RequiredVarNotFound(var_name, self._merged, self._node) + raise 
RequiredVarNotFoundError(var_name, self._merged, self._node) def has_var(self, var_name: str): return var_name in self._merged @@ -297,7 +297,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - raise DisallowSecretEnvVar(var) + raise SecretEnvVarLocationError(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -312,7 +312,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) if os.environ.get("DBT_MACRO_DEBUGGING"): @@ -493,7 +493,7 @@ def set_strict(value: Iterable[Any]) -> Set[Any]: try: return set(value) except TypeError as e: - raise SetStrictWrongType(e) + raise SetStrictWrongTypeError(e) @contextmember("zip") @staticmethod @@ -537,7 +537,7 @@ def zip_strict(*args: Iterable[Any]) -> Iterable[Any]: try: return zip(*args) except TypeError as e: - raise ZipStrictWrongType(e) + raise ZipStrictWrongTypeError(e) @contextmember @staticmethod diff --git a/core/dbt/context/configured.py b/core/dbt/context/configured.py index ca1de35423b..da4132e8046 100644 --- a/core/dbt/context/configured.py +++ b/core/dbt/context/configured.py @@ -8,7 +8,7 @@ from dbt.context.base import contextproperty, contextmember, Var from dbt.context.target import TargetContext -from dbt.exceptions import EnvVarMissing, DisallowSecretEnvVar +from dbt.exceptions import EnvVarMissingError, SecretEnvVarLocationError class ConfiguredContext(TargetContext): @@ -86,7 +86,7 @@ def var(self) -> ConfiguredVar: def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - raise DisallowSecretEnvVar(var) + raise SecretEnvVarLocationError(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -104,7 +104,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) class MacroResolvingContext(ConfiguredContext): diff --git a/core/dbt/context/context_config.py b/core/dbt/context/context_config.py index 2b0aafe7189..b497887ab45 100644 --- a/core/dbt/context/context_config.py +++ b/core/dbt/context/context_config.py @@ -5,7 +5,7 @@ from dbt.config import RuntimeConfig, Project, IsFQNResource from dbt.contracts.graph.model_config import BaseConfig, get_config_for, _listify -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.node_types import NodeType from dbt.utils import fqn_search @@ -89,7 +89,7 @@ def get_node_project(self, project_name: str): return self._active_project dependencies = self._active_project.load_dependencies() if project_name not in dependencies: - raise InternalException( + raise DbtInternalError( f"Project name {project_name} not found in dependencies " f"(found {list(dependencies)})" ) @@ -287,14 +287,14 @@ def _add_config_call(cls, config_call_dict, opts: Dict[str, Any]) -> None: elif k in BaseConfig.mergebehavior["update"]: if not isinstance(v, dict): - raise InternalException(f"expected dict, got {v}") + raise DbtInternalError(f"expected dict, got {v}") if k in config_call_dict and isinstance(config_call_dict[k], dict): config_call_dict[k].update(v) else: config_call_dict[k] = v elif k in BaseConfig.mergebehavior["dict_key_append"]: if not isinstance(v, dict): - raise InternalException(f"expected dict, got {v}") + raise DbtInternalError(f"expected dict, got 
{v}") if k in config_call_dict: # should always be a dict for key, value in v.items(): extend = False diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py index 89a652736dd..3d5abf42e11 100644 --- a/core/dbt/context/docs.py +++ b/core/dbt/context/docs.py @@ -1,8 +1,8 @@ from typing import Any, Dict, Union from dbt.exceptions import ( - DocTargetNotFound, - InvalidDocArgs, + DocTargetNotFoundError, + DocArgsError, ) from dbt.config.runtime import RuntimeConfig from dbt.contracts.graph.manifest import Manifest @@ -52,7 +52,7 @@ def doc(self, *args: str) -> str: elif len(args) == 2: doc_package_name, doc_name = args else: - raise InvalidDocArgs(self.node, args) + raise DocArgsError(self.node, args) # Documentation target_doc = self.manifest.resolve_doc( @@ -68,7 +68,7 @@ def doc(self, *args: str) -> str: # TODO CT-211 source_file.add_node(self.node.unique_id) # type: ignore[union-attr] else: - raise DocTargetNotFound( + raise DocTargetNotFoundError( node=self.node, target_doc_name=doc_name, target_doc_package=doc_package_name ) diff --git a/core/dbt/context/exceptions_jinja.py b/core/dbt/context/exceptions_jinja.py index 5663b4701e0..98f19048f1a 100644 --- a/core/dbt/context/exceptions_jinja.py +++ b/core/dbt/context/exceptions_jinja.py @@ -6,23 +6,23 @@ from dbt.events.types import JinjaLogWarning from dbt.exceptions import ( - RuntimeException, - MissingConfig, - MissingMaterialization, - MissingRelation, - AmbiguousAlias, - AmbiguousCatalogMatch, - CacheInconsistency, - DataclassNotDict, - CompilationException, - DatabaseException, - DependencyNotFound, - DependencyException, - DuplicatePatchPath, - DuplicateResourceName, - InvalidPropertyYML, - NotImplementedException, - RelationWrongType, + DbtRuntimeError, + MissingConfigError, + MissingMaterializationError, + MissingRelationError, + AmbiguousAliasError, + AmbiguousCatalogMatchError, + CacheInconsistencyError, + DataclassNotDictError, + CompilationError, + DbtDatabaseError, + DependencyNotFoundError, + DependencyError, + DuplicatePatchPathError, + DuplicateResourceNameError, + PropertyYMLError, + NotImplementedError, + RelationWrongTypeError, ) @@ -32,67 +32,69 @@ def warn(msg, node=None): def missing_config(model, name) -> NoReturn: - raise MissingConfig(unique_id=model.unique_id, name=name) + raise MissingConfigError(unique_id=model.unique_id, name=name) def missing_materialization(model, adapter_type) -> NoReturn: - raise MissingMaterialization(model=model, adapter_type=adapter_type) + raise MissingMaterializationError( + materialization=model.config.materialized, adapter_type=adapter_type + ) def missing_relation(relation, model=None) -> NoReturn: - raise MissingRelation(relation, model) + raise MissingRelationError(relation, model) def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: - raise AmbiguousAlias(node_1, node_2, duped_name) + raise AmbiguousAliasError(node_1, node_2, duped_name) def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: - raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + raise AmbiguousCatalogMatchError(unique_id, match_1, match_2) def raise_cache_inconsistent(message) -> NoReturn: - raise CacheInconsistency(message) + raise CacheInconsistencyError(message) def raise_dataclass_not_dict(obj) -> NoReturn: - raise DataclassNotDict(obj) + raise DataclassNotDictError(obj) def raise_compiler_error(msg, node=None) -> NoReturn: - raise CompilationException(msg, node) + raise CompilationError(msg, node) def raise_database_error(msg, node=None) -> NoReturn: - 
raise DatabaseException(msg, node) + raise DbtDatabaseError(msg, node) def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: - raise DependencyNotFound(node, node_description, required_pkg) + raise DependencyNotFoundError(node, node_description, required_pkg) def raise_dependency_error(msg) -> NoReturn: - raise DependencyException(scrub_secrets(msg, env_secrets())) + raise DependencyError(scrub_secrets(msg, env_secrets())) def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: - raise DuplicatePatchPath(patch_1, existing_patch_path) + raise DuplicatePatchPathError(patch_1, existing_patch_path) def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: - raise DuplicateResourceName(node_1, node_2) + raise DuplicateResourceNameError(node_1, node_2) def raise_invalid_property_yml_version(path, issue) -> NoReturn: - raise InvalidPropertyYML(path, issue) + raise PropertyYMLError(path, issue) def raise_not_implemented(msg) -> NoReturn: - raise NotImplementedException(msg) + raise NotImplementedError(msg) def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: - raise RelationWrongType(relation, expected_type, model) + raise RelationWrongTypeError(relation, expected_type, model) # Update this when a new function should be added to the @@ -128,7 +130,7 @@ def wrap(func): def inner(*args, **kwargs): try: return func(*args, **kwargs) - except RuntimeException as exc: + except DbtRuntimeError as exc: exc.add_node(model) raise exc diff --git a/core/dbt/context/macro_resolver.py b/core/dbt/context/macro_resolver.py index 6e70bafd05e..20f97febcb0 100644 --- a/core/dbt/context/macro_resolver.py +++ b/core/dbt/context/macro_resolver.py @@ -1,6 +1,6 @@ from typing import Dict, MutableMapping, Optional from dbt.contracts.graph.nodes import Macro -from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro +from dbt.exceptions import DuplicateMacroNameError, PackageNotFoundForMacroError from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME from dbt.clients.jinja import MacroGenerator @@ -86,7 +86,7 @@ def _add_macro_to( package_namespaces[macro.package_name] = namespace if macro.name in namespace: - raise DuplicateMacroName(macro, macro, macro.package_name) + raise DuplicateMacroNameError(macro, macro, macro.package_name) package_namespaces[macro.package_name][macro.name] = macro def add_macro(self, macro: Macro): @@ -187,7 +187,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.macro_resolver.packages: macro = self.macro_resolver.packages[package_name].get(name) else: - raise PackageNotFoundForMacro(package_name) + raise PackageNotFoundForMacroError(package_name) if not macro: return None macro_func = MacroGenerator(macro, self.ctx, self.node, self.thread_ctx) diff --git a/core/dbt/context/macros.py b/core/dbt/context/macros.py index 921480ec05a..1c61e564e06 100644 --- a/core/dbt/context/macros.py +++ b/core/dbt/context/macros.py @@ -3,7 +3,7 @@ from dbt.clients.jinja import MacroGenerator, MacroStack from dbt.contracts.graph.nodes import Macro from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME -from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro +from dbt.exceptions import DuplicateMacroNameError, PackageNotFoundForMacroError FlatNamespace = Dict[str, MacroGenerator] @@ -75,7 +75,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.packages: return 
self.packages[package_name].get(name) else: - raise PackageNotFoundForMacro(package_name) + raise PackageNotFoundForMacroError(package_name) # This class builds the MacroNamespace by adding macros to @@ -122,7 +122,7 @@ def _add_macro_to( hierarchy[macro.package_name] = namespace if macro.name in namespace: - raise DuplicateMacroName(macro_func.macro, macro, macro.package_name) + raise DuplicateMacroNameError(macro_func.macro, macro, macro.package_name) hierarchy[macro.package_name][macro.name] = macro_func def add_macro(self, macro: Macro, ctx: Dict[str, Any]): diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 2e7af0a79f2..fec5111e36c 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -41,28 +41,28 @@ from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference from dbt.events.functions import get_metadata_vars from dbt.exceptions import ( - CompilationException, - ConflictingConfigKeys, - DisallowSecretEnvVar, - EnvVarMissing, - InternalException, - InvalidInlineModelConfig, - InvalidNumberSourceArgs, - InvalidPersistDocsValueType, - LoadAgateTableNotSeed, + CompilationError, + ConflictingConfigKeysError, + SecretEnvVarLocationError, + EnvVarMissingError, + DbtInternalError, + InlineModelConfigError, + NumberSourceArgsError, + PersistDocsValueTypeError, + LoadAgateTableNotSeedError, LoadAgateTableValueError, - MacroInvalidDispatchArg, - MacrosSourcesUnWriteable, - MetricInvalidArgs, - MissingConfig, - OperationsCannotRefEphemeralNodes, - PackageNotInDeps, - ParsingException, - RefBadContext, - RefInvalidArgs, - RuntimeException, - TargetNotFound, - ValidationException, + MacroDispatchArgError, + MacrosSourcesUnWriteableError, + MetricArgsError, + MissingConfigError, + OperationsCannotRefEphemeralNodesError, + PackageNotInDepsError, + ParsingError, + RefBadContextError, + RefArgsError, + DbtRuntimeError, + TargetNotFoundError, + DbtValidationError, ) from dbt.config import IsFQNResource from dbt.node_types import NodeType, ModelLanguage @@ -144,10 +144,10 @@ def dispatch( f'`adapter.dispatch("{suggest_macro_name}", ' f'macro_namespace="{suggest_macro_namespace}")`?' ) - raise CompilationException(msg) + raise CompilationError(msg) if packages is not None: - raise MacroInvalidDispatchArg(macro_name) + raise MacroDispatchArgError(macro_name) namespace = macro_namespace @@ -159,7 +159,7 @@ def dispatch( search_packages = [self.config.project_name, namespace] else: # Not a string and not None so must be a list - raise CompilationException( + raise CompilationError( f"In adapter.dispatch, got a list macro_namespace argument " f'("{macro_namespace}"), but macro_namespace should be None or a string.'
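+ # macro_namespace feeds the package search list built above, so it must stay a single package name (or None); a list would leave the dispatch search order undefined.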
) @@ -172,8 +172,8 @@ def dispatch( try: # this uses the namespace from the context macro = self._namespace.get_from_package(package_name, search_name) - except CompilationException: - # Only raise CompilationException if macro is not found in + except CompilationError: + # Only raise CompilationError if macro is not found in # any package macro = None @@ -187,7 +187,7 @@ def dispatch( searched = ", ".join(repr(a) for a in attempts) msg = f"In dispatch: No macro named '{macro_name}' found\n Searched for: {searched}" - raise CompilationException(msg) + raise CompilationError(msg) class BaseResolver(metaclass=abc.ABCMeta): @@ -223,12 +223,12 @@ def _repack_args(self, name: str, package: Optional[str]) -> List[str]: def validate_args(self, name: str, package: Optional[str]): if not isinstance(name, str): - raise CompilationException( + raise CompilationError( f"The name argument to ref() must be a string, got {type(name)}" ) if package is not None and not isinstance(package, str): - raise CompilationException( + raise CompilationError( f"The package argument to ref() must be a string or None, got {type(package)}" ) @@ -241,7 +241,7 @@ def __call__(self, *args: str) -> RelationProxy: elif len(args) == 2: package, name = args else: - raise RefInvalidArgs(node=self.model, args=args) + raise RefArgsError(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -253,19 +253,19 @@ def resolve(self, source_name: str, table_name: str): def validate_args(self, source_name: str, table_name: str): if not isinstance(source_name, str): - raise CompilationException( + raise CompilationError( f"The source name (first) argument to source() must be a " f"string, got {type(source_name)}" ) if not isinstance(table_name, str): - raise CompilationException( + raise CompilationError( f"The table name (second) argument to source() must be a " f"string, got {type(table_name)}" ) def __call__(self, *args: str) -> RelationProxy: if len(args) != 2: - raise InvalidNumberSourceArgs(args, node=self.model) + raise NumberSourceArgsError(args, node=self.model) self.validate_args(args[0], args[1]) return self.resolve(args[0], args[1]) @@ -282,12 +282,12 @@ def _repack_args(self, name: str, package: Optional[str]) -> List[str]: def validate_args(self, name: str, package: Optional[str]): if not isinstance(name, str): - raise CompilationException( + raise CompilationError( f"The name argument to metric() must be a string, got {type(name)}" ) if package is not None and not isinstance(package, str): - raise CompilationException( + raise CompilationError( f"The package argument to metric() must be a string or None, got {type(package)}" ) @@ -300,7 +300,7 @@ def __call__(self, *args: str) -> MetricReference: elif len(args) == 2: package, name = args else: - raise MetricInvalidArgs(node=self.model, args=args) + raise MetricArgsError(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -321,7 +321,7 @@ def _transform_config(self, config): if oldkey in config: newkey = oldkey.replace("_", "-") if newkey in config: - raise ConflictingConfigKeys(oldkey, newkey, node=self.model) + raise ConflictingConfigKeysError(oldkey, newkey, node=self.model) config[newkey] = config.pop(oldkey) return config @@ -331,14 +331,14 @@ def __call__(self, *args, **kwargs): elif len(args) == 0 and len(kwargs) > 0: opts = kwargs else: - raise InvalidInlineModelConfig(node=self.model) + raise InlineModelConfigError(node=self.model) opts = self._transform_config(opts) # it's 
ok to have a parse context with no context config, but you must # not call it! if self.context_config is None: - raise RuntimeException("At parse time, did not receive a context config") + raise DbtRuntimeError("At parse time, did not receive a context config") self.context_config.add_config_call(opts) return "" @@ -379,7 +379,7 @@ def _lookup(self, name, default=_MISSING): else: result = self.model.config.get(name, default) if result is _MISSING: - raise MissingConfig(unique_id=self.model.unique_id, name=name) + raise MissingConfigError(unique_id=self.model.unique_id, name=name) return result def require(self, name, validator=None): @@ -401,14 +401,14 @@ def get(self, name, default=None, validator=None): def persist_relation_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise InvalidPersistDocsValueType(persist_docs) + raise PersistDocsValueTypeError(persist_docs) return persist_docs.get("relation", False) def persist_column_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise InvalidPersistDocsValueType(persist_docs) + raise PersistDocsValueTypeError(persist_docs) return persist_docs.get("columns", False) @@ -467,7 +467,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Rel ) if target_model is None or isinstance(target_model, Disabled): - raise TargetNotFound( + raise TargetNotFoundError( node=self.model, target_name=target_name, target_kind="node", @@ -489,7 +489,7 @@ def validate( ) -> None: if resolved.unique_id not in self.model.depends_on.nodes: args = self._repack_args(target_name, target_package) - raise RefBadContext(node=self.model, args=args) + raise RefBadContextError(node=self.model, args=args) class OperationRefResolver(RuntimeRefResolver): @@ -505,7 +505,7 @@ def create_relation(self, target_model: ManifestNode, name: str) -> RelationProx if target_model.is_ephemeral_model: # In operations, we can't ref() ephemeral nodes, because # Macros do not support set_cte - raise OperationsCannotRefEphemeralNodes(target_model.name, node=self.model) + raise OperationsCannotRefEphemeralNodesError(target_model.name, node=self.model) else: return super().create_relation(target_model, name) @@ -528,7 +528,7 @@ def resolve(self, source_name: str, table_name: str): ) if target_source is None or isinstance(target_source, Disabled): - raise TargetNotFound( + raise TargetNotFoundError( node=self.model, target_name=f"{source_name}.{table_name}", target_kind="source", @@ -555,7 +555,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Met ) if target_metric is None or isinstance(target_metric, Disabled): - raise TargetNotFound( + raise TargetNotFoundError( node=self.model, target_name=target_name, target_kind="metric", @@ -584,7 +584,7 @@ def packages_for_node(self) -> Iterable[Project]: if package_name != self._config.project_name: if package_name not in dependencies: # I don't think this is actually reachable - raise PackageNotInDeps(package_name, node=self._node) + raise PackageNotInDepsError(package_name, node=self._node) yield dependencies[package_name] yield self._config @@ -674,7 +674,7 @@ def __init__( context_config: Optional[ContextConfig], ) -> None: if provider is None: - raise InternalException(f"Invalid provider given to context: {provider}") + raise DbtInternalError(f"Invalid provider given to context: {provider}") # mypy appeasement - we know it'll be a RuntimeConfig self.config: 
RuntimeConfig self.model: Union[Macro, ManifestNode] = model @@ -751,7 +751,7 @@ def inner(value: T) -> None: return elif value == arg: return - raise ValidationException( + raise DbtValidationError( 'Expected value "{}" to be one of {}'.format(value, ",".join(map(str, args))) ) @@ -767,7 +767,7 @@ def inner(value: T) -> None: def write(self, payload: str) -> str: # macros/source defs aren't 'writeable'. if isinstance(self.model, (Macro, SourceDefinition)): - raise MacrosSourcesUnWriteable(node=self.model) + raise MacrosSourcesUnWriteableError(node=self.model) self.model.build_path = self.model.write_node(self.config.target_path, "run", payload) return "" @@ -782,12 +782,12 @@ def try_or_compiler_error( try: return func(*args, **kwargs) except Exception: - raise CompilationException(message_if_exception, self.model) + raise CompilationError(message_if_exception, self.model) @contextmember def load_agate_table(self) -> agate.Table: if not isinstance(self.model, SeedNode): - raise LoadAgateTableNotSeed(self.model.resource_type, node=self.model) + raise LoadAgateTableNotSeedError(self.model.resource_type, node=self.model) assert self.model.root_path path = os.path.join(self.model.root_path, self.model.original_file_path) column_types = self.model.config.column_types @@ -1185,7 +1185,7 @@ def adapter_macro(self, name: str, *args, **kwargs): "https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch)" " adapter_macro was called for: {macro_name}".format(macro_name=name) ) - raise CompilationException(msg) + raise CompilationError(msg) @contextmember def env_var(self, var: str, default: Optional[str] = None) -> str: @@ -1196,7 +1196,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - raise DisallowSecretEnvVar(var) + raise SecretEnvVarLocationError(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1229,7 +1229,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.env_vars.append(var) # type: ignore[union-attr] return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) @contextproperty def selected_resources(self) -> List[str]: @@ -1248,7 +1248,7 @@ def submit_python_job(self, parsed_model: Dict, compiled_code: str) -> AdapterRe and self.context_macro_stack.call_stack[1] == "macro.dbt.statement" and "materialization" in self.context_macro_stack.call_stack[0] ): - raise RuntimeException( + raise DbtRuntimeError( f"submit_python_job is not intended to be called here, at model {parsed_model['alias']}, with macro call_stack {self.context_macro_stack.call_stack}."
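+ # The call-stack check above limits this context method to statement blocks running inside a materialization macro; any other call site lands on this error.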
) return self.adapter.submit_python_job(parsed_model, compiled_code) @@ -1410,7 +1410,7 @@ def generate_runtime_macro_context( class ExposureRefResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) not in (1, 2): - raise RefInvalidArgs(node=self.model, args=args) + raise RefArgsError(node=self.model, args=args) self.model.refs.append(list(args)) return "" @@ -1418,7 +1418,7 @@ def __call__(self, *args) -> str: class ExposureSourceResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) != 2: - raise InvalidNumberSourceArgs(args, node=self.model) + raise NumberSourceArgsError(args, node=self.model) self.model.sources.append(list(args)) return "" @@ -1426,7 +1426,7 @@ def __call__(self, *args) -> str: class ExposureMetricResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) not in (1, 2): - raise MetricInvalidArgs(node=self.model, args=args) + raise MetricArgsError(node=self.model, args=args) self.model.metrics.append(list(args)) return "" @@ -1468,14 +1468,14 @@ def __call__(self, *args) -> str: elif len(args) == 2: package, name = args else: - raise RefInvalidArgs(node=self.model, args=args) + raise RefArgsError(node=self.model, args=args) self.validate_args(name, package) self.model.refs.append(list(args)) return "" def validate_args(self, name, package): if not isinstance(name, str): - raise ParsingException( + raise ParsingError( f"In a metrics section in {self.model.original_file_path} " "the name argument to ref() must be a string" ) @@ -1558,7 +1558,7 @@ def _build_test_namespace(self): def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - raise DisallowSecretEnvVar(var) + raise SecretEnvVarLocationError(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1584,7 +1584,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.add_env_var(var, yaml_key, name) # type: ignore[union-attr] return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) def generate_test_context( diff --git a/core/dbt/context/secret.py b/core/dbt/context/secret.py index da13509ef50..4d8ff342aff 100644 --- a/core/dbt/context/secret.py +++ b/core/dbt/context/secret.py @@ -4,7 +4,7 @@ from .base import BaseContext, contextmember from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER -from dbt.exceptions import EnvVarMissing +from dbt.exceptions import EnvVarMissingError SECRET_PLACEHOLDER = "$$$DBT_SECRET_START$$${}$$$DBT_SECRET_END$$$" @@ -50,7 +50,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: self.env_vars[var] = return_value if var in os.environ else DEFAULT_ENV_PLACEHOLDER return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) def generate_secret_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]: diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py index fe4ae912229..3f12a603363 100644 --- a/core/dbt/contracts/connection.py +++ b/core/dbt/contracts/connection.py @@ -12,7 +12,7 @@ List, Callable, ) -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.utils import translate_aliases from dbt.events.functions import fire_event from dbt.events.types import NewConnectionOpening @@ -94,7 +94,7 @@ def handle(self): # this will actually change 'self._handle'. 
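+            # resolve() swaps the placeholder for a real connection handle; if the adapter's open() reads .handle again before that swap completes, this property re-enters itself and the resulting RecursionError is converted below into a DbtInternalError.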
self._handle.resolve(self) except RecursionError as exc: - raise InternalException( + raise DbtInternalError( "A connection's open() method attempted to read the handle value" ) from exc return self._handle diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index c43012ec521..4dd2ddc2f33 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -40,10 +40,10 @@ from dbt.contracts.util import BaseArtifactMetadata, SourceKey, ArtifactMixin, schema_version from dbt.dataclass_schema import dbtClassMixin from dbt.exceptions import ( - CompilationException, - DuplicateResourceName, - DuplicateMacroInPackage, - DuplicateMaterializationName, + CompilationError, + DuplicateResourceNameError, + DuplicateMacroInPackageError, + DuplicateMaterializationNameError, ) from dbt.helper_types import PathSet from dbt.events.functions import fire_event @@ -102,7 +102,7 @@ def populate(self, manifest): def perform_lookup(self, unique_id: UniqueID, manifest) -> Documentation: if unique_id not in manifest.docs: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Doc {unique_id} found in cache but not found in manifest" ) return manifest.docs[unique_id] @@ -135,7 +135,7 @@ def populate(self, manifest): def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> SourceDefinition: if unique_id not in manifest.sources: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Source {unique_id} found in cache but not found in manifest" ) return manifest.sources[unique_id] @@ -173,7 +173,7 @@ def populate(self, manifest): def perform_lookup(self, unique_id: UniqueID, manifest) -> ManifestNode: if unique_id not in manifest.nodes: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Node {unique_id} found in cache but not found in manifest" ) return manifest.nodes[unique_id] @@ -206,7 +206,7 @@ def populate(self, manifest): def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> Metric: if unique_id not in manifest.metrics: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Metric {unique_id} found in cache but not found in manifest" ) return manifest.metrics[unique_id] @@ -398,7 +398,7 @@ def __eq__(self, other: object) -> bool: return NotImplemented equal = self.specificity == other.specificity and self.locality == other.locality if equal: - raise DuplicateMaterializationName(self.macro, other) + raise DuplicateMaterializationNameError(self.macro, other) return equal @@ -480,13 +480,13 @@ def _update_into(dest: MutableMapping[str, T], new_item: T): """ unique_id = new_item.unique_id if unique_id not in dest: - raise dbt.exceptions.RuntimeException( + raise dbt.exceptions.DbtRuntimeError( f"got an update_{new_item.resource_type} call with an " f"unrecognized {new_item.resource_type}: {new_item.unique_id}" ) existing = dest[unique_id] if new_item.original_file_path != existing.original_file_path: - raise dbt.exceptions.RuntimeException( + raise dbt.exceptions.DbtRuntimeError( f"cannot update a {new_item.resource_type} to have a new file path!"
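+            # An update may replace a node's definition, but it may never move the node: original_file_path must match the entry already in the manifest.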
) dest[unique_id] = new_item @@ -839,7 +839,7 @@ def expect(self, unique_id: str) -> GraphMemberNode: return self.metrics[unique_id] else: # something terrible has happened - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( "Expected node {} not found in manifest".format(unique_id) ) @@ -1035,7 +1035,7 @@ def merge_from_artifact( def add_macro(self, source_file: SourceFile, macro: Macro): if macro.unique_id in self.macros: # detect that the macro exists and emit an error - raise DuplicateMacroInPackage(macro=macro, macro_mapping=self.macros) + raise DuplicateMacroInPackageError(macro=macro, macro_mapping=self.macros) self.macros[macro.unique_id] = macro source_file.macros.append(macro.unique_id) @@ -1213,7 +1213,7 @@ def __post_serialize__(self, dct): def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]): if value.unique_id in src: - raise DuplicateResourceName(value, src[value.unique_id]) + raise DuplicateResourceNameError(value, src[value.unique_id]) K_T = TypeVar("K_T") @@ -1222,7 +1222,7 @@ def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]): def _expect_value(key: K_T, src: Mapping[K_T, V_T], old_file: SourceFile, name: str) -> V_T: if key not in src: - raise CompilationException( + raise CompilationError( 'Expected to find "{}" in cached "result.{}" based ' "on cached file information: {}!".format(key, name, old_file) ) diff --git a/core/dbt/contracts/graph/model_config.py b/core/dbt/contracts/graph/model_config.py index b22f724de53..407c5435786 100644 --- a/core/dbt/contracts/graph/model_config.py +++ b/core/dbt/contracts/graph/model_config.py @@ -9,7 +9,7 @@ ) from dbt.contracts.graph.unparsed import AdditionalPropertiesAllowed, Docs from dbt.contracts.graph.utils import validate_color -from dbt.exceptions import InternalException, CompilationException +from dbt.exceptions import DbtInternalError, CompilationError from dbt.contracts.util import Replaceable, list_str from dbt import hooks from dbt.node_types import NodeType @@ -30,7 +30,7 @@ def _get_meta_value(cls: Type[M], fld: Field, key: str, default: Any) -> M: try: return cls(value) except ValueError as exc: - raise InternalException(f"Invalid {cls} value: {value}") from exc + raise DbtInternalError(f"Invalid {cls} value: {value}") from exc def _set_meta_value(obj: M, key: str, existing: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: @@ -140,17 +140,17 @@ def _merge_field_value( return _listify(self_value) + _listify(other_value) elif merge_behavior == MergeBehavior.Update: if not isinstance(self_value, dict): - raise InternalException(f"expected dict, got {self_value}") + raise DbtInternalError(f"expected dict, got {self_value}") if not isinstance(other_value, dict): - raise InternalException(f"expected dict, got {other_value}") + raise DbtInternalError(f"expected dict, got {other_value}") value = self_value.copy() value.update(other_value) return value elif merge_behavior == MergeBehavior.DictKeyAppend: if not isinstance(self_value, dict): - raise InternalException(f"expected dict, got {self_value}") + raise DbtInternalError(f"expected dict, got {self_value}") if not isinstance(other_value, dict): - raise InternalException(f"expected dict, got {other_value}") + raise DbtInternalError(f"expected dict, got {other_value}") new_dict = {} for key in self_value.keys(): new_dict[key] = _listify(self_value[key]) @@ -172,7 +172,7 @@ def _merge_field_value( return new_dict else: - raise InternalException(f"Got an invalid merge_behavior: {merge_behavior}") + raise 
DbtInternalError(f"Got an invalid merge_behavior: {merge_behavior}") def insensitive_patterns(*patterns: str): @@ -227,7 +227,7 @@ def __delitem__(self, key): msg = ( 'Error, tried to delete config key "{}": Cannot delete ' "built-in keys" ).format(key) - raise CompilationException(msg) + raise CompilationError(msg) else: del self._extra[key] diff --git a/core/dbt/contracts/graph/nodes.py b/core/dbt/contracts/graph/nodes.py index 730e2286ccd..a299f5e9b12 100644 --- a/core/dbt/contracts/graph/nodes.py +++ b/core/dbt/contracts/graph/nodes.py @@ -46,6 +46,7 @@ from dbt.events.contextvars import set_contextvars from dbt import flags from dbt.node_types import ModelLanguage, NodeType +from dbt.utils import cast_dict_to_dict_of_strings from .model_config import ( @@ -206,6 +207,8 @@ class NodeInfoMixin: @property def node_info(self): + meta = getattr(self, "meta", {}) + meta_stringified = cast_dict_to_dict_of_strings(meta) node_info = { "node_path": getattr(self, "path", None), "node_name": getattr(self, "name", None), @@ -215,6 +218,7 @@ "node_status": str(self._event_status.get("node_status")), "node_started_at": self._event_status.get("started_at"), "node_finished_at": self._event_status.get("finished_at"), + "meta": meta_stringified, } node_info_msg = NodeInfo(**node_info) return node_info_msg diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py index ba2e48c7c9c..6521e644542 100644 --- a/core/dbt/contracts/graph/unparsed.py +++ b/core/dbt/contracts/graph/unparsed.py @@ -11,7 +11,7 @@ # trigger the PathEncoder import dbt.helper_types # noqa:F401 -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError from dbt.dataclass_schema import dbtClassMixin, StrEnum, ExtensibleDbtClassMixin, ValidationError @@ -222,7 +222,7 @@ class ExternalPartition(AdditionalPropertiesAllowed, Replaceable): def __post_init__(self): if self.name == "" or self.data_type == "": - raise CompilationException("External partition columns must have names and data types") + raise CompilationError("External partition columns must have names and data types") @dataclass @@ -514,7 +514,7 @@ def validate(cls, data): errors.append("must contain only letters, numbers and underscores") if errors: - raise ParsingException( + raise ParsingError( f"The metric name '{data['name']}' is invalid.
It {', '.join(e for e in errors)}" ) diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index 2fd7434bd87..ba15b9d32b6 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -249,6 +249,7 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract): printer_width: Optional[int] = None write_json: Optional[bool] = None warn_error: Optional[bool] = None + warn_error_options: Optional[Dict[str, Union[str, List[str]]]] = None log_format: Optional[str] = None debug: Optional[bool] = None version_check: Optional[bool] = None diff --git a/core/dbt/contracts/relation.py b/core/dbt/contracts/relation.py index e8cba2ad155..e557c358966 100644 --- a/core/dbt/contracts/relation.py +++ b/core/dbt/contracts/relation.py @@ -9,7 +9,7 @@ from dbt.dataclass_schema import dbtClassMixin, StrEnum from dbt.contracts.util import Replaceable -from dbt.exceptions import CompilationException, DataclassNotDict +from dbt.exceptions import CompilationError, DataclassNotDictError from dbt.utils import deep_merge @@ -43,10 +43,10 @@ def __getitem__(self, key): raise KeyError(key) from None def __iter__(self): - raise DataclassNotDict(self) + raise DataclassNotDictError(self) def __len__(self): - raise DataclassNotDict(self) + raise DataclassNotDictError(self) def incorporate(self, **kwargs): value = self.to_dict(omit_none=True) @@ -88,13 +88,11 @@ class Path(FakeAPIObject): def __post_init__(self): # handle pesky jinja2.Undefined sneaking in here and messing up render if not isinstance(self.database, (type(None), str)): - raise CompilationException("Got an invalid path database: {}".format(self.database)) + raise CompilationError("Got an invalid path database: {}".format(self.database)) if not isinstance(self.schema, (type(None), str)): - raise CompilationException("Got an invalid path schema: {}".format(self.schema)) + raise CompilationError("Got an invalid path schema: {}".format(self.schema)) if not isinstance(self.identifier, (type(None), str)): - raise CompilationException( - "Got an invalid path identifier: {}".format(self.identifier) - ) + raise CompilationError("Got an invalid path identifier: {}".format(self.identifier)) def get_lowered_part(self, key: ComponentName) -> Optional[str]: part = self.get_part(key) diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index 97c43396e33..9243750284f 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -7,7 +7,7 @@ Replaceable, schema_version, ) -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.events.functions import fire_event from dbt.events.types import TimingInfoCollected from dbt.events.proto_types import RunResultMsg, TimingInfoMsg @@ -343,14 +343,14 @@ def process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput # we know that this must be a SourceFreshnessResult if not isinstance(result, SourceFreshnessResult): - raise InternalException( + raise DbtInternalError( "Got {} instead of a SourceFreshnessResult for a " "non-error result in freshness execution!".format(type(result)) ) # if we're here, we must have a non-None freshness threshold criteria = result.node.freshness if criteria is None: - raise InternalException( + raise DbtInternalError( "Somehow evaluated a freshness result for a source that has no freshness criteria!"
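+    # Reaching this point with criteria None is a dbt bug rather than user error: only sources that define a freshness block are executed here.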
) return SourceFreshnessOutput( diff --git a/core/dbt/contracts/state.py b/core/dbt/contracts/state.py index 9940a0cb93d..cb135e241ac 100644 --- a/core/dbt/contracts/state.py +++ b/core/dbt/contracts/state.py @@ -3,7 +3,7 @@ from .results import RunResultsArtifact from .results import FreshnessExecutionResultArtifact from typing import Optional -from dbt.exceptions import IncompatibleSchemaException +from dbt.exceptions import IncompatibleSchemaError class PreviousState: @@ -19,7 +19,7 @@ def __init__(self, path: Path, current_path: Path): if manifest_path.exists() and manifest_path.is_file(): try: self.manifest = WritableManifest.read_and_check_versions(str(manifest_path)) - except IncompatibleSchemaException as exc: + except IncompatibleSchemaError as exc: exc.add_filename(str(manifest_path)) raise @@ -27,7 +27,7 @@ def __init__(self, path: Path, current_path: Path): if results_path.exists() and results_path.is_file(): try: self.results = RunResultsArtifact.read_and_check_versions(str(results_path)) - except IncompatibleSchemaException as exc: + except IncompatibleSchemaError as exc: exc.add_filename(str(results_path)) raise @@ -37,7 +37,7 @@ def __init__(self, path: Path, current_path: Path): self.sources = FreshnessExecutionResultArtifact.read_and_check_versions( str(sources_path) ) - except IncompatibleSchemaException as exc: + except IncompatibleSchemaError as exc: exc.add_filename(str(sources_path)) raise @@ -47,6 +47,6 @@ def __init__(self, path: Path, current_path: Path): self.sources_current = FreshnessExecutionResultArtifact.read_and_check_versions( str(sources_current_path) ) - except IncompatibleSchemaException as exc: + except IncompatibleSchemaError as exc: exc.add_filename(str(sources_current_path)) raise diff --git a/core/dbt/contracts/util.py b/core/dbt/contracts/util.py index 99f7a35c66d..fb2af2dac59 100644 --- a/core/dbt/contracts/util.py +++ b/core/dbt/contracts/util.py @@ -5,9 +5,9 @@ from dbt.clients.system import write_json, read_json from dbt import deprecations from dbt.exceptions import ( - InternalException, - RuntimeException, - IncompatibleSchemaException, + DbtInternalError, + DbtRuntimeError, + IncompatibleSchemaError, ) from dbt.version import __version__ from dbt.events.functions import get_invocation_id, get_metadata_vars @@ -123,7 +123,7 @@ def read(cls, path: str): try: data = read_json(path) except (EnvironmentError, ValueError) as exc: - raise RuntimeException( + raise DbtRuntimeError( f'Could not read {cls.__name__} at "{path}" as JSON: {exc}' ) from exc @@ -320,7 +320,7 @@ def read_and_check_versions(cls, path: str): try: data = read_json(path) except (EnvironmentError, ValueError) as exc: - raise RuntimeException( + raise DbtRuntimeError( f'Could not read {cls.__name__} at "{path}" as JSON: {exc}' ) from exc @@ -332,7 +332,7 @@ def read_and_check_versions(cls, path: str): previous_schema_version = data["metadata"]["dbt_schema_version"] # cls.dbt_schema_version is a SchemaVersion object if not cls.is_compatible_version(previous_schema_version): - raise IncompatibleSchemaException( + raise IncompatibleSchemaError( expected=str(cls.dbt_schema_version), found=previous_schema_version, ) @@ -357,7 +357,7 @@ class ArtifactMixin(VersionedSchema, Writable, Readable): def validate(cls, data): super().validate(data) if cls.dbt_schema_version is None: - raise InternalException("Cannot call from_dict with no schema version!") + raise DbtInternalError("Cannot call from_dict with no schema version!") class Identifier(ValidatedStringMixin): diff --git 
a/core/dbt/deps/git.py b/core/dbt/deps/git.py index 5d7a1331c58..a32f91ee158 100644 --- a/core/dbt/deps/git.py +++ b/core/dbt/deps/git.py @@ -9,7 +9,7 @@ GitPackage, ) from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path -from dbt.exceptions import ExecutableError, MultipleVersionGitDeps +from dbt.exceptions import ExecutableError, MultipleVersionGitDepsError from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import EnsureGitInstalled, DepsUnpinned @@ -143,7 +143,7 @@ def resolved(self) -> GitPinnedPackage: if len(requested) == 0: requested = {"HEAD"} elif len(requested) > 1: - raise MultipleVersionGitDeps(self.git, requested) + raise MultipleVersionGitDepsError(self.git, requested) return GitPinnedPackage( git=self.git, diff --git a/core/dbt/deps/registry.py b/core/dbt/deps/registry.py index f3398f4b16f..e1f39a7551d 100644 --- a/core/dbt/deps/registry.py +++ b/core/dbt/deps/registry.py @@ -10,10 +10,10 @@ ) from dbt.deps.base import PinnedPackage, UnpinnedPackage from dbt.exceptions import ( - DependencyException, - PackageNotFound, - PackageVersionNotFound, - VersionsNotCompatibleException, + DependencyError, + PackageNotFoundError, + PackageVersionNotFoundError, + VersionsNotCompatibleError, ) @@ -71,7 +71,7 @@ def __init__( def _check_in_index(self): index = registry.index_cached() if self.package not in index: - raise PackageNotFound(self.package) + raise PackageNotFoundError(self.package) @classmethod def from_contract(cls, contract: RegistryPackage) -> "RegistryUnpinnedPackage": @@ -95,9 +95,9 @@ def resolved(self) -> RegistryPinnedPackage: self._check_in_index() try: range_ = semver.reduce_versions(*self.versions) - except VersionsNotCompatibleException as e: + except VersionsNotCompatibleError as e: new_msg = "Version error for package {}: {}".format(self.name, e) - raise DependencyException(new_msg) from e + raise DependencyError(new_msg) from e should_version_check = bool(flags.VERSION_CHECK) dbt_version = get_installed_version() @@ -118,7 +118,9 @@ def resolved(self) -> RegistryPinnedPackage: target = None if not target: # raise an exception if no installable target version is found - raise PackageVersionNotFound(self.package, range_, installable, should_version_check) + raise PackageVersionNotFoundError( + self.package, range_, installable, should_version_check + ) latest_compatible = installable[-1] return RegistryPinnedPackage( package=self.package, version=target, version_latest=latest_compatible diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py index 323e2f562c1..db57ef0f641 100644 --- a/core/dbt/deps/resolver.py +++ b/core/dbt/deps/resolver.py @@ -2,10 +2,10 @@ from typing import Dict, List, NoReturn, Union, Type, Iterator, Set from dbt.exceptions import ( - DuplicateDependencyToRoot, - DuplicateProjectDependency, - MismatchedDependencyTypes, - InternalException, + DuplicateDependencyToRootError, + DuplicateProjectDependencyError, + MismatchedDependencyTypeError, + DbtInternalError, ) from dbt.config import Project, RuntimeConfig @@ -56,7 +56,7 @@ def __setitem__(self, key: BasePackage, value): self.packages[key_str] = value def _mismatched_types(self, old: UnpinnedPackage, new: UnpinnedPackage) -> NoReturn: - raise MismatchedDependencyTypes(new, old) + raise MismatchedDependencyTypeError(new, old) def incorporate(self, package: UnpinnedPackage): key: str = self._pick_key(package) @@ -80,7 +80,7 @@ def update_from(self, src: List[PackageContract]) -> None: elif isinstance(contract, RegistryPackage): 
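+                # Every supported PackageContract flavor needs an isinstance branch here; falling through to the else below is an internal error, not malformed user input.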
pkg = RegistryUnpinnedPackage.from_contract(contract) else: - raise InternalException("Invalid package type {}".format(type(contract))) + raise DbtInternalError("Invalid package type {}".format(type(contract))) self.incorporate(pkg) @classmethod @@ -107,9 +107,9 @@ def _check_for_duplicate_project_names( for package in final_deps: project_name = package.get_project_name(config, renderer) if project_name in seen: - raise DuplicateProjectDependency(project_name) + raise DuplicateProjectDependencyError(project_name) elif project_name == config.project_name: - raise DuplicateDependencyToRoot(project_name) + raise DuplicateDependencyToRootError(project_name) seen.add(project_name) diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle index 8aaad5e25b0..cbdd7fd930c 100644 Binary files a/core/dbt/docs/build/doctrees/environment.pickle and b/core/dbt/docs/build/doctrees/environment.pickle differ diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree index 3acd417b911..55bd0490c3b 100644 Binary files a/core/dbt/docs/build/doctrees/index.doctree and b/core/dbt/docs/build/doctrees/index.doctree differ diff --git a/core/dbt/docs/build/html/.buildinfo b/core/dbt/docs/build/html/.buildinfo index 39803f13c3e..f5b6f776592 100644 --- a/core/dbt/docs/build/html/.buildinfo +++ b/core/dbt/docs/build/html/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 1ee31fc16e025fb98598189ba2cb5fcb +config: e27d6c1c419f2f0af393858cdf674109 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js deleted file mode 100644 index 8549469dc29..00000000000 --- a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js +++ /dev/null @@ -1,134 +0,0 @@ -/* - * _sphinx_javascript_frameworks_compat.js - * ~~~~~~~~~~ - * - * Compatability shim for jQuery and underscores.js. - * - * WILL BE REMOVED IN Sphinx 6.0 - * xref RemovedInSphinx60Warning - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - - -/** - * small helper function to urldecode strings - * - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL - */ -jQuery.urldecode = function(x) { - if (!x) { - return x - } - return decodeURIComponent(x.replace(/\+/g, ' ')); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - span elements with the given class name.
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} diff --git a/core/dbt/docs/build/html/_static/alabaster.css b/core/dbt/docs/build/html/_static/alabaster.css index 0eddaeb07d1..517d0b29cb1 100644 --- a/core/dbt/docs/build/html/_static/alabaster.css +++ b/core/dbt/docs/build/html/_static/alabaster.css @@ -419,7 +419,9 @@ table.footnote td { } dl { - margin: 0; + margin-left: 0; + margin-right: 0; + margin-top: 0; padding: 0; } diff --git a/core/dbt/docs/build/html/_static/basic.css b/core/dbt/docs/build/html/_static/basic.css index 4e9a9f1faca..7577acb1ad1 100644 --- a/core/dbt/docs/build/html/_static/basic.css +++ b/core/dbt/docs/build/html/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @@ -324,6 +324,7 @@ aside.sidebar { p.sidebar-title { font-weight: bold; } + nav.contents, aside.topic, div.admonition, div.topic, blockquote { @@ -331,6 +332,7 @@ div.admonition, div.topic, blockquote { } /* -- topics ---------------------------------------------------------------- */ + nav.contents, aside.topic, div.topic { @@ -606,6 +608,7 @@ ol.simple p, ul.simple p { margin-bottom: 0; } + aside.footnote > span, div.citation > span { float: left; diff --git a/core/dbt/docs/build/html/_static/doctools.js b/core/dbt/docs/build/html/_static/doctools.js index 527b876ca63..d06a71d7518 100644 --- a/core/dbt/docs/build/html/_static/doctools.js +++ b/core/dbt/docs/build/html/_static/doctools.js @@ -4,7 +4,7 @@ * * Base JavaScript utilities for all Sphinx HTML documentation. * - * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ diff --git a/core/dbt/docs/build/html/_static/jquery-3.6.0.js b/core/dbt/docs/build/html/_static/jquery-3.6.0.js deleted file mode 100644 index fc6c299b73e..00000000000 --- a/core/dbt/docs/build/html/_static/jquery-3.6.0.js +++ /dev/null @@ -1,10881 +0,0 @@ -/*! - * jQuery JavaScript Library v3.6.0 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright OpenJS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2021-03-02T17:08Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). 
- // We don't want to classify *any* DOM node as a function. - // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 - // Plus for old WebKit, typeof returns "function" for HTML collections - // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) - return typeof obj === "function" && typeof obj.nodeType !== "number" && - typeof obj.item !== "function"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.6.0", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. 
- each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && 
proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. - globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. 
- support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), - function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); - } ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! - * Sizzle CSS Selector Engine v2.3.6 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2021-02-16 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem && elem.namespaceURI, - docElem = elem && ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
- function( tag, context ) {
- if ( typeof context.getElementsByTagName !== "undefined" ) {
- return context.getElementsByTagName( tag );
-
- // DocumentFragment nodes don't have gEBTN
- } else if ( support.qsa ) {
- return context.querySelectorAll( tag );
- }
- } :
-
- function( tag, context ) {
- var elem,
- tmp = [],
- i = 0,
-
- // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
- results = context.getElementsByTagName( tag );
-
- // Filter out possible comments
- if ( tag === "*" ) {
- while ( ( elem = results[ i++ ] ) ) {
- if ( elem.nodeType === 1 ) {
- tmp.push( elem );
- }
- }
-
- return tmp;
- }
- return results;
- };
-
- // Class
- Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
- if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
- return context.getElementsByClassName( className );
- }
- };
-
- /* QSA/matchesSelector
- ---------------------------------------------------------------------- */
-
- // QSA and matchesSelector support
-
- // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
- rbuggyMatches = [];
-
- // qSa(:focus) reports false when true (Chrome 21)
- // We allow this because of a bug in IE8/9 that throws an error
- // whenever `document.activeElement` is accessed on an iframe
- // So, we allow :focus to pass through QSA all the time to avoid the IE error
- // See https://bugs.jquery.com/ticket/13378
- rbuggyQSA = [];
-
- if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
-
- // Build QSA regex
- // Regex strategy adopted from Diego Perini
- assert( function( el ) {
-
- var input;
-
- // Select is set to empty string on purpose
- // This is to test IE's treatment of not explicitly
- // setting a boolean content attribute,
- // since its presence should be enough
- // https://bugs.jquery.com/ticket/12359
- docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
- "<select id='" + expando + "-\r\\' msallowcapture=''><option selected=''></option></select>";
-
- // Support: IE8, Opera 11-12.16
- // Nothing should be selected when empty strings follow ^= or $= or *=
- // The test attribute must be unknown in Opera but "safe" for WinRT
- // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
- if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
- rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
- }
-
- // Support: IE8
- // Boolean attributes and "value" are not treated correctly
- if ( !el.querySelectorAll( "[selected]" ).length ) {
- rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
- }
-
- // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
- if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
- rbuggyQSA.push( "~=" );
- }
-
- // Support: IE 11+, Edge 15 - 18+
- // IE 11/Edge don't find elements on a `[name='']` query in some cases.
- // Adding a temporary attribute to the document before the selection works
- // around the issue.
- // Interestingly, IE 10 & older don't seem to have the issue.
- input = document.createElement( "input" );
- input.setAttribute( "name", "" );
- el.appendChild( input );
- if ( !el.querySelectorAll( "[name='']" ).length ) {
- rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
- whitespace + "*(?:''|\"\")" );
- }
-
- // Webkit/Opera - :checked should return selected option elements
- // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
- // IE8 throws error here and will not see later tests
- if ( !el.querySelectorAll( ":checked" ).length ) {
- rbuggyQSA.push( ":checked" );
- }
-
- // Support: Safari 8+, iOS 8+
- // https://bugs.webkit.org/show_bug.cgi?id=136851
- // In-page `selector#id sibling-combinator selector` fails
- if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
- rbuggyQSA.push( ".#.+[+~]" );
- }
-
- // Support: Firefox <=3.6 - 5 only
- // Old Firefox doesn't throw on a badly-escaped identifier.
- el.querySelectorAll( "\\\f" );
- rbuggyQSA.push( "[\\r\\n\\f]" );
- } );
-
- assert( function( el ) {
- el.innerHTML = "<a href='' disabled='disabled'></a>" +
- "<select disabled='disabled'><option/></select>";
-
- // Support: Windows 8 Native Apps
- // The type and name attributes are restricted during .innerHTML assignment
- var input = document.createElement( "input" );
- input.setAttribute( "type", "hidden" );
- el.appendChild( input ).setAttribute( "name", "D" );
-
- // Support: IE8
- // Enforce case-sensitivity of name attribute
- if ( el.querySelectorAll( "[name=d]" ).length ) {
- rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
- }
-
- // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
- // IE8 throws error here and will not see later tests
- if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
- rbuggyQSA.push( ":enabled", ":disabled" );
- }
-
- // Support: IE9-11+
- // IE's :disabled selector does not pick up the children of disabled fieldsets
- docElem.appendChild( el ).disabled = true;
- if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
- rbuggyQSA.push( ":enabled", ":disabled" );
- }
-
- // Support: Opera 10 - 11 only
- // Opera 10-11 does not throw on post-comma invalid pseudos
- el.querySelectorAll( "*,:x" );
- rbuggyQSA.push( ",.*:" );
- } );
- }
-
- if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
- docElem.webkitMatchesSelector ||
- docElem.mozMatchesSelector ||
- docElem.oMatchesSelector ||
- docElem.msMatchesSelector ) ) ) ) {
-
- assert( function( el ) {
-
- // Check to see if it's possible to do matchesSelector
- // on a disconnected node (IE 9)
- support.disconnectedMatch = matches.call( el, "*" );
-
- // This should fail with an exception
- // Gecko does not error, returns false instead
- matches.call( el, "[s!='']:x" );
- rbuggyMatches.push( "!=", pseudos );
- } );
- }
-
- rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
- rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
-
- /* Contains
- ---------------------------------------------------------------------- */
- hasCompare = rnative.test( docElem.compareDocumentPosition );
-
- // Element contains another
- // Purposefully self-exclusive
- // As in, an element does not contain itself
- contains = hasCompare || rnative.test( docElem.contains ) ?
- function( a, b ) {
- var adown = a.nodeType === 9 ? a.documentElement : a,
- bup = b && b.parentNode;
- return a === bup || !!( bup && bup.nodeType === 1 && (
- adown.contains ?
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); - } : - function( a, b ) { - if ( b ) { - while ( ( b = b.parentNode ) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a == document || a.ownerDocument == preferredDoc && - contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b == document || b.ownerDocument == preferredDoc && - contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - /* eslint-disable eqeqeq */ - return a == document ? -1 : - b == document ? 1 : - /* eslint-enable eqeqeq */ - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( ( cur = cur.parentNode ) ) { - ap.unshift( cur ); - } - cur = b; - while ( ( cur = cur.parentNode ) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[ i ] === bp[ i ] ) { - i++; - } - - return i ? - - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[ i ], bp[ i ] ) : - - // Otherwise nodes in our document sort first - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- /* eslint-disable eqeqeq */ - ap[ i ] == preferredDoc ? -1 : - bp[ i ] == preferredDoc ? 1 : - /* eslint-enable eqeqeq */ - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || - match[ 5 ] || "" ).replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? 
- match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { - return true; - } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + - ")" + className + "(" + whitespace + "|$)" ) ) && classCache( - className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - /* eslint-disable max-len */ - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - /* eslint-enable max-len */ - - }; - }, - - "CHILD": function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - "not": markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element (issue #299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - "has": markFunction( function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - } ), - - "contains": markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && - ( !document.hasFocus || document.hasFocus() ) && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return ( nodeName === "input" && !!elem.checked ) || - ( nodeName === "option" && !!elem.selected ); - }, - - "selected": function( elem ) { - - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos[ "empty" ]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo( function() { - return [ 0 ]; - } ), - - "last": createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - } ), - - "even": createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "odd": createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rcombinators.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || - ( outerCache[ elem.uniqueID ] = {} ); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = uniqueCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( - selector || "*", - context.nodeType ? [ context ] : context, - [] - ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( ( elem = temp[ i ] ) ) { - matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) ) { - - // Restore matcherIn since elem is not yet a final match - temp.push( ( matcherIn[ i ] = elem ) ); - } - } - postFinder( null, ( matcherOut = [] ), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) && - ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { - - seed[ temp ] = !( results[ temp ] = elem ); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - } ); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[ 0 ].type ], - implicitRelative = leadingRelative || Expr.relative[ " " ], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - ( checkContext = context ).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[ j ].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens - .slice( 0, i - 1 ) - .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( - selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) - ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find[ "ID" ]( token.matches[ 0 ] - .replace( runescape, funescape ), context ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length;
- while ( i-- ) {
- token = tokens[ i ];
-
- // Abort if we hit a combinator
- if ( Expr.relative[ ( type = token.type ) ] ) {
- break;
- }
- if ( ( find = Expr.find[ type ] ) ) {
-
- // Search, expanding context for leading sibling combinators
- if ( ( seed = find(
- token.matches[ 0 ].replace( runescape, funescape ),
- rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
- context
- ) ) ) {
-
- // If seed is empty or no tokens remain, we can return early
- tokens.splice( i, 1 );
- selector = seed.length && toSelector( tokens );
- if ( !selector ) {
- push.apply( results, seed );
- return results;
- }
-
- break;
- }
- }
- }
- }
-
- // Compile and execute a filtering function if one is not provided
- // Provide `match` to avoid retokenization if we modified the selector above
- ( compiled || compile( selector, match ) )(
- seed,
- context,
- !documentIsHTML,
- results,
- !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
- );
- return results;
-};
-
-// One-time assignments
-
-// Sort stability
-support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
-
-// Support: Chrome 14-35+
-// Always assume duplicates if they aren't passed to the comparison function
-support.detectDuplicates = !!hasDuplicate;
-
-// Initialize against the default document
-setDocument();
-
-// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
-// Detached nodes confoundingly follow *each other*
-support.sortDetached = assert( function( el ) {
-
- // Should return 1, but returns 4 (following)
- return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
-} );
-
-// Support: IE<8
-// Prevent attribute/property "interpolation"
-// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
-if ( !assert( function( el ) {
- el.innerHTML = "<a href='#'></a>";
- return el.firstChild.getAttribute( "href" ) === "#";
-} ) ) {
- addHandle( "type|href|height|width", function( elem, name, isXML ) {
- if ( !isXML ) {
- return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
- }
- } );
-}
-
-// Support: IE<9
-// Use defaultValue in place of getAttribute("value")
-if ( !support.attributes || !assert( function( el ) {
- el.innerHTML = "<input/>";
- el.firstChild.setAttribute( "value", "" );
- return el.firstChild.getAttribute( "value" ) === "";
-} ) ) {
- addHandle( "value", function( elem, _name, isXML ) {
- if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
- return elem.defaultValue;
- }
- } );
-}
-
-// Support: IE<9
-// Use getAttributeNode to fetch booleans when getAttribute lies
-if ( !assert( function( el ) {
- return el.getAttribute( "disabled" ) == null;
-} ) ) {
- addHandle( booleans, function( elem, name, isXML ) {
- var val;
- if ( !isXML ) {
- return elem[ name ] === true ? name.toLowerCase() :
- ( val = elem.getAttributeNode( name ) ) && val.specified ?
- val.value : - null; - } - } ); -} - -return Sizzle; - -} )( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -} -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) :
- selector || [],
- false
- ).length;
- }
-} );
-
-
-// Initialize a jQuery object
-
-
-// A central reference to the root jQuery(document)
-var rootjQuery,
-
- // A simple way to check for HTML strings
- // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
- // Strict HTML recognition (#11290: must start with <)
- // Shortcut simple #id case for speed
- rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
- init = jQuery.fn.init = function( selector, context, root ) {
- var match, elem;
-
- // HANDLE: $(""), $(null), $(undefined), $(false)
- if ( !selector ) {
- return this;
- }
-
- // Method init() accepts an alternate rootjQuery
- // so migrate can support jQuery.sub (gh-2101)
- root = root || rootjQuery;
-
- // Handle HTML strings
- if ( typeof selector === "string" ) {
- if ( selector[ 0 ] === "<" &&
- selector[ selector.length - 1 ] === ">" &&
- selector.length >= 3 ) {
-
- // Assume that strings that start and end with <> are HTML and skip the regex check
- match = [ null, selector, null ];
-
- } else {
- match = rquickExpr.exec( selector );
- }
-
- // Match html or make sure no context is specified for #id
- if ( match && ( match[ 1 ] || !context ) ) {
-
- // HANDLE: $(html) -> $(array)
- if ( match[ 1 ] ) {
- context = context instanceof jQuery ? context[ 0 ] : context;
-
- // Option to run scripts is true for back-compat
- // Intentionally let the error be thrown if parseHTML is not present
- jQuery.merge( this, jQuery.parseHTML(
- match[ 1 ],
- context && context.nodeType ? context.ownerDocument || context : document,
- true
- ) );
-
- // HANDLE: $(html, props)
- if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
- for ( match in context ) {
-
- // Properties of context are called as methods if possible
- if ( isFunction( this[ match ] ) ) {
- this[ match ]( context[ match ] );
-
- // ...and otherwise set as attributes
- } else {
- this.attr( match, context[ match ] );
- }
- }
- }
-
- return this;
-
- // HANDLE: $(#id)
- } else {
- elem = document.getElementById( match[ 2 ] );
-
- if ( elem ) {
-
- // Inject the element directly into the jQuery object
- this[ 0 ] = elem;
- this.length = 1;
- }
- return this;
- }
-
- // HANDLE: $(expr, $(...))
- } else if ( !context || context.jquery ) {
- return ( context || root ).find( selector );
-
- // HANDLE: $(expr, context)
- // (which is just equivalent to: $(context).find(expr)
- } else {
- return this.constructor( context ).find( selector );
- }
-
- // HANDLE: $(DOMElement)
- } else if ( selector.nodeType ) {
- this[ 0 ] = selector;
- this.length = 1;
- return this;
-
- // HANDLE: $(function)
- // Shortcut for document ready
- } else if ( isFunction( selector ) ) {
- return root.ready !== undefined ?
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null;
- },
- parents: function( elem ) {
- return dir( elem, "parentNode" );
- },
- parentsUntil: function( elem, _i, until ) {
- return dir( elem, "parentNode", until );
- },
- next: function( elem ) {
- return sibling( elem, "nextSibling" );
- },
- prev: function( elem ) {
- return sibling( elem, "previousSibling" );
- },
- nextAll: function( elem ) {
- return dir( elem, "nextSibling" );
- },
- prevAll: function( elem ) {
- return dir( elem, "previousSibling" );
- },
- nextUntil: function( elem, _i, until ) {
- return dir( elem, "nextSibling", until );
- },
- prevUntil: function( elem, _i, until ) {
- return dir( elem, "previousSibling", until );
- },
- siblings: function( elem ) {
- return siblings( ( elem.parentNode || {} ).firstChild, elem );
- },
- children: function( elem ) {
- return siblings( elem.firstChild );
- },
- contents: function( elem ) {
- if ( elem.contentDocument != null &&
-
- // Support: IE 11+
- // <object> elements with no `data` attribute has an object
- // `contentDocument` with a `null` prototype.
- getProto( elem.contentDocument ) ) {
-
- return elem.contentDocument;
- }
-
- // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
- // Treat the template element as a regular one in browsers that
- // don't support it.
- if ( nodeName( elem, "template" ) ) {
- elem = elem.content || elem;
- }
-
- return jQuery.merge( [], elem.childNodes );
- }
-}, function( name, fn ) {
- jQuery.fn[ name ] = function( until, selector ) {
- var matched = jQuery.map( this, fn, until );
-
- if ( name.slice( -5 ) !== "Until" ) {
- selector = until;
- }
-
- if ( selector && typeof selector === "string" ) {
- matched = jQuery.filter( selector, matched );
- }
-
- if ( this.length > 1 ) {
-
- // Remove duplicates
- if ( !guaranteedUnique[ name ] ) {
- jQuery.uniqueSort( matched );
- }
-
- // Reverse order for parents* and prev-derivatives
- if ( rparentsprev.test( name ) ) {
- matched.reverse();
- }
- }
-
- return this.pushStack( matched );
- };
-} );
-var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
-
-
-
-// Convert String-formatted options into Object-formatted ones
-function createOptions( options ) {
- var object = {};
- jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
- object[ flag ] = true;
- } );
- return object;
-}
-
-/*
- * Create a callback list using the following parameters:
- *
- * options: an optional list of space-separated options that will change how
- * the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- * once: will ensure the callback list can only be fired once (like a Deferred)
- *
- * memory: will keep track of previous values and will call any callback added
- * after the list has been fired right away with the latest "memorized"
- * values (like a Deferred)
- *
- * unique: will ensure a callback can only be added once (no duplicate in the list)
- *
- * stopOnFalse: interrupt callings when a callback returns false
- *
- */
-jQuery.Callbacks = function( options ) {
-
- // Convert options from String-formatted to Object-formatted if needed
- // (we check in cache first)
- options = typeof options === "string" ?
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the primary Deferred - primary = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - primary.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( primary.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return primary.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); - } - - return primary.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
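	// [Editorial note] An illustrative aside, not part of the original file:
	// the public data()/removeData() above operate on the dataUser cache,
	// while the underscored pair below mirrors them against dataPriv,
	// e.g. (for some assumed element `el`):
	//
	//     jQuery.data( el, "role", "tab" ); // user-visible cache
	//     jQuery._data( el, "events" );     // internal event bookkeeping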
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide();
-	}
-
-	return this.each( function() {
-		if ( isHiddenWithinTree( this ) ) {
-			jQuery( this ).show();
-		} else {
-			jQuery( this ).hide();
-		}
-	} );
-	}
-} );
-var rcheckableType = ( /^(?:checkbox|radio)$/i );
-
-var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
-
-var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
-
-
-
-( function() {
-	var fragment = document.createDocumentFragment(),
-		div = fragment.appendChild( document.createElement( "div" ) ),
-		input = document.createElement( "input" );
-
-	// Support: Android 4.0 - 4.3 only
-	// Check state lost if the name is set (#11217)
-	// Support: Windows Web Apps (WWA)
-	// `name` and `type` must use .setAttribute for WWA (#14901)
-	input.setAttribute( "type", "radio" );
-	input.setAttribute( "checked", "checked" );
-	input.setAttribute( "name", "t" );
-
-	div.appendChild( input );
-
-	// Support: Android <=4.1 only
-	// Older WebKit doesn't clone checked state correctly in fragments
-	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
-
-	// Support: IE <=11 only
-	// Make sure textarea (and checkbox) defaultValue is properly cloned
-	div.innerHTML = "<textarea>x</textarea>";
-	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
-
-	// Support: IE <=9 only
-	// IE <=9 replaces <option> tags with their contents when inserted outside of
-	// the select element.
-	div.innerHTML = "<option></option>";
-	support.option = !!div.lastChild;
-} )();
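// [Editorial note] An illustrative sketch, not part of the original file: the
// support flags computed in the test above gate workarounds later in this
// module. For instance, `support.checkClone` decides whether domManip()
// (defined below) may clone a fragment containing checked checkboxes, roughly:
//
//     if ( valueIsFunction ||
//             ( l > 1 && typeof value === "string" &&
//                 !support.checkClone && rchecked.test( value ) ) ) {
//         // fall back to per-element processing instead of cloning
//     }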
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// Support: IE <=9 only -if ( !support.option ) { - wrapMap.optgroup = wrapMap.option = [ 1, "" ]; -} - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are asynchronous, except when they are no-op. 
-// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
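				// [Editorial note] An illustrative aside, not part of the original
				// file: a handler bound via .on( "click.menu", fn ) carries the
				// namespace "menu". A plain .trigger( "click" ) leaves
				// event.rnamespace empty and runs it; .trigger( "click.other" )
				// builds an rnamespace that "menu" fails to match, so the test
				// below skips the handler.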
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. - saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - - // Support: Chrome 86+ - // In Chrome, if an element having a focusout handler is blurred by - // clicking outside of it, it invokes the handler synchronously. 
If - // that handler calls `.remove()` on the element, the data is cleared, - // leaving `result` undefined. We need to guard against this. - return result && result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - which: true -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - // Suppress native focus or blur as it's already being fired - // in leverageNative. - _default: function() { - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). 
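// [Editorial note] An illustrative sketch, not part of the original file:
// because of the delegateType/bindType remapping registered below, delegated
// mouseenter handlers work even though native mouseenter does not bubble:
//
//     jQuery( document ).on( "mouseenter", ".menu-item", function() {
//         jQuery( this ).addClass( "hover" );
//     } );
//
// is bound as a bubbling "mouseover" listener, and the handle() wrapper fires
// it only when the pointer actually crosses the .menu-item boundary
// (relatedTarget outside the target). The ".menu-item" selector is
// hypothetical.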
-jQuery.each( {
-	mouseenter: "mouseover",
-	mouseleave: "mouseout",
-	pointerenter: "pointerover",
-	pointerleave: "pointerout"
-}, function( orig, fix ) {
-	jQuery.event.special[ orig ] = {
-		delegateType: fix,
-		bindType: fix,
-
-		handle: function( event ) {
-			var ret,
-				target = this,
-				related = event.relatedTarget,
-				handleObj = event.handleObj;
-
-			// For mouseenter/leave call the handler if related is outside the target.
-			// NB: No relatedTarget if the mouse left/entered the browser window
-			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
-				event.type = handleObj.origType;
-				ret = handleObj.handler.apply( this, arguments );
-				event.type = fix;
-			}
-			return ret;
-		}
-	};
-} );
-
-jQuery.fn.extend( {
-
-	on: function( types, selector, data, fn ) {
-		return on( this, types, selector, data, fn );
-	},
-	one: function( types, selector, data, fn ) {
-		return on( this, types, selector, data, fn, 1 );
-	},
-	off: function( types, selector, fn ) {
-		var handleObj, type;
-		if ( types && types.preventDefault && types.handleObj ) {
-
-			// ( event )  dispatched jQuery.Event
-			handleObj = types.handleObj;
-			jQuery( types.delegateTarget ).off(
-				handleObj.namespace ?
-					handleObj.origType + "." + handleObj.namespace :
-					handleObj.origType,
-				handleObj.selector,
-				handleObj.handler
-			);
-			return this;
-		}
-		if ( typeof types === "object" ) {
-
-			// ( types-object [, selector] )
-			for ( type in types ) {
-				this.off( type, selector, types[ type ] );
-			}
-			return this;
-		}
-		if ( selector === false || typeof selector === "function" ) {
-
-			// ( types [, fn] )
-			fn = selector;
-			selector = undefined;
-		}
-		if ( fn === false ) {
-			fn = returnFalse;
-		}
-		return this.each( function() {
-			jQuery.event.remove( this, types, fn, selector );
-		} );
-	}
-} );
-
-
-var
-
-	// Support: IE <=10 - 11, Edge 12 - 13 only
-	// In IE/Edge using regex groups here causes severe slowdowns.
-	// See https://connect.microsoft.com/IE/feedback/details/1736512/
-	rnoInnerhtml = /<script|<style|<link/i,
-
-	// checked="checked" or checked
-	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
-
-	rcleanScript = /^\s*<!\[CDATA\[|\]\]>\s*$/g;
-
-// Prefer a tbody over its parent table for containing new rows
-function manipulationTarget( elem, content ) {
-	if ( nodeName( elem, "table" ) &&
-		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
-
-		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
-	}
-
-	return elem;
-}
-
-// Replace/restore the type attribute of script elements for safe DOM manipulation
-function disableScript( elem ) {
-	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
-	return elem;
-}
-function restoreScript( elem ) {
-	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
-		elem.type = elem.type.slice( 5 );
-	} else {
-		elem.removeAttribute( "type" );
-	}
-
-	return elem;
-}
-
-function cloneCopyEvent( src, dest ) {
-	var i, l, type, pdataOld, udataOld, udataCur, events;
-
-	if ( dest.nodeType !== 1 ) {
-		return;
-	}
-
-	// 1. Copy private data: events, handlers, etc.
-	if ( dataPriv.hasData( src ) ) {
-		pdataOld = dataPriv.get( src );
-		events = pdataOld.events;
-
-		if ( events ) {
-			dataPriv.remove( dest, "handle events" );
-
-			for ( type in events ) {
-				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
-					jQuery.event.add( dest, type, events[ type ][ i ] );
-				}
-			}
-		}
-	}
-
-	// 2.
Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). - for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - 
support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! - // - // Support: Firefox 70+ - // Only Firefox includes border widths - // in computed dimensions. (gh-4529) - reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; - tr.style.cssText = "border:1px solid"; - - // Support: Chrome 86+ - // Height set through cssText does not get applied. - // Computed height then comes back as 0. - tr.style.height = "1px"; - trChild.style.height = "9px"; - - // Support: Android 8 Chrome 86+ - // In our bodyBackground.html iframe, - // display for all div elements is set to "inline", - // which causes a problem only in Android 8 Chrome 86. - // Ensuring the div is display: block - // gets around this issue. - trChild.style.display = "block"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + - parseInt( trStyle.borderTopWidth, 10 ) + - parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. 
- // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
- !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. - // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
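// [Editor's note — annotation, not part of the removed jQuery source.]
// A short usage sketch of the name handling below: ordinary names are
// camelCased and routed through finalPropName() for vendor mapping,
// while "--*" custom properties skip that step because they are
// case-sensitive and user-defined. The element and values here are
// illustrative only:
//
//   jQuery( elem ).css( "user-select", "none" );   // may map to WebkitUserSelect
//   jQuery( elem ).css( "--theme-color", "#08c" ); // set via style.setProperty()
//   jQuery( elem ).css( "--theme-color" );         // read via getPropertyValue()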
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. 
- ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? - boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
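// [Editor's note — annotation, not part of the removed jQuery source.]
// Sketch of the restriction recorded below: before a width/height
// animation the three overflow values are saved, the element is clamped
// to overflow:hidden while it resizes, and the saved values are restored
// when the animation settles. "#panel" is a placeholder selector:
//
//   jQuery( "#panel" ).slideUp( 200 );
//   // mid-slide:  style.overflow === "hidden"
//   // afterwards: overflow / overflowX / overflowY are put back as saved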
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
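// [Editor's note — annotation, not part of the removed jQuery source.]
// Worked example of the expand pass: a shorthand property animated via
// the margin/padding/border cssHooks fans out to its long-hand parts,
// all sharing one easing ("easings" is a placeholder map):
//
//   propFilter( { padding: "5px 10px" }, easings );
//   // props becomes { paddingTop: "5px",   paddingRight: "10px",
//   //                 paddingBottom: "5px", paddingLeft: "10px" }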
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
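// [Editor's note — annotation, not part of the removed jQuery source.]
// Sketch of the default-action step below: once handlers along the path
// have run and nothing called preventDefault(), a native method with the
// same name as the event is invoked on the target. "#myform" is a
// placeholder selector:
//
//   jQuery( "#myform" ).trigger( "submit" );        // handlers, then form.submit()
//   jQuery( "#myform" ).triggerHandler( "submit" ); // handlers only, no default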
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml, parserErrorElem; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. - try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) {} - - parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; - if ( !xml || parserErrorElem ) { - jQuery.error( "Invalid XML: " + ( - parserErrorElem ? 
- jQuery.map( parserErrorElem.childNodes, function( el ) { - return el.textContent; - } ).join( "\n" ) : - data - ) ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ).filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ).map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - -originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } - } 
); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If prev 
can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script but not if jsonp - if ( !isSuccess && - jQuery.inArray( "script", s.dataTypes ) > -1 && - jQuery.inArray( "json", s.dataTypes ) < 0 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - - - @@ -90,8 +87,8 @@

Quick search

©2022, dbt Labs. |
-Powered by Sphinx 5.3.0
-& Alabaster 0.7.12
+Powered by Sphinx 6.1.3
+& Alabaster 0.7.13
diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html
index d4238bb08c3..fd48833b877 100644
--- a/core/dbt/docs/build/html/index.html
+++ b/core/dbt/docs/build/html/index.html
@@ -10,9 +10,6 @@
-
-
-
@@ -321,7 +318,7 @@

project_dir

skip_profile_setup

Type: boolean

-Skip interative profile setup.
+Skip interactive profile setup.

target

@@ -840,8 +837,8 @@

Quick search

©2022, dbt Labs. |
-Powered by Sphinx 5.3.0
-& Alabaster 0.7.12
+Powered by Sphinx 6.1.3
+& Alabaster 0.7.13
|
-
-
-
@@ -109,8 +106,8 @@

Related Topics

©2022, dbt Labs. | - Powered by
Sphinx 5.3.0 - & Alabaster 0.7.12 + Powered by Sphinx 6.1.3 + & Alabaster 0.7.13 diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js index 25dd9fd3af5..62af17602dc 100644 --- a/core/dbt/docs/build/html/searchindex.js +++ b/core/dbt/docs/build/html/searchindex.js @@ -1 +1 @@ -Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "string": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "from": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "select": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "even": 0, "thei": 0, "those": 0, "have": 0, "been": 0, "explicitli": 0, "path": 0, "configur": 0, "log": 0, "onli": 0, "appli": 0, "thi": 0, "current": 0, "overrid": 0, "dbt_log_path": 0, "i": 0, "includ": 0, "which": 0, "load": 0, "dbt_project": 0, "yml": 0, "directori": 0, "look": 0, "file": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "The": 0, "name": 0, "us": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "project": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "databas": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "argument": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "avail": 0, "inform": 0, "skip": 0, "inter": 0, "setup": 0, "metric": 0, "analysi": 0, "exposur": 0, "macro": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "command": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "full_refresh": 0, "indirect_select": 0, "log_path": 0, "model": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "resource_typ": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "log_path": 
[[0, "build|log_path"], [0, "compile|log_path"], [0, "parse|log_path"], [0, "run|log_path"], [0, "seed|log_path"], [0, "test|log_path"]], "models": [[0, "build|models"], [0, "compile|models"], [0, "list|models"], [0, "run|models"], [0, "seed|models"], [0, "snapshot|models"], [0, "test|models"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"]], "output": [[0, "list|output"]], "output_keys": [[0, "list|output_keys"]], "resource_type": [[0, "list|resource_type"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, 
"parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file +Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "string": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "from": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "select": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "even": 0, "thei": 0, "those": 0, "have": 0, "been": 0, "explicitli": 0, "path": 0, "configur": 0, "log": 0, "onli": 0, "appli": 0, "thi": 0, "current": 0, "overrid": 0, "dbt_log_path": 0, "i": 0, "includ": 0, "which": 0, "load": 0, "dbt_project": 0, "yml": 0, "directori": 0, "look": 0, "file": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "The": 0, "name": 0, "us": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "project": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "databas": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "argument": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "avail": 0, "inform": 0, "skip": 0, "interact": 0, "setup": 0, "metric": 0, "analysi": 0, "exposur": 0, "macro": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "command": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "full_refresh": 0, "indirect_select": 0, "log_path": 0, "model": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "resource_typ": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, 
"list|indirect_selection"], [0, "test|indirect_selection"]], "log_path": [[0, "build|log_path"], [0, "compile|log_path"], [0, "parse|log_path"], [0, "run|log_path"], [0, "seed|log_path"], [0, "test|log_path"]], "models": [[0, "build|models"], [0, "compile|models"], [0, "list|models"], [0, "run|models"], [0, "seed|models"], [0, "snapshot|models"], [0, "test|models"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"]], "output": [[0, "list|output"]], "output_keys": [[0, "list|output_keys"]], "resource_type": [[0, "list|resource_type"]], "Command: parse": [[0, "dbt-section"]], 
"compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file diff --git a/core/dbt/events/base_types.py b/core/dbt/events/base_types.py index db74016099a..fbd35b58fa1 100644 --- a/core/dbt/events/base_types.py +++ b/core/dbt/events/base_types.py @@ -3,6 +3,13 @@ import os import threading from datetime import datetime +import dbt.events.proto_types as pt +import sys + +if sys.version_info >= (3, 8): + from typing import Protocol +else: + from typing_extensions import Protocol # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # These base types define the _required structure_ for the concrete event # @@ -58,25 +65,20 @@ class EventLevel(str, Enum): class BaseEvent: """BaseEvent for proto message generated python events""" - def __post_init__(self): - super().__post_init__() - if not self.info.level: - self.info.level = self.level_tag() - assert self.info.level in ["info", "warn", "error", "debug", "test"] - if not hasattr(self.info, "msg") or not self.info.msg: - self.info.msg = self.message() - self.info.invocation_id = get_invocation_id() - self.info.extra = get_global_metadata_vars() - self.info.ts = datetime.utcnow() - self.info.pid = get_pid() - self.info.thread = get_thread_name() - self.info.code = self.code() - self.info.name = type(self).__name__ - - # This is here because although we know that info should always - # exist, mypy doesn't. - def log_level(self) -> EventLevel: - return self.info.level # type: ignore + # def __post_init__(self): + # super().__post_init__() + # if not self.info.level: + # self.info.level = self.level_tag() + # assert self.info.level in ["info", "warn", "error", "debug", "test"] + # if not hasattr(self.info, "msg") or not self.info.msg: + # self.info.msg = self.message() + # self.info.invocation_id = get_invocation_id() + # self.info.extra = get_global_metadata_vars() + # self.info.ts = datetime.utcnow() + # self.info.pid = get_pid() + # self.info.thread = get_thread_name() + # self.info.code = self.code() + # self.info.name = type(self).__name__ def level_tag(self) -> EventLevel: return EventLevel.DEBUG @@ -84,6 +86,37 @@ def level_tag(self) -> EventLevel: def message(self) -> str: raise Exception("message() not implemented for event") + def code(self) -> str: + raise Exception("code() not implemented for event") + + +class EventMsg(Protocol): + info: pt.EventInfo + data: BaseEvent + + +def msg_from_base_event(event: BaseEvent, level: EventLevel = None): + + msg_class_name = f"{type(event).__name__}Msg" + msg_cls = getattr(pt, msg_class_name) + + # level in EventInfo must be a string, not an EventLevel + msg_level: str = level.value if level else event.level_tag().value + assert msg_level is not None + event_info = pt.EventInfo( + level=msg_level, + msg=event.message(), + invocation_id=get_invocation_id(), + extra=get_global_metadata_vars(), + ts=datetime.utcnow(), + pid=get_pid(), + thread=get_thread_name(), + code=event.code(), + name=type(event).__name__, + ) + new_event = msg_cls(data=event, info=event_info) + return new_event + # DynamicLevel requires that the level be supplied on the # event construction call using the "info" function from functions.py diff --git 
a/core/dbt/events/eventmgr.py b/core/dbt/events/eventmgr.py index 97a7d5d4360..10bf225bef7 100644 --- a/core/dbt/events/eventmgr.py +++ b/core/dbt/events/eventmgr.py @@ -9,16 +9,16 @@ from typing import Any, Callable, List, Optional, TextIO from uuid import uuid4 -from dbt.events.base_types import BaseEvent, EventLevel +from dbt.events.base_types import BaseEvent, EventLevel, msg_from_base_event, EventMsg # A Filter is a function which takes a BaseEvent and returns True if the event # should be logged, False otherwise. -Filter = Callable[[BaseEvent], bool] +Filter = Callable[[EventMsg], bool] # Default filter which logs every event -def NoFilter(_: BaseEvent) -> bool: +def NoFilter(_: EventMsg) -> bool: return True @@ -47,13 +47,6 @@ class LineFormat(Enum): } -# We should consider fixing the problem, but log_level() can return a string for -# DynamicLevel events, even thought it is supposed to return an EventLevel. This -# function gets a string for the level, no matter what. -def _get_level_str(e: BaseEvent) -> str: - return e.log_level().value if isinstance(e.log_level(), EventLevel) else str(e.log_level()) - - # We need this function for now because the numeric log severity levels in # Python do not match those for logbook, so we have to explicitly call the # correct function by name. @@ -113,14 +106,14 @@ def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: self._python_logger = log - def create_line(self, e: BaseEvent) -> str: + def create_line(self, msg: EventMsg) -> str: raise NotImplementedError() - def write_line(self, e: BaseEvent): - line = self.create_line(e) - python_level = _log_level_map[e.log_level()] + def write_line(self, msg: EventMsg): + line = self.create_line(msg) + python_level = _log_level_map[EventLevel(msg.info.level)] if self._python_logger is not None: - send_to_logger(self._python_logger, _get_level_str(e), line) + send_to_logger(self._python_logger, msg.info.level, line) elif self._stream is not None and _log_level_map[self.level] <= python_level: self._stream.write(line + "\n") @@ -138,24 +131,26 @@ def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: self.use_colors = config.use_colors self.use_debug_format = config.line_format == LineFormat.DebugText - def create_line(self, e: BaseEvent) -> str: - return self.create_debug_line(e) if self.use_debug_format else self.create_info_line(e) + def create_line(self, msg: EventMsg) -> str: + return self.create_debug_line(msg) if self.use_debug_format else self.create_info_line(msg) - def create_info_line(self, e: BaseEvent) -> str: + def create_info_line(self, msg: EventMsg) -> str: ts: str = datetime.utcnow().strftime("%H:%M:%S") - scrubbed_msg: str = self.scrubber(e.message()) # type: ignore + scrubbed_msg: str = self.scrubber(msg.info.msg) # type: ignore return f"{self._get_color_tag()}{ts} {scrubbed_msg}" - def create_debug_line(self, e: BaseEvent) -> str: + def create_debug_line(self, msg: EventMsg) -> str: log_line: str = "" # Create a separator if this is the beginning of an invocation # TODO: This is an ugly hack, get rid of it if we can - if type(e).__name__ == "MainReportVersion": + if msg.info.name == "MainReportVersion": separator = 30 * "=" - log_line = f"\n\n{separator} {datetime.utcnow()} | {self.event_manager.invocation_id} {separator}\n" - ts: str = datetime.utcnow().strftime("%H:%M:%S.%f") - scrubbed_msg: str = self.scrubber(e.message()) # type: ignore - level = _get_level_str(e) + log_line = ( + f"\n\n{separator} {msg.info.ts} | 
{self.event_manager.invocation_id} {separator}\n" + ) + ts: str = msg.info.ts.strftime("%H:%M:%S.%f") + scrubbed_msg: str = self.scrubber(msg.info.msg) # type: ignore + level = msg.info.level log_line += ( f"{self._get_color_tag()}{ts} [{level:<5}]{self._get_thread_name()} {scrubbed_msg}" ) @@ -175,11 +170,11 @@ def _get_thread_name(self) -> str: class _JsonLogger(_Logger): - def create_line(self, e: BaseEvent) -> str: - from dbt.events.functions import event_to_dict + def create_line(self, msg: EventMsg) -> str: + from dbt.events.functions import msg_to_dict - event_dict = event_to_dict(e) - raw_log_line = json.dumps(event_dict, sort_keys=True) + msg_dict = msg_to_dict(msg) + raw_log_line = json.dumps(msg_dict, sort_keys=True) line = self.scrubber(raw_log_line) # type: ignore return line @@ -187,16 +182,17 @@ def create_line(self, e: BaseEvent) -> str: class EventManager: def __init__(self) -> None: self.loggers: List[_Logger] = [] - self.callbacks: List[Callable[[BaseEvent], None]] = [] + self.callbacks: List[Callable[[EventMsg], None]] = [] self.invocation_id: str = str(uuid4()) - def fire_event(self, e: BaseEvent) -> None: + def fire_event(self, e: BaseEvent, level: EventLevel = None) -> None: + msg = msg_from_base_event(e, level=level) for logger in self.loggers: - if logger.filter(e): # type: ignore - logger.write_line(e) + if logger.filter(msg): # type: ignore + logger.write_line(msg) for callback in self.callbacks: - callback(e) + callback(msg) def add_logger(self, config: LoggerConfig): logger = ( diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index f061606632e..f32287c3049 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -1,9 +1,8 @@ import betterproto from dbt.constants import METADATA_ENV_PREFIX -from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut +from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut, EventMsg from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter from dbt.events.helpers import env_secrets, scrub_secrets -from dbt.events.proto_types import EventInfo from dbt.events.types import EmptyLine import dbt.flags as flags from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing @@ -59,14 +58,14 @@ def _get_stdout_config(level: Optional[EventLevel] = None) -> LoggerConfig: def _stdout_filter( - log_cache_events: bool, debug_mode: bool, quiet_mode: bool, evt: BaseEvent + log_cache_events: bool, debug_mode: bool, quiet_mode: bool, msg: EventMsg ) -> bool: return ( - not isinstance(evt, NoStdOut) - and (not isinstance(evt, Cache) or log_cache_events) - and (evt.log_level() != EventLevel.DEBUG or debug_mode) - and (evt.log_level() == EventLevel.ERROR or not quiet_mode) - and not (flags.LOG_FORMAT == "json" and type(evt) == EmptyLine) + not isinstance(msg.data, NoStdOut) + and (not isinstance(msg.data, Cache) or log_cache_events) + and (EventLevel(msg.info.level) != EventLevel.DEBUG or debug_mode) + and (EventLevel(msg.info.level) == EventLevel.ERROR or not quiet_mode) + and not (flags.LOG_FORMAT == "json" and type(msg.data) == EmptyLine) ) @@ -82,18 +81,18 @@ def _get_logfile_config(log_path: str) -> LoggerConfig: ) -def _logfile_filter(log_cache_events: bool, evt: BaseEvent) -> bool: +def _logfile_filter(log_cache_events: bool, msg: EventMsg) -> bool: return ( - not isinstance(evt, NoFile) - and not (isinstance(evt, Cache) and not log_cache_events) - and not (flags.LOG_FORMAT == "json" and type(evt) == EmptyLine) + not 
isinstance(msg.data, NoFile) + and not (isinstance(msg.data, Cache) and not log_cache_events) + and not (flags.LOG_FORMAT == "json" and type(msg.data) == EmptyLine) ) def _get_logbook_log_config(level: Optional[EventLevel] = None) -> LoggerConfig: config = _get_stdout_config(level) config.name = "logbook_log" - config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e, Cache) + config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e.data, Cache) config.logger = GLOBAL_LOGGER return config @@ -138,48 +137,58 @@ def stop_capture_stdout_logs(): # returns a dictionary representation of the event fields. # the message may contain secrets which must be scrubbed at the usage site. -def event_to_json(event: BaseEvent) -> str: - event_dict = event_to_dict(event) - raw_log_line = json.dumps(event_dict, sort_keys=True) +def msg_to_json(msg: EventMsg) -> str: + msg_dict = msg_to_dict(msg) + raw_log_line = json.dumps(msg_dict, sort_keys=True) return raw_log_line -def event_to_dict(event: BaseEvent) -> dict: - event_dict = dict() +def msg_to_dict(msg: EventMsg) -> dict: + msg_dict = dict() try: - event_dict = event.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True) # type: ignore + msg_dict = msg.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True) # type: ignore except AttributeError as exc: - event_type = type(event).__name__ + event_type = type(msg).__name__ raise Exception(f"type {event_type} is not serializable. {str(exc)}") # We don't want an empty NodeInfo in output - if "node_info" in event_dict and event_dict["node_info"]["node_name"] == "": - del event_dict["node_info"] - return event_dict + if ( + "data" in msg_dict + and "node_info" in msg_dict["data"] + and msg_dict["data"]["node_info"]["node_name"] == "" + ): + del msg_dict["data"]["node_info"] + return msg_dict def warn_or_error(event, node=None): - if flags.WARN_ERROR: + # TODO: resolve this circular import when flags.WARN_ERROR_OPTIONS is WarnErrorOptions type via click CLI. + from dbt.helper_types import WarnErrorOptions + + warn_error_options = WarnErrorOptions.from_yaml_string(flags.WARN_ERROR_OPTIONS) + if flags.WARN_ERROR or warn_error_options.includes(type(event).__name__): # TODO: resolve this circular import when at top - from dbt.exceptions import EventCompilationException + from dbt.exceptions import EventCompilationError - raise EventCompilationException(event.info.msg, node) + raise EventCompilationError(event.message(), node) else: fire_event(event) # an alternative to fire_event which only creates and logs the event value # if the condition is met. Does nothing otherwise. -def fire_event_if(conditional: bool, lazy_e: Callable[[], BaseEvent]) -> None: +def fire_event_if( + conditional: bool, lazy_e: Callable[[], BaseEvent], level: EventLevel = None +) -> None: if conditional: - fire_event(lazy_e()) + fire_event(lazy_e(), level=level) # top-level method for accessing the new eventing system # this is where all the side effects happen branched by event type # (i.e. - mutating the event history, printing to stdout, logging # to files, etc.) -def fire_event(e: BaseEvent) -> None: - EVENT_MANAGER.fire_event(e) +def fire_event(e: BaseEvent, level: EventLevel = None) -> None: + EVENT_MANAGER.fire_event(e, level=level) def get_metadata_vars() -> Dict[str, str]: @@ -206,11 +215,3 @@ def set_invocation_id() -> None: # This is primarily for setting the invocation_id for separate # commands in the dbt servers. It shouldn't be necessary for the CLI. 
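# --- Editor's aside: a minimal sketch of the reworked firing/serialization API
# above, not part of the diff. It assumes the concrete event class
# MainReportVersion from dbt.events.types (fields: version, log_version) and
# illustrative values only.
from dbt.events.base_types import EventLevel, msg_from_base_event
from dbt.events.functions import fire_event, msg_to_json
from dbt.events.types import MainReportVersion

event = MainReportVersion(version="1.4.0", log_version=3)

# fire_event() now builds the Msg wrapper itself via msg_from_base_event(),
# and the optional level argument overrides the event's own level_tag().
fire_event(event, level=EventLevel.DEBUG)

# The wrapper can also be built by hand; info carries metadata, data the payload.
msg = msg_from_base_event(event, level=EventLevel.DEBUG)
assert msg.info.level == "debug"  # EventInfo.level is a plain string now
assert msg.data.version == "1.4.0"
print(msg_to_json(msg))  # json.dumps of msg_to_dict(msg), keys sorted
# --- End aside.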
EVENT_MANAGER.invocation_id = str(uuid.uuid4()) - - -# Currently used to set the level in EventInfo, so logging events can -# provide more than one "level". Might be used in the future to set -# more fields in EventInfo, once some of that information is no longer global -def info(level="info"): - info = EventInfo(level=level) - return info diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 70dc7bd240b..67e489f6798 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -47,6 +47,9 @@ class NodeInfo(betterproto.Message): node_status: str = betterproto.string_field(6) node_started_at: str = betterproto.string_field(7) node_finished_at: str = betterproto.string_field(8) + meta: Dict[str, str] = betterproto.map_field( + 9, betterproto.TYPE_STRING, betterproto.TYPE_STRING + ) @dataclass @@ -91,1929 +94,2757 @@ class GenericMessage(betterproto.Message): class MainReportVersion(betterproto.Message): """A001""" + version: str = betterproto.string_field(1) + log_version: int = betterproto.int32_field(2) + + +@dataclass +class MainReportVersionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - version: str = betterproto.string_field(2) - log_version: int = betterproto.int32_field(3) + data: "MainReportVersion" = betterproto.message_field(2) @dataclass class MainReportArgs(betterproto.Message): """A002""" - info: "EventInfo" = betterproto.message_field(1) args: Dict[str, str] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_STRING + 1, betterproto.TYPE_STRING, betterproto.TYPE_STRING ) +@dataclass +class MainReportArgsMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "MainReportArgs" = betterproto.message_field(2) + + @dataclass class MainTrackingUserState(betterproto.Message): """A003""" - info: "EventInfo" = betterproto.message_field(1) - user_state: str = betterproto.string_field(2) + user_state: str = betterproto.string_field(1) @dataclass -class MergedFromState(betterproto.Message): - """A004""" - +class MainTrackingUserStateMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - num_merged: int = betterproto.int32_field(2) - sample: List[str] = betterproto.string_field(3) + data: "MainTrackingUserState" = betterproto.message_field(2) @dataclass -class MissingProfileTarget(betterproto.Message): - """A005""" +class MergedFromState(betterproto.Message): + """A004""" - info: "EventInfo" = betterproto.message_field(1) - profile_name: str = betterproto.string_field(2) - target_name: str = betterproto.string_field(3) + num_merged: int = betterproto.int32_field(1) + sample: List[str] = betterproto.string_field(2) @dataclass -class InvalidVarsYAML(betterproto.Message): - """A008""" - +class MergedFromStateMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "MergedFromState" = betterproto.message_field(2) @dataclass -class DbtProjectError(betterproto.Message): - """A009""" +class MissingProfileTarget(betterproto.Message): + """A005""" - info: "EventInfo" = betterproto.message_field(1) + profile_name: str = betterproto.string_field(1) + target_name: str = betterproto.string_field(2) @dataclass -class DbtProjectErrorException(betterproto.Message): - """A010""" - +class MissingProfileTargetMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "MissingProfileTarget" = betterproto.message_field(2) @dataclass -class 
DbtProfileError(betterproto.Message): - """A011""" +class InvalidOptionYAML(betterproto.Message): + """A008""" - info: "EventInfo" = betterproto.message_field(1) + option_name: str = betterproto.string_field(1) @dataclass -class DbtProfileErrorException(betterproto.Message): - """A012""" - +class InvalidOptionYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "InvalidOptionYAML" = betterproto.message_field(2) @dataclass -class ProfileListTitle(betterproto.Message): - """A013""" +class LogDbtProjectError(betterproto.Message): + """A009""" - info: "EventInfo" = betterproto.message_field(1) + exc: str = betterproto.string_field(1) @dataclass -class ListSingleProfile(betterproto.Message): - """A014""" - +class LogDbtProjectErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - profile: str = betterproto.string_field(2) + data: "LogDbtProjectError" = betterproto.message_field(2) @dataclass -class NoDefinedProfiles(betterproto.Message): - """A015""" +class LogDbtProfileError(betterproto.Message): + """A011""" - info: "EventInfo" = betterproto.message_field(1) + exc: str = betterproto.string_field(1) + profiles: List[str] = betterproto.string_field(2) @dataclass -class ProfileHelpMessage(betterproto.Message): - """A016""" - +class LogDbtProfileErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "LogDbtProfileError" = betterproto.message_field(2) @dataclass class StarterProjectPath(betterproto.Message): """A017""" + dir: str = betterproto.string_field(1) + + +@dataclass +class StarterProjectPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dir: str = betterproto.string_field(2) + data: "StarterProjectPath" = betterproto.message_field(2) @dataclass class ConfigFolderDirectory(betterproto.Message): """A018""" + dir: str = betterproto.string_field(1) + + +@dataclass +class ConfigFolderDirectoryMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dir: str = betterproto.string_field(2) + data: "ConfigFolderDirectory" = betterproto.message_field(2) @dataclass class NoSampleProfileFound(betterproto.Message): """A019""" + adapter: str = betterproto.string_field(1) + + +@dataclass +class NoSampleProfileFoundMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - adapter: str = betterproto.string_field(2) + data: "NoSampleProfileFound" = betterproto.message_field(2) @dataclass class ProfileWrittenWithSample(betterproto.Message): """A020""" + name: str = betterproto.string_field(1) + path: str = betterproto.string_field(2) + + +@dataclass +class ProfileWrittenWithSampleMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - path: str = betterproto.string_field(3) + data: "ProfileWrittenWithSample" = betterproto.message_field(2) @dataclass class ProfileWrittenWithTargetTemplateYAML(betterproto.Message): """A021""" + name: str = betterproto.string_field(1) + path: str = betterproto.string_field(2) + + +@dataclass +class ProfileWrittenWithTargetTemplateYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - path: str = betterproto.string_field(3) + data: "ProfileWrittenWithTargetTemplateYAML" = betterproto.message_field(2)
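# --- Editor's aside (not part of the diff): the pattern above repeats for every
# event type in this file. The event class now carries only its payload fields,
# renumbered from 1, and a generated <Name>Msg wrapper pairs it with EventInfo.
# A sketch with illustrative values:
from dbt.events import proto_types as pt

data = pt.ProfileWrittenWithSample(name="jaffle_shop", path="/home/u/.dbt/profiles.yml")
msg = pt.ProfileWrittenWithSampleMsg(info=pt.EventInfo(level="info"), data=data)

# betterproto round-trips the wrapper like any other message.
assert pt.ProfileWrittenWithSampleMsg().parse(bytes(msg)).data.name == "jaffle_shop"
# --- End aside.
@dataclass class ProfileWrittenWithProjectTemplateYAML(betterproto.Message): """A022""" + name: str = betterproto.string_field(1) + path: str =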
betterproto.string_field(2) + + +@dataclass +class ProfileWrittenWithProjectTemplateYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - path: str = betterproto.string_field(3) + data: "ProfileWrittenWithProjectTemplateYAML" = betterproto.message_field(2) @dataclass class SettingUpProfile(betterproto.Message): """A023""" + pass + + +@dataclass +class SettingUpProfileMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "SettingUpProfile" = betterproto.message_field(2) @dataclass class InvalidProfileTemplateYAML(betterproto.Message): """A024""" + pass + + +@dataclass +class InvalidProfileTemplateYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "InvalidProfileTemplateYAML" = betterproto.message_field(2) @dataclass class ProjectNameAlreadyExists(betterproto.Message): """A025""" + name: str = betterproto.string_field(1) + + +@dataclass +class ProjectNameAlreadyExistsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) + data: "ProjectNameAlreadyExists" = betterproto.message_field(2) @dataclass class ProjectCreated(betterproto.Message): """A026""" + project_name: str = betterproto.string_field(1) + docs_url: str = betterproto.string_field(2) + slack_url: str = betterproto.string_field(3) + + +@dataclass +class ProjectCreatedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - project_name: str = betterproto.string_field(2) - docs_url: str = betterproto.string_field(3) - slack_url: str = betterproto.string_field(4) + data: "ProjectCreated" = betterproto.message_field(2) @dataclass class PackageRedirectDeprecation(betterproto.Message): """D001""" + old_name: str = betterproto.string_field(1) + new_name: str = betterproto.string_field(2) + + +@dataclass +class PackageRedirectDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - old_name: str = betterproto.string_field(2) - new_name: str = betterproto.string_field(3) + data: "PackageRedirectDeprecation" = betterproto.message_field(2) @dataclass class PackageInstallPathDeprecation(betterproto.Message): """D002""" + pass + + +@dataclass +class PackageInstallPathDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "PackageInstallPathDeprecation" = betterproto.message_field(2) @dataclass class ConfigSourcePathDeprecation(betterproto.Message): """D003""" + deprecated_path: str = betterproto.string_field(1) + exp_path: str = betterproto.string_field(2) + + +@dataclass +class ConfigSourcePathDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - deprecated_path: str = betterproto.string_field(2) - exp_path: str = betterproto.string_field(3) + data: "ConfigSourcePathDeprecation" = betterproto.message_field(2) @dataclass class ConfigDataPathDeprecation(betterproto.Message): """D004""" + deprecated_path: str = betterproto.string_field(1) + exp_path: str = betterproto.string_field(2) + + +@dataclass +class ConfigDataPathDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - deprecated_path: str = betterproto.string_field(2) - exp_path: str = betterproto.string_field(3) + data: "ConfigDataPathDeprecation" = betterproto.message_field(2) @dataclass class AdapterDeprecationWarning(betterproto.Message): """D005""" + old_name: str = betterproto.string_field(1) + new_name: str = betterproto.string_field(2) 
+ + +@dataclass +class AdapterDeprecationWarningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - old_name: str = betterproto.string_field(2) - new_name: str = betterproto.string_field(3) + data: "AdapterDeprecationWarning" = betterproto.message_field(2) @dataclass class MetricAttributesRenamed(betterproto.Message): """D006""" + metric_name: str = betterproto.string_field(1) + + +@dataclass +class MetricAttributesRenamedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - metric_name: str = betterproto.string_field(2) + data: "MetricAttributesRenamed" = betterproto.message_field(2) @dataclass class ExposureNameDeprecation(betterproto.Message): """D007""" + exposure: str = betterproto.string_field(1) + + +@dataclass +class ExposureNameDeprecationMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "ExposureNameDeprecation" = betterproto.message_field(2) + + +@dataclass +class InternalDeprecation(betterproto.Message): + """D008""" + + name: str = betterproto.string_field(1) + reason: str = betterproto.string_field(2) + suggested_action: str = betterproto.string_field(3) + version: str = betterproto.string_field(4) + + +@dataclass +class InternalDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exposure: str = betterproto.string_field(2) + data: "InternalDeprecation" = betterproto.message_field(2) @dataclass class AdapterEventDebug(betterproto.Message): """E001""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + base_msg: str = betterproto.string_field(3) + args: List[str] = betterproto.string_field(4) + + +@dataclass +class AdapterEventDebugMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - base_msg: str = betterproto.string_field(4) - args: List[str] = betterproto.string_field(5) + data: "AdapterEventDebug" = betterproto.message_field(2) @dataclass class AdapterEventInfo(betterproto.Message): """E002""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + base_msg: str = betterproto.string_field(3) + args: List[str] = betterproto.string_field(4) + + +@dataclass +class AdapterEventInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - base_msg: str = betterproto.string_field(4) - args: List[str] = betterproto.string_field(5) + data: "AdapterEventInfo" = betterproto.message_field(2) @dataclass class AdapterEventWarning(betterproto.Message): """E003""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + base_msg: str = betterproto.string_field(3) + args: List[str] = betterproto.string_field(4) + + +@dataclass +class AdapterEventWarningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - base_msg: str = betterproto.string_field(4) - args: List[str] = betterproto.string_field(5) + data: "AdapterEventWarning" = betterproto.message_field(2) @dataclass class AdapterEventError(betterproto.Message): """E004""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + base_msg: str = betterproto.string_field(3) + args: List[str] = 
betterproto.string_field(4) + exc_info: str = betterproto.string_field(5) + + +@dataclass +class AdapterEventErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - base_msg: str = betterproto.string_field(4) - args: List[str] = betterproto.string_field(5) - exc_info: str = betterproto.string_field(6) + data: "AdapterEventError" = betterproto.message_field(2) @dataclass class NewConnection(betterproto.Message): """E005""" + node_info: "NodeInfo" = betterproto.message_field(1) + conn_type: str = betterproto.string_field(2) + conn_name: str = betterproto.string_field(3) + + +@dataclass +class NewConnectionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - conn_type: str = betterproto.string_field(3) - conn_name: str = betterproto.string_field(4) + data: "NewConnection" = betterproto.message_field(2) @dataclass class ConnectionReused(betterproto.Message): """E006""" + conn_name: str = betterproto.string_field(1) + orig_conn_name: str = betterproto.string_field(2) + + +@dataclass +class ConnectionReusedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + data: "ConnectionReused" = betterproto.message_field(2) @dataclass class ConnectionLeftOpenInCleanup(betterproto.Message): """E007""" + conn_name: str = betterproto.string_field(1) + + +@dataclass +class ConnectionLeftOpenInCleanupMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + data: "ConnectionLeftOpenInCleanup" = betterproto.message_field(2) @dataclass class ConnectionClosedInCleanup(betterproto.Message): """E008""" + conn_name: str = betterproto.string_field(1) + + +@dataclass +class ConnectionClosedInCleanupMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + data: "ConnectionClosedInCleanup" = betterproto.message_field(2) @dataclass class RollbackFailed(betterproto.Message): """E009""" + node_info: "NodeInfo" = betterproto.message_field(1) + conn_name: str = betterproto.string_field(2) + exc_info: str = betterproto.string_field(3) + + +@dataclass +class RollbackFailedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - conn_name: str = betterproto.string_field(3) - exc_info: str = betterproto.string_field(4) + data: "RollbackFailed" = betterproto.message_field(2) @dataclass class ConnectionClosed(betterproto.Message): """E010""" + node_info: "NodeInfo" = betterproto.message_field(1) + conn_name: str = betterproto.string_field(2) + + +@dataclass +class ConnectionClosedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - conn_name: str = betterproto.string_field(3) + data: "ConnectionClosed" = betterproto.message_field(2) @dataclass class ConnectionLeftOpen(betterproto.Message): """E011""" + node_info: "NodeInfo" = betterproto.message_field(1) + conn_name: str = betterproto.string_field(2) + + +@dataclass +class ConnectionLeftOpenMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - conn_name: str = betterproto.string_field(3) + data: "ConnectionLeftOpen" = betterproto.message_field(2)
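# --- Editor's aside (not part of the diff): since loggers and callbacks now
# receive the Msg wrapper, a filter or callback can branch on the typed payload
# (msg.data) as well as the metadata (msg.info). A sketch of a callback appended
# to the EVENT_MANAGER singleton from dbt.events.functions; connection_audit is
# a hypothetical name:
from dbt.events.base_types import EventMsg
from dbt.events.functions import EVENT_MANAGER

def connection_audit(msg: EventMsg) -> None:
    # E005/E006 are NewConnection and ConnectionReused, defined above.
    if msg.info.code in ("E005", "E006"):
        print(f"{msg.info.ts} {msg.info.name}: conn={msg.data.conn_name}")

EVENT_MANAGER.callbacks.append(connection_audit)
# --- End aside.
@dataclass class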
Rollback(betterproto.Message): """E012""" - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - conn_name: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(1) + conn_name: str = betterproto.string_field(2) @dataclass -class CacheMiss(betterproto.Message): - """E013""" - +class RollbackMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) - database: str = betterproto.string_field(3) - schema: str = betterproto.string_field(4) + data: "Rollback" = betterproto.message_field(2) @dataclass -class ListRelations(betterproto.Message): - """E014""" +class CacheMiss(betterproto.Message): + """E013""" - info: "EventInfo" = betterproto.message_field(1) + conn_name: str = betterproto.string_field(1) database: str = betterproto.string_field(2) schema: str = betterproto.string_field(3) - relations: List["ReferenceKeyMsg"] = betterproto.message_field(4) @dataclass -class ConnectionUsed(betterproto.Message): - """E015""" - +class CacheMissMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - conn_type: str = betterproto.string_field(3) - conn_name: str = betterproto.string_field(4) + data: "CacheMiss" = betterproto.message_field(2) @dataclass -class SQLQuery(betterproto.Message): - """E016""" +class ListRelations(betterproto.Message): + """E014""" - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - conn_name: str = betterproto.string_field(3) - sql: str = betterproto.string_field(4) + database: str = betterproto.string_field(1) + schema: str = betterproto.string_field(2) + relations: List["ReferenceKeyMsg"] = betterproto.message_field(3) @dataclass -class SQLQueryStatus(betterproto.Message): - """E017""" - +class ListRelationsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - status: str = betterproto.string_field(3) - elapsed: float = betterproto.float_field(4) + data: "ListRelations" = betterproto.message_field(2) @dataclass -class SQLCommit(betterproto.Message): - """E018""" +class ConnectionUsed(betterproto.Message): + """E015""" - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) + node_info: "NodeInfo" = betterproto.message_field(1) + conn_type: str = betterproto.string_field(2) conn_name: str = betterproto.string_field(3) @dataclass -class ColTypeChange(betterproto.Message): - """E019""" - +class ConnectionUsedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - orig_type: str = betterproto.string_field(2) - new_type: str = betterproto.string_field(3) - table: "ReferenceKeyMsg" = betterproto.message_field(4) + data: "ConnectionUsed" = betterproto.message_field(2) @dataclass -class SchemaCreation(betterproto.Message): - """E020""" +class SQLQuery(betterproto.Message): + """E016""" - info: "EventInfo" = betterproto.message_field(1) - relation: "ReferenceKeyMsg" = betterproto.message_field(2) + node_info: "NodeInfo" = betterproto.message_field(1) + conn_name: str = betterproto.string_field(2) + sql: str = betterproto.string_field(3) @dataclass -class SchemaDrop(betterproto.Message): - """E021""" - +class SQLQueryMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - relation: "ReferenceKeyMsg" = betterproto.message_field(2) + data: 
"SQLQuery" = betterproto.message_field(2) @dataclass -class UncachedRelation(betterproto.Message): - """E022""" +class SQLQueryStatus(betterproto.Message): + """E017""" - info: "EventInfo" = betterproto.message_field(1) - dep_key: "ReferenceKeyMsg" = betterproto.message_field(2) - ref_key: "ReferenceKeyMsg" = betterproto.message_field(3) + node_info: "NodeInfo" = betterproto.message_field(1) + status: str = betterproto.string_field(2) + elapsed: float = betterproto.float_field(3) @dataclass -class AddLink(betterproto.Message): - """E023""" - +class SQLQueryStatusMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dep_key: "ReferenceKeyMsg" = betterproto.message_field(2) - ref_key: "ReferenceKeyMsg" = betterproto.message_field(3) + data: "SQLQueryStatus" = betterproto.message_field(2) @dataclass -class AddRelation(betterproto.Message): - """E024""" +class SQLCommit(betterproto.Message): + """E018""" - info: "EventInfo" = betterproto.message_field(1) - relation: "ReferenceKeyMsg" = betterproto.message_field(2) + node_info: "NodeInfo" = betterproto.message_field(1) + conn_name: str = betterproto.string_field(2) @dataclass -class DropMissingRelation(betterproto.Message): - """E025""" - +class SQLCommitMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - relation: "ReferenceKeyMsg" = betterproto.message_field(2) + data: "SQLCommit" = betterproto.message_field(2) @dataclass -class DropCascade(betterproto.Message): - """E026""" +class ColTypeChange(betterproto.Message): + """E019""" - info: "EventInfo" = betterproto.message_field(1) - dropped: "ReferenceKeyMsg" = betterproto.message_field(2) - consequences: List["ReferenceKeyMsg"] = betterproto.message_field(3) + orig_type: str = betterproto.string_field(1) + new_type: str = betterproto.string_field(2) + table: "ReferenceKeyMsg" = betterproto.message_field(3) @dataclass -class DropRelation(betterproto.Message): - """E027""" - +class ColTypeChangeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dropped: "ReferenceKeyMsg" = betterproto.message_field(2) + data: "ColTypeChange" = betterproto.message_field(2) @dataclass -class UpdateReference(betterproto.Message): - """E028""" +class SchemaCreation(betterproto.Message): + """E020""" - info: "EventInfo" = betterproto.message_field(1) - old_key: "ReferenceKeyMsg" = betterproto.message_field(2) - new_key: "ReferenceKeyMsg" = betterproto.message_field(3) - cached_key: "ReferenceKeyMsg" = betterproto.message_field(4) + relation: "ReferenceKeyMsg" = betterproto.message_field(1) @dataclass -class TemporaryRelation(betterproto.Message): - """E029""" - +class SchemaCreationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - key: "ReferenceKeyMsg" = betterproto.message_field(2) + data: "SchemaCreation" = betterproto.message_field(2) @dataclass -class RenameSchema(betterproto.Message): - """E030""" +class SchemaDrop(betterproto.Message): + """E021""" - info: "EventInfo" = betterproto.message_field(1) - old_key: "ReferenceKeyMsg" = betterproto.message_field(2) - new_key: "ReferenceKeyMsg" = betterproto.message_field(3) + relation: "ReferenceKeyMsg" = betterproto.message_field(1) @dataclass -class DumpBeforeAddGraph(betterproto.Message): - """E031""" - +class SchemaDropMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dump: Dict[str, "ListOfStrings"] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) + data: "SchemaDrop" = betterproto.message_field(2) 
@dataclass -class DumpAfterAddGraph(betterproto.Message): - """E032""" +class CacheAction(betterproto.Message): + """E022""" + + action: str = betterproto.string_field(1) + ref_key: "ReferenceKeyMsg" = betterproto.message_field(2) + ref_key_2: "ReferenceKeyMsg" = betterproto.message_field(3) + ref_key_3: "ReferenceKeyMsg" = betterproto.message_field(4) + ref_list: List["ReferenceKeyMsg"] = betterproto.message_field(5) + +@dataclass +class CacheActionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dump: Dict[str, "ListOfStrings"] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) + data: "CacheAction" = betterproto.message_field(2) @dataclass -class DumpBeforeRenameSchema(betterproto.Message): - """E033""" +class CacheDumpGraph(betterproto.Message): + """E031""" - info: "EventInfo" = betterproto.message_field(1) dump: Dict[str, "ListOfStrings"] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE + 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE ) + before_after: str = betterproto.string_field(2) + action: str = betterproto.string_field(3) @dataclass -class DumpAfterRenameSchema(betterproto.Message): - """E034""" - +class CacheDumpGraphMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dump: Dict[str, "ListOfStrings"] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) + data: "CacheDumpGraph" = betterproto.message_field(2) @dataclass class AdapterImportError(betterproto.Message): """E035""" + exc: str = betterproto.string_field(1) + + +@dataclass +class AdapterImportErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "AdapterImportError" = betterproto.message_field(2) @dataclass class PluginLoadError(betterproto.Message): """E036""" + exc_info: str = betterproto.string_field(1) + + +@dataclass +class PluginLoadErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc_info: str = betterproto.string_field(2) + data: "PluginLoadError" = betterproto.message_field(2) @dataclass class NewConnectionOpening(betterproto.Message): """E037""" + node_info: "NodeInfo" = betterproto.message_field(1) + connection_state: str = betterproto.string_field(2) + + +@dataclass +class NewConnectionOpeningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - connection_state: str = betterproto.string_field(3) + data: "NewConnectionOpening" = betterproto.message_field(2) @dataclass class CodeExecution(betterproto.Message): """E038""" + conn_name: str = betterproto.string_field(1) + code_content: str = betterproto.string_field(2) + + +@dataclass +class CodeExecutionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) - code_content: str = betterproto.string_field(3) + data: "CodeExecution" = betterproto.message_field(2) @dataclass class CodeExecutionStatus(betterproto.Message): """E039""" + status: str = betterproto.string_field(1) + elapsed: float = betterproto.float_field(2) + + +@dataclass +class CodeExecutionStatusMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - status: str = betterproto.string_field(2) - elapsed: float = betterproto.float_field(3) + data: "CodeExecutionStatus" = betterproto.message_field(2)
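# --- Editor's aside (not part of the diff): the per-operation cache events
# (E022-E030) collapse above into a single CacheAction discriminated by the
# `action` string, and the four graph-dump events (E031-E034) into
# CacheDumpGraph. A sketch with illustrative values, assuming ReferenceKeyMsg
# carries database/schema/identifier string fields:
from dbt.events import proto_types as pt

drop = pt.CacheAction(
    action="drop_relation",  # hypothetical action label
    ref_key=pt.ReferenceKeyMsg(database="dbt", schema="main", identifier="my_model"),
)
dump = pt.CacheDumpGraph(before_after="before", action="rename", dump={})
# --- End aside.
@dataclass class CatalogGenerationError(betterproto.Message): """E040""" + exc: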
str = betterproto.string_field(1) + + +@dataclass +class CatalogGenerationErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "CatalogGenerationError" = betterproto.message_field(2) @dataclass class WriteCatalogFailure(betterproto.Message): """E041""" + num_exceptions: int = betterproto.int32_field(1) + + +@dataclass +class WriteCatalogFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - num_exceptions: int = betterproto.int32_field(2) + data: "WriteCatalogFailure" = betterproto.message_field(2) @dataclass class CatalogWritten(betterproto.Message): """E042""" + path: str = betterproto.string_field(1) + + +@dataclass +class CatalogWrittenMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "CatalogWritten" = betterproto.message_field(2) @dataclass class CannotGenerateDocs(betterproto.Message): """E043""" - info: "EventInfo" = betterproto.message_field(1) + pass @dataclass -class BuildingCatalog(betterproto.Message): - """E044""" - +class CannotGenerateDocsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "CannotGenerateDocs" = betterproto.message_field(2) @dataclass -class DatabaseErrorRunningHook(betterproto.Message): - """E045""" +class BuildingCatalog(betterproto.Message): + """E044""" - info: "EventInfo" = betterproto.message_field(1) - hook_type: str = betterproto.string_field(2) + pass @dataclass -class HooksRunning(betterproto.Message): - """E046""" - +class BuildingCatalogMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - num_hooks: int = betterproto.int32_field(2) - hook_type: str = betterproto.string_field(3) + data: "BuildingCatalog" = betterproto.message_field(2) @dataclass -class HookFinished(betterproto.Message): - """E047""" +class DatabaseErrorRunningHook(betterproto.Message): + """E045""" - info: "EventInfo" = betterproto.message_field(1) - stat_line: str = betterproto.string_field(2) - execution: str = betterproto.string_field(3) - execution_time: float = betterproto.float_field(4) + hook_type: str = betterproto.string_field(1) @dataclass -class ParseCmdStart(betterproto.Message): - """I001""" - +class DatabaseErrorRunningHookMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DatabaseErrorRunningHook" = betterproto.message_field(2) @dataclass -class ParseCmdCompiling(betterproto.Message): - """I002""" +class HooksRunning(betterproto.Message): + """E046""" - info: "EventInfo" = betterproto.message_field(1) + num_hooks: int = betterproto.int32_field(1) + hook_type: str = betterproto.string_field(2) @dataclass -class ParseCmdWritingManifest(betterproto.Message): - """I003""" - +class HooksRunningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "HooksRunning" = betterproto.message_field(2) @dataclass -class ParseCmdDone(betterproto.Message): - """I004""" +class FinishedRunningStats(betterproto.Message): + """E047""" - info: "EventInfo" = betterproto.message_field(1) + stat_line: str = betterproto.string_field(1) + execution: str = betterproto.string_field(2) + execution_time: float = betterproto.float_field(3) @dataclass -class ManifestDependenciesLoaded(betterproto.Message): - """I005""" - +class FinishedRunningStatsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "FinishedRunningStats" = betterproto.message_field(2) @dataclass -class 
ManifestLoaderCreated(betterproto.Message): - """I006""" +class ParseCmdOut(betterproto.Message): + """I001""" - info: "EventInfo" = betterproto.message_field(1) + msg: str = betterproto.string_field(1) @dataclass -class ManifestLoaded(betterproto.Message): - """I007""" - +class ParseCmdOutMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "ParseCmdOut" = betterproto.message_field(2) @dataclass -class ManifestChecked(betterproto.Message): - """I008""" +class GenericTestFileParse(betterproto.Message): + """I011""" - info: "EventInfo" = betterproto.message_field(1) + path: str = betterproto.string_field(1) @dataclass -class ManifestFlatGraphBuilt(betterproto.Message): - """I009""" - +class GenericTestFileParseMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "GenericTestFileParse" = betterproto.message_field(2) @dataclass -class ParseCmdPerfInfoPath(betterproto.Message): - """I010""" +class MacroFileParse(betterproto.Message): + """I012""" - info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + path: str = betterproto.string_field(1) @dataclass -class GenericTestFileParse(betterproto.Message): - """I011""" - +class MacroFileParseMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "MacroFileParse" = betterproto.message_field(2) @dataclass -class MacroFileParse(betterproto.Message): - """I012""" +class PartialParsingErrorProcessingFile(betterproto.Message): + """I014""" - info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + file: str = betterproto.string_field(1) @dataclass -class PartialParsingExceptionProcessingFile(betterproto.Message): - """I014""" - +class PartialParsingErrorProcessingFileMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - file: str = betterproto.string_field(2) + data: "PartialParsingErrorProcessingFile" = betterproto.message_field(2) @dataclass -class PartialParsingException(betterproto.Message): +class PartialParsingError(betterproto.Message): """I016""" - info: "EventInfo" = betterproto.message_field(1) exc_info: Dict[str, str] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_STRING + 1, betterproto.TYPE_STRING, betterproto.TYPE_STRING ) +@dataclass +class PartialParsingErrorMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "PartialParsingError" = betterproto.message_field(2) + + @dataclass class PartialParsingSkipParsing(betterproto.Message): """I017""" + pass + + +@dataclass +class PartialParsingSkipParsingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "PartialParsingSkipParsing" = betterproto.message_field(2) @dataclass class UnableToPartialParse(betterproto.Message): """I024""" + reason: str = betterproto.string_field(1) + + +@dataclass +class UnableToPartialParseMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - reason: str = betterproto.string_field(2) + data: "UnableToPartialParse" = betterproto.message_field(2) @dataclass class PartialParsingNotEnabled(betterproto.Message): """I028""" + pass + + +@dataclass +class PartialParsingNotEnabledMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "PartialParsingNotEnabled" = betterproto.message_field(2) @dataclass class ParsedFileLoadFailed(betterproto.Message): """I029""" + path: str = betterproto.string_field(1) + exc: str = 
betterproto.string_field(2) + exc_info: str = betterproto.string_field(3) + + +@dataclass +class ParsedFileLoadFailedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) - exc: str = betterproto.string_field(3) - exc_info: str = betterproto.string_field(4) + data: "ParsedFileLoadFailed" = betterproto.message_field(2) @dataclass class StaticParserCausedJinjaRendering(betterproto.Message): """I031""" + path: str = betterproto.string_field(1) + + +@dataclass +class StaticParserCausedJinjaRenderingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "StaticParserCausedJinjaRendering" = betterproto.message_field(2) @dataclass class UsingExperimentalParser(betterproto.Message): """I032""" + path: str = betterproto.string_field(1) + + +@dataclass +class UsingExperimentalParserMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "UsingExperimentalParser" = betterproto.message_field(2) @dataclass class SampleFullJinjaRendering(betterproto.Message): """I033""" + path: str = betterproto.string_field(1) + + +@dataclass +class SampleFullJinjaRenderingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "SampleFullJinjaRendering" = betterproto.message_field(2) @dataclass class StaticParserFallbackJinjaRendering(betterproto.Message): """I034""" + path: str = betterproto.string_field(1) + + +@dataclass +class StaticParserFallbackJinjaRenderingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "StaticParserFallbackJinjaRendering" = betterproto.message_field(2) @dataclass class StaticParsingMacroOverrideDetected(betterproto.Message): """I035""" + path: str = betterproto.string_field(1) + + +@dataclass +class StaticParsingMacroOverrideDetectedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "StaticParsingMacroOverrideDetected" = betterproto.message_field(2) @dataclass class StaticParserSuccess(betterproto.Message): """I036""" + path: str = betterproto.string_field(1) + + +@dataclass +class StaticParserSuccessMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "StaticParserSuccess" = betterproto.message_field(2) @dataclass class StaticParserFailure(betterproto.Message): """I037""" + path: str = betterproto.string_field(1) + + +@dataclass +class StaticParserFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "StaticParserFailure" = betterproto.message_field(2) @dataclass class ExperimentalParserSuccess(betterproto.Message): """I038""" + path: str = betterproto.string_field(1) + + +@dataclass +class ExperimentalParserSuccessMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "ExperimentalParserSuccess" = betterproto.message_field(2) @dataclass class ExperimentalParserFailure(betterproto.Message): """I039""" + path: str = betterproto.string_field(1) + + +@dataclass +class ExperimentalParserFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "ExperimentalParserFailure" = 
betterproto.message_field(2) @dataclass class PartialParsingEnabled(betterproto.Message): """I040""" + deleted: int = betterproto.int32_field(1) + added: int = betterproto.int32_field(2) + changed: int = betterproto.int32_field(3) + + +@dataclass +class PartialParsingEnabledMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - deleted: int = betterproto.int32_field(2) - added: int = betterproto.int32_field(3) - changed: int = betterproto.int32_field(4) + data: "PartialParsingEnabled" = betterproto.message_field(2) @dataclass class PartialParsingFile(betterproto.Message): """I041""" + file_id: str = betterproto.string_field(1) + operation: str = betterproto.string_field(2) + + +@dataclass +class PartialParsingFileMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - file_id: str = betterproto.string_field(2) - operation: str = betterproto.string_field(3) + data: "PartialParsingFile" = betterproto.message_field(2) @dataclass class InvalidDisabledTargetInTestNode(betterproto.Message): """I050""" + resource_type_title: str = betterproto.string_field(1) + unique_id: str = betterproto.string_field(2) + original_file_path: str = betterproto.string_field(3) + target_kind: str = betterproto.string_field(4) + target_name: str = betterproto.string_field(5) + target_package: str = betterproto.string_field(6) + + +@dataclass +class InvalidDisabledTargetInTestNodeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - resource_type_title: str = betterproto.string_field(2) - unique_id: str = betterproto.string_field(3) - original_file_path: str = betterproto.string_field(4) - target_kind: str = betterproto.string_field(5) - target_name: str = betterproto.string_field(6) - target_package: str = betterproto.string_field(7) + data: "InvalidDisabledTargetInTestNode" = betterproto.message_field(2) @dataclass class UnusedResourceConfigPath(betterproto.Message): """I051""" + unused_config_paths: List[str] = betterproto.string_field(1) + + +@dataclass +class UnusedResourceConfigPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - unused_config_paths: List[str] = betterproto.string_field(2) + data: "UnusedResourceConfigPath" = betterproto.message_field(2) @dataclass class SeedIncreased(betterproto.Message): """I052""" + package_name: str = betterproto.string_field(1) + name: str = betterproto.string_field(2) + + +@dataclass +class SeedIncreasedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - package_name: str = betterproto.string_field(2) - name: str = betterproto.string_field(3) + data: "SeedIncreased" = betterproto.message_field(2) @dataclass class SeedExceedsLimitSamePath(betterproto.Message): """I053""" + package_name: str = betterproto.string_field(1) + name: str = betterproto.string_field(2) + + +@dataclass +class SeedExceedsLimitSamePathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - package_name: str = betterproto.string_field(2) - name: str = betterproto.string_field(3) + data: "SeedExceedsLimitSamePath" = betterproto.message_field(2) @dataclass class SeedExceedsLimitAndPathChanged(betterproto.Message): """I054""" + package_name: str = betterproto.string_field(1) + name: str = betterproto.string_field(2) + + +@dataclass +class SeedExceedsLimitAndPathChangedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - package_name: str = betterproto.string_field(2) - name: str = betterproto.string_field(3) + data: 
"SeedExceedsLimitAndPathChanged" = betterproto.message_field(2) @dataclass class SeedExceedsLimitChecksumChanged(betterproto.Message): """I055""" + package_name: str = betterproto.string_field(1) + name: str = betterproto.string_field(2) + checksum_name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitChecksumChangedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - package_name: str = betterproto.string_field(2) - name: str = betterproto.string_field(3) - checksum_name: str = betterproto.string_field(4) + data: "SeedExceedsLimitChecksumChanged" = betterproto.message_field(2) @dataclass class UnusedTables(betterproto.Message): """I056""" + unused_tables: List[str] = betterproto.string_field(1) + + +@dataclass +class UnusedTablesMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - unused_tables: List[str] = betterproto.string_field(2) + data: "UnusedTables" = betterproto.message_field(2) @dataclass class WrongResourceSchemaFile(betterproto.Message): """I057""" + patch_name: str = betterproto.string_field(1) + resource_type: str = betterproto.string_field(2) + plural_resource_type: str = betterproto.string_field(3) + yaml_key: str = betterproto.string_field(4) + file_path: str = betterproto.string_field(5) + + +@dataclass +class WrongResourceSchemaFileMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - patch_name: str = betterproto.string_field(2) - resource_type: str = betterproto.string_field(3) - plural_resource_type: str = betterproto.string_field(4) - yaml_key: str = betterproto.string_field(5) - file_path: str = betterproto.string_field(6) + data: "WrongResourceSchemaFile" = betterproto.message_field(2) @dataclass class NoNodeForYamlKey(betterproto.Message): """I058""" + patch_name: str = betterproto.string_field(1) + yaml_key: str = betterproto.string_field(2) + file_path: str = betterproto.string_field(3) + + +@dataclass +class NoNodeForYamlKeyMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - patch_name: str = betterproto.string_field(2) - yaml_key: str = betterproto.string_field(3) - file_path: str = betterproto.string_field(4) + data: "NoNodeForYamlKey" = betterproto.message_field(2) @dataclass -class MacroPatchNotFound(betterproto.Message): +class MacroNotFoundForPatch(betterproto.Message): """I059""" + patch_name: str = betterproto.string_field(1) + + +@dataclass +class MacroNotFoundForPatchMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - patch_name: str = betterproto.string_field(2) + data: "MacroNotFoundForPatch" = betterproto.message_field(2) @dataclass class NodeNotFoundOrDisabled(betterproto.Message): """I060""" + original_file_path: str = betterproto.string_field(1) + unique_id: str = betterproto.string_field(2) + resource_type_title: str = betterproto.string_field(3) + target_name: str = betterproto.string_field(4) + target_kind: str = betterproto.string_field(5) + target_package: str = betterproto.string_field(6) + disabled: str = betterproto.string_field(7) + + +@dataclass +class NodeNotFoundOrDisabledMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - original_file_path: str = betterproto.string_field(2) - unique_id: str = betterproto.string_field(3) - resource_type_title: str = betterproto.string_field(4) - target_name: str = betterproto.string_field(5) - target_kind: str = betterproto.string_field(6) - target_package: str = betterproto.string_field(7) - disabled: str = 
betterproto.string_field(8) + data: "NodeNotFoundOrDisabled" = betterproto.message_field(2) @dataclass class JinjaLogWarning(betterproto.Message): """I061""" + node_info: "NodeInfo" = betterproto.message_field(1) + msg: str = betterproto.string_field(2) + + +@dataclass +class JinjaLogWarningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - msg: str = betterproto.string_field(3) + data: "JinjaLogWarning" = betterproto.message_field(2) @dataclass class GitSparseCheckoutSubdirectory(betterproto.Message): """M001""" + subdir: str = betterproto.string_field(1) + + +@dataclass +class GitSparseCheckoutSubdirectoryMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - subdir: str = betterproto.string_field(2) + data: "GitSparseCheckoutSubdirectory" = betterproto.message_field(2) @dataclass class GitProgressCheckoutRevision(betterproto.Message): """M002""" + revision: str = betterproto.string_field(1) + + +@dataclass +class GitProgressCheckoutRevisionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - revision: str = betterproto.string_field(2) + data: "GitProgressCheckoutRevision" = betterproto.message_field(2) @dataclass class GitProgressUpdatingExistingDependency(betterproto.Message): """M003""" + dir: str = betterproto.string_field(1) + + +@dataclass +class GitProgressUpdatingExistingDependencyMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dir: str = betterproto.string_field(2) + data: "GitProgressUpdatingExistingDependency" = betterproto.message_field(2) @dataclass class GitProgressPullingNewDependency(betterproto.Message): """M004""" + dir: str = betterproto.string_field(1) + + +@dataclass +class GitProgressPullingNewDependencyMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dir: str = betterproto.string_field(2) + data: "GitProgressPullingNewDependency" = betterproto.message_field(2) @dataclass class GitNothingToDo(betterproto.Message): """M005""" + sha: str = betterproto.string_field(1) + + +@dataclass +class GitNothingToDoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - sha: str = betterproto.string_field(2) + data: "GitNothingToDo" = betterproto.message_field(2) @dataclass class GitProgressUpdatedCheckoutRange(betterproto.Message): """M006""" + start_sha: str = betterproto.string_field(1) + end_sha: str = betterproto.string_field(2) + + +@dataclass +class GitProgressUpdatedCheckoutRangeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - start_sha: str = betterproto.string_field(2) - end_sha: str = betterproto.string_field(3) + data: "GitProgressUpdatedCheckoutRange" = betterproto.message_field(2) @dataclass class GitProgressCheckedOutAt(betterproto.Message): """M007""" + end_sha: str = betterproto.string_field(1) + + +@dataclass +class GitProgressCheckedOutAtMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - end_sha: str = betterproto.string_field(2) + data: "GitProgressCheckedOutAt" = betterproto.message_field(2) @dataclass class RegistryProgressGETRequest(betterproto.Message): """M008""" + url: str = betterproto.string_field(1) + + +@dataclass +class RegistryProgressGETRequestMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - url: str = betterproto.string_field(2) + data: "RegistryProgressGETRequest" = betterproto.message_field(2) @dataclass class RegistryProgressGETResponse(betterproto.Message): 
"""M009""" + url: str = betterproto.string_field(1) + resp_code: int = betterproto.int32_field(2) + + +@dataclass +class RegistryProgressGETResponseMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - url: str = betterproto.string_field(2) - resp_code: int = betterproto.int32_field(3) + data: "RegistryProgressGETResponse" = betterproto.message_field(2) @dataclass class SelectorReportInvalidSelector(betterproto.Message): """M010""" + valid_selectors: str = betterproto.string_field(1) + spec_method: str = betterproto.string_field(2) + raw_spec: str = betterproto.string_field(3) + + +@dataclass +class SelectorReportInvalidSelectorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - valid_selectors: str = betterproto.string_field(2) - spec_method: str = betterproto.string_field(3) - raw_spec: str = betterproto.string_field(4) + data: "SelectorReportInvalidSelector" = betterproto.message_field(2) @dataclass class JinjaLogInfo(betterproto.Message): """M011""" + node_info: "NodeInfo" = betterproto.message_field(1) + msg: str = betterproto.string_field(2) + + +@dataclass +class JinjaLogInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - msg: str = betterproto.string_field(3) + data: "JinjaLogInfo" = betterproto.message_field(2) @dataclass class JinjaLogDebug(betterproto.Message): """M012""" + node_info: "NodeInfo" = betterproto.message_field(1) + msg: str = betterproto.string_field(2) + + +@dataclass +class JinjaLogDebugMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - msg: str = betterproto.string_field(3) + data: "JinjaLogDebug" = betterproto.message_field(2) @dataclass class DepsNoPackagesFound(betterproto.Message): """M013""" + pass + + +@dataclass +class DepsNoPackagesFoundMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DepsNoPackagesFound" = betterproto.message_field(2) @dataclass class DepsStartPackageInstall(betterproto.Message): """M014""" + package_name: str = betterproto.string_field(1) + + +@dataclass +class DepsStartPackageInstallMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - package_name: str = betterproto.string_field(2) + data: "DepsStartPackageInstall" = betterproto.message_field(2) @dataclass class DepsInstallInfo(betterproto.Message): """M015""" + version_name: str = betterproto.string_field(1) + + +@dataclass +class DepsInstallInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - version_name: str = betterproto.string_field(2) + data: "DepsInstallInfo" = betterproto.message_field(2) @dataclass class DepsUpdateAvailable(betterproto.Message): """M016""" + version_latest: str = betterproto.string_field(1) + + +@dataclass +class DepsUpdateAvailableMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - version_latest: str = betterproto.string_field(2) + data: "DepsUpdateAvailable" = betterproto.message_field(2) @dataclass class DepsUpToDate(betterproto.Message): """M017""" + pass + + +@dataclass +class DepsUpToDateMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DepsUpToDate" = betterproto.message_field(2) @dataclass class DepsListSubdirectory(betterproto.Message): """M018""" + subdirectory: str = betterproto.string_field(1) + + +@dataclass +class DepsListSubdirectoryMsg(betterproto.Message): info: "EventInfo" = 
betterproto.message_field(1) - subdirectory: str = betterproto.string_field(2) + data: "DepsListSubdirectory" = betterproto.message_field(2) @dataclass class DepsNotifyUpdatesAvailable(betterproto.Message): """M019""" + packages: "ListOfStrings" = betterproto.message_field(1) + + +@dataclass +class DepsNotifyUpdatesAvailableMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - packages: "ListOfStrings" = betterproto.message_field(2) + data: "DepsNotifyUpdatesAvailable" = betterproto.message_field(2) @dataclass class RetryExternalCall(betterproto.Message): """M020""" + attempt: int = betterproto.int32_field(1) + max: int = betterproto.int32_field(2) + + +@dataclass +class RetryExternalCallMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - attempt: int = betterproto.int32_field(2) - max: int = betterproto.int32_field(3) + data: "RetryExternalCall" = betterproto.message_field(2) @dataclass class RecordRetryException(betterproto.Message): """M021""" + exc: str = betterproto.string_field(1) + + +@dataclass +class RecordRetryExceptionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "RecordRetryException" = betterproto.message_field(2) @dataclass class RegistryIndexProgressGETRequest(betterproto.Message): """M022""" + url: str = betterproto.string_field(1) + + +@dataclass +class RegistryIndexProgressGETRequestMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - url: str = betterproto.string_field(2) + data: "RegistryIndexProgressGETRequest" = betterproto.message_field(2) @dataclass class RegistryIndexProgressGETResponse(betterproto.Message): """M023""" + url: str = betterproto.string_field(1) + resp_code: int = betterproto.int32_field(2) + + +@dataclass +class RegistryIndexProgressGETResponseMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - url: str = betterproto.string_field(2) - resp_code: int = betterproto.int32_field(3) + data: "RegistryIndexProgressGETResponse" = betterproto.message_field(2) @dataclass class RegistryResponseUnexpectedType(betterproto.Message): """M024""" + response: str = betterproto.string_field(1) + + +@dataclass +class RegistryResponseUnexpectedTypeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - response: str = betterproto.string_field(2) + data: "RegistryResponseUnexpectedType" = betterproto.message_field(2) @dataclass class RegistryResponseMissingTopKeys(betterproto.Message): """M025""" + response: str = betterproto.string_field(1) + + +@dataclass +class RegistryResponseMissingTopKeysMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - response: str = betterproto.string_field(2) + data: "RegistryResponseMissingTopKeys" = betterproto.message_field(2) @dataclass class RegistryResponseMissingNestedKeys(betterproto.Message): """M026""" + response: str = betterproto.string_field(1) + + +@dataclass +class RegistryResponseMissingNestedKeysMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - response: str = betterproto.string_field(2) + data: "RegistryResponseMissingNestedKeys" = betterproto.message_field(2) @dataclass class RegistryResponseExtraNestedKeys(betterproto.Message): """M027""" + response: str = betterproto.string_field(1) + + +@dataclass +class RegistryResponseExtraNestedKeysMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - response: str = betterproto.string_field(2) + data:
"RegistryResponseExtraNestedKeys" = betterproto.message_field(2) @dataclass class DepsSetDownloadDirectory(betterproto.Message): """M028""" + path: str = betterproto.string_field(1) + + +@dataclass +class DepsSetDownloadDirectoryMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "DepsSetDownloadDirectory" = betterproto.message_field(2) @dataclass class DepsUnpinned(betterproto.Message): """M029""" + revision: str = betterproto.string_field(1) + git: str = betterproto.string_field(2) + + +@dataclass +class DepsUnpinnedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - revision: str = betterproto.string_field(2) - git: str = betterproto.string_field(3) + data: "DepsUnpinned" = betterproto.message_field(2) @dataclass class NoNodesForSelectionCriteria(betterproto.Message): """M030""" + spec_raw: str = betterproto.string_field(1) + + +@dataclass +class NoNodesForSelectionCriteriaMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - spec_raw: str = betterproto.string_field(2) + data: "NoNodesForSelectionCriteria" = betterproto.message_field(2) @dataclass class RunningOperationCaughtError(betterproto.Message): """Q001""" + exc: str = betterproto.string_field(1) + + +@dataclass +class RunningOperationCaughtErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "RunningOperationCaughtError" = betterproto.message_field(2) @dataclass class CompileComplete(betterproto.Message): """Q002""" + pass + + +@dataclass +class CompileCompleteMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "CompileComplete" = betterproto.message_field(2) @dataclass class FreshnessCheckComplete(betterproto.Message): """Q003""" + pass + + +@dataclass +class FreshnessCheckCompleteMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "FreshnessCheckComplete" = betterproto.message_field(2) @dataclass class SeedHeader(betterproto.Message): """Q004""" + header: str = betterproto.string_field(1) + + +@dataclass +class SeedHeaderMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - header: str = betterproto.string_field(2) + data: "SeedHeader" = betterproto.message_field(2) @dataclass class SeedHeaderSeparator(betterproto.Message): """Q005""" + len_header: int = betterproto.int32_field(1) + + +@dataclass +class SeedHeaderSeparatorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - len_header: int = betterproto.int32_field(2) + data: "SeedHeaderSeparator" = betterproto.message_field(2) @dataclass class SQLRunnerException(betterproto.Message): """Q006""" + exc: str = betterproto.string_field(1) + exc_info: str = betterproto.string_field(2) + + +@dataclass +class SQLRunnerExceptionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) - exc_info: str = betterproto.string_field(3) + data: "SQLRunnerException" = betterproto.message_field(2) @dataclass class LogTestResult(betterproto.Message): """Q007""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + status: str = betterproto.string_field(3) + index: int = betterproto.int32_field(4) + num_models: int = betterproto.int32_field(5) + execution_time: float = betterproto.float_field(6) + num_failures: int = betterproto.int32_field(7) + + +@dataclass +class 
LogTestResultMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - num_models: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) - num_failures: int = betterproto.int32_field(8) + data: "LogTestResult" = betterproto.message_field(2) @dataclass class LogStartLine(betterproto.Message): """Q011""" + node_info: "NodeInfo" = betterproto.message_field(1) + description: str = betterproto.string_field(2) + index: int = betterproto.int32_field(3) + total: int = betterproto.int32_field(4) + + +@dataclass +class LogStartLineMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - description: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - total: int = betterproto.int32_field(5) + data: "LogStartLine" = betterproto.message_field(2) @dataclass class LogModelResult(betterproto.Message): """Q012""" + node_info: "NodeInfo" = betterproto.message_field(1) + description: str = betterproto.string_field(2) + status: str = betterproto.string_field(3) + index: int = betterproto.int32_field(4) + total: int = betterproto.int32_field(5) + execution_time: int = betterproto.int32_field(6) + + +@dataclass +class LogModelResultMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - description: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: int = betterproto.int32_field(7) + data: "LogModelResult" = betterproto.message_field(2) @dataclass class LogSnapshotResult(betterproto.Message): """Q015""" - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - description: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) + node_info: "NodeInfo" = betterproto.message_field(1) + description: str = betterproto.string_field(2) + status: str = betterproto.string_field(3) + index: int = betterproto.int32_field(4) + total: int = betterproto.int32_field(5) + execution_time: float = betterproto.float_field(6) cfg: Dict[str, str] = betterproto.map_field( - 8, betterproto.TYPE_STRING, betterproto.TYPE_STRING + 7, betterproto.TYPE_STRING, betterproto.TYPE_STRING ) +@dataclass +class LogSnapshotResultMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "LogSnapshotResult" = betterproto.message_field(2) + + @dataclass class LogSeedResult(betterproto.Message): """Q016""" + node_info: "NodeInfo" = betterproto.message_field(1) + status: str = betterproto.string_field(2) + result_message: str = betterproto.string_field(3) + index: int = betterproto.int32_field(4) + total: int = betterproto.int32_field(5) + execution_time: float = betterproto.float_field(6) + schema: str = betterproto.string_field(7) + relation: str = betterproto.string_field(8) + + +@dataclass +class LogSeedResultMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - status: str = betterproto.string_field(3) - 
result_message: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) - schema: str = betterproto.string_field(8) - relation: str = betterproto.string_field(9) + data: "LogSeedResult" = betterproto.message_field(2) @dataclass class LogFreshnessResult(betterproto.Message): """Q018""" + status: str = betterproto.string_field(1) + node_info: "NodeInfo" = betterproto.message_field(2) + index: int = betterproto.int32_field(3) + total: int = betterproto.int32_field(4) + execution_time: float = betterproto.float_field(5) + source_name: str = betterproto.string_field(6) + table_name: str = betterproto.string_field(7) + + +@dataclass +class LogFreshnessResultMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - status: str = betterproto.string_field(2) - node_info: "NodeInfo" = betterproto.message_field(3) - index: int = betterproto.int32_field(4) - total: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - source_name: str = betterproto.string_field(7) - table_name: str = betterproto.string_field(8) + data: "LogFreshnessResult" = betterproto.message_field(2) @dataclass class LogCancelLine(betterproto.Message): """Q022""" + conn_name: str = betterproto.string_field(1) + + +@dataclass +class LogCancelLineMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + data: "LogCancelLine" = betterproto.message_field(2) @dataclass class DefaultSelector(betterproto.Message): """Q023""" + name: str = betterproto.string_field(1) + + +@dataclass +class DefaultSelectorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) + data: "DefaultSelector" = betterproto.message_field(2) @dataclass class NodeStart(betterproto.Message): """Q024""" + node_info: "NodeInfo" = betterproto.message_field(1) + + +@dataclass +class NodeStartMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) + data: "NodeStart" = betterproto.message_field(2) @dataclass class NodeFinished(betterproto.Message): """Q025""" + node_info: "NodeInfo" = betterproto.message_field(1) + run_result: "RunResultMsg" = betterproto.message_field(2) + + +@dataclass +class NodeFinishedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - run_result: "RunResultMsg" = betterproto.message_field(4) + data: "NodeFinished" = betterproto.message_field(2) @dataclass class QueryCancelationUnsupported(betterproto.Message): """Q026""" + type: str = betterproto.string_field(1) + + +@dataclass +class QueryCancelationUnsupportedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - type: str = betterproto.string_field(2) + data: "QueryCancelationUnsupported" = betterproto.message_field(2) @dataclass class ConcurrencyLine(betterproto.Message): """Q027""" + num_threads: int = betterproto.int32_field(1) + target_name: str = betterproto.string_field(2) + node_count: int = betterproto.int32_field(3) + + +@dataclass +class ConcurrencyLineMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - num_threads: int = betterproto.int32_field(2) - target_name: str = betterproto.string_field(3) - node_count: int = betterproto.int32_field(4) + data: "ConcurrencyLine" = betterproto.message_field(2) 
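For orientation, here is a minimal runnable sketch (not part of this diff) of the envelope pattern every regenerated dataclass above follows: each event type now carries only its own payload fields, and a companion *Msg wrapper pairs the generic EventInfo header (field 1) with that payload (field 2). The EventInfo shown here is trimmed to two illustrative fields; the real message carries more metadata.

from dataclasses import dataclass

import betterproto


@dataclass
class EventInfo(betterproto.Message):
    # Illustrative subset of the real header fields.
    name: str = betterproto.string_field(1)
    code: str = betterproto.string_field(2)


@dataclass
class ConcurrencyLine(betterproto.Message):
    # Payload-only event type, mirroring Q027 above.
    num_threads: int = betterproto.int32_field(1)
    target_name: str = betterproto.string_field(2)
    node_count: int = betterproto.int32_field(3)


@dataclass
class ConcurrencyLineMsg(betterproto.Message):
    # The envelope: generic header at field 1, event payload at field 2.
    info: EventInfo = betterproto.message_field(1)
    data: ConcurrencyLine = betterproto.message_field(2)


# Round-trip: serialize the envelope, then parse it back.
msg = ConcurrencyLineMsg(
    info=EventInfo(name="ConcurrencyLine", code="Q027"),
    data=ConcurrencyLine(num_threads=4, target_name="dev", node_count=80),
)
assert ConcurrencyLineMsg().parse(bytes(msg)).data.num_threads == 4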
@dataclass class WritingInjectedSQLForNode(betterproto.Message): """Q029""" + node_info: "NodeInfo" = betterproto.message_field(1) + + +@dataclass +class WritingInjectedSQLForNodeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) + data: "WritingInjectedSQLForNode" = betterproto.message_field(2) @dataclass class NodeCompiling(betterproto.Message): """Q030""" + node_info: "NodeInfo" = betterproto.message_field(1) + + +@dataclass +class NodeCompilingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) + data: "NodeCompiling" = betterproto.message_field(2) @dataclass class NodeExecuting(betterproto.Message): """Q031""" + node_info: "NodeInfo" = betterproto.message_field(1) + + +@dataclass +class NodeExecutingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) + data: "NodeExecuting" = betterproto.message_field(2) @dataclass class LogHookStartLine(betterproto.Message): """Q032""" + node_info: "NodeInfo" = betterproto.message_field(1) + statement: str = betterproto.string_field(2) + index: int = betterproto.int32_field(3) + total: int = betterproto.int32_field(4) + + +@dataclass +class LogHookStartLineMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - statement: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - total: int = betterproto.int32_field(5) + data: "LogHookStartLine" = betterproto.message_field(2) @dataclass class LogHookEndLine(betterproto.Message): """Q033""" + node_info: "NodeInfo" = betterproto.message_field(1) + statement: str = betterproto.string_field(2) + status: str = betterproto.string_field(3) + index: int = betterproto.int32_field(4) + total: int = betterproto.int32_field(5) + execution_time: float = betterproto.float_field(6) + + +@dataclass +class LogHookEndLineMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - statement: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) + data: "LogHookEndLine" = betterproto.message_field(2) @dataclass class SkippingDetails(betterproto.Message): """Q034""" + node_info: "NodeInfo" = betterproto.message_field(1) + resource_type: str = betterproto.string_field(2) + schema: str = betterproto.string_field(3) + node_name: str = betterproto.string_field(4) + index: int = betterproto.int32_field(5) + total: int = betterproto.int32_field(6) + + +@dataclass +class SkippingDetailsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - resource_type: str = betterproto.string_field(3) - schema: str = betterproto.string_field(4) - node_name: str = betterproto.string_field(5) - index: int = betterproto.int32_field(6) - total: int = betterproto.int32_field(7) + data: "SkippingDetails" = betterproto.message_field(2) @dataclass class NothingToDo(betterproto.Message): """Q035""" + pass + + +@dataclass +class NothingToDoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "NothingToDo" = betterproto.message_field(2) @dataclass class 
RunningOperationUncaughtError(betterproto.Message): """Q036""" + exc: str = betterproto.string_field(1) + + +@dataclass +class RunningOperationUncaughtErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "RunningOperationUncaughtError" = betterproto.message_field(2) @dataclass class EndRunResult(betterproto.Message): """Q037""" + results: List["RunResultMsg"] = betterproto.message_field(1) + elapsed_time: float = betterproto.float_field(2) + generated_at: datetime = betterproto.message_field(3) + success: bool = betterproto.bool_field(4) + + +@dataclass +class EndRunResultMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - results: List["RunResultMsg"] = betterproto.message_field(2) - elapsed_time: float = betterproto.float_field(3) - generated_at: datetime = betterproto.message_field(4) - success: bool = betterproto.bool_field(5) + data: "EndRunResult" = betterproto.message_field(2) @dataclass class NoNodesSelected(betterproto.Message): """Q038""" + pass + + +@dataclass +class NoNodesSelectedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "NoNodesSelected" = betterproto.message_field(2) @dataclass class CatchableExceptionOnRun(betterproto.Message): """W002""" + node_info: "NodeInfo" = betterproto.message_field(1) + exc: str = betterproto.string_field(2) + exc_info: str = betterproto.string_field(3) + + +@dataclass +class CatchableExceptionOnRunMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - exc: str = betterproto.string_field(3) - exc_info: str = betterproto.string_field(4) + data: "CatchableExceptionOnRun" = betterproto.message_field(2) @dataclass -class InternalExceptionOnRun(betterproto.Message): +class InternalErrorOnRun(betterproto.Message): """W003""" + build_path: str = betterproto.string_field(1) + exc: str = betterproto.string_field(2) + + +@dataclass +class InternalErrorOnRunMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - build_path: str = betterproto.string_field(2) - exc: str = betterproto.string_field(3) + data: "InternalErrorOnRun" = betterproto.message_field(2) @dataclass class GenericExceptionOnRun(betterproto.Message): """W004""" + build_path: str = betterproto.string_field(1) + unique_id: str = betterproto.string_field(2) + exc: str = betterproto.string_field(3) + + +@dataclass +class GenericExceptionOnRunMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - build_path: str = betterproto.string_field(2) - unique_id: str = betterproto.string_field(3) - exc: str = betterproto.string_field(4) + data: "GenericExceptionOnRun" = betterproto.message_field(2) @dataclass class NodeConnectionReleaseError(betterproto.Message): """W005""" + node_name: str = betterproto.string_field(1) + exc: str = betterproto.string_field(2) + exc_info: str = betterproto.string_field(3) + + +@dataclass +class NodeConnectionReleaseErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_name: str = betterproto.string_field(2) - exc: str = betterproto.string_field(3) - exc_info: str = betterproto.string_field(4) + data: "NodeConnectionReleaseError" = betterproto.message_field(2) @dataclass class FoundStats(betterproto.Message): """W006""" + stat_line: str = betterproto.string_field(1) + + +@dataclass +class FoundStatsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - stat_line: 
str = betterproto.string_field(2) + data: "FoundStats" = betterproto.message_field(2) @dataclass class MainKeyboardInterrupt(betterproto.Message): """Z001""" + pass + + +@dataclass +class MainKeyboardInterruptMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "MainKeyboardInterrupt" = betterproto.message_field(2) @dataclass class MainEncounteredError(betterproto.Message): """Z002""" + exc: str = betterproto.string_field(1) + + +@dataclass +class MainEncounteredErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "MainEncounteredError" = betterproto.message_field(2) @dataclass class MainStackTrace(betterproto.Message): """Z003""" + stack_trace: str = betterproto.string_field(1) + + +@dataclass +class MainStackTraceMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - stack_trace: str = betterproto.string_field(2) + data: "MainStackTrace" = betterproto.message_field(2) @dataclass class SystemErrorRetrievingModTime(betterproto.Message): """Z004""" + path: str = betterproto.string_field(1) + + +@dataclass +class SystemErrorRetrievingModTimeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "SystemErrorRetrievingModTime" = betterproto.message_field(2) @dataclass class SystemCouldNotWrite(betterproto.Message): """Z005""" + path: str = betterproto.string_field(1) + reason: str = betterproto.string_field(2) + exc: str = betterproto.string_field(3) + + +@dataclass +class SystemCouldNotWriteMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) - reason: str = betterproto.string_field(3) - exc: str = betterproto.string_field(4) + data: "SystemCouldNotWrite" = betterproto.message_field(2) @dataclass class SystemExecutingCmd(betterproto.Message): """Z006""" + cmd: List[str] = betterproto.string_field(1) + + +@dataclass +class SystemExecutingCmdMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - cmd: List[str] = betterproto.string_field(2) + data: "SystemExecutingCmd" = betterproto.message_field(2) @dataclass -class SystemStdOutMsg(betterproto.Message): +class SystemStdOut(betterproto.Message): """Z007""" + bmsg: bytes = betterproto.bytes_field(1) + + +@dataclass +class SystemStdOutMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - bmsg: bytes = betterproto.bytes_field(2) + data: "SystemStdOut" = betterproto.message_field(2) @dataclass -class SystemStdErrMsg(betterproto.Message): +class SystemStdErr(betterproto.Message): """Z008""" + bmsg: bytes = betterproto.bytes_field(1) + + +@dataclass +class SystemStdErrMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - bmsg: bytes = betterproto.bytes_field(2) + data: "SystemStdErr" = betterproto.message_field(2) @dataclass class SystemReportReturnCode(betterproto.Message): """Z009""" + returncode: int = betterproto.int32_field(1) + + +@dataclass +class SystemReportReturnCodeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - returncode: int = betterproto.int32_field(2) + data: "SystemReportReturnCode" = betterproto.message_field(2) @dataclass class TimingInfoCollected(betterproto.Message): """Z010""" + node_info: "NodeInfo" = betterproto.message_field(1) + timing_info: "TimingInfoMsg" = betterproto.message_field(2) + + +@dataclass +class TimingInfoCollectedMsg(betterproto.Message): info: "EventInfo" = 
betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - timing_info: "TimingInfoMsg" = betterproto.message_field(3) + data: "TimingInfoCollected" = betterproto.message_field(2) @dataclass class LogDebugStackTrace(betterproto.Message): """Z011""" + exc_info: str = betterproto.string_field(1) + + +@dataclass +class LogDebugStackTraceMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc_info: str = betterproto.string_field(2) + data: "LogDebugStackTrace" = betterproto.message_field(2) @dataclass class CheckCleanPath(betterproto.Message): """Z012""" + path: str = betterproto.string_field(1) + + +@dataclass +class CheckCleanPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "CheckCleanPath" = betterproto.message_field(2) @dataclass class ConfirmCleanPath(betterproto.Message): """Z013""" + path: str = betterproto.string_field(1) + + +@dataclass +class ConfirmCleanPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "ConfirmCleanPath" = betterproto.message_field(2) @dataclass class ProtectedCleanPath(betterproto.Message): """Z014""" + path: str = betterproto.string_field(1) + + +@dataclass +class ProtectedCleanPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "ProtectedCleanPath" = betterproto.message_field(2) @dataclass class FinishedCleanPaths(betterproto.Message): """Z015""" + pass + + +@dataclass +class FinishedCleanPathsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "FinishedCleanPaths" = betterproto.message_field(2) @dataclass class OpenCommand(betterproto.Message): """Z016""" + open_cmd: str = betterproto.string_field(1) + profiles_dir: str = betterproto.string_field(2) + + +@dataclass +class OpenCommandMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - open_cmd: str = betterproto.string_field(2) - profiles_dir: str = betterproto.string_field(3) + data: "OpenCommand" = betterproto.message_field(2) @dataclass class EmptyLine(betterproto.Message): """Z017""" + pass + + +@dataclass +class EmptyLineMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "EmptyLine" = betterproto.message_field(2) @dataclass class ServingDocsPort(betterproto.Message): """Z018""" + address: str = betterproto.string_field(1) + port: int = betterproto.int32_field(2) + + +@dataclass +class ServingDocsPortMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - address: str = betterproto.string_field(2) - port: int = betterproto.int32_field(3) + data: "ServingDocsPort" = betterproto.message_field(2) @dataclass class ServingDocsAccessInfo(betterproto.Message): """Z019""" + port: str = betterproto.string_field(1) + + +@dataclass +class ServingDocsAccessInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - port: str = betterproto.string_field(2) + data: "ServingDocsAccessInfo" = betterproto.message_field(2) @dataclass class ServingDocsExitInfo(betterproto.Message): """Z020""" + pass + + +@dataclass +class ServingDocsExitInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "ServingDocsExitInfo" = betterproto.message_field(2) @dataclass class RunResultWarning(betterproto.Message): """Z021""" + resource_type: str = betterproto.string_field(1) + node_name: str = 
betterproto.string_field(2) + path: str = betterproto.string_field(3) + + +@dataclass +class RunResultWarningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - resource_type: str = betterproto.string_field(2) - node_name: str = betterproto.string_field(3) - path: str = betterproto.string_field(4) + data: "RunResultWarning" = betterproto.message_field(2) @dataclass class RunResultFailure(betterproto.Message): """Z022""" + resource_type: str = betterproto.string_field(1) + node_name: str = betterproto.string_field(2) + path: str = betterproto.string_field(3) + + +@dataclass +class RunResultFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - resource_type: str = betterproto.string_field(2) - node_name: str = betterproto.string_field(3) - path: str = betterproto.string_field(4) + data: "RunResultFailure" = betterproto.message_field(2) @dataclass class StatsLine(betterproto.Message): """Z023""" - info: "EventInfo" = betterproto.message_field(1) stats: Dict[str, int] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_INT32 + 1, betterproto.TYPE_STRING, betterproto.TYPE_INT32 ) +@dataclass +class StatsLineMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "StatsLine" = betterproto.message_field(2) + + @dataclass class RunResultError(betterproto.Message): """Z024""" + msg: str = betterproto.string_field(1) + + +@dataclass +class RunResultErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "RunResultError" = betterproto.message_field(2) @dataclass class RunResultErrorNoMessage(betterproto.Message): """Z025""" + status: str = betterproto.string_field(1) + + +@dataclass +class RunResultErrorNoMessageMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - status: str = betterproto.string_field(2) + data: "RunResultErrorNoMessage" = betterproto.message_field(2) @dataclass class SQLCompiledPath(betterproto.Message): """Z026""" + path: str = betterproto.string_field(1) + + +@dataclass +class SQLCompiledPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "SQLCompiledPath" = betterproto.message_field(2) @dataclass class CheckNodeTestFailure(betterproto.Message): """Z027""" + relation_name: str = betterproto.string_field(1) + + +@dataclass +class CheckNodeTestFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - relation_name: str = betterproto.string_field(2) + data: "CheckNodeTestFailure" = betterproto.message_field(2) @dataclass class FirstRunResultError(betterproto.Message): """Z028""" + msg: str = betterproto.string_field(1) + + +@dataclass +class FirstRunResultErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "FirstRunResultError" = betterproto.message_field(2) @dataclass class AfterFirstRunResultError(betterproto.Message): """Z029""" + msg: str = betterproto.string_field(1) + + +@dataclass +class AfterFirstRunResultErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "AfterFirstRunResultError" = betterproto.message_field(2) @dataclass class EndOfRunSummary(betterproto.Message): """Z030""" + num_errors: int = betterproto.int32_field(1) + num_warnings: int = betterproto.int32_field(2) + keyboard_interrupt: bool = betterproto.bool_field(3) 
+ + +@dataclass +class EndOfRunSummaryMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - num_errors: int = betterproto.int32_field(2) - num_warnings: int = betterproto.int32_field(3) - keyboard_interrupt: bool = betterproto.bool_field(4) + data: "EndOfRunSummary" = betterproto.message_field(2) @dataclass class LogSkipBecauseError(betterproto.Message): """Z034""" + schema: str = betterproto.string_field(1) + relation: str = betterproto.string_field(2) + index: int = betterproto.int32_field(3) + total: int = betterproto.int32_field(4) + + +@dataclass +class LogSkipBecauseErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - schema: str = betterproto.string_field(2) - relation: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - total: int = betterproto.int32_field(5) + data: "LogSkipBecauseError" = betterproto.message_field(2) @dataclass class EnsureGitInstalled(betterproto.Message): """Z036""" + pass + + +@dataclass +class EnsureGitInstalledMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "EnsureGitInstalled" = betterproto.message_field(2) @dataclass class DepsCreatingLocalSymlink(betterproto.Message): """Z037""" + pass + + +@dataclass +class DepsCreatingLocalSymlinkMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DepsCreatingLocalSymlink" = betterproto.message_field(2) @dataclass class DepsSymlinkNotAvailable(betterproto.Message): """Z038""" + pass + + +@dataclass +class DepsSymlinkNotAvailableMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DepsSymlinkNotAvailable" = betterproto.message_field(2) @dataclass class DisableTracking(betterproto.Message): """Z039""" + pass + + +@dataclass +class DisableTrackingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DisableTracking" = betterproto.message_field(2) @dataclass class SendingEvent(betterproto.Message): """Z040""" + kwargs: str = betterproto.string_field(1) + + +@dataclass +class SendingEventMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - kwargs: str = betterproto.string_field(2) + data: "SendingEvent" = betterproto.message_field(2) @dataclass class SendEventFailure(betterproto.Message): """Z041""" + pass + + +@dataclass +class SendEventFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "SendEventFailure" = betterproto.message_field(2) @dataclass class FlushEvents(betterproto.Message): """Z042""" + pass + + +@dataclass +class FlushEventsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "FlushEvents" = betterproto.message_field(2) @dataclass class FlushEventsFailure(betterproto.Message): """Z043""" + pass + + +@dataclass +class FlushEventsFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "FlushEventsFailure" = betterproto.message_field(2) @dataclass class TrackingInitializeFailure(betterproto.Message): """Z044""" + exc_info: str = betterproto.string_field(1) + + +@dataclass +class TrackingInitializeFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc_info: str = betterproto.string_field(2) + data: "TrackingInitializeFailure" = betterproto.message_field(2) @dataclass class RunResultWarningMessage(betterproto.Message): """Z046""" + msg: str = betterproto.string_field(1) + + +@dataclass +class RunResultWarningMessageMsg(betterproto.Message): info: 
"EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "RunResultWarningMessage" = betterproto.message_field(2) @dataclass @@ -2132,45 +2963,75 @@ class ListRunDetails(betterproto.Message): class IntegrationTestInfo(betterproto.Message): """T001""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestInfo" = betterproto.message_field(2) @dataclass class IntegrationTestDebug(betterproto.Message): """T002""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestDebugMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestDebug" = betterproto.message_field(2) @dataclass class IntegrationTestWarn(betterproto.Message): """T003""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestWarnMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestWarn" = betterproto.message_field(2) @dataclass class IntegrationTestError(betterproto.Message): """T004""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestError" = betterproto.message_field(2) @dataclass class IntegrationTestException(betterproto.Message): """T005""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestExceptionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestException" = betterproto.message_field(2) @dataclass class UnitTestInfo(betterproto.Message): """T006""" + msg: str = betterproto.string_field(1) + + +@dataclass +class UnitTestInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "UnitTestInfo" = betterproto.message_field(2) diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index fb3960a2530..059eb4b2a09 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -35,6 +35,7 @@ message NodeInfo { string node_status = 6; string node_started_at = 7; string node_finished_at = 8; + map meta = 9; } // RunResult @@ -69,617 +70,773 @@ message GenericMessage { // A001 message MainReportVersion { + string version = 1; + int32 log_version = 2; +} + +message MainReportVersionMsg { EventInfo info = 1; - string version = 2; - int32 log_version = 3; + MainReportVersion data = 2; } // A002 message MainReportArgs { + map args = 1; +} + +message MainReportArgsMsg { EventInfo info = 1; - map args = 2; + MainReportArgs data = 2; } // A003 message MainTrackingUserState { + string user_state = 1; +} + +message MainTrackingUserStateMsg { EventInfo info = 1; - string user_state = 2; + MainTrackingUserState data = 2; } // A004 message MergedFromState { + int32 num_merged = 1; + repeated string sample = 2; +} + +message MergedFromStateMsg { EventInfo info = 1; - int32 num_merged = 2; - repeated string sample = 3; + MergedFromState data = 2; } // A005 message MissingProfileTarget { + string profile_name = 1; + string target_name = 2; +} + +message MissingProfileTargetMsg { EventInfo info = 1; - string profile_name = 2; - string target_name = 3; 
+ MissingProfileTarget data = 2; } // Skipped A006, A007 // A008 -message InvalidVarsYAML { - EventInfo info = 1; +message InvalidOptionYAML { + string option_name = 1; } -// A009 -message DbtProjectError { +message InvalidOptionYAMLMsg { EventInfo info = 1; + InvalidOptionYAML data = 2; } -// A010 -message DbtProjectErrorException { - EventInfo info = 1; - string exc = 2; +// A009 +message LogDbtProjectError { + string exc = 1; } -// A011 -message DbtProfileError { +message LogDbtProjectErrorMsg { EventInfo info = 1; + LogDbtProjectError data = 2; } -// A012 -message DbtProfileErrorException { - EventInfo info = 1; - string exc = 2; -} +// Skipped A010 -// A013 -message ProfileListTitle { - EventInfo info = 1; -} - -// A014 -message ListSingleProfile { - EventInfo info = 1; - string profile = 2; +// A011 +message LogDbtProfileError { + string exc = 1; + repeated string profiles = 2; } -// A015 -message NoDefinedProfiles { +message LogDbtProfileErrorMsg { EventInfo info = 1; + LogDbtProfileError data = 2; } -// A016 -message ProfileHelpMessage { - EventInfo info = 1; -} +// Skipped A012, A013, A014, A015, A016 // A017 message StarterProjectPath { + string dir = 1; +} + +message StarterProjectPathMsg { EventInfo info = 1; - string dir = 2; + StarterProjectPath data = 2; } // A018 message ConfigFolderDirectory { + string dir = 1; +} + +message ConfigFolderDirectoryMsg { EventInfo info = 1; - string dir = 2; + ConfigFolderDirectory data = 2; } // A019 message NoSampleProfileFound { + string adapter = 1; +} + +message NoSampleProfileFoundMsg { EventInfo info = 1; - string adapter = 2; + NoSampleProfileFound data = 2; } // A020 message ProfileWrittenWithSample { + string name = 1; + string path = 2; +} + +message ProfileWrittenWithSampleMsg { EventInfo info = 1; - string name = 2; - string path = 3; + ProfileWrittenWithSample data = 2; } // A021 message ProfileWrittenWithTargetTemplateYAML { + string name = 1; + string path = 2; +} + +message ProfileWrittenWithTargetTemplateYAMLMsg { EventInfo info = 1; - string name = 2; - string path = 3; + ProfileWrittenWithTargetTemplateYAML data = 2; } // A022 message ProfileWrittenWithProjectTemplateYAML { + string name = 1; + string path = 2; +} + +message ProfileWrittenWithProjectTemplateYAMLMsg { EventInfo info = 1; - string name = 2; - string path = 3; + ProfileWrittenWithProjectTemplateYAML data = 2; } // A023 message SettingUpProfile { +} + +message SettingUpProfileMsg { EventInfo info = 1; + SettingUpProfile data = 2; } // A024 message InvalidProfileTemplateYAML { +} + +message InvalidProfileTemplateYAMLMsg { EventInfo info = 1; + InvalidProfileTemplateYAML data = 2; } // A025 message ProjectNameAlreadyExists { + string name = 1; +} + +message ProjectNameAlreadyExistsMsg { EventInfo info = 1; - string name = 2; + ProjectNameAlreadyExists data = 2; } // A026 message ProjectCreated { + string project_name = 1; + string docs_url = 2; + string slack_url = 3; +} + +message ProjectCreatedMsg { EventInfo info = 1; - string project_name = 2; - string docs_url = 3; - string slack_url = 4; + ProjectCreated data = 2; } // D - Deprecation // D001 message PackageRedirectDeprecation { + string old_name = 1; + string new_name = 2; +} + +message PackageRedirectDeprecationMsg { EventInfo info = 1; - string old_name = 2; - string new_name = 3; + PackageRedirectDeprecation data = 2; } // D002 message PackageInstallPathDeprecation { +} + +message PackageInstallPathDeprecationMsg { EventInfo info = 1; + PackageInstallPathDeprecation data = 2; } // D003 message
ConfigSourcePathDeprecation { + string deprecated_path = 1; + string exp_path = 2; +} + +message ConfigSourcePathDeprecationMsg { EventInfo info = 1; - string deprecated_path = 2; - string exp_path = 3; + ConfigSourcePathDeprecation data = 2; } // D004 message ConfigDataPathDeprecation { + string deprecated_path = 1; + string exp_path = 2; +} + +message ConfigDataPathDeprecationMsg { EventInfo info = 1; - string deprecated_path = 2; - string exp_path = 3; + ConfigDataPathDeprecation data = 2; } //D005 message AdapterDeprecationWarning { + string old_name = 1; + string new_name = 2; +} + +message AdapterDeprecationWarningMsg { EventInfo info = 1; - string old_name = 2; - string new_name = 3; + AdapterDeprecationWarning data = 2; } //D006 message MetricAttributesRenamed { + string metric_name = 1; +} + +message MetricAttributesRenamedMsg { EventInfo info = 1; - string metric_name = 2; + MetricAttributesRenamed data = 2; } //D007 message ExposureNameDeprecation { + string exposure = 1; +} + +message ExposureNameDeprecationMsg { + EventInfo info = 1; + ExposureNameDeprecation data = 2; +} + +//D008 +message InternalDeprecation { + string name = 1; + string reason = 2; + string suggested_action = 3; + string version = 4; +} + +message InternalDeprecationMsg { EventInfo info = 1; - string exposure = 2; + InternalDeprecation data = 2; } // E - DB Adapter // E001 message AdapterEventDebug { + NodeInfo node_info = 1; + string name = 2; + string base_msg = 3; + repeated string args = 4; +} + +message AdapterEventDebugMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string base_msg = 4; - repeated string args = 5; + AdapterEventDebug data = 2; } // E002 message AdapterEventInfo { + NodeInfo node_info = 1; + string name = 2; + string base_msg = 3; + repeated string args = 4; +} + +message AdapterEventInfoMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string base_msg = 4; - repeated string args = 5; + AdapterEventInfo data = 2; } // E003 message AdapterEventWarning { + NodeInfo node_info = 1; + string name = 2; + string base_msg = 3; + repeated string args = 4; +} + +message AdapterEventWarningMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string base_msg = 4; - repeated string args = 5; + AdapterEventWarning data = 2; } // E004 message AdapterEventError { + NodeInfo node_info = 1; + string name = 2; + string base_msg = 3; + repeated string args = 4; + string exc_info = 5; +} + +message AdapterEventErrorMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string base_msg = 4; - repeated string args = 5; - string exc_info = 6; + AdapterEventError data = 2; } // E005 message NewConnection { + NodeInfo node_info = 1; + string conn_type = 2; + string conn_name = 3; +} + +message NewConnectionMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_type = 3; - string conn_name = 4; + NewConnection data = 2; } // E006 message ConnectionReused { + string conn_name = 1; + string orig_conn_name = 2; +} + +message ConnectionReusedMsg { EventInfo info = 1; - string conn_name = 2; + ConnectionReused data = 2; } // E007 message ConnectionLeftOpenInCleanup { + string conn_name = 1; +} + +message ConnectionLeftOpenInCleanupMsg { EventInfo info = 1; - string conn_name = 2; + ConnectionLeftOpenInCleanup data = 2; } // E008 message ConnectionClosedInCleanup { + string conn_name = 1; +} + +message ConnectionClosedInCleanupMsg { EventInfo info = 1; - string conn_name = 2; + ConnectionClosedInCleanup data = 2; } // E009 message
RollbackFailed { + NodeInfo node_info = 1; + string conn_name = 2; + string exc_info = 3; +} + +message RollbackFailedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; - string exc_info = 4; + RollbackFailed data = 2; } // E010 message ConnectionClosed { + NodeInfo node_info = 1; + string conn_name = 2; +} + +message ConnectionClosedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; + ConnectionClosed data = 2; } // E011 message ConnectionLeftOpen { + NodeInfo node_info = 1; + string conn_name = 2; +} + +message ConnectionLeftOpenMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; + ConnectionLeftOpen data = 2; } // E012 message Rollback { + NodeInfo node_info = 1; + string conn_name = 2; +} + +message RollbackMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; + Rollback data = 2; } // E013 message CacheMiss { + string conn_name = 1; + string database = 2; + string schema = 3; +} + +message CacheMissMsg { EventInfo info = 1; - string conn_name = 2; - string database = 3; - string schema = 4; + CacheMiss data = 2; } // E014 message ListRelations { + string database = 1; + string schema = 2; + repeated ReferenceKeyMsg relations = 3; +} + +message ListRelationsMsg { EventInfo info = 1; - string database = 2; - string schema = 3; - repeated ReferenceKeyMsg relations = 4; + ListRelations data = 2; } // E015 message ConnectionUsed { + NodeInfo node_info = 1; + string conn_type = 2; + string conn_name = 3; +} + +message ConnectionUsedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_type = 3; - string conn_name = 4; + ConnectionUsed data = 2; } // E016 message SQLQuery { - EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; - string sql = 4; + NodeInfo node_info = 1; + string conn_name = 2; + string sql = 3; } -// E017 -message SQLQueryStatus { +message SQLQueryMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; - float elapsed = 4; + SQLQuery data = 2; } -// E018 -message SQLCommit { - EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; +// E017 +message SQLQueryStatus { + NodeInfo node_info = 1; + string status = 2; + float elapsed = 3; } -// E019 -message ColTypeChange { +message SQLQueryStatusMsg { EventInfo info = 1; - string orig_type = 2; - string new_type = 3; - ReferenceKeyMsg table = 4; + SQLQueryStatus data = 2; } -// E020 -message SchemaCreation { - EventInfo info = 1; - ReferenceKeyMsg relation = 2; +// E018 +message SQLCommit { + NodeInfo node_info = 1; + string conn_name = 2; } -// E021 -message SchemaDrop { +message SQLCommitMsg { EventInfo info = 1; - ReferenceKeyMsg relation = 2; + SQLCommit data = 2; } -// E022 -message UncachedRelation { - EventInfo info = 1; - ReferenceKeyMsg dep_key = 2; - ReferenceKeyMsg ref_key = 3; +// E019 +message ColTypeChange { + string orig_type = 1; + string new_type = 2; + ReferenceKeyMsg table = 3; } -// E023 -message AddLink { +message ColTypeChangeMsg { EventInfo info = 1; - ReferenceKeyMsg dep_key = 2; - ReferenceKeyMsg ref_key = 3; + ColTypeChange data = 2; } -// E024 -message AddRelation { - EventInfo info = 1; - ReferenceKeyMsg relation = 2; +// E020 +message SchemaCreation { + ReferenceKeyMsg relation = 1; } -// E025 -message DropMissingRelation { +message SchemaCreationMsg { EventInfo info = 1; - ReferenceKeyMsg relation = 2; + SchemaCreation data = 2; } -// E026 -message DropCascade { - EventInfo info = 1; - ReferenceKeyMsg dropped = 2; - repeated ReferenceKeyMsg 
consequences = 3; +// E021 +message SchemaDrop { + ReferenceKeyMsg relation = 1; } -// E027 -message DropRelation { +message SchemaDropMsg { EventInfo info = 1; - ReferenceKeyMsg dropped = 2; + SchemaDrop data = 2; } -// E028 -message UpdateReference { - EventInfo info = 1; - ReferenceKeyMsg old_key = 2; - ReferenceKeyMsg new_key = 3; - ReferenceKeyMsg cached_key = 4; +// E022 +message CacheAction { + string action = 1; + ReferenceKeyMsg ref_key = 2; + ReferenceKeyMsg ref_key_2 = 3; + ReferenceKeyMsg ref_key_3 = 4; + repeated ReferenceKeyMsg ref_list = 5; } -// E029 -message TemporaryRelation { +message CacheActionMsg { EventInfo info = 1; - ReferenceKeyMsg key = 2; + CacheAction data = 2; } -// E030 -message RenameSchema { - EventInfo info = 1; - ReferenceKeyMsg old_key = 2; - ReferenceKeyMsg new_key = 3; -} +// Skipping E023, E024, E025, E026, E027, E028, E029, E030 // E031 -message DumpBeforeAddGraph { - EventInfo info = 1; - map<string, string> dump = 2; +message CacheDumpGraph { + map<string, string> dump = 1; + string before_after = 2; + string action = 3; } -// E032 -message DumpAfterAddGraph { +message CacheDumpGraphMsg { EventInfo info = 1; - map<string, string> dump = 2; + CacheDumpGraph data = 2; } -// E033 -message DumpBeforeRenameSchema { - EventInfo info = 1; - map<string, string> dump = 2; -} -// E034 -message DumpAfterRenameSchema { - EventInfo info = 1; - map<string, string> dump = 2; -} +// Skipping E032, E033, E034 // E035 message AdapterImportError { + string exc = 1; +} + +message AdapterImportErrorMsg { EventInfo info = 1; - string exc = 2; + AdapterImportError data = 2; } // E036 message PluginLoadError { + string exc_info = 1; +} + +message PluginLoadErrorMsg { EventInfo info = 1; - string exc_info = 2; + PluginLoadError data = 2; } // E037 message NewConnectionOpening { + NodeInfo node_info = 1; + string connection_state = 2; +} + +message NewConnectionOpeningMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string connection_state = 3; + NewConnectionOpening data = 2; } // E038 message CodeExecution { + string conn_name = 1; + string code_content = 2; +} + +message CodeExecutionMsg { EventInfo info = 1; - string conn_name = 2; - string code_content = 3; + CodeExecution data = 2; } // E039 message CodeExecutionStatus { + string status = 1; + float elapsed = 2; +} + +message CodeExecutionStatusMsg { EventInfo info = 1; - string status = 2; - float elapsed = 3; + CodeExecutionStatus data = 2; } // E040 message CatalogGenerationError { + string exc = 1; +} + +message CatalogGenerationErrorMsg { EventInfo info = 1; - string exc = 2; + CatalogGenerationError data = 2; } // E041 message WriteCatalogFailure { + int32 num_exceptions = 1; +} + +message WriteCatalogFailureMsg { EventInfo info = 1; - int32 num_exceptions = 2; + WriteCatalogFailure data = 2; } // E042 message CatalogWritten { + string path = 1; +} + +message CatalogWrittenMsg { EventInfo info = 1; - string path = 2; + CatalogWritten data = 2; } // E043 message CannotGenerateDocs { +} + +message CannotGenerateDocsMsg { EventInfo info = 1; + CannotGenerateDocs data = 2; } // E044 message BuildingCatalog { - EventInfo info = 1; } -// E045 -message DatabaseErrorRunningHook { +message BuildingCatalogMsg { EventInfo info = 1; - string hook_type = 2; + BuildingCatalog data = 2; } -// E046 -message HooksRunning { - EventInfo info = 1; - int32 num_hooks = 2; - string hook_type = 3; +// E045 +message DatabaseErrorRunningHook { + string hook_type = 1; } -// E047 -message HookFinished { +message DatabaseErrorRunningHookMsg { EventInfo info = 1; - string stat_line = 2; - string execution = 3; -
float execution_time = 4; + DatabaseErrorRunningHook data = 2; } - -// I - Project parsing - -// I001 -message ParseCmdStart { - EventInfo info = 1; +// E046 +message HooksRunning { + int32 num_hooks = 1; + string hook_type = 2; } -// I002 -message ParseCmdCompiling { +message HooksRunningMsg { EventInfo info = 1; + HooksRunning data = 2; } -// I003 -message ParseCmdWritingManifest { - EventInfo info = 1; +// E047 +message FinishedRunningStats { + string stat_line = 1; + string execution = 2; + float execution_time = 3; } -// I004 -message ParseCmdDone { +message FinishedRunningStatsMsg { EventInfo info = 1; + FinishedRunningStats data = 2; } -// I005 -message ManifestDependenciesLoaded { - EventInfo info = 1; -} -// I006 -message ManifestLoaderCreated { - EventInfo info = 1; -} +// I - Project parsing -// I007 -message ManifestLoaded { - EventInfo info = 1; +// I001 +message ParseCmdOut { + string msg = 1; } -// I008 -message ManifestChecked { +message ParseCmdOutMsg { EventInfo info = 1; + ParseCmdOut data = 2; } -// I009 -message ManifestFlatGraphBuilt { - EventInfo info = 1; -} +// Skipping I002, I003, I004, I005, I006, I007, I008, I009, I010 -// I010 -message ParseCmdPerfInfoPath { - EventInfo info = 1; - string path = 2; -} // I011 message GenericTestFileParse { + string path = 1; +} + +message GenericTestFileParseMsg { EventInfo info = 1; - string path = 2; + GenericTestFileParse data = 2; } // I012 message MacroFileParse { + string path = 1; +} + +message MacroFileParseMsg { EventInfo info = 1; - string path = 2; + MacroFileParse data = 2; } // Skipping I013 // I014 -message PartialParsingExceptionProcessingFile { +message PartialParsingErrorProcessingFile { + string file = 1; +} + +message PartialParsingErrorProcessingFileMsg { EventInfo info = 1; - string file = 2; + PartialParsingErrorProcessingFile data = 2; } // I016 -message PartialParsingException { +message PartialParsingError { + map<string, string> exc_info = 1; +} + +message PartialParsingErrorMsg { EventInfo info = 1; - map<string, string> exc_info = 2; + PartialParsingError data = 2; } // I017 message PartialParsingSkipParsing { - EventInfo info = 1; } +message PartialParsingSkipParsingMsg { + EventInfo info = 1; + PartialParsingSkipParsing data = 2; +} // Skipped I018, I019, I020, I021, I022, I023 - // I024 message UnableToPartialParse { + string reason = 1; +} + +message UnableToPartialParseMsg { EventInfo info = 1; - string reason = 2; + UnableToPartialParse data = 2; } // Skipped I025, I026, I027 @@ -687,15 +844,23 @@ message UnableToPartialParse { // I028 message PartialParsingNotEnabled { +} + +message PartialParsingNotEnabledMsg { EventInfo info = 1; + PartialParsingNotEnabled data = 2; } // I029 message ParsedFileLoadFailed { + string path = 1; + string exc = 2; + string exc_info = 3; +} + +message ParsedFileLoadFailedMsg { EventInfo info = 1; - string path = 2; - string exc = 3; - string exc_info = 4; + ParsedFileLoadFailed data = 2; } // Skipping I030 @@ -703,406 +868,646 @@ message ParsedFileLoadFailed { // I031 message StaticParserCausedJinjaRendering { + string path = 1; +} + +message StaticParserCausedJinjaRenderingMsg { EventInfo info = 1; - string path = 2; + StaticParserCausedJinjaRendering data = 2; } // I032 message UsingExperimentalParser { + string path = 1; +} + +message UsingExperimentalParserMsg { EventInfo info = 1; - string path = 2; + UsingExperimentalParser data = 2; } // I033 message SampleFullJinjaRendering { + string path = 1; +} + +message SampleFullJinjaRenderingMsg { EventInfo info = 1; - string path = 2; +
SampleFullJinjaRendering data = 2; } // I034 message StaticParserFallbackJinjaRendering { + string path = 1; +} + +message StaticParserFallbackJinjaRenderingMsg { EventInfo info = 1; - string path = 2; + StaticParserFallbackJinjaRendering data = 2; } // I035 message StaticParsingMacroOverrideDetected { + string path = 1; +} + +message StaticParsingMacroOverrideDetectedMsg { EventInfo info = 1; - string path = 2; + StaticParsingMacroOverrideDetected data = 2; } // I036 message StaticParserSuccess { + string path = 1; +} + +message StaticParserSuccessMsg { EventInfo info = 1; - string path = 2; + StaticParserSuccess data = 2; } // I037 message StaticParserFailure { + string path = 1; +} + +message StaticParserFailureMsg { EventInfo info = 1; - string path = 2; + StaticParserFailure data = 2; } // I038 message ExperimentalParserSuccess { + string path = 1; +} + +message ExperimentalParserSuccessMsg { EventInfo info = 1; - string path = 2; + ExperimentalParserSuccess data = 2; } // I039 message ExperimentalParserFailure { + string path = 1; +} + +message ExperimentalParserFailureMsg { EventInfo info = 1; - string path = 2; + ExperimentalParserFailure data = 2; } // I040 message PartialParsingEnabled { + int32 deleted = 1; + int32 added = 2; + int32 changed = 3; +} + +message PartialParsingEnabledMsg { EventInfo info = 1; - int32 deleted = 2; - int32 added = 3; - int32 changed = 4; + PartialParsingEnabled data = 2; } // I041 message PartialParsingFile { + string file_id = 1; + string operation = 2; +} + +message PartialParsingFileMsg { EventInfo info = 1; - string file_id = 2; - string operation = 3; + PartialParsingFile data = 2; } // Skipped I042, I043, I044, I045, I046, I047, I048, I049 // I050 message InvalidDisabledTargetInTestNode { + string resource_type_title = 1; + string unique_id = 2; + string original_file_path = 3; + string target_kind = 4; + string target_name = 5; + string target_package = 6; +} + +message InvalidDisabledTargetInTestNodeMsg { EventInfo info = 1; - string resource_type_title = 2; - string unique_id = 3; - string original_file_path = 4; - string target_kind = 5; - string target_name = 6; - string target_package = 7; + InvalidDisabledTargetInTestNode data = 2; } // I051 message UnusedResourceConfigPath { + repeated string unused_config_paths = 1; +} + +message UnusedResourceConfigPathMsg { EventInfo info = 1; - repeated string unused_config_paths = 2; + UnusedResourceConfigPath data = 2; } // I052 message SeedIncreased { + string package_name = 1; + string name = 2; +} + +message SeedIncreasedMsg { EventInfo info = 1; - string package_name = 2; - string name = 3; + SeedIncreased data = 2; } // I053 message SeedExceedsLimitSamePath { + string package_name = 1; + string name = 2; +} + +message SeedExceedsLimitSamePathMsg { EventInfo info = 1; - string package_name = 2; - string name = 3; + SeedExceedsLimitSamePath data = 2; } // I054 message SeedExceedsLimitAndPathChanged { + string package_name = 1; + string name = 2; +} + +message SeedExceedsLimitAndPathChangedMsg { EventInfo info = 1; - string package_name = 2; - string name = 3; + SeedExceedsLimitAndPathChanged data = 2; } // I055 message SeedExceedsLimitChecksumChanged { + string package_name = 1; + string name = 2; + string checksum_name = 3; +} + +message SeedExceedsLimitChecksumChangedMsg { EventInfo info = 1; - string package_name = 2; - string name = 3; - string checksum_name = 4; + SeedExceedsLimitChecksumChanged data = 2; } // I056 message UnusedTables { + repeated string unused_tables = 1; +} + +message 
UnusedTablesMsg { EventInfo info = 1; - repeated string unused_tables = 2; + UnusedTables data = 2; } // I057 message WrongResourceSchemaFile { + string patch_name = 1; + string resource_type = 2; + string plural_resource_type = 3; + string yaml_key = 4; + string file_path = 5; +} + +message WrongResourceSchemaFileMsg { EventInfo info = 1; - string patch_name = 2; - string resource_type = 3; - string plural_resource_type = 4; - string yaml_key = 5; - string file_path = 6; + WrongResourceSchemaFile data = 2; } // I058 message NoNodeForYamlKey { + string patch_name = 1; + string yaml_key = 2; + string file_path = 3; +} + +message NoNodeForYamlKeyMsg { EventInfo info = 1; - string patch_name = 2; - string yaml_key = 3; - string file_path = 4; + NoNodeForYamlKey data = 2; } // I059 -message MacroPatchNotFound { +message MacroNotFoundForPatch { + string patch_name = 1; +} + +message MacroNotFoundForPatchMsg { EventInfo info = 1; - string patch_name = 2; + MacroNotFoundForPatch data = 2; } // I060 message NodeNotFoundOrDisabled { + string original_file_path = 1; + string unique_id = 2; + string resource_type_title = 3; + string target_name = 4; + string target_kind = 5; + string target_package = 6; + string disabled = 7; +} + +message NodeNotFoundOrDisabledMsg { EventInfo info = 1; - string original_file_path = 2; - string unique_id = 3; - string resource_type_title = 4; - string target_name = 5; - string target_kind = 6; - string target_package = 7; - string disabled = 8; + NodeNotFoundOrDisabled data = 2; } // I061 message JinjaLogWarning { + NodeInfo node_info = 1; + string msg = 2; +} + +message JinjaLogWarningMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string msg = 3; + JinjaLogWarning data = 2; } // M - Deps generation // M001 message GitSparseCheckoutSubdirectory { + string subdir = 1; +} + +message GitSparseCheckoutSubdirectoryMsg { EventInfo info = 1; - string subdir = 2; + GitSparseCheckoutSubdirectory data = 2; } // M002 message GitProgressCheckoutRevision { + string revision = 1; +} + +message GitProgressCheckoutRevisionMsg { EventInfo info = 1; - string revision = 2; + GitProgressCheckoutRevision data = 2; } // M003 message GitProgressUpdatingExistingDependency { + string dir = 1; +} + +message GitProgressUpdatingExistingDependencyMsg { EventInfo info = 1; - string dir = 2; + GitProgressUpdatingExistingDependency data = 2; } // M004 message GitProgressPullingNewDependency { + string dir = 1; +} + +message GitProgressPullingNewDependencyMsg { EventInfo info = 1; - string dir = 2; + GitProgressPullingNewDependency data = 2; } // M005 message GitNothingToDo { + string sha = 1; +} + +message GitNothingToDoMsg { EventInfo info = 1; - string sha = 2; + GitNothingToDo data = 2; } // M006 message GitProgressUpdatedCheckoutRange { + string start_sha = 1; + string end_sha = 2; +} + +message GitProgressUpdatedCheckoutRangeMsg { EventInfo info = 1; - string start_sha = 2; - string end_sha = 3; + GitProgressUpdatedCheckoutRange data = 2; } // M007 message GitProgressCheckedOutAt { + string end_sha = 1; +} + +message GitProgressCheckedOutAtMsg { EventInfo info = 1; - string end_sha = 2; + GitProgressCheckedOutAt data = 2; } // M008 message RegistryProgressGETRequest { + string url = 1; +} + +message RegistryProgressGETRequestMsg { EventInfo info = 1; - string url = 2; + RegistryProgressGETRequest data = 2; } // M009 message RegistryProgressGETResponse { + string url = 1; + int32 resp_code = 2; +} + +message RegistryProgressGETResponseMsg { EventInfo info = 1; - string url = 2; - int32 
resp_code = 3; + RegistryProgressGETResponse data = 2; } // M010 message SelectorReportInvalidSelector { + string valid_selectors = 1; + string spec_method = 2; + string raw_spec = 3; +} + +message SelectorReportInvalidSelectorMsg { EventInfo info = 1; - string valid_selectors = 2; - string spec_method = 3; - string raw_spec = 4; + SelectorReportInvalidSelector data = 2; } // M011 message JinjaLogInfo { + NodeInfo node_info = 1; + string msg = 2; +} + +message JinjaLogInfoMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string msg = 3; + JinjaLogInfo data = 2; } // M012 message JinjaLogDebug { + NodeInfo node_info = 1; + string msg = 2; +} + +message JinjaLogDebugMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string msg = 3; + JinjaLogDebug data = 2; } // M013 message DepsNoPackagesFound { +} + +message DepsNoPackagesFoundMsg { EventInfo info = 1; + DepsNoPackagesFound data = 2; } // M014 message DepsStartPackageInstall { + string package_name = 1; +} + +message DepsStartPackageInstallMsg { EventInfo info = 1; - string package_name = 2; + DepsStartPackageInstall data = 2; } // M015 message DepsInstallInfo { + string version_name = 1; +} + +message DepsInstallInfoMsg { EventInfo info = 1; - string version_name = 2; + DepsInstallInfo data = 2; } // M016 message DepsUpdateAvailable { + string version_latest = 1; +} + +message DepsUpdateAvailableMsg { EventInfo info = 1; - string version_latest = 2; + DepsUpdateAvailable data = 2; } // M017 message DepsUpToDate { +} + +message DepsUpToDateMsg { EventInfo info = 1; + DepsUpToDate data = 2; } // M018 message DepsListSubdirectory { + string subdirectory = 1; +} + +message DepsListSubdirectoryMsg { EventInfo info = 1; - string subdirectory = 2; + DepsListSubdirectory data = 2; } // M019 message DepsNotifyUpdatesAvailable { + ListOfStrings packages = 1; +} + +message DepsNotifyUpdatesAvailableMsg { EventInfo info = 1; - ListOfStrings packages = 2; + DepsNotifyUpdatesAvailable data = 2; } // M020 message RetryExternalCall { + int32 attempt = 1; + int32 max = 2; +} + +message RetryExternalCallMsg { EventInfo info = 1; - int32 attempt = 2; - int32 max = 3; + RetryExternalCall data = 2; } // M021 message RecordRetryException { + string exc = 1; +} + +message RecordRetryExceptionMsg { EventInfo info = 1; - string exc = 2; + RecordRetryException data = 2; } // M022 message RegistryIndexProgressGETRequest { + string url = 1; +} + +message RegistryIndexProgressGETRequestMsg { EventInfo info = 1; - string url = 2; + RegistryIndexProgressGETRequest data = 2; } // M023 message RegistryIndexProgressGETResponse { + string url = 1; + int32 resp_code = 2; +} + +message RegistryIndexProgressGETResponseMsg { EventInfo info = 1; - string url = 2; - int32 resp_code = 3; + RegistryIndexProgressGETResponse data = 2; } // M024 message RegistryResponseUnexpectedType { + string response = 1; +} + +message RegistryResponseUnexpectedTypeMsg { EventInfo info = 1; - string response = 2; + RegistryResponseUnexpectedType data = 2; } // M025 message RegistryResponseMissingTopKeys { + string response = 1; +} + +message RegistryResponseMissingTopKeysMsg { EventInfo info = 1; - string response = 2; + RegistryResponseMissingTopKeys data = 2; } // M026 message RegistryResponseMissingNestedKeys { + string response = 1; +} + +message RegistryResponseMissingNestedKeysMsg { EventInfo info = 1; - string response = 2; + RegistryResponseMissingNestedKeys data = 2; } // m027 message RegistryResponseExtraNestedKeys { + string response = 1; +} + +message 
RegistryResponseExtraNestedKeysMsg { EventInfo info = 1; - string response = 2; + RegistryResponseExtraNestedKeys data = 2; } // M028 message DepsSetDownloadDirectory { + string path = 1; +} + +message DepsSetDownloadDirectoryMsg { EventInfo info = 1; - string path = 2; + DepsSetDownloadDirectory data = 2; } // M029 message DepsUnpinned { + string revision = 1; + string git = 2; +} + +message DepsUnpinnedMsg { EventInfo info = 1; - string revision = 2; - string git = 3; + DepsUnpinned data = 2; } // M030 message NoNodesForSelectionCriteria { + string spec_raw = 1; +} + +message NoNodesForSelectionCriteriaMsg { EventInfo info = 1; - string spec_raw = 2; + NoNodesForSelectionCriteria data = 2; } // Q - Node execution // Q001 message RunningOperationCaughtError { + string exc = 1; +} + +message RunningOperationCaughtErrorMsg { EventInfo info = 1; - string exc = 2; + RunningOperationCaughtError data = 2; } // Q002 message CompileComplete { +} + +message CompileCompleteMsg { EventInfo info = 1; + CompileComplete data = 2; } // Q003 message FreshnessCheckComplete { +} + +message FreshnessCheckCompleteMsg { EventInfo info = 1; + FreshnessCheckComplete data = 2; } // Q004 message SeedHeader { + string header = 1; +} + +message SeedHeaderMsg { EventInfo info = 1; - string header = 2; + SeedHeader data = 2; } // Q005 message SeedHeaderSeparator { + int32 len_header = 1; +} + +message SeedHeaderSeparatorMsg { EventInfo info = 1; - int32 len_header = 2; + SeedHeaderSeparator data = 2; } // Q006 message SQLRunnerException { - EventInfo info = 1; - string exc = 2; - string exc_info = 3; + string exc = 1; + string exc_info = 2; +} + +message SQLRunnerExceptionMsg { + EventInfo info = 1; + SQLRunnerException data = 2; } // Q007 message LogTestResult { + NodeInfo node_info = 1; + string name = 2; + string status = 3; + int32 index = 4; + int32 num_models = 5; + float execution_time = 6; + int32 num_failures = 7; +} + +message LogTestResultMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string status = 4; - int32 index = 5; - int32 num_models = 6; - float execution_time = 7; - int32 num_failures = 8; + LogTestResult data = 2; } @@ -1111,63 +1516,83 @@ message LogTestResult { // Q011 message LogStartLine { + NodeInfo node_info = 1; + string description = 2; + int32 index = 3; + int32 total = 4; +} + +message LogStartLineMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - int32 index = 4; - int32 total = 5; + LogStartLine data = 2; } // Q012 message LogModelResult { + NodeInfo node_info = 1; + string description = 2; + string status = 3; + int32 index = 4; + int32 total = 5; + int32 execution_time = 6; +} + +message LogModelResultMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - int32 execution_time = 7; + LogModelResult data = 2; } // skipped Q013, Q014 // Q015 message LogSnapshotResult { + NodeInfo node_info = 1; + string description = 2; + string status = 3; + int32 index = 4; + int32 total = 5; + float execution_time = 6; + map cfg = 7; +} + +message LogSnapshotResultMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; - map cfg = 8; + LogSnapshotResult data = 2; } // Q016 message LogSeedResult { + NodeInfo node_info = 1; + string status = 2; + string result_message = 3; + int32 index = 4; + int32 total = 5; + float execution_time = 6; + string schema = 7; 
+ string relation = 8; +} + +message LogSeedResultMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; - string result_message = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; - string schema = 8; - string relation = 9; + LogSeedResult data = 2; } // Skipped Q017 // Q018 message LogFreshnessResult { + string status = 1; + NodeInfo node_info = 2; + int32 index = 3; + int32 total = 4; + float execution_time = 5; + string source_name = 6; + string table_name = 7; +} + +message LogFreshnessResultMsg { EventInfo info = 1; - string status = 2; - NodeInfo node_info = 3; - int32 index = 4; - int32 total = 5; - float execution_time = 6; - string source_name = 7; - string table_name = 8; + LogFreshnessResult data = 2; } @@ -1176,117 +1601,181 @@ message LogFreshnessResult { // Q022 message LogCancelLine { + string conn_name = 1; +} + +message LogCancelLineMsg { EventInfo info = 1; - string conn_name = 2; + LogCancelLine data = 2; } // Q023 message DefaultSelector { + string name = 1; +} + +message DefaultSelectorMsg { EventInfo info = 1; - string name = 2; + DefaultSelector data = 2; } // Q024 message NodeStart { + NodeInfo node_info = 1; +} + +message NodeStartMsg { EventInfo info = 1; - NodeInfo node_info = 2; + NodeStart data = 2; } // Q025 message NodeFinished { + NodeInfo node_info = 1; + RunResultMsg run_result = 2; +} + +message NodeFinishedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - RunResultMsg run_result = 4; + NodeFinished data = 2; } // Q026 message QueryCancelationUnsupported { + string type = 1; +} + +message QueryCancelationUnsupportedMsg { EventInfo info = 1; - string type = 2; + QueryCancelationUnsupported data = 2; } // Q027 message ConcurrencyLine { + int32 num_threads = 1; + string target_name = 2; + int32 node_count = 3; +} + +message ConcurrencyLineMsg { EventInfo info = 1; - int32 num_threads = 2; - string target_name = 3; - int32 node_count = 4; + ConcurrencyLine data = 2; } // Skipped Q028 // Q029 message WritingInjectedSQLForNode { + NodeInfo node_info = 1; +} + +message WritingInjectedSQLForNodeMsg { EventInfo info = 1; - NodeInfo node_info = 2; + WritingInjectedSQLForNode data = 2; } // Q030 message NodeCompiling { + NodeInfo node_info = 1; +} + +message NodeCompilingMsg { EventInfo info = 1; - NodeInfo node_info = 2; + NodeCompiling data = 2; } // Q031 message NodeExecuting { + NodeInfo node_info = 1; +} + +message NodeExecutingMsg { EventInfo info = 1; - NodeInfo node_info = 2; + NodeExecuting data = 2; } // Q032 message LogHookStartLine { + NodeInfo node_info = 1; + string statement = 2; + int32 index = 3; + int32 total = 4; +} + +message LogHookStartLineMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string statement = 3; - int32 index = 4; - int32 total = 5; + LogHookStartLine data = 2; } // Q033 message LogHookEndLine { + NodeInfo node_info = 1; + string statement = 2; + string status = 3; + int32 index = 4; + int32 total = 5; + float execution_time = 6; +} + +message LogHookEndLineMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string statement = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; + LogHookEndLine data = 2; } // Q034 message SkippingDetails { + NodeInfo node_info = 1; + string resource_type = 2; + string schema = 3; + string node_name = 4; + int32 index = 5; + int32 total = 6; +} + +message SkippingDetailsMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string resource_type = 3; - string schema = 4; - string node_name = 5; - int32 index = 6; - 
int32 total = 7; + SkippingDetails data = 2; } // Q035 message NothingToDo { +} + +message NothingToDoMsg { EventInfo info = 1; + NothingToDo data = 2; } // Q036 message RunningOperationUncaughtError { + string exc = 1; +} + +message RunningOperationUncaughtErrorMsg { EventInfo info = 1; - string exc = 2; + RunningOperationUncaughtError data = 2; } // Q037 message EndRunResult { + repeated RunResultMsg results = 1; + float elapsed_time = 2; + google.protobuf.Timestamp generated_at = 3; + bool success = 4; +} + +message EndRunResultMsg { EventInfo info = 1; - repeated RunResultMsg results = 2; - float elapsed_time = 3; - google.protobuf.Timestamp generated_at = 4; - bool success = 5; + EndRunResult data = 2; } // Q038 message NoNodesSelected { +} + +message NoNodesSelectedMsg { EventInfo info = 1; + NoNodesSelected data = 2; } // W - Node testing @@ -1295,294 +1784,478 @@ message NoNodesSelected { // W002 message CatchableExceptionOnRun { + NodeInfo node_info = 1; + string exc = 2; + string exc_info = 3; +} + +message CatchableExceptionOnRunMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string exc = 3; - string exc_info = 4; + CatchableExceptionOnRun data = 2; } // W003 -message InternalExceptionOnRun { +message InternalErrorOnRun { + string build_path = 1; + string exc = 2; +} + +message InternalErrorOnRunMsg { EventInfo info = 1; - string build_path = 2; - string exc = 3; + InternalErrorOnRun data = 2; } // W004 message GenericExceptionOnRun { + string build_path = 1; + string unique_id = 2; + string exc = 3; +} + +message GenericExceptionOnRunMsg { EventInfo info = 1; - string build_path = 2; - string unique_id = 3; - string exc = 4; + GenericExceptionOnRun data = 2; } // W005 message NodeConnectionReleaseError { + string node_name = 1; + string exc = 2; + string exc_info = 3; +} + +message NodeConnectionReleaseErrorMsg { EventInfo info = 1; - string node_name = 2; - string exc = 3; - string exc_info = 4; + NodeConnectionReleaseError data = 2; } // W006 message FoundStats { + string stat_line = 1; +} + +message FoundStatsMsg { EventInfo info = 1; - string stat_line = 2; + FoundStats data = 2; } // Z - Misc // Z001 message MainKeyboardInterrupt { +} + +message MainKeyboardInterruptMsg { EventInfo info = 1; + MainKeyboardInterrupt data = 2; } // Z002 message MainEncounteredError { + string exc = 1; +} + +message MainEncounteredErrorMsg { EventInfo info = 1; - string exc = 2; + MainEncounteredError data = 2; } // Z003 message MainStackTrace { + string stack_trace = 1; +} + +message MainStackTraceMsg { EventInfo info = 1; - string stack_trace = 2; + MainStackTrace data = 2; } // Z004 message SystemErrorRetrievingModTime { + string path = 1; +} + +message SystemErrorRetrievingModTimeMsg { EventInfo info = 1; - string path = 2; + SystemErrorRetrievingModTime data = 2; } // Z005 message SystemCouldNotWrite { + string path = 1; + string reason = 2; + string exc = 3; +} + +message SystemCouldNotWriteMsg { EventInfo info = 1; - string path = 2; - string reason = 3; - string exc = 4; + SystemCouldNotWrite data = 2; } // Z006 message SystemExecutingCmd { + repeated string cmd = 1; +} + +message SystemExecutingCmdMsg { EventInfo info = 1; - repeated string cmd = 2; + SystemExecutingCmd data = 2; } // Z007 +message SystemStdOut{ + bytes bmsg = 1; +} + message SystemStdOutMsg { EventInfo info = 1; - bytes bmsg = 2; + SystemStdOut data = 2; } // Z008 +message SystemStdErr { + bytes bmsg = 1; +} + message SystemStdErrMsg { EventInfo info = 1; - bytes bmsg = 2; + SystemStdErr data = 2; } // Z009 
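Every event in types.proto now follows the same two-message shape: the payload fields move into a bare `X` message, and a companion `XMsg` wraps `EventInfo info = 1` plus `X data = 2`. The Z009 pair that follows is representative. Below is a minimal, self-contained Python sketch of that shape — plain dataclasses stand in for the protoc-generated classes, and the real `EventInfo` carries more fields (name, level, invocation_id, ts, etc.) than shown here:

```python
# Sketch only: dataclasses mirror the field layout in types.proto, not dbt's
# actual generated code or logging plumbing.
from dataclasses import dataclass, field

@dataclass
class EventInfo:                  # shared envelope, always field 1
    code: str = ""
    msg: str = ""

@dataclass
class SystemReportReturnCode:     # event-specific payload
    returncode: int = 0

@dataclass
class SystemReportReturnCodeMsg:  # wrapper: envelope + typed payload
    info: EventInfo = field(default_factory=EventInfo)
    data: SystemReportReturnCode = field(default_factory=SystemReportReturnCode)

# Previously `returncode` sat directly beside `info` as field 2; after this
# change every serialized event is uniformly {info, data}:
event = SystemReportReturnCodeMsg(
    info=EventInfo(code="Z009"),
    data=SystemReportReturnCode(returncode=1),
)
print(event.data.returncode)  # -> 1
```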
message SystemReportReturnCode { + int32 returncode = 1; +} + +message SystemReportReturnCodeMsg { EventInfo info = 1; - int32 returncode = 2; + SystemReportReturnCode data = 2; } // Z010 message TimingInfoCollected { + NodeInfo node_info = 1; + TimingInfoMsg timing_info = 2; +} + +message TimingInfoCollectedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - TimingInfoMsg timing_info = 3; + TimingInfoCollected data = 2; } // Z011 message LogDebugStackTrace { + string exc_info = 1; +} + +message LogDebugStackTraceMsg { EventInfo info = 1; - string exc_info = 2; + LogDebugStackTrace data = 2; } // Z012 message CheckCleanPath { + string path = 1; +} + +message CheckCleanPathMsg { EventInfo info = 1; - string path = 2; + CheckCleanPath data = 2; } // Z013 message ConfirmCleanPath { + string path = 1; +} + +message ConfirmCleanPathMsg { EventInfo info = 1; - string path = 2; + ConfirmCleanPath data = 2; } // Z014 message ProtectedCleanPath { + string path = 1; +} + +message ProtectedCleanPathMsg { EventInfo info = 1; - string path = 2; + ProtectedCleanPath data = 2; } // Z015 message FinishedCleanPaths { +} + +message FinishedCleanPathsMsg { EventInfo info = 1; + FinishedCleanPaths data = 2; } // Z016 message OpenCommand { + string open_cmd = 1; + string profiles_dir = 2; +} + +message OpenCommandMsg { EventInfo info = 1; - string open_cmd = 2; - string profiles_dir = 3; + OpenCommand data = 2; } // Z017 message EmptyLine { +} + +message EmptyLineMsg { EventInfo info = 1; + EmptyLine data = 2; } // Z018 message ServingDocsPort { + string address = 1; + int32 port = 2; +} + +message ServingDocsPortMsg { EventInfo info = 1; - string address = 2; - int32 port = 3; + ServingDocsPort data = 2; } // Z019 message ServingDocsAccessInfo { + string port = 1; +} + +message ServingDocsAccessInfoMsg { EventInfo info = 1; - string port = 2; + ServingDocsAccessInfo data = 2; } // Z020 message ServingDocsExitInfo { +} + +message ServingDocsExitInfoMsg { EventInfo info = 1; + ServingDocsExitInfo data = 2; } // Z021 message RunResultWarning { + string resource_type = 1; + string node_name = 2; + string path = 3; +} + +message RunResultWarningMsg { EventInfo info = 1; - string resource_type = 2; - string node_name = 3; - string path = 4; + RunResultWarning data = 2; } // Z022 message RunResultFailure { + string resource_type = 1; + string node_name = 2; + string path = 3; +} + +message RunResultFailureMsg { EventInfo info = 1; - string resource_type = 2; - string node_name = 3; - string path = 4; + RunResultFailure data = 2; } // Z023 message StatsLine { + map stats = 1; +} + +message StatsLineMsg { EventInfo info = 1; - map stats = 2; + StatsLine data = 2; } // Z024 message RunResultError { + string msg = 1; +} + +message RunResultErrorMsg { EventInfo info = 1; - string msg = 2; + RunResultError data = 2; } // Z025 message RunResultErrorNoMessage { + string status = 1; +} + +message RunResultErrorNoMessageMsg { EventInfo info = 1; - string status = 2; + RunResultErrorNoMessage data = 2; } // Z026 message SQLCompiledPath { + string path = 1; +} + +message SQLCompiledPathMsg { EventInfo info = 1; - string path = 2; + SQLCompiledPath data = 2; } // Z027 message CheckNodeTestFailure { + string relation_name = 1; +} + +message CheckNodeTestFailureMsg { EventInfo info = 1; - string relation_name = 2; + CheckNodeTestFailure data = 2; } // Z028 message FirstRunResultError { + string msg = 1; +} + +message FirstRunResultErrorMsg { EventInfo info = 1; - string msg = 2; + FirstRunResultError data = 2; } // Z029 message 
AfterFirstRunResultError { + string msg = 1; +} + +message AfterFirstRunResultErrorMsg { EventInfo info = 1; - string msg = 2; + AfterFirstRunResultError data = 2; } // Z030 message EndOfRunSummary { + int32 num_errors = 1; + int32 num_warnings = 2; + bool keyboard_interrupt = 3; +} + +message EndOfRunSummaryMsg { EventInfo info = 1; - int32 num_errors = 2; - int32 num_warnings = 3; - bool keyboard_interrupt = 4; + EndOfRunSummary data = 2; } // Skipped Z031, Z032, Z033 // Z034 message LogSkipBecauseError { + string schema = 1; + string relation = 2; + int32 index = 3; + int32 total = 4; +} + +message LogSkipBecauseErrorMsg { EventInfo info = 1; - string schema = 2; - string relation = 3; - int32 index = 4; - int32 total = 5; + LogSkipBecauseError data = 2; } // Z036 message EnsureGitInstalled { +} + +message EnsureGitInstalledMsg { EventInfo info = 1; + EnsureGitInstalled data = 2; } // Z037 message DepsCreatingLocalSymlink { +} + +message DepsCreatingLocalSymlinkMsg { EventInfo info = 1; + DepsCreatingLocalSymlink data = 2; } // Z038 message DepsSymlinkNotAvailable { +} + +message DepsSymlinkNotAvailableMsg { EventInfo info = 1; + DepsSymlinkNotAvailable data = 2; } // Z039 message DisableTracking { +} + +message DisableTrackingMsg { EventInfo info = 1; + DisableTracking data = 2; } // Z040 message SendingEvent { + string kwargs = 1; +} + +message SendingEventMsg { EventInfo info = 1; - string kwargs = 2; + SendingEvent data = 2; } // Z041 message SendEventFailure { +} + +message SendEventFailureMsg { EventInfo info = 1; + SendEventFailure data = 2; } // Z042 message FlushEvents { +} + +message FlushEventsMsg { EventInfo info = 1; + FlushEvents data = 2; } // Z043 message FlushEventsFailure { +} + +message FlushEventsFailureMsg { EventInfo info = 1; + FlushEventsFailure data = 2; } // Z044 message TrackingInitializeFailure { + string exc_info = 1; +} + +message TrackingInitializeFailureMsg { EventInfo info = 1; - string exc_info = 2; + TrackingInitializeFailure data = 2; } // Skipped Z045 // Z046 message RunResultWarningMessage { + string msg = 1; +} + +message RunResultWarningMessageMsg { EventInfo info = 1; - string msg = 2; + RunResultWarningMessage data = 2; } // Z047 @@ -1674,36 +2347,60 @@ message ListRunDetails { // T001 message IntegrationTestInfo { + string msg = 1; +} + +message IntegrationTestInfoMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestInfo data = 2; } // T002 message IntegrationTestDebug { + string msg = 1; +} + +message IntegrationTestDebugMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestDebug data = 2; } // T003 message IntegrationTestWarn { + string msg = 1; +} + +message IntegrationTestWarnMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestWarn data = 2; } // T004 message IntegrationTestError { + string msg = 1; +} + +message IntegrationTestErrorMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestError data = 2; } // T005 message IntegrationTestException { + string msg = 1; +} + +message IntegrationTestExceptionMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestException data = 2; } // T006 message UnitTestInfo { + string msg = 1; +} + +message UnitTestInfoMsg { EventInfo info = 1; - string msg = 2; + UnitTestInfo data = 2; } diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 6d001ed7716..c4dfeeb5c16 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -11,6 +11,7 @@ Cache, AdapterEventStringFunctor, EventStringFunctor, + EventLevel, ) from dbt.events.format import 
format_fancy_output_line, pluralize @@ -107,88 +108,49 @@ def message(self) -> str: @dataclass -class InvalidVarsYAML(ErrorLevel, pt.InvalidVarsYAML): +class InvalidOptionYAML(ErrorLevel, pt.InvalidOptionYAML): def code(self): return "A008" def message(self) -> str: - return "The YAML provided in the --vars argument is not valid." + return f"The YAML provided in the --{self.option_name} argument is not valid." @dataclass -class DbtProjectError(ErrorLevel, pt.DbtProjectError): +class LogDbtProjectError(ErrorLevel, pt.LogDbtProjectError): def code(self): return "A009" def message(self) -> str: - return "Encountered an error while reading the project:" - + msg = "Encountered an error while reading the project:" + if self.exc: + msg += f" ERROR: {str(self.exc)}" + return msg -@dataclass -class DbtProjectErrorException(ErrorLevel, pt.DbtProjectErrorException): - def code(self): - return "A010" - def message(self) -> str: - return f" ERROR: {str(self.exc)}" +# Skipped A010 @dataclass -class DbtProfileError(ErrorLevel, pt.DbtProfileError): +class LogDbtProfileError(ErrorLevel, pt.LogDbtProfileError): def code(self): return "A011" def message(self) -> str: - return "Encountered an error while reading profiles:" - - -@dataclass -class DbtProfileErrorException(ErrorLevel, pt.DbtProfileErrorException): - def code(self): - return "A012" - - def message(self) -> str: - return f" ERROR: {str(self.exc)}" - - -@dataclass -class ProfileListTitle(InfoLevel, pt.ProfileListTitle): - def code(self): - return "A013" - - def message(self) -> str: - return "Defined profiles:" - - -@dataclass -class ListSingleProfile(InfoLevel, pt.ListSingleProfile): - def code(self): - return "A014" - - def message(self) -> str: - return f" - {self.profile}" - - -@dataclass -class NoDefinedProfiles(InfoLevel, pt.NoDefinedProfiles): - def code(self): - return "A015" - - def message(self) -> str: - return "There are no profiles defined in your profiles.yml file" - - -@dataclass -class ProfileHelpMessage(InfoLevel, pt.ProfileHelpMessage): - def code(self): - return "A016" + msg = "Encountered an error while reading profiles:\n" f" ERROR: {str(self.exc)}" + if self.profiles: + msg += "Defined profiles:\n" + for profile in self.profiles: + msg += f" - {profile}" + else: + msg += "There are no profiles defined in your profiles.yml file" - def message(self) -> str: - return """ + msg += """ For more information on configuring profiles, please consult the dbt docs: https://docs.getdbt.com/docs/configure-your-profile """ + return msg @dataclass @@ -415,6 +377,22 @@ def message(self): return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) +@dataclass +class InternalDeprecation(WarnLevel, pt.InternalDeprecation): + def code(self): + return "D008" + + def message(self): + extra_reason = "" + if self.reason: + extra_reason = f"\n{self.reason}" + msg = ( + f"`{self.name}` is deprecated and will be removed in dbt-core version {self.version}\n\n" + f"Adapter maintainers can resolve this deprecation by {self.suggested_action}. 
{extra_reason}" + ) + return warning_tag(msg) + + # ======================================================= # E - DB Adapter # ======================================================= @@ -471,7 +449,7 @@ def code(self): return "E006" def message(self) -> str: - return f"Re-using an available connection from the pool (formerly {self.conn_name})" + return f"Re-using an available connection from the pool (formerly {self.orig_conn_name}, now {self.conn_name})" @dataclass @@ -614,130 +592,54 @@ def message(self) -> str: return f'Dropping schema "{self.relation}".' -# TODO pretty sure this is only ever called in dead code -# see: core/dbt/adapters/cache.py _add_link vs add_link @dataclass -class UncachedRelation(DebugLevel, Cache, pt.UncachedRelation): +class CacheAction(DebugLevel, Cache, pt.CacheAction): def code(self): return "E022" - def message(self) -> str: - return ( - f"{self.dep_key} references {str(self.ref_key)} " - f"but {self.ref_key.database}.{self.ref_key.schema}" - "is not in the cache, skipping assumed external relation" - ) - - -@dataclass -class AddLink(DebugLevel, Cache, pt.AddLink): - def code(self): - return "E023" - - def message(self) -> str: - return f"adding link, {self.dep_key} references {self.ref_key}" - - -@dataclass -class AddRelation(DebugLevel, Cache, pt.AddRelation): - def code(self): - return "E024" - - def message(self) -> str: - return f"Adding relation: {str(self.relation)}" - - -@dataclass -class DropMissingRelation(DebugLevel, Cache, pt.DropMissingRelation): - def code(self): - return "E025" - - def message(self) -> str: - return f"dropped a nonexistent relationship: {str(self.relation)}" - - -@dataclass -class DropCascade(DebugLevel, Cache, pt.DropCascade): - def code(self): - return "E026" - - def message(self) -> str: - return f"drop {self.dropped} is cascading to {self.consequences}" - - -@dataclass -class DropRelation(DebugLevel, Cache, pt.DropRelation): - def code(self): - return "E027" - - def message(self) -> str: - return f"Dropping relation: {self.dropped}" - - -@dataclass -class UpdateReference(DebugLevel, Cache, pt.UpdateReference): - def code(self): - return "E028" - - def message(self) -> str: - return ( - f"updated reference from {self.old_key} -> {self.cached_key} to " - f"{self.new_key} -> {self.cached_key}" - ) - - -@dataclass -class TemporaryRelation(DebugLevel, Cache, pt.TemporaryRelation): - def code(self): - return "E029" - - def message(self) -> str: - return f"old key {self.key} not found in self.relations, assuming temporary" - + def message(self): + if self.action == "add_link": + return f"adding link, {self.ref_key} references {self.ref_key_2}" + elif self.action == "add_relation": + return f"adding relation: {str(self.ref_key)}" + elif self.action == "drop_missing_relation": + return f"dropped a nonexistent relationship: {str(self.ref_key)}" + elif self.action == "drop_cascade": + return f"drop {self.ref_key} is cascading to {self.ref_list}" + elif self.action == "drop_relation": + return f"Dropping relation: {self.ref_key}" + elif self.action == "update_reference": + return ( + f"updated reference from {self.ref_key} -> {self.ref_key_3} to " + f"{self.ref_key_2} -> {self.ref_key_3}" + ) + elif self.action == "temporary_relation": + return f"old key {self.ref_key} not found in self.relations, assuming temporary" + elif self.action == "rename_relation": + return f"Renaming relation {self.ref_key} to {self.ref_key_2}" + elif self.action == "uncached_relation": + return ( + f"{self.ref_key_2} references {str(self.ref_key)} " + f"but 
{self.ref_key.database}.{self.ref_key.schema}" + "is not in the cache, skipping assumed external relation" + ) + else: + return f"{self.ref_key}" -@dataclass -class RenameSchema(DebugLevel, Cache, pt.RenameSchema): - def code(self): - return "E030" - def message(self) -> str: - return f"Renaming relation {self.old_key} to {self.new_key}" +# Skipping E023, E024, E025, E026, E027, E028, E029, E030 @dataclass -class DumpBeforeAddGraph(DebugLevel, Cache, pt.DumpBeforeAddGraph): +class CacheDumpGraph(DebugLevel, Cache, pt.CacheDumpGraph): def code(self): return "E031" def message(self) -> str: - return f"before adding : {self.dump}" - + return f"{self.before_after} {self.action} : {self.dump}" -@dataclass -class DumpAfterAddGraph(DebugLevel, Cache, pt.DumpAfterAddGraph): - def code(self): - return "E032" - def message(self) -> str: - return f"after adding: {self.dump}" - - -@dataclass -class DumpBeforeRenameSchema(DebugLevel, Cache, pt.DumpBeforeRenameSchema): - def code(self): - return "E033" - - def message(self) -> str: - return f"before rename: {self.dump}" - - -@dataclass -class DumpAfterRenameSchema(DebugLevel, Cache, pt.DumpAfterRenameSchema): - def code(self): - return "E034" - - def message(self) -> str: - return f"after rename: {self.dump}" +# Skipping E032, E033, E034 @dataclass @@ -755,7 +657,7 @@ def code(self): return "E036" def message(self): - pass + return f"{self.exc_info}" @dataclass @@ -853,7 +755,7 @@ def message(self) -> str: @dataclass -class HookFinished(InfoLevel, pt.HookFinished): +class FinishedRunningStats(InfoLevel, pt.FinishedRunningStats): def code(self): return "E047" @@ -867,93 +769,15 @@ def message(self) -> str: @dataclass -class ParseCmdStart(InfoLevel, pt.ParseCmdStart): +class ParseCmdOut(InfoLevel, pt.ParseCmdOut): def code(self): return "I001" def message(self) -> str: - return "Start parsing." - - -@dataclass -class ParseCmdCompiling(InfoLevel, pt.ParseCmdCompiling): - def code(self): - return "I002" - - def message(self) -> str: - return "Compiling." - - -@dataclass -class ParseCmdWritingManifest(InfoLevel, pt.ParseCmdWritingManifest): - def code(self): - return "I003" - - def message(self) -> str: - return "Writing manifest." - - -@dataclass -class ParseCmdDone(InfoLevel, pt.ParseCmdDone): - def code(self): - return "I004" - - def message(self) -> str: - return "Done." 
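The deletions here follow the same consolidation pattern as `CacheAction` (E022) above: a family of single-purpose events (the old `ParseCmd*`/`Manifest*` messages I001–I010, the old cache events E022–E030) collapses into one parameterized event. A hedged sketch of the idea — the real events subclass `DebugLevel`/`InfoLevel` and proto-backed types, and the `ReferenceKey` payloads are simplified to plain strings here:

```python
# Sketch of one message() dispatching on `action`, replacing nine classes.
from dataclasses import dataclass

@dataclass
class CacheAction:
    action: str = ""
    ref_key: str = ""
    ref_key_2: str = ""

    def code(self) -> str:
        return "E022"

    def message(self) -> str:
        if self.action == "add_relation":
            return f"adding relation: {self.ref_key}"
        elif self.action == "rename_relation":
            return f"Renaming relation {self.ref_key} to {self.ref_key_2}"
        return f"{self.ref_key}"

# One parameterized call site per action:
print(CacheAction(action="rename_relation", ref_key="db.a", ref_key_2="db.b").message())
```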
- - -@dataclass -class ManifestDependenciesLoaded(InfoLevel, pt.ManifestDependenciesLoaded): - def code(self): - return "I005" - - def message(self) -> str: - return "Dependencies loaded" - - -@dataclass -class ManifestLoaderCreated(InfoLevel, pt.ManifestLoaderCreated): - def code(self): - return "I006" - - def message(self) -> str: - return "ManifestLoader created" - - -@dataclass -class ManifestLoaded(InfoLevel, pt.ManifestLoaded): - def code(self): - return "I007" - - def message(self) -> str: - return "Manifest loaded" - - -@dataclass -class ManifestChecked(InfoLevel, pt.ManifestChecked): - def code(self): - return "I008" - - def message(self) -> str: - return "Manifest checked" - - -@dataclass -class ManifestFlatGraphBuilt(InfoLevel, pt.ManifestFlatGraphBuilt): - def code(self): - return "I009" - - def message(self) -> str: - return "Flat graph built" - + return self.msg -@dataclass -class ParseCmdPerfInfoPath(InfoLevel, pt.ParseCmdPerfInfoPath): - def code(self): - return "I010" - def message(self) -> str: - return f"Performance info: {self.path}" +# Skipping I002, I003, I004, I005, I006, I007, I008, I009, I010 @dataclass @@ -978,7 +802,7 @@ def message(self) -> str: @dataclass -class PartialParsingExceptionProcessingFile(DebugLevel, pt.PartialParsingExceptionProcessingFile): +class PartialParsingErrorProcessingFile(DebugLevel, pt.PartialParsingErrorProcessingFile): def code(self): return "I014" @@ -990,7 +814,7 @@ def message(self) -> str: @dataclass -class PartialParsingException(DebugLevel, pt.PartialParsingException): +class PartialParsingError(DebugLevel, pt.PartialParsingError): def code(self): return "I016" @@ -1153,7 +977,7 @@ def message(self) -> str: @dataclass -class InvalidDisabledTargetInTestNode(WarnLevel, pt.InvalidDisabledTargetInTestNode): +class InvalidDisabledTargetInTestNode(DebugLevel, pt.InvalidDisabledTargetInTestNode): def code(self): return "I050" @@ -1290,7 +1114,7 @@ def message(self) -> str: @dataclass -class MacroPatchNotFound(WarnLevel, pt.MacroPatchNotFound): +class MacroNotFoundForPatch(WarnLevel, pt.MacroNotFoundForPatch): def code(self): return "I059" @@ -1723,17 +1547,16 @@ def message(self) -> str: @classmethod def status_to_level(cls, status): # The statuses come from TestStatus - # TODO should this return EventLevel enum instead? level_lookup = { - "fail": "error", - "pass": "info", - "warn": "warn", - "error": "error", + "fail": EventLevel.ERROR, + "pass": EventLevel.INFO, + "warn": EventLevel.WARN, + "error": EventLevel.ERROR, } if status in level_lookup: return level_lookup[status] else: - return "info" + return EventLevel.INFO # Skipped Q008, Q009, Q010 @@ -1855,15 +1678,15 @@ def status_to_level(cls, status): # The statuses come from FreshnessStatus # TODO should this return EventLevel enum instead? 
level_lookup = { - "runtime error": "error", - "pass": "info", - "warn": "warn", - "error": "error", + "runtime error": EventLevel.ERROR, + "pass": EventLevel.INFO, + "warn": EventLevel.WARN, + "error": EventLevel.ERROR, } if status in level_lookup: return level_lookup[status] else: - return "info" + return EventLevel.INFO # Skipped Q019, Q020, Q021 @@ -2056,7 +1879,7 @@ def message(self) -> str: @dataclass -class InternalExceptionOnRun(DebugLevel, pt.InternalExceptionOnRun): +class InternalErrorOnRun(DebugLevel, pt.InternalErrorOnRun): def code(self): return "W003" @@ -2164,7 +1987,7 @@ def message(self) -> str: @dataclass -class SystemStdOutMsg(DebugLevel, pt.SystemStdOutMsg): +class SystemStdOut(DebugLevel, pt.SystemStdOut): def code(self): return "Z007" @@ -2173,7 +1996,7 @@ def message(self) -> str: @dataclass -class SystemStdErrMsg(DebugLevel, pt.SystemStdErrMsg): +class SystemStdErr(DebugLevel, pt.SystemStdErr): def code(self): return "Z008" diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index 515ec86054b..4e7b6c9fe6a 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -3,8 +3,8 @@ import re from typing import Any, Dict, List, Mapping, NoReturn, Optional, Union -# from dbt.contracts.graph import ManifestNode # or ParsedNode? from dbt.dataclass_schema import ValidationError +from dbt.internal_deprecations import deprecated from dbt.events.functions import warn_or_error from dbt.events.helpers import env_secrets, scrub_secrets from dbt.events.types import JinjaLogWarning @@ -38,7 +38,7 @@ def data(self): } -class InternalException(Exception): +class DbtInternalError(Exception): def __init__(self, msg: str): self.stack: List = [] self.msg = scrub_secrets(msg, env_secrets()) @@ -79,7 +79,7 @@ def __str__(self): return lines[0] + "\n" + "\n".join([" " + line for line in lines[1:]]) -class RuntimeException(RuntimeError, Exception): +class DbtRuntimeError(RuntimeError, Exception): CODE = 10001 MESSAGE = "Runtime error" @@ -172,72 +172,7 @@ def data(self): return result -class RPCFailureResult(RuntimeException): - CODE = 10002 - MESSAGE = "RPC execution error" - - -class RPCTimeoutException(RuntimeException): - CODE = 10008 - MESSAGE = "RPC timeout error" - - def __init__(self, timeout: Optional[float]): - super().__init__(self.MESSAGE) - self.timeout = timeout - - def data(self): - result = super().data() - result.update( - { - "timeout": self.timeout, - "message": f"RPC timed out after {self.timeout}s", - } - ) - return result - - -class RPCKilledException(RuntimeException): - CODE = 10009 - MESSAGE = "RPC process killed" - - def __init__(self, signum: int): - self.signum = signum - self.msg = f"RPC process killed by signal {self.signum}" - super().__init__(self.msg) - - def data(self): - return { - "signum": self.signum, - "message": self.msg, - } - - -class RPCCompiling(RuntimeException): - CODE = 10010 - MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status" - - def __init__(self, msg: str = None, node=None): - if msg is None: - msg = "compile in progress" - super().__init__(msg, node) - - -class RPCLoadException(RuntimeException): - CODE = 10011 - MESSAGE = ( - 'RPC server failed to compile project, call the "status" method for' " compile status" - ) - - def __init__(self, cause: Dict[str, Any]): - self.cause = cause - self.msg = f'{self.MESSAGE}: {self.cause["message"]}' - super().__init__(self.msg) - - def data(self): - return {"cause": self.cause, "message": self.msg} - - -class 
DatabaseException(RuntimeException): +class DbtDatabaseError(DbtRuntimeError): CODE = 10003 MESSAGE = "Database Error" @@ -247,14 +182,14 @@ def process_stack(self): if hasattr(self.node, "build_path") and self.node.build_path: lines.append(f"compiled Code at {self.node.build_path}") - return lines + RuntimeException.process_stack(self) + return lines + DbtRuntimeError.process_stack(self) @property def type(self): return "Database" -class CompilationException(RuntimeException): +class CompilationError(DbtRuntimeError): CODE = 10004 MESSAGE = "Compilation Error" @@ -274,16 +209,16 @@ def _fix_dupe_msg(self, path_1: str, path_2: str, name: str, type_name: str) -> ) -class RecursionException(RuntimeException): +class RecursionError(DbtRuntimeError): pass -class ValidationException(RuntimeException): +class DbtValidationError(DbtRuntimeError): CODE = 10005 MESSAGE = "Validation Error" -class ParsingException(RuntimeException): +class ParsingError(DbtRuntimeError): CODE = 10015 MESSAGE = "Parsing Error" @@ -293,7 +228,7 @@ def type(self): # TODO: this isn't raised in the core codebase. Is it raised elsewhere? -class JSONValidationException(ValidationException): +class JSONValidationError(DbtValidationError): def __init__(self, typename, errors): self.typename = typename self.errors = errors @@ -303,11 +238,11 @@ def __init__(self, typename, errors): def __reduce__(self): # see https://stackoverflow.com/a/36342588 for why this is necessary - return (JSONValidationException, (self.typename, self.errors)) + return (JSONValidationError, (self.typename, self.errors)) -class IncompatibleSchemaException(RuntimeException): - def __init__(self, expected: str, found: Optional[str]): +class IncompatibleSchemaError(DbtRuntimeError): + def __init__(self, expected: str, found: Optional[str] = None): self.expected = expected self.found = found self.filename = "input file" @@ -334,11 +269,11 @@ def get_message(self) -> str: MESSAGE = "Incompatible Schema" -class JinjaRenderingException(CompilationException): +class JinjaRenderingError(CompilationError): pass -class UndefinedMacroException(CompilationException): +class UndefinedMacroError(CompilationError): def __str__(self, prefix: str = "! ") -> str: msg = super().__str__(prefix) return ( @@ -348,28 +283,16 @@ def __str__(self, prefix: str = "! ") -> str: ) -class UnknownAsyncIDException(Exception): - CODE = 10012 - MESSAGE = "RPC server got an unknown async ID" - - def __init__(self, task_id): - self.task_id = task_id - - def __str__(self): - return f"{self.MESSAGE}: {self.task_id}" - - -class AliasException(ValidationException): +class AliasError(DbtValidationError): pass -class DependencyException(Exception): - # this can happen due to raise_dependency_error and its callers +class DependencyError(Exception): CODE = 10006 MESSAGE = "Dependency Error" -class DbtConfigError(RuntimeException): +class DbtConfigError(DbtRuntimeError): CODE = 10007 MESSAGE = "DBT Configuration Error" @@ -387,7 +310,7 @@ def __str__(self, prefix="! 
") -> str: return f"{msg}\n\nError encountered in {self.path}" -class FailFastException(RuntimeException): +class FailFastError(DbtRuntimeError): CODE = 10013 MESSAGE = "FailFast Error" @@ -412,7 +335,7 @@ class DbtProfileError(DbtConfigError): pass -class SemverException(Exception): +class SemverError(Exception): def __init__(self, msg: str = None): self.msg = msg if msg is not None: @@ -421,22 +344,22 @@ def __init__(self, msg: str = None): super().__init__() -class VersionsNotCompatibleException(SemverException): +class VersionsNotCompatibleError(SemverError): pass -class NotImplementedException(Exception): +class NotImplementedError(Exception): def __init__(self, msg: str): self.msg = msg self.formatted_msg = f"ERROR: {self.msg}" super().__init__(self.formatted_msg) -class FailedToConnectException(DatabaseException): +class FailedToConnectError(DbtDatabaseError): pass -class CommandError(RuntimeException): +class CommandError(DbtRuntimeError): def __init__(self, cwd: str, cmd: List[str], msg: str = "Error running command"): cmd_scrubbed = list(scrub_secrets(cmd_txt, env_secrets()) for cmd_txt in cmd) super().__init__(msg) @@ -483,7 +406,7 @@ def __str__(self): return f"{self.msg} running: {self.cmd}" -class InvalidConnectionException(RuntimeException): +class InvalidConnectionError(DbtRuntimeError): def __init__(self, thread_id, known: List): self.thread_id = thread_id self.known = known @@ -492,17 +415,17 @@ def __init__(self, thread_id, known: List): ) -class InvalidSelectorException(RuntimeException): +class InvalidSelectorError(DbtRuntimeError): def __init__(self, name: str): self.name = name super().__init__(name) -class DuplicateYamlKeyException(CompilationException): +class DuplicateYamlKeyError(CompilationError): pass -class ConnectionException(Exception): +class ConnectionError(Exception): """ There was a problem with the connection that returned a bad response, timed out, or resulted in a file that is corrupt. 
@@ -512,7 +435,7 @@ class ConnectionException(Exception): # event level exception -class EventCompilationException(CompilationException): +class EventCompilationError(CompilationError): def __init__(self, msg: str, node): self.msg = scrub_secrets(msg, env_secrets()) self.node = node @@ -520,7 +443,7 @@ def __init__(self, msg: str, node): # compilation level exceptions -class GraphDependencyNotFound(CompilationException): +class GraphDependencyNotFoundError(CompilationError): def __init__(self, node, dependency: str): self.node = node self.dependency = dependency @@ -534,21 +457,21 @@ def get_message(self) -> str: # client level exceptions -class NoSupportedLanguagesFound(CompilationException): +class NoSupportedLanguagesFoundError(CompilationError): def __init__(self, node): self.node = node self.msg = f"No supported_languages found in materialization macro {self.node.name}" super().__init__(msg=self.msg) -class MaterializtionMacroNotUsed(CompilationException): +class MaterializtionMacroNotUsedError(CompilationError): def __init__(self, node): self.node = node self.msg = "Only materialization macros can be used with this function" super().__init__(msg=self.msg) -class UndefinedCompilation(CompilationException): +class UndefinedCompilationError(CompilationError): def __init__(self, name: str, node): self.name = name self.node = node @@ -556,20 +479,20 @@ def __init__(self, name: str, node): super().__init__(msg=self.msg) -class CaughtMacroExceptionWithNode(CompilationException): +class CaughtMacroErrorWithNodeError(CompilationError): def __init__(self, exc, node): self.exc = exc self.node = node super().__init__(msg=str(exc)) -class CaughtMacroException(CompilationException): +class CaughtMacroError(CompilationError): def __init__(self, exc): self.exc = exc super().__init__(msg=str(exc)) -class MacroNameNotString(CompilationException): +class MacroNameNotStringError(CompilationError): def __init__(self, kwarg_value): self.kwarg_value = kwarg_value super().__init__(msg=self.get_message()) @@ -582,7 +505,7 @@ def get_message(self) -> str: return msg -class MissingControlFlowStartTag(CompilationException): +class MissingControlFlowStartTagError(CompilationError): def __init__(self, tag, expected_tag: str, tag_parser): self.tag = tag self.expected_tag = expected_tag @@ -598,7 +521,7 @@ def get_message(self) -> str: return msg -class UnexpectedControlFlowEndTag(CompilationException): +class UnexpectedControlFlowEndTagError(CompilationError): def __init__(self, tag, expected_tag: str, tag_parser): self.tag = tag self.expected_tag = expected_tag @@ -614,7 +537,7 @@ def get_message(self) -> str: return msg -class UnexpectedMacroEOF(CompilationException): +class UnexpectedMacroEOFError(CompilationError): def __init__(self, expected_name: str, actual_name: str): self.expected_name = expected_name self.actual_name = actual_name @@ -625,7 +548,7 @@ def get_message(self) -> str: return msg -class MacroNamespaceNotString(CompilationException): +class MacroNamespaceNotStringError(CompilationError): def __init__(self, kwarg_type: Any): self.kwarg_type = kwarg_type super().__init__(msg=self.get_message()) @@ -638,7 +561,7 @@ def get_message(self) -> str: return msg -class NestedTags(CompilationException): +class NestedTagsError(CompilationError): def __init__(self, outer, inner): self.outer = outer self.inner = inner @@ -653,7 +576,7 @@ def get_message(self) -> str: return msg -class BlockDefinitionNotAtTop(CompilationException): +class BlockDefinitionNotAtTopError(CompilationError): def __init__(self, 
tag_parser, tag_start): self.tag_parser = tag_parser self.tag_start = tag_start @@ -668,7 +591,7 @@ def get_message(self) -> str: return msg -class MissingCloseTag(CompilationException): +class MissingCloseTagError(CompilationError): def __init__(self, block_type_name: str, linecount: int): self.block_type_name = block_type_name self.linecount = linecount @@ -679,7 +602,7 @@ def get_message(self) -> str: return msg -class GitCloningProblem(RuntimeException): +class UnknownGitCloningProblemError(DbtRuntimeError): def __init__(self, repo: str): self.repo = scrub_secrets(repo, env_secrets()) super().__init__(msg=self.get_message()) @@ -692,7 +615,19 @@ def get_message(self) -> str: return msg -class GitCloningError(InternalException): +class BadSpecError(DbtInternalError): + def __init__(self, repo, revision, error): + self.repo = repo + self.revision = revision + self.stderr = scrub_secrets(error.stderr.strip(), env_secrets()) + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}" + return msg + + +class GitCloningError(DbtInternalError): def __init__(self, repo: str, revision: str, error: CommandResultError): self.repo = repo self.revision = revision @@ -711,19 +646,11 @@ def get_message(self) -> str: return scrub_secrets(msg, env_secrets()) -class GitCheckoutError(InternalException): - def __init__(self, repo: str, revision: str, error: CommandResultError): - self.repo = repo - self.revision = revision - self.stderr = error.stderr.strip() - super().__init__(msg=self.get_message()) - - def get_message(self) -> str: - msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}" - return scrub_secrets(msg, env_secrets()) +class GitCheckoutError(BadSpecError): + pass -class InvalidMaterializationArg(CompilationException): +class MaterializationArgError(CompilationError): def __init__(self, name: str, argument: str): self.name = name self.argument = argument @@ -734,7 +661,22 @@ def get_message(self) -> str: return msg -class SymbolicLinkError(CompilationException): +class OperationError(CompilationError): + def __init__(self, operation_name): + self.operation_name = operation_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"dbt encountered an error when attempting to create a {self.operation_name}. 
" + "If this error persists, please create an issue at: \n\n" + "https://github.com/dbt-labs/dbt-core" + ) + + return msg + + +class SymbolicLinkError(CompilationError): def __init__(self): super().__init__(msg=self.get_message()) @@ -749,23 +691,21 @@ def get_message(self) -> str: # context level exceptions - - -class ZipStrictWrongType(CompilationException): +class ZipStrictWrongTypeError(CompilationError): def __init__(self, exc): self.exc = exc msg = str(self.exc) super().__init__(msg=msg) -class SetStrictWrongType(CompilationException): +class SetStrictWrongTypeError(CompilationError): def __init__(self, exc): self.exc = exc msg = str(self.exc) super().__init__(msg=msg) -class LoadAgateTableValueError(CompilationException): +class LoadAgateTableValueError(CompilationError): def __init__(self, exc: ValueError, node): self.exc = exc self.node = node @@ -773,7 +713,7 @@ def __init__(self, exc: ValueError, node): super().__init__(msg=msg) -class LoadAgateTableNotSeed(CompilationException): +class LoadAgateTableNotSeedError(CompilationError): def __init__(self, resource_type, node): self.resource_type = resource_type self.node = node @@ -781,14 +721,14 @@ def __init__(self, resource_type, node): super().__init__(msg=msg) -class MacrosSourcesUnWriteable(CompilationException): +class MacrosSourcesUnWriteableError(CompilationError): def __init__(self, node): self.node = node msg = 'cannot "write" macros or sources' super().__init__(msg=msg) -class PackageNotInDeps(CompilationException): +class PackageNotInDepsError(CompilationError): def __init__(self, package_name: str, node): self.package_name = package_name self.node = node @@ -796,7 +736,7 @@ def __init__(self, package_name: str, node): super().__init__(msg=msg) -class OperationsCannotRefEphemeralNodes(CompilationException): +class OperationsCannotRefEphemeralNodesError(CompilationError): def __init__(self, target_name: str, node): self.target_name = target_name self.node = node @@ -804,7 +744,7 @@ def __init__(self, target_name: str, node): super().__init__(msg=msg) -class InvalidPersistDocsValueType(CompilationException): +class PersistDocsValueTypeError(CompilationError): def __init__(self, persist_docs: Any): self.persist_docs = persist_docs msg = ( @@ -814,14 +754,14 @@ def __init__(self, persist_docs: Any): super().__init__(msg=msg) -class InvalidInlineModelConfig(CompilationException): +class InlineModelConfigError(CompilationError): def __init__(self, node): self.node = node msg = "Invalid inline model config" super().__init__(msg=msg) -class ConflictingConfigKeys(CompilationException): +class ConflictingConfigKeysError(CompilationError): def __init__(self, oldkey: str, newkey: str, node): self.oldkey = oldkey self.newkey = newkey @@ -830,7 +770,7 @@ def __init__(self, oldkey: str, newkey: str, node): super().__init__(msg=msg) -class InvalidNumberSourceArgs(CompilationException): +class NumberSourceArgsError(CompilationError): def __init__(self, args, node): self.args = args self.node = node @@ -838,7 +778,7 @@ def __init__(self, args, node): super().__init__(msg=msg) -class RequiredVarNotFound(CompilationException): +class RequiredVarNotFoundError(CompilationError): def __init__(self, var_name: str, merged: Dict, node): self.var_name = var_name self.merged = merged @@ -858,14 +798,14 @@ def get_message(self) -> str: return msg -class PackageNotFoundForMacro(CompilationException): +class PackageNotFoundForMacroError(CompilationError): def __init__(self, package_name: str): self.package_name = package_name msg = f"Could not 
find package '{self.package_name}'" super().__init__(msg=msg) -class DisallowSecretEnvVar(ParsingException): +class SecretEnvVarLocationError(ParsingError): def __init__(self, env_var_name: str): self.env_var_name = env_var_name super().__init__(msg=self.get_message()) @@ -878,7 +818,7 @@ def get_message(self) -> str: return msg -class InvalidMacroArgType(CompilationException): +class MacroArgTypeError(CompilationError): def __init__(self, method_name: str, arg_name: str, got_value: Any, expected_type): self.method_name = method_name self.arg_name = arg_name @@ -896,7 +836,7 @@ def get_message(self) -> str: return msg -class InvalidBoolean(CompilationException): +class BooleanError(CompilationError): def __init__(self, return_value: Any, macro_name: str): self.return_value = return_value self.macro_name = macro_name @@ -910,7 +850,7 @@ def get_message(self) -> str: return msg -class RefInvalidArgs(CompilationException): +class RefArgsError(CompilationError): def __init__(self, node, args): self.node = node self.args = args @@ -921,7 +861,7 @@ def get_message(self) -> str: return msg -class MetricInvalidArgs(CompilationException): +class MetricArgsError(CompilationError): def __init__(self, node, args): self.node = node self.args = args @@ -932,7 +872,7 @@ def get_message(self) -> str: return msg -class RefBadContext(CompilationException): +class RefBadContextError(CompilationError): def __init__(self, node, args): self.node = node self.args = args @@ -961,7 +901,7 @@ def get_message(self) -> str: return msg -class InvalidDocArgs(CompilationException): +class DocArgsError(CompilationError): def __init__(self, node, args): self.node = node self.args = args @@ -972,8 +912,8 @@ def get_message(self) -> str: return msg -class DocTargetNotFound(CompilationException): - def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str]): +class DocTargetNotFoundError(CompilationError): + def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str] = None): self.node = node self.target_doc_name = target_doc_name self.target_doc_package = target_doc_package @@ -987,7 +927,7 @@ def get_message(self) -> str: return msg -class MacroInvalidDispatchArg(CompilationException): +class MacroDispatchArgError(CompilationError): def __init__(self, macro_name: str): self.macro_name = macro_name super().__init__(msg=self.get_message()) @@ -1006,7 +946,7 @@ def get_message(self) -> str: return msg -class DuplicateMacroName(CompilationException): +class DuplicateMacroNameError(CompilationError): def __init__(self, node_1, node_2, namespace: str): self.node_1 = node_1 self.node_2 = node_2 @@ -1032,7 +972,7 @@ def get_message(self) -> str: # parser level exceptions -class InvalidDictParse(ParsingException): +class DictParseError(ParsingError): def __init__(self, exc: ValidationError, node): self.exc = exc self.node = node @@ -1040,7 +980,7 @@ def __init__(self, exc: ValidationError, node): super().__init__(msg=msg) -class InvalidConfigUpdate(ParsingException): +class ConfigUpdateError(ParsingError): def __init__(self, exc: ValidationError, node): self.exc = exc self.node = node @@ -1048,7 +988,7 @@ def __init__(self, exc: ValidationError, node): super().__init__(msg=msg) -class PythonParsingException(ParsingException): +class PythonParsingError(ParsingError): def __init__(self, exc: SyntaxError, node): self.exc = exc self.node = node @@ -1060,7 +1000,7 @@ def get_message(self) -> str: return msg -class PythonLiteralEval(ParsingException): +class PythonLiteralEvalError(ParsingError): 
def __init__(self, exc: Exception, node): self.exc = exc self.node = node @@ -1076,14 +1016,14 @@ def get_message(self) -> str: return msg -class InvalidModelConfig(ParsingException): +class ModelConfigError(ParsingError): def __init__(self, exc: ValidationError, node): self.msg = self.validator_error_message(exc) self.node = node super().__init__(msg=self.msg) -class YamlParseListFailure(ParsingException): +class YamlParseListError(ParsingError): def __init__( self, path: str, @@ -1108,7 +1048,7 @@ def get_message(self) -> str: return msg -class YamlParseDictFailure(ParsingException): +class YamlParseDictError(ParsingError): def __init__( self, path: str, @@ -1133,8 +1073,13 @@ def get_message(self) -> str: return msg -class YamlLoadFailure(ParsingException): - def __init__(self, project_name: Optional[str], path: str, exc: ValidationException): +class YamlLoadError(ParsingError): + def __init__( + self, + path: str, + exc: DbtValidationError, + project_name: Optional[str] = None, + ): self.project_name = project_name self.path = path self.exc = exc @@ -1148,49 +1093,54 @@ def get_message(self) -> str: return msg -class InvalidTestConfig(ParsingException): +class TestConfigError(ParsingError): def __init__(self, exc: ValidationError, node): self.msg = self.validator_error_message(exc) self.node = node super().__init__(msg=self.msg) -class InvalidSchemaConfig(ParsingException): +class SchemaConfigError(ParsingError): def __init__(self, exc: ValidationError, node): self.msg = self.validator_error_message(exc) self.node = node super().__init__(msg=self.msg) -class InvalidSnapshopConfig(ParsingException): +class SnapshopConfigError(ParsingError): def __init__(self, exc: ValidationError, node): self.msg = self.validator_error_message(exc) self.node = node super().__init__(msg=self.msg) -class SameKeyNested(CompilationException): +class SameKeyNestedError(CompilationError): def __init__(self): msg = "Test cannot have the same key at the top-level and in config" super().__init__(msg=msg) -class TestArgIncludesModel(CompilationException): +class TestArgIncludesModelError(CompilationError): def __init__(self): msg = 'Test arguments include "model", which is a reserved argument' super().__init__(msg=msg) -class UnexpectedTestNamePattern(CompilationException): +class UnexpectedTestNamePatternError(CompilationError): def __init__(self, test_name: str): self.test_name = test_name msg = f"Test name string did not match expected pattern: {self.test_name}" super().__init__(msg=msg) -class CustomMacroPopulatingConfigValues(CompilationException): +class CustomMacroPopulatingConfigValueError(CompilationError): def __init__( - self, target_name: str, column_name: Optional[str], name: str, key: str, err_msg: str + self, + target_name: str, + name: str, + key: str, + err_msg: str, + column_name: Optional[str] = None, ): self.target_name = target_name self.column_name = column_name @@ -1220,21 +1170,21 @@ def get_message(self) -> str: return msg -class TagsNotListOfStrings(CompilationException): +class TagsNotListOfStringsError(CompilationError): def __init__(self, tags: Any): self.tags = tags msg = f"got {self.tags} ({type(self.tags)}) for tags, expected a list of strings" super().__init__(msg=msg) -class TagNotString(CompilationException): +class TagNotStringError(CompilationError): def __init__(self, tag: Any): self.tag = tag msg = f"got {self.tag} ({type(self.tag)}) for tag, expected a str" super().__init__(msg=msg) -class TestNameNotString(ParsingException): +class TestNameNotStringError(ParsingError): def 
__init__(self, test_name: Any): self.test_name = test_name super().__init__(msg=self.get_message()) @@ -1245,7 +1195,7 @@ def get_message(self) -> str: return msg -class TestArgsNotDict(ParsingException): +class TestArgsNotDictError(ParsingError): def __init__(self, test_args: Any): self.test_args = test_args super().__init__(msg=self.get_message()) @@ -1256,7 +1206,7 @@ def get_message(self) -> str: return msg -class TestDefinitionDictLength(ParsingException): +class TestDefinitionDictLengthError(ParsingError): def __init__(self, test): self.test = test super().__init__(msg=self.get_message()) @@ -1270,7 +1220,7 @@ def get_message(self) -> str: return msg -class TestInvalidType(ParsingException): +class TestTypeError(ParsingError): def __init__(self, test: Any): self.test = test super().__init__(msg=self.get_message()) @@ -1281,7 +1231,7 @@ def get_message(self) -> str: # This is triggered across multiple files -class EnvVarMissing(ParsingException): +class EnvVarMissingError(ParsingError): def __init__(self, var: str): self.var = var super().__init__(msg=self.get_message()) @@ -1291,7 +1241,7 @@ def get_message(self) -> str: return msg -class TargetNotFound(CompilationException): +class TargetNotFoundError(CompilationError): def __init__( self, node, @@ -1330,7 +1280,7 @@ def get_message(self) -> str: return msg -class DuplicateSourcePatchName(CompilationException): +class DuplicateSourcePatchNameError(CompilationError): def __init__(self, patch_1, patch_2): self.patch_1 = patch_1 self.patch_2 = patch_2 @@ -1352,7 +1302,7 @@ def get_message(self) -> str: return msg -class DuplicateMacroPatchName(CompilationException): +class DuplicateMacroPatchNameError(CompilationError): def __init__(self, patch_1, existing_patch_path): self.patch_1 = patch_1 self.existing_patch_path = existing_patch_path @@ -1373,7 +1323,7 @@ def get_message(self) -> str: # core level exceptions -class DuplicateAlias(AliasException): +class DuplicateAliasError(AliasError): def __init__(self, kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str): self.kwargs = kwargs self.aliases = aliases @@ -1390,9 +1340,7 @@ def get_message(self) -> str: # Postgres Exceptions - - -class UnexpectedDbReference(NotImplementedException): +class UnexpectedDbReferenceError(NotImplementedError): def __init__(self, adapter, database, expected): self.adapter = adapter self.database = database @@ -1404,7 +1352,7 @@ def get_message(self) -> str: return msg -class CrossDbReferenceProhibited(CompilationException): +class CrossDbReferenceProhibitedError(CompilationError): def __init__(self, adapter, exc_msg: str): self.adapter = adapter self.exc_msg = exc_msg @@ -1415,7 +1363,7 @@ def get_message(self) -> str: return msg -class IndexConfigNotDict(CompilationException): +class IndexConfigNotDictError(CompilationError): def __init__(self, raw_index: Any): self.raw_index = raw_index super().__init__(msg=self.get_message()) @@ -1429,7 +1377,7 @@ def get_message(self) -> str: return msg -class InvalidIndexConfig(CompilationException): +class IndexConfigError(CompilationError): def __init__(self, exc: TypeError): self.exc = exc super().__init__(msg=self.get_message()) @@ -1441,7 +1389,7 @@ def get_message(self) -> str: # adapters exceptions -class InvalidMacroResult(CompilationException): +class MacroResultError(CompilationError): def __init__(self, freshness_macro_name: str, table): self.freshness_macro_name = freshness_macro_name self.table = table @@ -1453,7 +1401,7 @@ def get_message(self) -> str: return msg -class 
SnapshotTargetNotSnapshotTable(CompilationException): +class SnapshotTargetNotSnapshotTableError(CompilationError): def __init__(self, missing: List): self.missing = missing super().__init__(msg=self.get_message()) @@ -1465,7 +1413,7 @@ def get_message(self) -> str: return msg -class SnapshotTargetIncomplete(CompilationException): +class SnapshotTargetIncompleteError(CompilationError): def __init__(self, extra: List, missing: List): self.extra = extra self.missing = missing @@ -1481,7 +1429,7 @@ def get_message(self) -> str: return msg -class RenameToNoneAttempted(CompilationException): +class RenameToNoneAttemptedError(CompilationError): def __init__(self, src_name: str, dst_name: str, name: str): self.src_name = src_name self.dst_name = dst_name @@ -1490,21 +1438,21 @@ def __init__(self, src_name: str, dst_name: str, name: str): super().__init__(msg=self.msg) -class NullRelationDropAttempted(CompilationException): +class NullRelationDropAttemptedError(CompilationError): def __init__(self, name: str): self.name = name self.msg = f"Attempted to drop a null relation for {self.name}" super().__init__(msg=self.msg) -class NullRelationCacheAttempted(CompilationException): +class NullRelationCacheAttemptedError(CompilationError): def __init__(self, name: str): self.name = name self.msg = f"Attempted to cache a null relation for {self.name}" super().__init__(msg=self.msg) -class InvalidQuoteConfigType(CompilationException): +class QuoteConfigTypeError(CompilationError): def __init__(self, quote_config: Any): self.quote_config = quote_config super().__init__(msg=self.get_message()) @@ -1517,7 +1465,7 @@ def get_message(self) -> str: return msg -class MultipleDatabasesNotAllowed(CompilationException): +class MultipleDatabasesNotAllowedError(CompilationError): def __init__(self, databases): self.databases = databases super().__init__(msg=self.get_message()) @@ -1527,26 +1475,25 @@ def get_message(self) -> str: return msg -class RelationTypeNull(CompilationException): +class RelationTypeNullError(CompilationError): def __init__(self, relation): self.relation = relation self.msg = f"Tried to drop relation {self.relation}, but its type is null." super().__init__(msg=self.msg) -class MaterializationNotAvailable(CompilationException): - def __init__(self, model, adapter_type: str): - self.model = model +class MaterializationNotAvailableError(CompilationError): + def __init__(self, materialization, adapter_type: str): + self.materialization = materialization self.adapter_type = adapter_type super().__init__(msg=self.get_message()) def get_message(self) -> str: - materialization = self.model.get_materialization() - msg = f"Materialization '{materialization}' is not available for {self.adapter_type}!" + msg = f"Materialization '{self.materialization}' is not available for {self.adapter_type}!" 
return msg -class RelationReturnedMultipleResults(CompilationException): +class RelationReturnedMultipleResultsError(CompilationError): def __init__(self, kwargs: Mapping[str, Any], matches: List): self.kwargs = kwargs self.matches = matches @@ -1561,7 +1508,7 @@ def get_message(self) -> str: return msg -class ApproximateMatch(CompilationException): +class ApproximateMatchError(CompilationError): def __init__(self, target, relation): self.target = target self.relation = relation @@ -1579,8 +1526,7 @@ def get_message(self) -> str: return msg -# adapters exceptions -class UnexpectedNull(DatabaseException): +class UnexpectedNullError(DbtDatabaseError): def __init__(self, field_name: str, source): self.field_name = field_name self.source = source @@ -1591,7 +1537,7 @@ def __init__(self, field_name: str, source): super().__init__(msg) -class UnexpectedNonTimestamp(DatabaseException): +class UnexpectedNonTimestampError(DbtDatabaseError): def __init__(self, field_name: str, source, dt: Any): self.field_name = field_name self.source = source @@ -1604,7 +1550,7 @@ def __init__(self, field_name: str, source, dt: Any): # deps exceptions -class MultipleVersionGitDeps(DependencyException): +class MultipleVersionGitDepsError(DependencyError): def __init__(self, git: str, requested): self.git = git self.requested = requested @@ -1615,7 +1561,7 @@ def __init__(self, git: str, requested): super().__init__(msg) -class DuplicateProjectDependency(DependencyException): +class DuplicateProjectDependencyError(DependencyError): def __init__(self, project_name: str): self.project_name = project_name msg = ( @@ -1625,7 +1571,7 @@ def __init__(self, project_name: str): super().__init__(msg) -class DuplicateDependencyToRoot(DependencyException): +class DuplicateDependencyToRootError(DependencyError): def __init__(self, project_name: str): self.project_name = project_name msg = ( @@ -1636,7 +1582,7 @@ def __init__(self, project_name: str): super().__init__(msg) -class MismatchedDependencyTypes(DependencyException): +class MismatchedDependencyTypeError(DependencyError): def __init__(self, new, old): self.new = new self.old = old @@ -1647,7 +1593,7 @@ def __init__(self, new, old): super().__init__(msg) -class PackageVersionNotFound(DependencyException): +class PackageVersionNotFoundError(DependencyError): def __init__( self, package_name: str, @@ -1683,7 +1629,7 @@ def get_message(self) -> str: return msg -class PackageNotFound(DependencyException): +class PackageNotFoundError(DependencyError): def __init__(self, package_name: str): self.package_name = package_name msg = f"Package {self.package_name} was not found in the package index" @@ -1691,37 +1637,35 @@ def __init__(self, package_name: str): # config level exceptions - - -class ProfileConfigInvalid(DbtProfileError): +class ProfileConfigError(DbtProfileError): def __init__(self, exc: ValidationError): self.exc = exc msg = self.validator_error_message(self.exc) super().__init__(msg=msg) -class ProjectContractInvalid(DbtProjectError): +class ProjectContractError(DbtProjectError): def __init__(self, exc: ValidationError): self.exc = exc msg = self.validator_error_message(self.exc) super().__init__(msg=msg) -class ProjectContractBroken(DbtProjectError): +class ProjectContractBrokenError(DbtProjectError): def __init__(self, exc: ValidationError): self.exc = exc msg = self.validator_error_message(self.exc) super().__init__(msg=msg) -class ConfigContractBroken(DbtProjectError): +class ConfigContractBrokenError(DbtProjectError): def __init__(self, exc: ValidationError): 
self.exc = exc msg = self.validator_error_message(self.exc) super().__init__(msg=msg) -class NonUniquePackageName(CompilationException): +class NonUniquePackageNameError(CompilationError): def __init__(self, project_name: str): self.project_name = project_name super().__init__(msg=self.get_message()) @@ -1736,7 +1680,7 @@ def get_message(self) -> str: return msg -class UninstalledPackagesFound(CompilationException): +class UninstalledPackagesFoundError(CompilationError): def __init__( self, count_packages_specified: int, @@ -1759,22 +1703,34 @@ def get_message(self) -> str: return msg -class VarsArgNotYamlDict(CompilationException): - def __init__(self, var_type): +class OptionNotYamlDictError(CompilationError): + def __init__(self, var_type, option_name): self.var_type = var_type + self.option_name = option_name super().__init__(msg=self.get_message()) def get_message(self) -> str: type_name = self.var_type.__name__ - msg = f"The --vars argument must be a YAML dictionary, but was of type '{type_name}'" + msg = f"The --{self.option_name} argument must be a YAML dictionary, but was of type '{type_name}'" return msg # contracts level +class UnrecognizedCredentialTypeError(CompilationError): + def __init__(self, typename: str, supported_types: List): + self.typename = typename + self.supported_types = supported_types + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format( + self.typename, ", ".join('"{}"'.format(t) for t in self.supported_types) + ) + return msg -class DuplicateMacroInPackage(CompilationException): +class DuplicateMacroInPackageError(CompilationError): def __init__(self, macro, macro_mapping: Mapping): self.macro = macro self.macro_mapping = macro_mapping @@ -1803,7 +1759,7 @@ def get_message(self) -> str: return msg -class DuplicateMaterializationName(CompilationException): +class DuplicateMaterializationNameError(CompilationError): def __init__(self, macro, other_macro): self.macro = macro self.other_macro = other_macro @@ -1823,7 +1779,30 @@ def get_message(self) -> str: # jinja exceptions -class MissingConfig(CompilationException): +class PatchTargetNotFoundError(CompilationError): + def __init__(self, patches: Dict): + self.patches = patches + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + patch_list = "\n\t".join( + f"model {p.name} (referenced in path {p.original_file_path})" + for p in self.patches.values() + ) + msg = f"dbt could not find models for the following patches:\n\t{patch_list}" + return msg + + +class MacroNotFoundError(CompilationError): + def __init__(self, node, target_macro_id: str): + self.node = node + self.target_macro_id = target_macro_id + msg = f"'{self.node.unique_id}' references macro '{self.target_macro_id}' which is not defined!" 
+ + super().__init__(msg=msg) + + +class MissingConfigError(CompilationError): def __init__(self, unique_id: str, name: str): self.unique_id = unique_id self.name = name @@ -1833,25 +1812,24 @@ def __init__(self, unique_id: str, name: str): super().__init__(msg=msg) -class MissingMaterialization(CompilationException): - def __init__(self, model, adapter_type): - self.model = model +class MissingMaterializationError(CompilationError): + def __init__(self, materialization, adapter_type): + self.materialization = materialization self.adapter_type = adapter_type super().__init__(msg=self.get_message()) def get_message(self) -> str: - materialization = self.model.get_materialization() valid_types = "'default'" if self.adapter_type != "default": valid_types = f"'default' and '{self.adapter_type}'" - msg = f"No materialization '{materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})" + msg = f"No materialization '{self.materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})" return msg -class MissingRelation(CompilationException): +class MissingRelationError(CompilationError): def __init__(self, relation, model=None): self.relation = relation self.model = model @@ -1859,7 +1837,7 @@ def __init__(self, relation, model=None): super().__init__(msg=msg) -class AmbiguousAlias(CompilationException): +class AmbiguousAliasError(CompilationError): def __init__(self, node_1, node_2, duped_name=None): self.node_1 = node_1 self.node_2 = node_2 @@ -1880,7 +1858,7 @@ def get_message(self) -> str: return msg -class AmbiguousCatalogMatch(CompilationException): +class AmbiguousCatalogMatchError(CompilationError): def __init__(self, unique_id: str, match_1, match_2): self.unique_id = unique_id self.match_1 = match_1 @@ -1904,14 +1882,14 @@ def get_message(self) -> str: return msg -class CacheInconsistency(InternalException): +class CacheInconsistencyError(DbtInternalError): def __init__(self, msg: str): self.msg = msg formatted_msg = f"Cache inconsistency detected: {self.msg}" super().__init__(msg=formatted_msg) -class NewNameAlreadyInCache(CacheInconsistency): +class NewNameAlreadyInCacheError(CacheInconsistencyError): def __init__(self, old_key: str, new_key: str): self.old_key = old_key self.new_key = new_key @@ -1921,21 +1899,21 @@ def __init__(self, old_key: str, new_key: str): super().__init__(msg) -class ReferencedLinkNotCached(CacheInconsistency): +class ReferencedLinkNotCachedError(CacheInconsistencyError): def __init__(self, referenced_key: str): self.referenced_key = referenced_key msg = f"in add_link, referenced link key {self.referenced_key} not in cache!" super().__init__(msg) -class DependentLinkNotCached(CacheInconsistency): +class DependentLinkNotCachedError(CacheInconsistencyError): def __init__(self, dependent_key: str): self.dependent_key = dependent_key msg = f"in add_link, dependent link key {self.dependent_key} not in cache!" super().__init__(msg) -class TruncatedModelNameCausedCollision(CacheInconsistency): +class TruncatedModelNameCausedCollisionError(CacheInconsistencyError): def __init__(self, new_key, relations: Dict): self.new_key = new_key self.relations = relations @@ -1962,14 +1940,14 @@ def get_message(self) -> str: return msg -class NoneRelationFound(CacheInconsistency): +class NoneRelationFoundError(CacheInconsistencyError): def __init__(self): msg = "in get_relations, a None relation was found in the cache!" 
super().__init__(msg) # this is part of the context and also raised in dbt.contracts.relation.py -class DataclassNotDict(CompilationException): +class DataclassNotDictError(CompilationError): def __init__(self, obj: Any): self.obj = obj super().__init__(msg=self.get_message()) @@ -1983,7 +1961,7 @@ def get_message(self) -> str: return msg -class DependencyNotFound(CompilationException): +class DependencyNotFoundError(CompilationError): def __init__(self, node, node_description, required_pkg): self.node = node self.node_description = node_description @@ -2000,7 +1978,7 @@ def get_message(self) -> str: return msg -class DuplicatePatchPath(CompilationException): +class DuplicatePatchPathError(CompilationError): def __init__(self, patch_1, existing_patch_path): self.patch_1 = patch_1 self.existing_patch_path = existing_patch_path @@ -2022,8 +2000,8 @@ def get_message(self) -> str: return msg -# should this inherit ParsingException instead? -class DuplicateResourceName(CompilationException): +# should this inherit ParsingError instead? +class DuplicateResourceNameError(CompilationError): def __init__(self, node_1, node_2): self.node_1 = node_1 self.node_2 = node_2 @@ -2075,7 +2053,7 @@ def get_message(self) -> str: return msg -class InvalidPropertyYML(CompilationException): +class PropertyYMLError(CompilationError): def __init__(self, path: str, issue: str): self.path = path self.issue = issue @@ -2090,14 +2068,14 @@ def get_message(self) -> str: return msg -class PropertyYMLMissingVersion(InvalidPropertyYML): +class PropertyYMLMissingVersionError(PropertyYMLError): def __init__(self, path: str): self.path = path self.issue = f"the yml property file {self.path} is missing a version tag" super().__init__(self.path, self.issue) -class PropertyYMLVersionNotInt(InvalidPropertyYML): +class PropertyYMLVersionNotIntError(PropertyYMLError): def __init__(self, path: str, version: Any): self.path = path self.version = version @@ -2108,7 +2086,7 @@ def __init__(self, path: str, version: Any): super().__init__(self.path, self.issue) -class PropertyYMLInvalidTag(InvalidPropertyYML): +class PropertyYMLInvalidTagError(PropertyYMLError): def __init__(self, path: str, version: int): self.path = path self.version = version @@ -2116,7 +2094,7 @@ def __init__(self, path: str, version: int): super().__init__(self.path, self.issue) -class RelationWrongType(CompilationException): +class RelationWrongTypeError(CompilationError): def __init__(self, relation, expected_type, model=None): self.relation = relation self.expected_type = expected_type @@ -2134,144 +2112,375 @@ def get_message(self) -> str: return msg +# not modifying these since rpc should be deprecated soon +class UnknownAsyncIDException(Exception): + CODE = 10012 + MESSAGE = "RPC server got an unknown async ID" + + def __init__(self, task_id): + self.task_id = task_id + + def __str__(self): + return f"{self.MESSAGE}: {self.task_id}" + + +class RPCFailureResult(DbtRuntimeError): + CODE = 10002 + MESSAGE = "RPC execution error" + + +class RPCTimeoutException(DbtRuntimeError): + CODE = 10008 + MESSAGE = "RPC timeout error" + + def __init__(self, timeout: Optional[float] = None): + super().__init__(self.MESSAGE) + self.timeout = timeout + + def data(self): + result = super().data() + result.update( + { + "timeout": self.timeout, + "message": f"RPC timed out after {self.timeout}s", + } + ) + return result + + +class RPCKilledException(DbtRuntimeError): + CODE = 10009 + MESSAGE = "RPC process killed" + + def __init__(self, signum: int): + self.signum = signum + 
self.msg = f"RPC process killed by signal {self.signum}" + super().__init__(self.msg) + + def data(self): + return { + "signum": self.signum, + "message": self.msg, + } + + +class RPCCompiling(DbtRuntimeError): + CODE = 10010 + MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status" + + def __init__(self, msg: str = None, node=None): + if msg is None: + msg = "compile in progress" + super().__init__(msg, node) + + +class RPCLoadException(DbtRuntimeError): + CODE = 10011 + MESSAGE = ( + 'RPC server failed to compile project, call the "status" method for' " compile status" + ) + + def __init__(self, cause: Dict[str, Any]): + self.cause = cause + self.msg = f'{self.MESSAGE}: {self.cause["message"]}' + super().__init__(self.msg) + + def data(self): + return {"cause": self.cause, "message": self.msg} + + # These are copies of what's in dbt/context/exceptions_jinja.py to not immediately break adapters # utilizing these functions as exceptions. These are direct copies to avoid circular imports. # They will be removed in 1 (or 2?) versions. Issue to be created to ensure it happens. # TODO: add deprecation to functions +DEPRECATION_VERSION = "1.5.0" +SUGGESTED_ACTION = "using `raise {exception}` directly instead" +REASON = "See https://github.com/dbt-labs/dbt-core/issues/6393 for more details" + + +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="JinjaLogWarning"), + reason=REASON, +) def warn(msg, node=None): warn_or_error(JinjaLogWarning(msg=msg, node_info=get_node_info())) return "" +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MissingConfigError"), + reason=REASON, +) def missing_config(model, name) -> NoReturn: - raise MissingConfig(unique_id=model.unique_id, name=name) + raise MissingConfigError(unique_id=model.unique_id, name=name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MissingMaterializationError"), + reason=REASON, +) def missing_materialization(model, adapter_type) -> NoReturn: - raise MissingMaterialization(model=model, adapter_type=adapter_type) + materialization = model.config.materialized + raise MissingMaterializationError(materialization=materialization, adapter_type=adapter_type) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MissingRelationError"), + reason=REASON, +) def missing_relation(relation, model=None) -> NoReturn: - raise MissingRelation(relation, model) + raise MissingRelationError(relation, model) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousAliasError"), + reason=REASON, +) def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: - raise AmbiguousAlias(node_1, node_2, duped_name) + raise AmbiguousAliasError(node_1, node_2, duped_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousCatalogMatchError"), + reason=REASON, +) def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: - raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + raise AmbiguousCatalogMatchError(unique_id, match_1, match_2) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="CacheInconsistencyError"), + reason=REASON, +) def raise_cache_inconsistent(message) -> NoReturn: - raise CacheInconsistency(message) + raise 
CacheInconsistencyError(message) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DataclassNotDictError"), + reason=REASON, +) def raise_dataclass_not_dict(obj) -> NoReturn: - raise DataclassNotDict(obj) + raise DataclassNotDictError(obj) -# note: this is called all over the code in addition to in jinja +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="CompilationError"), + reason=REASON, +) def raise_compiler_error(msg, node=None) -> NoReturn: - raise CompilationException(msg, node) + raise CompilationError(msg, node) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DbtDatabaseError"), + reason=REASON, +) def raise_database_error(msg, node=None) -> NoReturn: - raise DatabaseException(msg, node) + raise DbtDatabaseError(msg, node) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DependencyNotFoundError"), + reason=REASON, +) def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: - raise DependencyNotFound(node, node_description, required_pkg) + raise DependencyNotFoundError(node, node_description, required_pkg) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DependencyError"), + reason=REASON, +) def raise_dependency_error(msg) -> NoReturn: - raise DependencyException(scrub_secrets(msg, env_secrets())) + raise DependencyError(scrub_secrets(msg, env_secrets())) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicatePatchPathError"), + reason=REASON, +) def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: - raise DuplicatePatchPath(patch_1, existing_patch_path) + raise DuplicatePatchPathError(patch_1, existing_patch_path) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateResourceNameError"), + reason=REASON, +) def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: - raise DuplicateResourceName(node_1, node_2) + raise DuplicateResourceNameError(node_1, node_2) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="PropertyYMLError"), + reason=REASON, +) def raise_invalid_property_yml_version(path, issue) -> NoReturn: - raise InvalidPropertyYML(path, issue) + raise PropertyYMLError(path, issue) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="NotImplementedError"), + reason=REASON, +) def raise_not_implemented(msg) -> NoReturn: - raise NotImplementedException(msg) + raise NotImplementedError(msg) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RelationWrongTypeError"), + reason=REASON, +) def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: - raise RelationWrongType(relation, expected_type, model) + raise RelationWrongTypeError(relation, expected_type, model) # these were implemented in core so deprecating here by calling the new exception directly + + +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateAliasError"), + reason=REASON, +) def raise_duplicate_alias( kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str ) -> NoReturn: - raise DuplicateAlias(kwargs, aliases, canonical_key) + raise DuplicateAliasError(kwargs, aliases, canonical_key) 
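The shims above all follow one pattern: keep the legacy signature, mark it with `@deprecated`, and delegate to the renamed exception. The `deprecated` helper itself is not shown in this diff; the following is only a rough sketch of the shape such a decorator factory could take, assuming the `(version, suggested_action, reason)` signature seen at the call sites and using the stdlib `warnings` module in place of dbt's structured event system:

import functools
import warnings


def deprecated(version: str, suggested_action: str, reason: str):
    # Hypothetical sketch -- the real dbt-core helper lives elsewhere and
    # likely reports through dbt's event system rather than `warnings`.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Emit the notice, then fall through to the legacy behavior.
            warnings.warn(
                f"`{func.__name__}` is deprecated as of {version}; "
                f"consider {suggested_action}. {reason}",
                DeprecationWarning,
                stacklevel=2,
            )
            return func(*args, **kwargs)

        return wrapper

    return decorator

Routing every legacy entry point through a single marker like this keeps the old functions working for adapters while giving them one greppable signal to migrate.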
+@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateSourcePatchNameError"), + reason=REASON, +) def raise_duplicate_source_patch_name(patch_1, patch_2): - raise DuplicateSourcePatchName(patch_1, patch_2) + raise DuplicateSourcePatchNameError(patch_1, patch_2) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroPatchNameError"), + reason=REASON, +) def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): - raise DuplicateMacroPatchName(patch_1, existing_patch_path) + raise DuplicateMacroPatchNameError(patch_1, existing_patch_path) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroNameError"), + reason=REASON, +) def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: - raise DuplicateMacroName(node_1, node_2, namespace) + raise DuplicateMacroNameError(node_1, node_2, namespace) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="ApproximateMatchError"), + reason=REASON, +) def approximate_relation_match(target, relation): - raise ApproximateMatch(target, relation) + raise ApproximateMatchError(target, relation) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResultsError"), + reason=REASON, +) def get_relation_returned_multiple_results(kwargs, matches): - raise RelationReturnedMultipleResults(kwargs, matches) + raise RelationReturnedMultipleResultsError(kwargs, matches) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="OperationError"), + reason=REASON, +) def system_error(operation_name): - # Note: This was converted for core to use SymbolicLinkError because it's the only way it was used. Maintaining flexibility here for now. - msg = ( - f"dbt encountered an error when attempting to {operation_name}. 
" - "If this error persists, please create an issue at: \n\n" - "https://github.com/dbt-labs/dbt-core" - ) - raise CompilationException(msg) + raise OperationError(operation_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="InvalidMaterializationArgError"), + reason=REASON, +) def invalid_materialization_argument(name, argument): - raise InvalidMaterializationArg(name, argument) + raise MaterializationArgError(name, argument) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="BadSpecError"), + reason=REASON, +) def bad_package_spec(repo, spec, error_message): - msg = f"Error checking out spec='{spec}' for repo {repo}\n{error_message}" - raise InternalException(scrub_secrets(msg, env_secrets())) + raise BadSpecError(spec, repo, error_message) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="CommandResultError"), + reason=REASON, +) def raise_git_cloning_error(error: CommandResultError) -> NoReturn: - error.cmd = list(scrub_secrets(str(error.cmd), env_secrets())) raise error +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="UnknownGitCloningProblemError"), + reason=REASON, +) def raise_git_cloning_problem(repo) -> NoReturn: - raise GitCloningProblem(repo) + raise UnknownGitCloningProblemError(repo) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MacroDispatchArgError"), + reason=REASON, +) def macro_invalid_dispatch_arg(macro_name) -> NoReturn: - raise MacroInvalidDispatchArg(macro_name) + raise MacroDispatchArgError(macro_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="GraphDependencyNotFoundError"), + reason=REASON, +) def dependency_not_found(node, dependency): - raise GraphDependencyNotFound(node, dependency) + raise GraphDependencyNotFoundError(node, dependency) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="TargetNotFoundError"), + reason=REASON, +) def target_not_found( node, target_name: str, @@ -2279,7 +2488,7 @@ def target_not_found( target_package: Optional[str] = None, disabled: Optional[bool] = None, ) -> NoReturn: - raise TargetNotFound( + raise TargetNotFoundError( node=node, target_name=target_name, target_kind=target_kind, @@ -2288,83 +2497,153 @@ def target_not_found( ) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DocTargetNotFoundError"), + reason=REASON, +) def doc_target_not_found( - model, target_doc_name: str, target_doc_package: Optional[str] + model, target_doc_name: str, target_doc_package: Optional[str] = None ) -> NoReturn: - raise DocTargetNotFound( + raise DocTargetNotFoundError( node=model, target_doc_name=target_doc_name, target_doc_package=target_doc_package ) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DocArgsError"), + reason=REASON, +) def doc_invalid_args(model, args) -> NoReturn: - raise InvalidDocArgs(node=model, args=args) + raise DocArgsError(node=model, args=args) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RefBadContextError"), + reason=REASON, +) def ref_bad_context(model, args) -> NoReturn: - raise RefBadContext(node=model, args=args) + raise RefBadContextError(node=model, args=args) +@deprecated( + 
version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MetricArgsError"), + reason=REASON, +) def metric_invalid_args(model, args) -> NoReturn: - raise MetricInvalidArgs(node=model, args=args) + raise MetricArgsError(node=model, args=args) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RefArgsError"), + reason=REASON, +) def ref_invalid_args(model, args) -> NoReturn: - raise RefInvalidArgs(node=model, args=args) + raise RefArgsError(node=model, args=args) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="BooleanError"), + reason=REASON, +) def invalid_bool_error(got_value, macro_name) -> NoReturn: - raise InvalidBoolean(return_value=got_value, macro_name=macro_name) + raise BooleanError(return_value=got_value, macro_name=macro_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MacroArgTypeError"), + reason=REASON, +) def invalid_type_error(method_name, arg_name, got_value, expected_type) -> NoReturn: - """Raise a CompilationException when an adapter method available to macros + """Raise a MacroArgTypeError when an adapter method available to macros has changed. """ - raise InvalidMacroArgType(method_name, arg_name, got_value, expected_type) + raise MacroArgTypeError(method_name, arg_name, got_value, expected_type) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="SecretEnvVarLocationError"), + reason=REASON, +) def disallow_secret_env_var(env_var_name) -> NoReturn: """Raise an error when a secret env var is referenced outside allowed rendering contexts""" - raise DisallowSecretEnvVar(env_var_name) + raise SecretEnvVarLocationError(env_var_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="ParsingError"), + reason=REASON, +) def raise_parsing_error(msg, node=None) -> NoReturn: - raise ParsingException(msg, node) + raise ParsingError(msg, node) -# These are the exceptions functions that were not called within dbt-core but will remain here but deprecated to give a chance to rework -# TODO: is this valid? Should I create a special exception class for this? 
+# These are the exception functions that were not called within dbt-core but will remain +# here deprecated to give adapters a chance to rework +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="UnrecognizedCredentialTypeError"), + reason=REASON, +) def raise_unrecognized_credentials_type(typename, supported_types): - msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format( - typename, ", ".join('"{}"'.format(t) for t in supported_types) - ) - raise CompilationException(msg) + raise UnrecognizedCredentialTypeError(typename, supported_types) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="PatchTargetNotFoundError"), + reason=REASON, +) def raise_patch_targets_not_found(patches): - patch_list = "\n\t".join( - f"model {p.name} (referenced in path {p.original_file_path})" for p in patches.values() - ) - msg = f"dbt could not find models for the following patches:\n\t{patch_list}" - raise CompilationException(msg) + raise PatchTargetNotFoundError(patches) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResultsError"), + reason=REASON, +) def multiple_matching_relations(kwargs, matches): - raise RelationReturnedMultipleResults(kwargs, matches) + raise RelationReturnedMultipleResultsError(kwargs, matches) -# while this isn't in our code I wouldn't be surpised it's in adapter code +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MaterializationNotAvailableError"), + reason=REASON, +) def materialization_not_available(model, adapter_type): - raise MaterializationNotAvailable(model, adapter_type) + materialization = model.config.materialized + raise MaterializationNotAvailableError( + materialization=materialization, adapter_type=adapter_type + ) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MacroNotFoundError"), + reason=REASON, +) def macro_not_found(model, target_macro_id): - msg = f"'{model.unique_id}' references macro '{target_macro_id}' which is not defined!" - raise CompilationException(msg=msg, node=model) + raise MacroNotFoundError(node=model, target_macro_id=target_macro_id) # adapters use this to format messages. it should be deprecated but live on for now +# TODO: What should the message here be? +@deprecated( + version=DEPRECATION_VERSION, + suggested_action="Format this message in the adapter", + reason="`validator_error_message` is now a method on DbtRuntimeError", +) def validator_error_message(exc): """Given a dbt.dataclass_schema.ValidationError (which is basically a jsonschema.ValidationError), return the relevant parts as a string diff --git a/core/dbt/flags.py b/core/dbt/flags.py index cecc024d7f4..b7f8ae95ef2 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -29,6 +29,7 @@ USE_EXPERIMENTAL_PARSER = None STATIC_PARSER = None WARN_ERROR = None +WARN_ERROR_OPTIONS = None WRITE_JSON = None PARTIAL_PARSE = None USE_COLORS = None @@ -54,6 +55,7 @@ "INDIRECT_SELECTION", "TARGET_PATH", "LOG_PATH", + "WARN_ERROR_OPTIONS", ] _NON_DBT_ENV_FLAGS = ["DO_NOT_TRACK"] @@ -66,6 +68,7 @@ "USE_EXPERIMENTAL_PARSER": False, "STATIC_PARSER": True, "WARN_ERROR": False, + "WARN_ERROR_OPTIONS": "{}", "WRITE_JSON": True, "PARTIAL_PARSE": True, "USE_COLORS": True, @@ -130,7 +133,7 @@ def set_from_args(args, user_config): # N.B. Multiple `globals` are purely for line length. 
# Because `global` is a parser directive (as opposed to a language construct) # black insists in putting them all on one line - global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER + global STRICT_MODE, FULL_REFRESH, WARN_ERROR, WARN_ERROR_OPTIONS, USE_EXPERIMENTAL_PARSER, STATIC_PARSER global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, QUIET, NO_PRINT, CACHE_SELECTED_ONLY @@ -146,6 +149,8 @@ def set_from_args(args, user_config): USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config) STATIC_PARSER = get_flag_value("STATIC_PARSER", args, user_config) WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config) + WARN_ERROR_OPTIONS = get_flag_value("WARN_ERROR_OPTIONS", args, user_config) + _check_mutually_exclusive(["WARN_ERROR", "WARN_ERROR_OPTIONS"], args, user_config) WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config) PARTIAL_PARSE = get_flag_value("PARTIAL_PARSE", args, user_config) USE_COLORS = get_flag_value("USE_COLORS", args, user_config) @@ -178,7 +183,7 @@ def _set_overrides_from_env(): def get_flag_value(flag, args, user_config): - flag_value = _load_flag_value(flag, args, user_config) + flag_value, _ = _load_flag_value(flag, args, user_config) if flag == "PRINTER_WIDTH": # must be ints flag_value = int(flag_value) @@ -188,20 +193,36 @@ def get_flag_value(flag, args, user_config): return flag_value +def _check_mutually_exclusive(group, args, user_config): + set_flag = None + for flag in group: + flag_set_by_user = not _flag_value_from_default(flag, args, user_config) + if flag_set_by_user and set_flag: + raise ValueError(f"{flag.lower()}: not allowed with argument {set_flag.lower()}") + elif flag_set_by_user: + set_flag = flag + + +def _flag_value_from_default(flag, args, user_config): + _, from_default = _load_flag_value(flag, args, user_config) + + return from_default + + def _load_flag_value(flag, args, user_config): lc_flag = flag.lower() flag_value = getattr(args, lc_flag, None) if flag_value is not None: - return flag_value + return flag_value, False flag_value = _get_flag_value_from_env(flag) if flag_value is not None: - return flag_value + return flag_value, False if user_config is not None and getattr(user_config, lc_flag, None) is not None: - return getattr(user_config, lc_flag) + return getattr(user_config, lc_flag), False - return flag_defaults[flag] + return flag_defaults[flag], True def _get_flag_value_from_env(flag): @@ -211,11 +232,10 @@ def _get_flag_value_from_env(flag): if env_value is None or env_value == "": return None - env_value = env_value.lower() if flag in _NON_BOOLEAN_FLAGS: flag_value = env_value else: - flag_value = env_set_bool(env_value) + flag_value = env_set_bool(env_value.lower()) return flag_value @@ -229,6 +249,7 @@ def get_flag_dict(): "use_experimental_parser": USE_EXPERIMENTAL_PARSER, "static_parser": STATIC_PARSER, "warn_error": WARN_ERROR, + "warn_error_options": WARN_ERROR_OPTIONS, "write_json": WRITE_JSON, "partial_parse": PARTIAL_PARSE, "use_colors": USE_COLORS, diff --git a/core/dbt/graph/cli.py b/core/dbt/graph/cli.py index 6059de6b042..a5581ed1d78 100644 --- a/core/dbt/graph/cli.py +++ b/core/dbt/graph/cli.py @@ -7,7 +7,7 @@ from typing import Dict, List, Optional, Tuple, Any, Union from dbt.contracts.selection import SelectorDefinition, SelectorFile -from dbt.exceptions import 
InternalException, ValidationException +from dbt.exceptions import DbtInternalError, DbtValidationError from .selector_spec import ( SelectionUnion, @@ -44,12 +44,14 @@ def parse_union( components=intersection_components, expect_exists=expect_exists, raw=raw_spec, + indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION), ) ) return SelectionUnion( components=union_components, expect_exists=False, raw=components, + indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION), ) @@ -78,9 +80,12 @@ def parse_difference( include, DEFAULT_INCLUDES, indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION) ) excluded = parse_union_from_default( - exclude, DEFAULT_EXCLUDES, indirect_selection=IndirectSelection.Eager + exclude, DEFAULT_EXCLUDES, indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION) + ) + return SelectionDifference( + components=[included, excluded], + indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION), ) - return SelectionDifference(components=[included, excluded]) RawDefinition = Union[str, Dict[str, Any]] @@ -89,15 +94,15 @@ def parse_difference( def _get_list_dicts(dct: Dict[str, Any], key: str) -> List[RawDefinition]: result: List[RawDefinition] = [] if key not in dct: - raise InternalException(f"Expected to find key {key} in dict, only found {list(dct)}") + raise DbtInternalError(f"Expected to find key {key} in dict, only found {list(dct)}") values = dct[key] if not isinstance(values, list): - raise ValidationException(f'Invalid value for key "{key}". Expected a list.') + raise DbtValidationError(f'Invalid value for key "{key}". Expected a list.') for value in values: if isinstance(value, dict): for value_key in value: if not isinstance(value_key, str): - raise ValidationException( + raise DbtValidationError( f'Expected all keys to "{key}" dict to be strings, ' f'but "{value_key}" is a "{type(value_key)}"' ) @@ -105,7 +110,7 @@ def _get_list_dicts(dct: Dict[str, Any], key: str) -> List[RawDefinition]: elif isinstance(value, str): result.append(value) else: - raise ValidationException( + raise DbtValidationError( f'Invalid value type {type(value)} in key "{key}", expected ' f"dict or str (value: {value})." 
) @@ -135,7 +140,7 @@ def _parse_include_exclude_subdefs( # do not allow multiple exclude: defs at the same level if diff_arg is not None: yaml_sel_cfg = yaml.dump(definition) - raise ValidationException( + raise DbtValidationError( f"You cannot provide multiple exclude arguments to the " f"same selector set operator:\n{yaml_sel_cfg}" ) @@ -177,7 +182,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe key = list(definition)[0] value = definition[key] if not isinstance(key, str): - raise ValidationException( + raise DbtValidationError( f'Expected definition key to be a "str", got one of type ' f'"{type(key)}" ({key})' ) dct = { @@ -187,7 +192,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe elif definition.get("method") == "selector": sel_def = definition.get("value") if sel_def not in result: - raise ValidationException(f"Existing selector definition for {sel_def} not found.") + raise DbtValidationError(f"Existing selector definition for {sel_def} not found.") return result[definition["value"]]["definition"] elif "method" in definition and "value" in definition: dct = definition @@ -195,7 +200,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe diff_arg = _parse_exclusions(definition, result=result) dct = {k: v for k, v in dct.items() if k != "exclude"} else: - raise ValidationException( + raise DbtValidationError( f'Expected either 1 key or else "method" ' f'and "value" keys, but got {list(definition)}' ) @@ -221,7 +226,7 @@ def parse_from_definition( and len(definition) > 1 ): keys = ",".join(definition.keys()) - raise ValidationException( + raise DbtValidationError( f"Only a single 'union' or 'intersection' key is allowed " f"in a root level selector definition; found {keys}." 
) @@ -234,7 +239,7 @@ def parse_from_definition( elif isinstance(definition, dict): return parse_dict_definition(definition, result=result) else: - raise ValidationException( + raise DbtValidationError( f"Expected to find union, intersection, str or dict, instead " f"found {type(definition)}: {definition}" ) diff --git a/core/dbt/graph/graph.py b/core/dbt/graph/graph.py index 2dda596e073..9c20750cd54 100644 --- a/core/dbt/graph/graph.py +++ b/core/dbt/graph/graph.py @@ -2,7 +2,7 @@ from itertools import product import networkx as nx # type: ignore -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError UniqueId = NewType("UniqueId", str) @@ -27,7 +27,7 @@ def __iter__(self) -> Iterator[UniqueId]: def ancestors(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[UniqueId]: """Returns all nodes having a path to `node` in `graph`""" if not self.graph.has_node(node): - raise InternalException(f"Node {node} not found in the graph!") + raise DbtInternalError(f"Node {node} not found in the graph!") return { child for _, child in nx.bfs_edges(self.graph, node, reverse=True, depth_limit=max_depth) @@ -36,7 +36,7 @@ def ancestors(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[Uniq def descendants(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[UniqueId]: """Returns all nodes reachable from `node` in `graph`""" if not self.graph.has_node(node): - raise InternalException(f"Node {node} not found in the graph!") + raise DbtInternalError(f"Node {node} not found in the graph!") return {child for _, child in nx.bfs_edges(self.graph, node, depth_limit=max_depth)} def select_childrens_parents(self, selected: Set[UniqueId]) -> Set[UniqueId]: diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index ed91596712b..fdae6327d0e 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -9,8 +9,8 @@ from dbt.events.types import SelectorReportInvalidSelector, NoNodesForSelectionCriteria from dbt.node_types import NodeType from dbt.exceptions import ( - InternalException, - InvalidSelectorException, + DbtInternalError, + InvalidSelectorError, ) from dbt.contracts.graph.nodes import GraphMemberNode from dbt.contracts.graph.manifest import Manifest @@ -78,7 +78,7 @@ def get_nodes_from_criteria( nodes = self.graph.nodes() try: collected = self.select_included(nodes, spec) - except InvalidSelectorException: + except InvalidSelectorError: valid_selectors = ", ".join(self.SELECTOR_METHODS) fire_event( SelectorReportInvalidSelector( @@ -134,7 +134,9 @@ def select_nodes_recursively(self, spec: SelectionSpec) -> Tuple[Set[UniqueId], initial_direct = spec.combined(direct_sets) indirect_nodes = spec.combined(indirect_sets) - direct_nodes = self.incorporate_indirect_nodes(initial_direct, indirect_nodes) + direct_nodes = self.incorporate_indirect_nodes( + initial_direct, indirect_nodes, spec.indirect_selection + ) if spec.expect_exists and len(direct_nodes) == 0: warn_or_error(NoNodesForSelectionCriteria(spec_raw=str(spec.raw))) @@ -181,7 +183,7 @@ def _is_match(self, unique_id: UniqueId) -> bool: elif unique_id in self.manifest.metrics: node = self.manifest.metrics[unique_id] else: - raise InternalException(f"Node {unique_id} not found in the manifest!") + raise DbtInternalError(f"Node {unique_id} not found in the manifest!") return self.node_is_match(node) def filter_selection(self, selected: Set[UniqueId]) -> Set[UniqueId]: @@ -197,7 +199,7 @@ def expand_selection( ) -> Tuple[Set[UniqueId], Set[UniqueId]]: # Test 
selection by default expands to include an implicitly/indirectly selected tests. # `dbt test -m model_a` also includes tests that directly depend on `model_a`. - # Expansion has two modes, EAGER and CAUTIOUS. + # Expansion has three modes, EAGER, CAUTIOUS and BUILDABLE. # # EAGER mode: If ANY parent is selected, select the test. # @@ -205,11 +207,22 @@ def expand_selection( # - If ALL parents are selected, select the test. # - If ANY parent is missing, return it separately. We'll keep it around # for later and see if its other parents show up. + # + # BUILDABLE mode: + # - If ALL parents are selected, or the parents of the test are themselves parents of the selected, select the test. + # - If ANY parent is missing, return it separately. We'll keep it around + # for later and see if its other parents show up. + # # Users can opt out of inclusive EAGER mode by passing --indirect-selection cautious # CLI argument or by specifying `indirect_selection: true` in a yaml selector direct_nodes = set(selected) indirect_nodes = set() + selected_and_parents = set() + if indirect_selection == IndirectSelection.Buildable: + selected_and_parents = selected.union(self.graph.select_parents(selected)).union( + self.manifest.sources + ) for unique_id in self.graph.select_successors(selected): if unique_id in self.manifest.nodes: @@ -220,14 +233,20 @@ def expand_selection( node.depends_on_nodes ) <= set(selected): direct_nodes.add(unique_id) - # if not: + elif indirect_selection == IndirectSelection.Buildable and set( + node.depends_on_nodes + ) <= set(selected_and_parents): + direct_nodes.add(unique_id) else: indirect_nodes.add(unique_id) return direct_nodes, indirect_nodes def incorporate_indirect_nodes( - self, direct_nodes: Set[UniqueId], indirect_nodes: Set[UniqueId] = set() + self, + direct_nodes: Set[UniqueId], + indirect_nodes: Set[UniqueId] = set(), + indirect_selection: IndirectSelection = IndirectSelection.Eager, ) -> Set[UniqueId]: # Check tests previously selected indirectly to see if ALL their # parents are now present. 
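To make the eager/cautious/buildable distinction above concrete, here is a self-contained sketch of the rule each mode applies to a test with several parents. The node names and plain-set "graph" are invented for the example and stand in for dbt's manifest and Graph objects:

# Illustration only: restates the EAGER/CAUTIOUS/BUILDABLE rules with plain sets.
def test_selected_directly(test_parents, selected, parents_of_selected, mode):
    if mode == "eager":
        return bool(test_parents & selected)  # ANY parent selected
    if mode == "cautious":
        return test_parents <= selected  # ALL parents selected
    if mode == "buildable":
        # ALL parents are either selected or are themselves parents of the selection
        return test_parents <= (selected | parents_of_selected)
    raise ValueError(f"unknown mode: {mode}")


# A relationship test depending on model_a (selected) and model_b (upstream of model_a):
parents = {"model_a", "model_b"}
test_selected_directly(parents, {"model_a"}, set(), "eager")            # True
test_selected_directly(parents, {"model_a"}, set(), "cautious")         # False
test_selected_directly(parents, {"model_a"}, {"model_b"}, "buildable")  # True

Buildable mode thus sits between the other two: like cautious it refuses tests that reach outside the build, but it tolerates parents that `dbt build` would have materialized anyway.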
@@ -238,11 +257,19 @@ def incorporate_indirect_nodes( selected = set(direct_nodes) - for unique_id in indirect_nodes: - if unique_id in self.manifest.nodes: - node = self.manifest.nodes[unique_id] - if set(node.depends_on_nodes) <= set(selected): - selected.add(unique_id) + if indirect_selection == IndirectSelection.Cautious: + for unique_id in indirect_nodes: + if unique_id in self.manifest.nodes: + node = self.manifest.nodes[unique_id] + if set(node.depends_on_nodes) <= set(selected): + selected.add(unique_id) + elif indirect_selection == IndirectSelection.Buildable: + selected_and_parents = selected.union(self.graph.select_parents(selected)) + for unique_id in indirect_nodes: + if unique_id in self.manifest.nodes: + node = self.manifest.nodes[unique_id] + if set(node.depends_on_nodes) <= set(selected_and_parents): + selected.add(unique_id) return selected diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py index c77625649bc..2c73d480dae 100644 --- a/core/dbt/graph/selector_methods.py +++ b/core/dbt/graph/selector_methods.py @@ -19,8 +19,8 @@ ) from dbt.contracts.state import PreviousState from dbt.exceptions import ( - InternalException, - RuntimeException, + DbtInternalError, + DbtRuntimeError, ) from dbt.node_types import NodeType @@ -207,7 +207,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu "`${{source_name}}.${{target_name}}`, or " "`${{package_name}}.${{source_name}}.${{target_name}}" ).format(selector) - raise RuntimeException(msg) + raise DbtRuntimeError(msg) for node, real_node in self.source_nodes(included_nodes): if target_package not in (real_node.package_name, SELECTOR_GLOB): @@ -234,7 +234,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu "the form ${{exposure_name}} or " "${{exposure_package.exposure_name}}" ).format(selector) - raise RuntimeException(msg) + raise DbtRuntimeError(msg) for node, real_node in self.exposure_nodes(included_nodes): if target_package not in (real_node.package_name, SELECTOR_GLOB): @@ -259,7 +259,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu "the form ${{metric_name}} or " "${{metric_package.metric_name}}" ).format(selector) - raise RuntimeException(msg) + raise DbtRuntimeError(msg) for node, real_node in self.metric_nodes(included_nodes): if target_package not in (real_node.package_name, SELECTOR_GLOB): @@ -367,7 +367,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu try: resource_type = NodeType(selector) except ValueError as exc: - raise RuntimeException(f'Invalid resource_type selector "{selector}"') from exc + raise DbtRuntimeError(f'Invalid resource_type selector "{selector}"') from exc for node, real_node in self.parsed_nodes(included_nodes): if real_node.resource_type == resource_type: yield node @@ -390,7 +390,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu elif selector in ("singular", "data"): search_type = SingularTestNode else: - raise RuntimeException( + raise DbtRuntimeError( f'Invalid test type selector {selector}: expected "generic" or ' '"singular"' ) @@ -407,7 +407,7 @@ def __init__(self, *args, **kwargs): def _macros_modified(self) -> List[str]: # we checked in the caller! 
if self.previous_state is None or self.previous_state.manifest is None: - raise InternalException("No comparison manifest in _macros_modified") + raise DbtInternalError("No comparison manifest in _macros_modified") old_macros = self.previous_state.manifest.macros new_macros = self.manifest.macros @@ -496,7 +496,7 @@ def check_new(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool: def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: if self.previous_state is None or self.previous_state.manifest is None: - raise RuntimeException("Got a state selector method, but no comparison manifest") + raise DbtRuntimeError("Got a state selector method, but no comparison manifest") state_checks = { # it's new if there is no old version @@ -514,7 +514,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu if selector in state_checks: checker = state_checks[selector] else: - raise RuntimeException( + raise DbtRuntimeError( f'Got an invalid selector "{selector}", expected one of ' f'"{list(state_checks)}"' ) @@ -538,7 +538,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu class ResultSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: if self.previous_state is None or self.previous_state.results is None: - raise InternalException("No comparison run_results") + raise DbtInternalError("No comparison run_results") matches = set( result.unique_id for result in self.previous_state.results if result.status == selector ) @@ -551,13 +551,11 @@ class SourceStatusSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: if self.previous_state is None or self.previous_state.sources is None: - raise InternalException( + raise DbtInternalError( "No previous state comparison freshness results in sources.json" ) elif self.previous_state.sources_current is None: - raise InternalException( - "No current state comparison freshness results in sources.json" - ) + raise DbtInternalError("No current state comparison freshness results in sources.json") current_state_sources = { result.unique_id: getattr(result, "max_loaded_at", 0) @@ -633,7 +631,7 @@ def __init__( def get_method(self, method: MethodName, method_arguments: List[str]) -> SelectorMethod: if method not in self.SELECTOR_METHODS: - raise InternalException( + raise DbtInternalError( f'Method name "{method}" is a valid node selection ' f"method name, but it is not handled" ) diff --git a/core/dbt/graph/selector_spec.py b/core/dbt/graph/selector_spec.py index 991ae7fcb89..af7ae014163 100644 --- a/core/dbt/graph/selector_spec.py +++ b/core/dbt/graph/selector_spec.py @@ -7,7 +7,7 @@ from typing import Set, Iterator, List, Optional, Dict, Union, Any, Iterable, Tuple from .graph import UniqueId from .selector_methods import MethodName -from dbt.exceptions import RuntimeException, InvalidSelectorException +from dbt.exceptions import DbtRuntimeError, InvalidSelectorError RAW_SELECTOR_PATTERN = re.compile( @@ -24,6 +24,7 @@ class IndirectSelection(StrEnum): Eager = "eager" Cautious = "cautious" + Buildable = "buildable" def _probably_path(value: str): @@ -46,7 +47,7 @@ def _match_to_int(match: Dict[str, str], key: str) -> Optional[int]: try: return int(raw) except ValueError as exc: - raise RuntimeException(f"Invalid node spec - could not handle parent depth {raw}") from exc + raise DbtRuntimeError(f"Invalid node spec - could not handle 
parent depth {raw}") from exc SelectionSpec = Union[ @@ -72,7 +73,7 @@ class SelectionCriteria: def __post_init__(self): if self.children and self.childrens_parents: - raise RuntimeException( + raise DbtRuntimeError( f'Invalid node spec {self.raw} - "@" prefix and "+" suffix ' "are incompatible" ) @@ -95,9 +96,7 @@ def parse_method(cls, groupdict: Dict[str, Any]) -> Tuple[MethodName, List[str]] try: method_name = MethodName(method_parts[0]) except ValueError as exc: - raise InvalidSelectorException( - f"'{method_parts[0]}' is not a valid method name" - ) from exc + raise InvalidSelectorError(f"'{method_parts[0]}' is not a valid method name") from exc method_arguments: List[str] = method_parts[1:] @@ -111,7 +110,7 @@ def selection_criteria_from_dict( indirect_selection: IndirectSelection = IndirectSelection.Eager, ) -> "SelectionCriteria": if "value" not in dct: - raise RuntimeException(f'Invalid node spec "{raw}" - no search value!') + raise DbtRuntimeError(f'Invalid node spec "{raw}" - no search value!') method_name, method_arguments = cls.parse_method(dct) parents_depth = _match_to_int(dct, "parents_depth") @@ -162,7 +161,7 @@ def from_single_spec( result = RAW_SELECTOR_PATTERN.match(raw) if result is None: # bad spec! - raise RuntimeException(f'Invalid selector spec "{raw}"') + raise DbtRuntimeError(f'Invalid selector spec "{raw}"') return cls.selection_criteria_from_dict( raw, result.groupdict(), indirect_selection=indirect_selection @@ -173,12 +172,14 @@ class BaseSelectionGroup(dbtClassMixin, Iterable[SelectionSpec], metaclass=ABCMe def __init__( self, components: Iterable[SelectionSpec], + indirect_selection: IndirectSelection = IndirectSelection.Eager, expect_exists: bool = False, raw: Any = None, ): self.components: List[SelectionSpec] = list(components) self.expect_exists = expect_exists self.raw = raw + self.indirect_selection = indirect_selection def __iter__(self) -> Iterator[SelectionSpec]: for component in self.components: diff --git a/core/dbt/helper_types.py b/core/dbt/helper_types.py index a8ff90fa75f..84f253b00c6 100644 --- a/core/dbt/helper_types.py +++ b/core/dbt/helper_types.py @@ -7,15 +7,16 @@ from datetime import timedelta from pathlib import Path from typing import Tuple, AbstractSet, Union +from hologram import FieldEncoder, JsonDict +from mashumaro.types import SerializableType +from typing import Callable, cast, Generic, Optional, TypeVar, List from dbt.dataclass_schema import ( dbtClassMixin, ValidationError, StrEnum, ) -from hologram import FieldEncoder, JsonDict -from mashumaro.types import SerializableType -from typing import Callable, cast, Generic, Optional, TypeVar +import dbt.events.types as dbt_event_types class Port(int, SerializableType): @@ -88,6 +89,65 @@ class NoValue(dbtClassMixin): novalue: NVEnum = field(default_factory=lambda: NVEnum.novalue) +@dataclass +class IncludeExclude(dbtClassMixin): + INCLUDE_ALL = ("all", "*") + + include: Union[str, List[str]] + exclude: List[str] = field(default_factory=list) + + def __post_init__(self): + if isinstance(self.include, str) and self.include not in self.INCLUDE_ALL: + raise ValidationError( + f"include must be one of {self.INCLUDE_ALL} or a list of strings" + ) + + if self.exclude and self.include not in self.INCLUDE_ALL: + raise ValidationError( + f"exclude can only be specified if include is one of {self.INCLUDE_ALL}" + ) + + if isinstance(self.include, list): + self._validate_items(self.include) + + if isinstance(self.exclude, list): + self._validate_items(self.exclude) + + def includes(self, 
item_name: str): + return ( + item_name in self.include or self.include in self.INCLUDE_ALL + ) and item_name not in self.exclude + + def _validate_items(self, items: List[str]): + pass + + +class WarnErrorOptions(IncludeExclude): + # TODO: this method can be removed once the click CLI is in use + @classmethod + def from_yaml_string(cls, warn_error_options_str: Optional[str]): + + # TODO: resolve circular import + from dbt.config.utils import parse_cli_yaml_string + + warn_error_options_str = ( + str(warn_error_options_str) if warn_error_options_str is not None else "{}" + ) + warn_error_options = parse_cli_yaml_string(warn_error_options_str, "warn-error-options") + return cls( + include=warn_error_options.get("include", []), + exclude=warn_error_options.get("exclude", []), + ) + + def _validate_items(self, items: List[str]): + valid_exception_names = set( + [name for name, cls in dbt_event_types.__dict__.items() if isinstance(cls, type)] + ) + for item in items: + if item not in valid_exception_names: + raise ValidationError(f"{item} is not a valid dbt error name.") + + dbtClassMixin.register_field_encoders( { Port: PortEncoder(), diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql index 5033178be49..ca972c9f258 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql @@ -1,8 +1,10 @@ -{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} +{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%} + -- back compat for old kwarg name + {% set incremental_predicates = kwargs.get('predicates', incremental_predicates) %} {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }} {%- endmacro %} -{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} +{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%} {%- set predicates = [] if incremental_predicates is none else [] + incremental_predicates -%} {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%} {%- set merge_update_columns = config.get('merge_update_columns') -%} diff --git a/core/dbt/include/global_project/macros/python_model/python.sql b/core/dbt/include/global_project/macros/python_model/python.sql index c56ff7f31c8..64da81ae646 100644 --- a/core/dbt/include/global_project/macros/python_model/python.sql +++ b/core/dbt/include/global_project/macros/python_model/python.sql @@ -3,7 +3,7 @@ {%- set ref_dict = {} -%} {%- for _ref in model.refs -%} {%- set resolved = ref(*_ref) -%} - {%- do ref_dict.update({_ref | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%} + {%- do ref_dict.update({_ref | join("."): resolved | string | replace('"', '\"')}) -%} {%- endfor -%} def ref(*args,dbt_load_df_function): @@ -18,7 +18,7 @@ def ref(*args,dbt_load_df_function): {%- set source_dict = {} -%} {%- for _source in model.sources -%} {%- set resolved = source(*_source) -%} - {%- do source_dict.update({_source | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%} + {%- do source_dict.update({_source | join("."): resolved | string | replace('"', '\"')}) -%} {%- endfor 
-%} def source(*args, dbt_load_df_function): @@ -33,8 +33,8 @@ def source(*args, dbt_load_df_function): {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %} {%- for key, default in config_dbt_used -%} {# weird type testing with enum, would be much easier to write this logic in Python! #} - {%- if key == 'language' -%} - {%- set value = 'python' -%} + {%- if key == "language" -%} + {%- set value = "python" -%} {%- endif -%} {%- set value = model.config.get(key, default) -%} {%- do config_dict.update({key: value}) -%} @@ -62,11 +62,12 @@ class config: class this: """dbt.this() or dbt.this.identifier""" - database = '{{ this.database }}' - schema = '{{ this.schema }}' - identifier = '{{ this.identifier }}' + database = "{{ this.database }}" + schema = "{{ this.schema }}" + identifier = "{{ this.identifier }}" + {% set this_relation_name = this | string | replace('"', '\\"') %} def __repr__(self): - return '{{ this }}' + return "{{ this_relation_name }}" class dbtObj: diff --git a/core/dbt/internal_deprecations.py b/core/dbt/internal_deprecations.py new file mode 100644 index 00000000000..fbc435026b6 --- /dev/null +++ b/core/dbt/internal_deprecations.py @@ -0,0 +1,26 @@ +import functools +from typing import Optional + +from dbt.events.functions import warn_or_error +from dbt.events.types import InternalDeprecation + + +def deprecated(suggested_action: str, version: str, reason: Optional[str]): + def inner(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + name = func.__name__ + + warn_or_error( + InternalDeprecation( + name=name, + suggested_action=suggested_action, + version=version, + reason=reason, + ) + ) # TODO: pass in event? + return func(*args, **kwargs) + + return wrapped + + return inner diff --git a/core/dbt/lib.py b/core/dbt/lib.py index f4b9ab5be0e..2726f101b00 100644 --- a/core/dbt/lib.py +++ b/core/dbt/lib.py @@ -4,7 +4,7 @@ from dbt.contracts.results import RunningStatus, collect_timing_info from dbt.events.functions import fire_event from dbt.events.types import NodeCompiling, NodeExecuting -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError from dbt import flags from dbt.task.sql import SqlCompileRunner from dataclasses import dataclass @@ -125,7 +125,7 @@ def get_task_by_type(type): elif type == "run_operation": return RunOperationTask - raise RuntimeException("not a valid task") + raise DbtRuntimeError("not a valid task") def create_task(type, args, manifest, config): diff --git a/core/dbt/main.py b/core/dbt/main.py index 197f20fc8c0..429d823be52 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -46,9 +46,9 @@ from dbt.config.profile import read_user_config from dbt.exceptions import ( Exception as dbtException, - InternalException, - NotImplementedException, - FailedToConnectException, + DbtInternalError, + NotImplementedError, + FailedToConnectError, ) @@ -92,7 +92,7 @@ def add_optional_argument_inverse( ): mutex_group = self.add_mutually_exclusive_group() if not name.startswith("--"): - raise InternalException( + raise DbtInternalError( 'cannot handle optional argument without "--" prefix: ' f'got "{name}"' ) if dest is None: @@ -207,7 +207,7 @@ def track_run(task): try: yield dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="ok") - except (NotImplementedException, FailedToConnectException) as e: + except (NotImplementedError, FailedToConnectError) as e: fire_event(MainEncounteredError(exc=str(e))) 
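The new core/dbt/internal_deprecations.py module above wraps internal functions whose behavior is slated to change. A hedged usage sketch; the decorated helper and its arguments are invented for illustration:

```python
from dbt.internal_deprecations import deprecated


@deprecated(suggested_action="use render_v2 instead", version="1.5", reason=None)
def render_v1(template: str) -> str:  # hypothetical internal helper
    return template.strip()


# Each call first emits an InternalDeprecation event via warn_or_error
# (raised as an error under --warn-error), then delegates to the wrapped function.
render_v1("  select 1  ")
```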
dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="error") except Exception: @@ -220,7 +220,7 @@ def track_run(task): def run_from_args(parsed): log_cache_events(getattr(parsed, "log_cache_events", False)) - # this will convert DbtConfigErrors into RuntimeExceptions + # this will convert DbtConfigErrors into DbtRuntimeError # task could be any one of the task objects task = parsed.cls.from_args(args=parsed) @@ -351,7 +351,7 @@ def _build_init_subparser(subparsers, base_subparser): dest="skip_profile_setup", action="store_true", help=""" - Skip interative profile setup. + Skip interactive profile setup. """, ) sub.set_defaults(cls=init_task.InitTask, which="init", rpc_method=None) @@ -385,7 +385,7 @@ def _build_build_subparser(subparsers, base_subparser): ) sub.add_argument( "--indirect-selection", - choices=["eager", "cautious"], + choices=["eager", "cautious", "buildable"], default="eager", dest="indirect_selection", help=""" @@ -486,7 +486,7 @@ def _build_snapshot_subparser(subparsers, base_subparser): return sub -def _add_defer_argument(*subparsers): +def _add_defer_arguments(*subparsers): for sub in subparsers: sub.add_optional_argument_inverse( "--defer", @@ -499,10 +499,6 @@ def _add_defer_argument(*subparsers): """, default=flags.DEFER_MODE, ) - - -def _add_favor_state_argument(*subparsers): - for sub in subparsers: sub.add_optional_argument_inverse( "--favor-state", enable_help=""" @@ -580,7 +576,7 @@ def _build_docs_generate_subparser(subparsers, base_subparser): Do not run "dbt compile" as part of docs generation """, ) - _add_defer_argument(generate_sub) + _add_defer_arguments(generate_sub) return generate_sub @@ -763,7 +759,7 @@ def _build_test_subparser(subparsers, base_subparser): ) sub.add_argument( "--indirect-selection", - choices=["eager", "cautious"], + choices=["eager", "cautious", "buildable"], default="eager", dest="indirect_selection", help=""" @@ -869,7 +865,7 @@ def _build_list_subparser(subparsers, base_subparser): ) sub.add_argument( "--indirect-selection", - choices=["eager", "cautious"], + choices=["eager", "cautious", "buildable"], default="eager", dest="indirect_selection", help=""" @@ -1006,18 +1002,32 @@ def parse_args(args, cls=DBTArgumentParser): """, ) - p.add_argument( + warn_error_flag = p.add_mutually_exclusive_group() + warn_error_flag.add_argument( "--warn-error", action="store_true", default=None, help=""" If dbt would normally warn, instead raise an exception. Examples - include --models that selects nothing, deprecations, configurations + include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests. """, ) + warn_error_flag.add_argument( + "--warn-error-options", + default=None, + help=""" + If dbt would normally warn, instead raise an exception based on + include/exclude configuration. Examples include --select that selects + nothing, deprecations, configurations with no associated models, + invalid test configurations, and missing sources/refs in tests. + This argument should be a YAML string, with keys 'include' or 'exclude'. + eg. '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}' + """, + ) + p.add_argument( "--no-version-check", dest="version_check", @@ -1178,9 +1188,7 @@ def parse_args(args, cls=DBTArgumentParser): # list_sub sets up its own arguments. 
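The new --warn-error-options flag above is backed by the WarnErrorOptions helper added in core/dbt/helper_types.py. A sketch of the resulting include/exclude semantics, reusing the event name from the flag's own help text (UnusedResourceConfigPath is simply another warning-level event name, used here for contrast):

```python
from dbt.helper_types import WarnErrorOptions

opts = WarnErrorOptions.from_yaml_string(
    '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'
)
# "all" promotes every warning to an error, except the excluded event names.
print(opts.includes("UnusedResourceConfigPath"))     # True  -> raised as an error
print(opts.includes("NoNodesForSelectionCriteria"))  # False -> stays a warning
```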
_add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub) # --defer - _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub) - # --favor-state - _add_favor_state_argument(run_sub, test_sub, build_sub, snapshot_sub) + _add_defer_arguments(run_sub, test_sub, build_sub, snapshot_sub, compile_sub) # --full-refresh _add_table_mutability_arguments(run_sub, compile_sub, build_sub) diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 9c245214d83..1f01aff36f1 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -18,7 +18,7 @@ from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.nodes import ManifestNode, BaseNode from dbt.contracts.graph.unparsed import UnparsedNode, Docs -from dbt.exceptions import InternalException, InvalidConfigUpdate, InvalidDictParse +from dbt.exceptions import DbtInternalError, ConfigUpdateError, DictParseError from dbt import hooks from dbt.node_types import NodeType, ModelLanguage from dbt.parser.search import FileBlock @@ -76,7 +76,7 @@ def __init__(self, config: RuntimeConfig, manifest: Manifest, component: str) -> root_project_name=config.project_name, ) if macro is None: - raise InternalException(f"No macro with name generate_{component}_name found") + raise DbtInternalError(f"No macro with name generate_{component}_name found") root_context = generate_generate_name_macro_context(macro, config, manifest) self.updater = MacroGenerator(macro, root_context) @@ -224,7 +224,7 @@ def _create_parsetime_node( original_file_path=block.path.original_file_path, raw_code=block.contents, ) - raise InvalidDictParse(exc, node=node) + raise DictParseError(exc, node=node) def _context_for(self, parsed_node: IntermediateNode, config: ContextConfig) -> Dict[str, Any]: return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config) @@ -345,7 +345,7 @@ def initial_config(self, fqn: List[str]) -> ContextConfig: self.project.project_name, ) else: - raise InternalException( + raise DbtInternalError( f"Got an unexpected project version={config_version}, expected 2" ) @@ -363,7 +363,7 @@ def render_update(self, node: IntermediateNode, config: ContextConfig) -> None: self.update_parsed_node_config(node, config, context=context) except ValidationError as exc: # we got a ValidationError - probably bad types in config() - raise InvalidConfigUpdate(exc, node=node) from exc + raise ConfigUpdateError(exc, node=node) from exc def add_result_node(self, block: FileBlock, node: ManifestNode): if node.config.enabled: diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py index 822dd5b2d85..ea281e1c993 100644 --- a/core/dbt/parser/generic_test.py +++ b/core/dbt/parser/generic_test.py @@ -2,7 +2,7 @@ import jinja2 -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.clients import jinja from dbt.contracts.graph.nodes import GenericTestNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro @@ -51,14 +51,14 @@ def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macr ) if isinstance(t, jinja.BlockTag) ] - except ParsingException as exc: + except ParsingError as exc: exc.add_node(base_node) raise for block in blocks: try: ast = jinja.parse(block.full_block) - except ParsingException as e: + except ParsingError as e: e.add_node(base_node) raise @@ -68,7 +68,7 @@ def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macr if len(generic_test_nodes) != 1: 
# things have gone disastrously wrong, we thought we only # parsed one block! - raise ParsingException( + raise ParsingError( f"Found multiple generic tests in {block.full_block}, expected 1", node=base_node, ) diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index af0282c953f..678f7de9df3 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -22,17 +22,17 @@ UnparsedExposure, ) from dbt.exceptions import ( - CustomMacroPopulatingConfigValues, - SameKeyNested, - TagNotString, - TagsNotListOfStrings, - TestArgIncludesModel, - TestArgsNotDict, - TestDefinitionDictLength, - TestInvalidType, - TestNameNotString, - UnexpectedTestNamePattern, - UndefinedMacroException, + CustomMacroPopulatingConfigValueError, + SameKeyNestedError, + TagNotStringError, + TagsNotListOfStringsError, + TestArgIncludesModelError, + TestArgsNotDictError, + TestDefinitionDictLengthError, + TestTypeError, + TestNameNotStringError, + UnexpectedTestNamePatternError, + UndefinedMacroError, ) from dbt.parser.search import FileBlock @@ -234,7 +234,7 @@ def __init__( test_name, test_args = self.extract_test_args(test, column_name) self.args: Dict[str, Any] = test_args if "model" in self.args: - raise TestArgIncludesModel() + raise TestArgIncludesModelError() self.package_name: str = package_name self.target: Testable = target @@ -242,7 +242,7 @@ def __init__( match = self.TEST_NAME_PATTERN.match(test_name) if match is None: - raise UnexpectedTestNamePattern(test_name) + raise UnexpectedTestNamePatternError(test_name) groups = match.groupdict() self.name: str = groups["test_name"] @@ -259,20 +259,20 @@ def __init__( value = self.args.pop(key, None) # 'modifier' config could be either top level arg or in config if value and "config" in self.args and key in self.args["config"]: - raise SameKeyNested() + raise SameKeyNestedError() if not value and "config" in self.args: value = self.args["config"].pop(key, None) if isinstance(value, str): try: value = get_rendered(value, render_ctx, native=True) - except UndefinedMacroException as e: - raise CustomMacroPopulatingConfigValues( + except UndefinedMacroError as e: + raise CustomMacroPopulatingConfigValueError( target_name=self.target.name, column_name=column_name, name=self.name, key=key, - err_msg=e.msg + err_msg=e.msg, ) if value is not None: @@ -310,7 +310,7 @@ def _bad_type(self) -> TypeError: @staticmethod def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: if not isinstance(test, dict): - raise TestInvalidType(test) + raise TestTypeError(test) # If the test is a dictionary with top-level keys, the test name is "test_name" # and the rest are arguments @@ -324,13 +324,13 @@ def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: else: test = list(test.items()) if len(test) != 1: - raise TestDefinitionDictLength(test) + raise TestDefinitionDictLengthError(test) test_name, test_args = test[0] if not isinstance(test_args, dict): - raise TestArgsNotDict(test_args) + raise TestArgsNotDictError(test_args) if not isinstance(test_name, str): - raise TestNameNotString(test_name) + raise TestNameNotStringError(test_name) test_args = deepcopy(test_args) if name is not None: test_args["column_name"] = name @@ -421,10 +421,10 @@ def tags(self) -> List[str]: if isinstance(tags, str): tags = [tags] if not isinstance(tags, list): - raise TagsNotListOfStrings(tags) + raise TagsNotListOfStringsError(tags) for tag in tags: if not isinstance(tag, str): - raise 
TagNotString(tag) + raise TagNotStringError(tag) return tags[:] def macro_name(self) -> str: diff --git a/core/dbt/parser/hooks.py b/core/dbt/parser/hooks.py index d05ea136dc5..d96257a0e71 100644 --- a/core/dbt/parser/hooks.py +++ b/core/dbt/parser/hooks.py @@ -4,7 +4,7 @@ from dbt.context.context_config import ContextConfig from dbt.contracts.files import FilePath from dbt.contracts.graph.nodes import HookNode -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.node_types import NodeType, RunHookType from dbt.parser.base import SimpleParser from dbt.parser.search import FileBlock @@ -46,7 +46,7 @@ def get_hook_defs(self) -> List[str]: elif self.hook_type == RunHookType.End: hooks = self.project.on_run_end else: - raise InternalException( + raise DbtInternalError( 'hook_type must be one of "{}" or "{}" (got {})'.format( RunHookType.Start, RunHookType.End, self.hook_type ) diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py index 7c5336b8ccf..1a9ee03d57d 100644 --- a/core/dbt/parser/macros.py +++ b/core/dbt/parser/macros.py @@ -6,7 +6,7 @@ from dbt.contracts.graph.unparsed import UnparsedMacro from dbt.contracts.graph.nodes import Macro from dbt.contracts.files import FilePath, SourceFile -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.events.functions import fire_event from dbt.events.types import MacroFileParse from dbt.node_types import NodeType @@ -56,14 +56,14 @@ def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]: ) if isinstance(t, jinja.BlockTag) ] - except ParsingException as exc: + except ParsingError as exc: exc.add_node(base_node) raise for block in blocks: try: ast = jinja.parse(block.full_block) - except ParsingException as e: + except ParsingError as e: e.add_node(base_node) raise @@ -72,7 +72,7 @@ def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]: if len(macro_nodes) != 1: # things have gone disastrously wrong, we thought we only # parsed one block! 
- raise ParsingException( + raise ParsingError( f"Found multiple macros in {block.full_block}, expected 1", node=base_node ) diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 108b73e06f4..fbfada4fc2a 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -7,6 +7,7 @@ from typing import Dict, Optional, Mapping, Callable, Any, List, Type, Union, Tuple from itertools import chain import time +from dbt.events.base_types import EventLevel import dbt.exceptions import dbt.tracking @@ -20,8 +21,8 @@ from dbt.helper_types import PathSet from dbt.events.functions import fire_event, get_invocation_id, warn_or_error from dbt.events.types import ( - PartialParsingExceptionProcessingFile, - PartialParsingException, + PartialParsingErrorProcessingFile, + PartialParsingError, PartialParsingSkipParsing, UnableToPartialParse, PartialParsingNotEnabled, @@ -60,7 +61,7 @@ ResultNode, ) from dbt.contracts.util import Writable -from dbt.exceptions import TargetNotFound, AmbiguousAlias +from dbt.exceptions import TargetNotFoundError, AmbiguousAliasError from dbt.parser.base import Parser from dbt.parser.analysis import AnalysisParser from dbt.parser.generic_test import GenericTestParser @@ -277,9 +278,9 @@ def load(self): source_file = self.manifest.files[file_id] if source_file: parse_file_type = source_file.parse_file_type - fire_event(PartialParsingExceptionProcessingFile(file=file_id)) + fire_event(PartialParsingErrorProcessingFile(file=file_id)) exc_info["parse_file_type"] = parse_file_type - fire_event(PartialParsingException(exc_info=exc_info)) + fire_event(PartialParsingError(exc_info=exc_info)) # Send event if dbt.tracking.active_user is not None: @@ -961,19 +962,20 @@ def invalid_target_fail_unless_test( target_kind: str, target_package: Optional[str] = None, disabled: Optional[bool] = None, + should_warn_if_disabled: bool = True, ): if node.resource_type == NodeType.Test: if disabled: - fire_event( - InvalidDisabledTargetInTestNode( - resource_type_title=node.resource_type.title(), - unique_id=node.unique_id, - original_file_path=node.original_file_path, - target_kind=target_kind, - target_name=target_name, - target_package=target_package if target_package else "", - ) + event = InvalidDisabledTargetInTestNode( + resource_type_title=node.resource_type.title(), + unique_id=node.unique_id, + original_file_path=node.original_file_path, + target_kind=target_kind, + target_name=target_name, + target_package=target_package if target_package else "", ) + + fire_event(event, EventLevel.WARN if should_warn_if_disabled else None) else: warn_or_error( NodeNotFoundOrDisabled( @@ -987,7 +989,7 @@ def invalid_target_fail_unless_test( ) ) else: - raise TargetNotFound( + raise TargetNotFoundError( node=node, target_name=target_name, target_kind=target_kind, @@ -1015,11 +1017,13 @@ def _check_resource_uniqueness( existing_node = names_resources.get(name) if existing_node is not None: - raise dbt.exceptions.DuplicateResourceName(existing_node, node) + raise dbt.exceptions.DuplicateResourceNameError(existing_node, node) existing_alias = alias_resources.get(full_node_name) if existing_alias is not None: - raise AmbiguousAlias(node_1=existing_alias, node_2=node, duped_name=full_node_name) + raise AmbiguousAliasError( + node_1=existing_alias, node_2=node, duped_name=full_node_name + ) names_resources[name] = node alias_resources[full_node_name] = node @@ -1111,7 +1115,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur elif len(ref) == 2: 
target_model_package, target_model_name = ref else: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Refs should always be 1 or 2 arguments - got {len(ref)}" ) @@ -1132,6 +1136,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur target_kind="node", target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), + should_warn_if_disabled=False, ) continue @@ -1154,7 +1159,7 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: M elif len(ref) == 2: target_model_package, target_model_name = ref else: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Refs should always be 1 or 2 arguments - got {len(ref)}" ) @@ -1175,6 +1180,7 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: M target_kind="node", target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), + should_warn_if_disabled=False, ) continue @@ -1204,7 +1210,7 @@ def _process_metrics_for_node( elif len(metric) == 2: target_metric_package, target_metric_name = metric else: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Metric references should always be 1 or 2 arguments - got {len(metric)}" ) @@ -1249,7 +1255,7 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif elif len(ref) == 2: target_model_package, target_model_name = ref else: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Refs should always be 1 or 2 arguments - got {len(ref)}" ) @@ -1270,6 +1276,7 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif target_kind="node", target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), + should_warn_if_disabled=False, ) continue diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index 39bb18be714..597200abba5 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -30,11 +30,11 @@ import ast from dbt.dataclass_schema import ValidationError from dbt.exceptions import ( - InvalidModelConfig, - ParsingException, - PythonLiteralEval, - PythonParsingException, - UndefinedMacroException, + ModelConfigError, + ParsingError, + PythonLiteralEvalError, + PythonParsingError, + UndefinedMacroError, ) dbt_function_key_words = set(["ref", "source", "config", "get"]) @@ -66,13 +66,13 @@ def visit_FunctionDef(self, node: ast.FunctionDef) -> None: def check_error(self, node): if self.num_model_def != 1: - raise ParsingException( + raise ParsingError( f"dbt allows exactly one model defined per python file, found {self.num_model_def}", node=node, ) if len(self.dbt_errors) != 0: - raise ParsingException("\n".join(self.dbt_errors), node=node) + raise ParsingError("\n".join(self.dbt_errors), node=node) class PythonParseVisitor(ast.NodeVisitor): @@ -96,7 +96,7 @@ def _safe_eval(self, node): try: return ast.literal_eval(node) except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc: - raise PythonLiteralEval(exc, node=self.dbt_node) from exc + raise PythonLiteralEvalError(exc, node=self.dbt_node) from exc def _get_call_literals(self, node): # List of literals @@ -176,9 +176,9 @@ def verify_python_model_code(node): node, ) if rendered_python != node.raw_code: - raise ParsingException("") - except (UndefinedMacroException, ParsingException): - raise ParsingException("No jinja in python model code is allowed", node=node) + raise 
ParsingError("") + except (UndefinedMacroError, ParsingError): + raise ParsingError("No jinja in python model code is allowed", node=node) class ModelParser(SimpleSQLParser[ModelNode]): @@ -202,7 +202,7 @@ def parse_python_model(self, node, config, context): try: tree = ast.parse(node.raw_code, filename=node.original_file_path) except SyntaxError as exc: - raise PythonParsingException(exc, node=node) from exc + raise PythonParsingError(exc, node=node) from exc # Only parse if AST tree has instructions in body if tree.body: @@ -219,12 +219,12 @@ def parse_python_model(self, node, config, context): if func == "get": num_args = len(args) if num_args == 0: - raise ParsingException( + raise ParsingError( "dbt.config.get() requires at least one argument", node=node, ) if num_args > 2: - raise ParsingException( + raise ParsingError( f"dbt.config.get() takes at most 2 arguments ({num_args} given)", node=node, ) @@ -255,7 +255,7 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None: except ValidationError as exc: # we got a ValidationError - probably bad types in config() - raise InvalidModelConfig(exc, node=node) from exc + raise ModelConfigError(exc, node=node) from exc return elif not flags.STATIC_PARSER: diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py index eafb49efe76..d6afe223278 100644 --- a/core/dbt/parser/partial.py +++ b/core/dbt/parser/partial.py @@ -8,6 +8,7 @@ parse_file_type_to_parser, ) from dbt.events.functions import fire_event +from dbt.events.base_types import EventLevel from dbt.events.types import ( PartialParsingEnabled, PartialParsingFile, @@ -155,7 +156,11 @@ def build_file_diff(self): self.macro_child_map = self.saved_manifest.build_macro_child_map() deleted = len(deleted) + len(deleted_schema_files) changed = len(changed) + len(changed_schema_files) - fire_event(PartialParsingEnabled(deleted=deleted, added=len(added), changed=changed)) + event = PartialParsingEnabled(deleted=deleted, added=len(added), changed=changed) + if os.environ.get("DBT_PP_TEST"): + fire_event(event, level=EventLevel.INFO) + else: + fire_event(event) self.file_diff = file_diff # generate the list of files that need parsing diff --git a/core/dbt/parser/read_files.py b/core/dbt/parser/read_files.py index ccb6b1b0790..531e5f39560 100644 --- a/core/dbt/parser/read_files.py +++ b/core/dbt/parser/read_files.py @@ -12,7 +12,7 @@ ) from dbt.parser.schemas import yaml_from_file, schema_file_keys, check_format_version -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.parser.search import filesystem_search from typing import Optional @@ -75,21 +75,21 @@ def validate_yaml(file_path, dct): f"The schema file at {file_path} is " f"invalid because the value of '{key}' is not a list" ) - raise ParsingException(msg) + raise ParsingError(msg) for element in dct[key]: if not isinstance(element, dict): msg = ( f"The schema file at {file_path} is " f"invalid because a list element for '{key}' is not a dictionary" ) - raise ParsingException(msg) + raise ParsingError(msg) if "name" not in element: msg = ( f"The schema file at {file_path} is " f"invalid because a list element for '{key}' does not have a " "name attribute." 
) - raise ParsingException(msg) + raise ParsingError(msg) # Special processing for big seed files diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 5e81c83fdfb..482eb5b6e35 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -50,25 +50,25 @@ UnparsedSourceDefinition, ) from dbt.exceptions import ( - CompilationException, - DuplicateMacroPatchName, - DuplicatePatchPath, - DuplicateSourcePatchName, - JSONValidationException, - InternalException, - InvalidSchemaConfig, - InvalidTestConfig, - ParsingException, - PropertyYMLInvalidTag, - PropertyYMLMissingVersion, - PropertyYMLVersionNotInt, - ValidationException, - YamlLoadFailure, - YamlParseDictFailure, - YamlParseListFailure, + CompilationError, + DuplicateMacroPatchNameError, + DuplicatePatchPathError, + DuplicateSourcePatchNameError, + JSONValidationError, + DbtInternalError, + SchemaConfigError, + TestConfigError, + ParsingError, + PropertyYMLInvalidTagError, + PropertyYMLMissingVersionError, + PropertyYMLVersionNotIntError, + DbtValidationError, + YamlLoadError, + YamlParseDictError, + YamlParseListError, ) from dbt.events.functions import warn_or_error -from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroPatchNotFound +from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroNotFoundForPatch from dbt.node_types import NodeType from dbt.parser.base import SimpleParser from dbt.parser.search import FileBlock @@ -102,8 +102,10 @@ def yaml_from_file(source_file: SchemaSourceFile) -> Dict[str, Any]: try: # source_file.contents can sometimes be None return load_yaml_text(source_file.contents or "", source_file.path) - except ValidationException as e: - raise YamlLoadFailure(source_file.project_name, source_file.path.relative_path, e) + except DbtValidationError as e: + raise YamlLoadError( + project_name=source_file.project_name, path=source_file.path.relative_path, exc=e + ) class ParserRef: @@ -255,7 +257,7 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List original_file_path=target.original_file_path, raw_code=raw_code, ) - raise InvalidTestConfig(exc, node) + raise TestConfigError(exc, node) # lots of time spent in this method def _parse_generic_test( @@ -278,20 +280,20 @@ def _parse_generic_test( self.store_env_vars(target, schema_file_id, self.schema_yaml_vars.env_vars) self.schema_yaml_vars.env_vars = {} - except ParsingException as exc: + except ParsingError as exc: context = _trimmed(str(target)) msg = "Invalid test config given in {}:\n\t{}\n\t@: {}".format( target.original_file_path, exc.msg, context ) - raise ParsingException(msg) from exc + raise ParsingError(msg) from exc - except CompilationException as exc: + except CompilationError as exc: context = _trimmed(str(target)) msg = ( "Invalid generic test configuration given in " f"{target.original_file_path}: \n{exc.msg}\n\t@: {context}" ) - raise CompilationException(msg) from exc + raise CompilationError(msg) from exc original_name = os.path.basename(target.original_file_path) compiled_path = get_pseudo_test_path(builder.compiled_name, original_name) @@ -397,7 +399,7 @@ def render_test_update(self, node, config, builder, schema_file_id): # env_vars should have been updated in the context env_var method except ValidationError as exc: # we got a ValidationError - probably bad types in config() - raise InvalidSchemaConfig(exc, node=node) from exc + raise SchemaConfigError(exc, node=node) from exc def parse_node(self, block: GenericTestBlock) -> 
GenericTestNode: """In schema parsing, we rewrite most of the part of parse_node that @@ -537,16 +539,16 @@ def parse_file(self, block: FileBlock, dct: Dict = None) -> None: def check_format_version(file_path, yaml_dct) -> None: if "version" not in yaml_dct: - raise PropertyYMLMissingVersion(file_path) + raise PropertyYMLMissingVersionError(file_path) version = yaml_dct["version"] # if it's not an integer, the version is malformed, or not # set. Either way, only 'version: 2' is supported. if not isinstance(version, int): - raise PropertyYMLVersionNotInt(file_path, version) + raise PropertyYMLVersionNotIntError(file_path, version) if version != 2: - raise PropertyYMLInvalidTag(file_path, version) + raise PropertyYMLInvalidTagError(file_path, version) Parsed = TypeVar("Parsed", UnpatchedSourceDefinition, ParsedNodePatch, ParsedMacroPatch) @@ -594,7 +596,7 @@ def root_project(self): def get_key_dicts(self) -> Iterable[Dict[str, Any]]: data = self.yaml.data.get(self.key, []) if not isinstance(data, list): - raise ParsingException( + raise ParsingError( "{} must be a list, got {} instead: ({})".format( self.key, type(data), _trimmed(str(data)) ) @@ -607,12 +609,10 @@ def get_key_dicts(self) -> Iterable[Dict[str, Any]]: # check that entry is a dict and that all dict values # are strings if coerce_dict_str(entry) is None: - raise YamlParseListFailure( - path, self.key, data, "expected a dict with string keys" - ) + raise YamlParseListError(path, self.key, data, "expected a dict with string keys") if "name" not in entry: - raise ParsingException("Entry did not contain a name") + raise ParsingError("Entry did not contain a name") # Render the data (except for tests and descriptions). # See the SchemaYamlRenderer @@ -631,8 +631,8 @@ def render_entry(self, dct): try: # This does a deep_map which will fail if there are circular references dct = self.renderer.render_data(dct) - except ParsingException as exc: - raise ParsingException( + except ParsingError as exc: + raise ParsingError( f"Failed to render {self.yaml.file.path.original_file_path} from " f"project {self.project.project_name}: {exc}" ) from exc @@ -655,8 +655,8 @@ def _target_from_dict(self, cls: Type[T], data: Dict[str, Any]) -> T: try: cls.validate(data) return cls.from_dict(data) - except (ValidationError, JSONValidationException) as exc: - raise YamlParseDictFailure(path, self.key, data, exc) + except (ValidationError, JSONValidationError) as exc: + raise YamlParseDictError(path, self.key, data, exc) # The other parse method returns TestBlocks. This one doesn't. 
# This takes the yaml dictionaries in 'sources' keys and uses them @@ -677,7 +677,7 @@ def parse(self) -> List[TestBlock]: # source patches must be unique key = (patch.overrides, patch.name) if key in self.manifest.source_patches: - raise DuplicateSourcePatchName(patch, self.manifest.source_patches[key]) + raise DuplicateSourcePatchNameError(patch, self.manifest.source_patches[key]) self.manifest.source_patches[key] = patch source_file.source_patches.append(key) else: @@ -780,8 +780,8 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]: self.normalize_meta_attribute(data, path) self.normalize_docs_attribute(data, path) node = self._target_type().from_dict(data) - except (ValidationError, JSONValidationException) as exc: - raise YamlParseDictFailure(path, self.key, data, exc) + except (ValidationError, JSONValidationError) as exc: + raise YamlParseDictError(path, self.key, data, exc) else: yield node @@ -790,7 +790,7 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]: def normalize_attribute(self, data, path, attribute): if attribute in data: if "config" in data and attribute in data["config"]: - raise ParsingException( + raise ParsingError( f""" In {path}: found {attribute} dictionary in 'config' dictionary and as top-level key. Remove the top-level key and define it under 'config' dictionary only. @@ -858,7 +858,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: elif patch.yaml_key == "analyses": unique_id = self.manifest.analysis_lookup.get_unique_id(patch.name, None) else: - raise InternalException( + raise DbtInternalError( f"Unexpected yaml_key {patch.yaml_key} for patch in " f"file {source_file.path.original_file_path}" ) @@ -877,7 +877,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: "unique id cannot be enabled in the schema file. They must be enabled " "in `dbt_project.yml` or in the sql files." ) - raise ParsingException(msg) + raise ParsingError(msg) # all nodes in the disabled dict have the same unique_id so just grab the first one # to append with the unique id @@ -905,7 +905,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: if node: if node.patch_path: package_name, existing_file_path = node.patch_path.split("://") - raise DuplicatePatchPath(patch, existing_file_path) + raise DuplicatePatchPathError(patch, existing_file_path) source_file.append_patch(patch.yaml_key, node.unique_id) # re-calculate the node config with the patch config.
Always do this @@ -957,11 +957,11 @@ def parse_patch(self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef) unique_id = f"macro.{patch.package_name}.{patch.name}" macro = self.manifest.macros.get(unique_id) if not macro: - warn_or_error(MacroPatchNotFound(patch_name=patch.name)) + warn_or_error(MacroNotFoundForPatch(patch_name=patch.name)) return if macro.patch_path: package_name, existing_file_path = macro.patch_path.split("://") - raise DuplicateMacroPatchName(patch, existing_file_path) + raise DuplicateMacroPatchNameError(patch, existing_file_path) source_file.macro_patches[patch.name] = unique_id macro.patch(patch) @@ -997,7 +997,7 @@ def parse_exposure(self, unparsed: UnparsedExposure): ) if not isinstance(config, ExposureConfig): - raise InternalException( + raise DbtInternalError( f"Calculated a {type(config)} for an exposure, but expected an ExposureConfig" ) @@ -1063,8 +1063,8 @@ def parse(self): try: UnparsedExposure.validate(data) unparsed = UnparsedExposure.from_dict(data) - except (ValidationError, JSONValidationException) as exc: - raise YamlParseDictFailure(self.yaml.path, self.key, data, exc) + except (ValidationError, JSONValidationError) as exc: + raise YamlParseDictError(self.yaml.path, self.key, data, exc) self.parse_exposure(unparsed) @@ -1100,7 +1100,7 @@ def parse_metric(self, unparsed: UnparsedMetric): ) if not isinstance(config, MetricConfig): - raise InternalException( + raise DbtInternalError( f"Calculated a {type(config)} for a metric, but expected a MetricConfig" ) @@ -1180,6 +1180,6 @@ def parse(self): UnparsedMetric.validate(data) unparsed = UnparsedMetric.from_dict(data) - except (ValidationError, JSONValidationException) as exc: - raise YamlParseDictFailure(self.yaml.path, self.key, data, exc) + except (ValidationError, JSONValidationError) as exc: + raise YamlParseDictError(self.yaml.path, self.key, data, exc) self.parse_metric(unparsed) diff --git a/core/dbt/parser/search.py b/core/dbt/parser/search.py index f8ccc974be4..75e7fa6636c 100644 --- a/core/dbt/parser/search.py +++ b/core/dbt/parser/search.py @@ -7,7 +7,7 @@ from dbt.clients.system import find_matching from dbt.config import Project from dbt.contracts.files import FilePath, AnySourceFile -from dbt.exceptions import ParsingException, InternalException +from dbt.exceptions import ParsingError, DbtInternalError # What's the point of wrapping a SourceFile with this class? 
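The exposure and metric parsers above share a validate-then-construct pattern: cls.validate() runs schema validation on the raw dict, and cls.from_dict() builds the typed object, with both failure modes funneled into YamlParseDictError. A hedged sketch with a minimal, hypothetical exposure dict (the exact required fields may differ by version):

```python
from dbt.contracts.graph.unparsed import UnparsedExposure

data = {
    "name": "weekly_kpis",                        # hypothetical exposure
    "type": "dashboard",
    "owner": {"email": "data-team@example.com"},
}
UnparsedExposure.validate(data)              # schema check; raises on bad input
unparsed = UnparsedExposure.from_dict(data)  # typed object handed to parse_exposure
```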
@@ -73,7 +73,7 @@ def filesystem_search( file_path_list = [] for result in find_matching(root, relative_dirs, ext, ignore_spec): if "searched_path" not in result or "relative_path" not in result: - raise InternalException("Invalid result from find_matching: {}".format(result)) + raise DbtInternalError("Invalid result from find_matching: {}".format(result)) file_match = FilePath( searched_path=result["searched_path"], relative_path=result["relative_path"], @@ -113,7 +113,7 @@ def extract_blocks(self, source_file: FileBlock) -> Iterable[BlockTag]: assert isinstance(block, BlockTag) yield block - except ParsingException as exc: + except ParsingError as exc: if exc.node is None: exc.add_node(source_file) raise diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py index dffc7d90641..72aec4ee976 100644 --- a/core/dbt/parser/snapshots.py +++ b/core/dbt/parser/snapshots.py @@ -4,7 +4,7 @@ from dbt.dataclass_schema import ValidationError from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode -from dbt.exceptions import InvalidSnapshopConfig +from dbt.exceptions import SnapshopConfigError from dbt.node_types import NodeType from dbt.parser.base import SQLParser from dbt.parser.search import BlockContents, BlockSearcher, FileBlock @@ -68,7 +68,7 @@ def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode: self.set_snapshot_attributes(parsed_node) return parsed_node except ValidationError as exc: - raise InvalidSnapshopConfig(exc, node) + raise SnapshopConfigError(exc, node) def parse_file(self, file_block: FileBlock) -> None: blocks = BlockSearcher( diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index cc9acea98c3..098ebde09c6 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -26,7 +26,7 @@ ) from dbt.events.functions import warn_or_error from dbt.events.types import UnusedTables -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.node_types import NodeType from dbt.parser.schemas import SchemaParser, ParserRef @@ -150,7 +150,7 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition: ) if not isinstance(config, SourceConfig): - raise InternalException( + raise DbtInternalError( f"Calculated a {type(config)} for a source, but expected a SourceConfig" ) diff --git a/core/dbt/parser/sql.py b/core/dbt/parser/sql.py index 82d09c12d6b..98e28aadc19 100644 --- a/core/dbt/parser/sql.py +++ b/core/dbt/parser/sql.py @@ -5,7 +5,7 @@ from dbt.contracts.graph.manifest import SourceFile from dbt.contracts.graph.nodes import SqlNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.macros import MacroParser @@ -35,7 +35,7 @@ def resource_type(self) -> NodeType: def get_compiled_path(block: FileBlock): # we do it this way to make mypy happy if not isinstance(block, SqlBlock): - raise InternalException( + raise DbtInternalError( "While parsing SQL operation, got an actual file block instead of " "an SQL block: {}".format(block) ) diff --git a/core/dbt/semver.py b/core/dbt/semver.py index 7f8913c3600..24f00b333a1 100644 --- a/core/dbt/semver.py +++ b/core/dbt/semver.py @@ -5,7 +5,7 @@ from packaging import version as packaging_version -from dbt.exceptions import VersionsNotCompatibleException +from dbt.exceptions import VersionsNotCompatibleError import 
dbt.utils from dbt.dataclass_schema import dbtClassMixin, StrEnum @@ -94,7 +94,7 @@ def from_version_string(cls, version_string): match = _VERSION_REGEX.match(version_string) if not match: - raise dbt.exceptions.SemverException( + raise dbt.exceptions.SemverError( f'"{version_string}" is not a valid semantic version.' ) @@ -222,7 +222,7 @@ def _try_combine_exact(self, a, b): if a.compare(b) == 0: return a else: - raise VersionsNotCompatibleException() + raise VersionsNotCompatibleError() def _try_combine_lower_bound_with_exact(self, lower, exact): comparison = lower.compare(exact) @@ -230,7 +230,7 @@ def _try_combine_lower_bound_with_exact(self, lower, exact): if comparison < 0 or (comparison == 0 and lower.matcher == Matchers.GREATER_THAN_OR_EQUAL): return exact - raise VersionsNotCompatibleException() + raise VersionsNotCompatibleError() def _try_combine_lower_bound(self, a, b): if b.is_unbounded: @@ -258,7 +258,7 @@ def _try_combine_upper_bound_with_exact(self, upper, exact): if comparison > 0 or (comparison == 0 and upper.matcher == Matchers.LESS_THAN_OR_EQUAL): return exact - raise VersionsNotCompatibleException() + raise VersionsNotCompatibleError() def _try_combine_upper_bound(self, a, b): if b.is_unbounded: @@ -291,7 +291,7 @@ def reduce(self, other): end = self._try_combine_upper_bound(self.end, other.end) if start.compare(end) > 0: - raise VersionsNotCompatibleException() + raise VersionsNotCompatibleError() return VersionRange(start=start, end=end) @@ -379,8 +379,8 @@ def reduce_versions(*args): for version_specifier in version_specifiers: to_return = to_return.reduce(version_specifier.to_range()) - except VersionsNotCompatibleException: - raise VersionsNotCompatibleException( + except VersionsNotCompatibleError: + raise VersionsNotCompatibleError( "Could not find a satisfactory version from options: {}".format([str(a) for a in args]) ) @@ -394,7 +394,7 @@ def versions_compatible(*args): try: reduce_versions(*args) return True - except VersionsNotCompatibleException: + except VersionsNotCompatibleError: return False diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index e448a15c1d2..63449de10c7 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -4,6 +4,7 @@ import traceback from abc import ABCMeta, abstractmethod from typing import Type, Union, Dict, Any, Optional +from datetime import datetime from dbt import tracking from dbt import flags @@ -16,24 +17,18 @@ RunningStatus, ) from dbt.exceptions import ( - NotImplementedException, - CompilationException, - RuntimeException, - InternalException, + NotImplementedError, + CompilationError, + DbtRuntimeError, + DbtInternalError, ) from dbt.logger import log_manager from dbt.events.functions import fire_event from dbt.events.types import ( - DbtProjectError, - DbtProjectErrorException, - DbtProfileError, - DbtProfileErrorException, - ProfileListTitle, - ListSingleProfile, - NoDefinedProfiles, - ProfileHelpMessage, + LogDbtProjectError, + LogDbtProfileError, CatchableExceptionOnRun, - InternalExceptionOnRun, + InternalErrorOnRun, GenericExceptionOnRun, NodeConnectionReleaseError, LogDebugStackTrace, @@ -102,33 +97,20 @@ def from_args(cls, args): # for the clean or deps tasks config = cls.ConfigType.from_args(args) except dbt.exceptions.DbtProjectError as exc: - fire_event(DbtProjectError()) - fire_event(DbtProjectErrorException(exc=str(exc))) + fire_event(LogDbtProjectError(exc=str(exc))) tracking.track_invalid_invocation(args=args, result_type=exc.result_type) - raise dbt.exceptions.RuntimeException("Could 
not run dbt") from exc
+            raise dbt.exceptions.DbtRuntimeError("Could not run dbt") from exc
         except dbt.exceptions.DbtProfileError as exc:
-            fire_event(DbtProfileError())
-            fire_event(DbtProfileErrorException(exc=str(exc)))
-
-            all_profiles = read_profiles(flags.PROFILES_DIR).keys()
-
-            if len(all_profiles) > 0:
-                fire_event(ProfileListTitle())
-                for profile in all_profiles:
-                    fire_event(ListSingleProfile(profile=profile))
-            else:
-                fire_event(NoDefinedProfiles())
-
-            fire_event(ProfileHelpMessage())
-
+            all_profile_names = list(read_profiles(flags.PROFILES_DIR).keys())
+            fire_event(LogDbtProfileError(exc=str(exc), profiles=all_profile_names))
             tracking.track_invalid_invocation(args=args, result_type=exc.result_type)
-            raise dbt.exceptions.RuntimeException("Could not run dbt") from exc
+            raise dbt.exceptions.DbtRuntimeError("Could not run dbt") from exc
         return cls(args, config)
     @abstractmethod
     def run(self):
-        raise dbt.exceptions.NotImplementedException("Not Implemented")
+        raise dbt.exceptions.NotImplementedError("Not Implemented")
     def interpret_results(self, results):
         return True
@@ -142,7 +124,7 @@ def get_nearest_project_dir(args):
         if os.path.exists(project_file):
             return args.project_dir
         else:
-            raise dbt.exceptions.RuntimeException(
+            raise dbt.exceptions.DbtRuntimeError(
                 "fatal: Invalid --project-dir flag. Not a dbt project. "
                 "Missing dbt_project.yml file"
            )
@@ -156,7 +138,7 @@ def get_nearest_project_dir(args):
            return cwd
        cwd = os.path.dirname(cwd)
-    raise dbt.exceptions.RuntimeException(
+    raise dbt.exceptions.DbtRuntimeError(
        "fatal: Not a dbt project (or any of the parent directories). "
        "Missing dbt_project.yml file"
    )
@@ -227,6 +209,9 @@ def run_with_hooks(self, manifest):
         self.before_execute()
         result = self.safe_run(manifest)
+        self.node.update_event_status(
+            node_status=result.status, finished_at=datetime.utcnow().isoformat()
+        )
         if not self.node.is_ephemeral_model:
             self.after_execute(result)
@@ -347,7 +332,7 @@ def _handle_catchable_exception(self, e, ctx):
         return str(e)
     def _handle_internal_exception(self, e, ctx):
-        fire_event(InternalExceptionOnRun(build_path=self.node.build_path, exc=str(e)))
+        fire_event(InternalErrorOnRun(build_path=self.node.build_path, exc=str(e)))
         return str(e)
     def _handle_generic_exception(self, e, ctx):
@@ -363,10 +348,10 @@ def _handle_generic_exception(self, e, ctx):
         return str(e)
     def handle_exception(self, e, ctx):
-        catchable_errors = (CompilationException, RuntimeException)
+        catchable_errors = (CompilationError, DbtRuntimeError)
         if isinstance(e, catchable_errors):
             error = self._handle_catchable_exception(e, ctx)
-        elif isinstance(e, InternalException):
+        elif isinstance(e, DbtInternalError):
             error = self._handle_internal_exception(e, ctx)
         else:
             error = self._handle_generic_exception(e, ctx)
@@ -421,16 +406,16 @@ def _safe_release_connection(self):
         return None
     def before_execute(self):
-        raise NotImplementedException()
+        raise NotImplementedError()
     def execute(self, compiled_node, manifest):
-        raise NotImplementedException()
+        raise NotImplementedError()
     def run(self, compiled_node, manifest):
         return self.execute(compiled_node, manifest)
     def after_execute(self, result):
-        raise NotImplementedException()
+        raise NotImplementedError()
     def _skip_caused_by_ephemeral_failure(self):
         if self.skip_cause is None or self.skip_cause.node is None:
@@ -456,7 +441,7 @@ def on_skip(self):
                 )
                 print_run_result_error(result=self.skip_cause, newline=False)
                 if self.skip_cause is None:  # mypy appeasement
-                    raise InternalException(
+                    raise DbtInternalError(
                         "Skip cause not set but skip was somehow caused by an ephemeral failure"
                     )
                 # set an error so dbt will exit with an error code
@@ -467,6 +452,9 @@ def on_skip(self):
                     )
                 )
             else:
+                # 'skipped' nodes should not have a value for 'node_finished_at'
+                # they do have 'node_started_at', which is set in GraphRunnableTask.call_runner
+                self.node.update_event_status(node_status=RunStatus.Skipped)
                 fire_event(
                     SkippingDetails(
                         resource_type=self.node.resource_type,
diff --git a/core/dbt/task/build.py b/core/dbt/task/build.py
index aabc561bd7c..8a5dc39c9b7 100644
--- a/core/dbt/task/build.py
+++ b/core/dbt/task/build.py
@@ -5,7 +5,7 @@
 from dbt.adapters.factory import get_adapter
 from dbt.contracts.results import NodeStatus
-from dbt.exceptions import InternalException
+from dbt.exceptions import DbtInternalError
 from dbt.graph import ResourceTypeSelector
 from dbt.node_types import NodeType
 from dbt.task.test import TestSelector
@@ -44,7 +44,7 @@ def resource_types(self):
     def get_node_selector(self) -> ResourceTypeSelector:
         if self.manifest is None or self.graph is None:
-            raise InternalException("manifest and graph must be set to get node selection")
+            raise DbtInternalError("manifest and graph must be set to get node selection")
         resource_types = self.resource_types
@@ -66,7 +66,7 @@ def get_runner_type(self, node):
     def compile_manifest(self):
         if self.manifest is None:
-            raise InternalException("compile_manifest called before manifest was loaded")
+            raise DbtInternalError("compile_manifest called before manifest was loaded")
         adapter = get_adapter(self.config)
         compiler = adapter.get_compiler()
         self.graph = compiler.compile(self.manifest, add_test_edges=True)
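The same mechanical rename (InternalException → DbtInternalError, RuntimeException → DbtRuntimeError, CompilationException → CompilationError, and so on) repeats through every task module below. For third-party code that needs to import these names on both sides of the rename, a minimal compatibility shim might look like the sketch that follows; the try/except fallback is an assumption for illustration, not part of this patch:

try:
    # names introduced by this patch
    from dbt.exceptions import DbtInternalError, DbtRuntimeError, CompilationError
except ImportError:
    # hypothetical fallback for pre-rename dbt-core: alias old names to new ones
    from dbt.exceptions import (
        InternalException as DbtInternalError,
        RuntimeException as DbtRuntimeError,
        CompilationException as CompilationError,
    )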
diff --git a/core/dbt/task/compile.py b/core/dbt/task/compile.py
index 740d35d37e9..7d2bc0482db 100644
--- a/core/dbt/task/compile.py
+++ b/core/dbt/task/compile.py
@@ -6,7 +6,7 @@
 from dbt.contracts.graph.manifest import WritableManifest
 from dbt.contracts.results import RunStatus, RunResult
-from dbt.exceptions import InternalException, RuntimeException
+from dbt.exceptions import DbtInternalError, DbtRuntimeError
 from dbt.graph import ResourceTypeSelector
 from dbt.events.functions import fire_event
 from dbt.events.types import CompileComplete
@@ -43,7 +43,7 @@ def raise_on_first_error(self):
     def get_node_selector(self) -> ResourceTypeSelector:
         if self.manifest is None or self.graph is None:
-            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
         return ResourceTypeSelector(
             graph=self.graph,
             manifest=self.manifest,
@@ -63,12 +63,12 @@ def _get_deferred_manifest(self) -> Optional[WritableManifest]:
         state = self.previous_state
         if state is None:
-            raise RuntimeException(
+            raise DbtRuntimeError(
                 "Received a --defer argument, but no value was provided to --state"
             )
         if state.manifest is None:
-            raise RuntimeException(f'Could not find manifest in --state path: "{self.args.state}"')
+            raise DbtRuntimeError(f'Could not find manifest in --state path: "{self.args.state}"')
         return state.manifest
     def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
@@ -76,13 +76,14 @@ def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
         if deferred_manifest is None:
             return
         if self.manifest is None:
-            raise InternalException(
+            raise DbtInternalError(
                 "Expected to defer to manifest, but there is no runtime manifest to defer from!"
             )
         self.manifest.merge_from_artifact(
             adapter=adapter,
             other=deferred_manifest,
             selected=selected_uids,
+            favor_state=bool(self.args.favor_state),
         )
         # TODO: is it wrong to write the manifest here? I think it's right...
         self.write_manifest()
diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py
index 704368cf24f..819bc4164a3 100644
--- a/core/dbt/task/freshness.py
+++ b/core/dbt/task/freshness.py
@@ -15,8 +15,8 @@
     SourceFreshnessResult,
     FreshnessStatus,
 )
-from dbt.exceptions import RuntimeException, InternalException
-from dbt.events.functions import fire_event, info
+from dbt.exceptions import DbtRuntimeError, DbtInternalError
+from dbt.events.functions import fire_event
 from dbt.events.types import (
     FreshnessCheckComplete,
     LogStartLine,
@@ -33,7 +33,7 @@ class FreshnessRunner(BaseRunner):
     def on_skip(self):
-        raise RuntimeException("Freshness: nodes cannot be skipped!")
+        raise DbtRuntimeError("Freshness: nodes cannot be skipped!")
     def before_execute(self):
         description = "freshness of {0.source_name}.{0.name}".format(self.node)
@@ -56,7 +56,6 @@ def after_execute(self, result):
         level = LogFreshnessResult.status_to_level(str(result.status))
         fire_event(
             LogFreshnessResult(
-                info=info(level=level),
                 status=result.status,
                 source_name=source_name,
                 table_name=table_name,
@@ -64,7 +63,8 @@
                 total=self.num_nodes,
                 execution_time=result.execution_time,
                 node_info=self.node.node_info,
-            )
+            ),
+            level=level,
         )
     def error_result(self, node, message, start_time, timing_info):
@@ -100,7 +100,7 @@ def execute(self, compiled_node, manifest):
         # therefore loaded_at_field should be a str. If this invariant is
         # broken, raise!
         if compiled_node.loaded_at_field is None:
-            raise InternalException(
+            raise DbtInternalError(
                 "Got to execute for source freshness of a source that has no loaded_at_field!"
             )
@@ -132,7 +132,7 @@ def execute(self, compiled_node, manifest):
     def compile(self, manifest):
         if self.node.resource_type != NodeType.Source:
             # should be unreachable...
-            raise RuntimeException("fresnhess runner: got a non-Source")
+            raise DbtRuntimeError("freshness runner: got a non-Source")
         # we don't do anything interesting when we compile a source node
         return self.node
@@ -147,6 +147,10 @@ def node_is_match(self, node):
 class FreshnessTask(GraphRunnableTask):
+    def defer_to_manifest(self, adapter, selected_uids):
+        # freshness tasks don't defer
+        return
+
     def result_path(self):
         if self.args.output:
             return os.path.realpath(self.args.output)
@@ -158,7 +162,7 @@ def raise_on_first_error(self):
     def get_node_selector(self):
         if self.manifest is None or self.graph is None:
-            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
         return FreshnessSelector(
             graph=self.graph,
             manifest=self.manifest,
diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py
index 87723a530a1..19fa4c1bde9 100644
--- a/core/dbt/task/generate.py
+++ b/core/dbt/task/generate.py
@@ -22,7 +22,7 @@
     ColumnMetadata,
     CatalogArtifact,
 )
-from dbt.exceptions import InternalException, AmbiguousCatalogMatch
+from dbt.exceptions import DbtInternalError, AmbiguousCatalogMatchError
 from dbt.include.global_project import DOCS_INDEX_FILE_PATH
 from dbt.events.functions import fire_event
 from dbt.events.types import (
@@ -81,7 +81,7 @@ def get_table(self, data: PrimitiveDict) -> CatalogTable:
                 str(data["table_name"]),
             )
         except KeyError as exc:
-            raise dbt.exceptions.CompilationException(
+            raise dbt.exceptions.CompilationError(
                 "Catalog information missing required key {} (got {})".format(exc, data)
             )
         table: CatalogTable
@@ -119,7 +119,7 @@ def make_unique_id_map(
             unique_ids = source_map.get(table.key(), set())
             for unique_id in unique_ids:
                 if unique_id in sources:
-                    raise AmbiguousCatalogMatch(
+                    raise AmbiguousCatalogMatchError(
                         unique_id,
                         sources[unique_id].to_dict(omit_none=True),
                         table.to_dict(omit_none=True),
@@ -201,7 +201,7 @@ def get_unique_id_mapping(
 class GenerateTask(CompileTask):
     def _get_manifest(self) -> Manifest:
         if self.manifest is None:
-            raise InternalException("manifest should not be None in _get_manifest")
+            raise DbtInternalError("manifest should not be None in _get_manifest")
         return self.manifest
     def run(self) -> CatalogArtifact:
@@ -232,7 +232,7 @@ def run(self) -> CatalogArtifact:
             shutil.copytree(asset_path, to_asset_path)
         if self.manifest is None:
-            raise InternalException("self.manifest was None in run!")
+            raise DbtInternalError("self.manifest was None in run!")
         adapter = get_adapter(self.config)
         with adapter.connection_named("generate_catalog"):
diff --git a/core/dbt/task/init.py b/core/dbt/task/init.py
index b1769d2e729..f3a7dd28e75 100644
--- a/core/dbt/task/init.py
+++ b/core/dbt/task/init.py
@@ -252,7 +252,7 @@ def run(self):
         try:
             move_to_nearest_project_dir(self.args)
             in_project = True
-        except dbt.exceptions.RuntimeException:
+        except dbt.exceptions.DbtRuntimeError:
             in_project = False
         if in_project:
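The init task above now catches dbt.exceptions.DbtRuntimeError from move_to_nearest_project_dir, which fails when no dbt_project.yml is found while walking up from the working directory (see get_nearest_project_dir earlier in this patch). A self-contained sketch of that walk, with a plain RuntimeError standing in for DbtRuntimeError:

import os

def find_nearest_dbt_project(start: str) -> str:
    # Walk up from `start` until a directory contains dbt_project.yml.
    cwd = os.path.abspath(start)
    while True:
        if os.path.exists(os.path.join(cwd, "dbt_project.yml")):
            return cwd
        parent = os.path.dirname(cwd)
        if parent == cwd:  # reached the filesystem root
            raise RuntimeError(  # stands in for dbt.exceptions.DbtRuntimeError
                "fatal: Not a dbt project (or any of the parent directories). "
                "Missing dbt_project.yml file"
            )
        cwd = parent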
diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py
index d5ffee2337c..01f1864ccae 100644
--- a/core/dbt/task/list.py
+++ b/core/dbt/task/list.py
@@ -14,7 +14,7 @@
     NoNodesSelected,
     ListRunDetails,
 )
-from dbt.exceptions import RuntimeException, InternalException
+from dbt.exceptions import DbtRuntimeError, DbtInternalError
 class ListTask(GraphRunnableTask):
@@ -49,9 +49,9 @@ def __init__(self, args, config):
         super().__init__(args, config)
         if self.args.models:
             if self.args.select:
-                raise RuntimeException('"models" and "select" are mutually exclusive arguments')
+                raise DbtRuntimeError('"models" and "select" are mutually exclusive arguments')
             if self.args.resource_types:
-                raise RuntimeException(
+                raise DbtRuntimeError(
                     '"models" and "resource_type" are mutually exclusive ' "arguments"
                 )
@@ -63,7 +63,7 @@ def _iterate_selected_nodes(self):
             warn_or_error(NoNodesSelected())
             return
         if self.manifest is None:
-            raise InternalException("manifest is None in _iterate_selected_nodes")
+            raise DbtInternalError("manifest is None in _iterate_selected_nodes")
         for node in nodes:
             if node in self.manifest.nodes:
                 yield self.manifest.nodes[node]
@@ -74,7 +74,7 @@ def _iterate_selected_nodes(self):
             elif node in self.manifest.metrics:
                 yield self.manifest.metrics[node]
             else:
-                raise RuntimeException(
+                raise DbtRuntimeError(
                     f'Got an unexpected result from node selection: "{node}"'
                     f"Expected a source or a node!"
                 )
@@ -134,7 +134,7 @@ def run(self):
         elif output == "path":
             generator = self.generate_paths
         else:
-            raise InternalException("Invalid output {}".format(output))
+            raise DbtInternalError("Invalid output {}".format(output))
         return self.output_results(generator())
@@ -175,9 +175,13 @@ def selection_arg(self):
         else:
             return self.args.select
+    def defer_to_manifest(self, adapter, selected_uids):
+        # list tasks don't defer
+        return
+
     def get_node_selector(self):
         if self.manifest is None or self.graph is None:
-            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
         if self.resource_types == [NodeType.Test]:
             return TestSelector(
                 graph=self.graph,
diff --git a/core/dbt/task/parse.py b/core/dbt/task/parse.py
index 5460bf0f3d0..8ce2b9c5b2c 100644
--- a/core/dbt/task/parse.py
+++ b/core/dbt/task/parse.py
@@ -11,18 +11,7 @@
 from dbt.parser.manifest import Manifest, ManifestLoader, _check_manifest
 from dbt.logger import DbtProcessState
 from dbt.clients.system import write_file
-from dbt.events.types import (
-    ManifestDependenciesLoaded,
-    ManifestLoaderCreated,
-    ManifestLoaded,
-    ManifestChecked,
-    ManifestFlatGraphBuilt,
-    ParseCmdStart,
-    ParseCmdCompiling,
-    ParseCmdWritingManifest,
-    ParseCmdDone,
-    ParseCmdPerfInfoPath,
-)
+from dbt.events.types import ParseCmdOut
 from dbt.events.functions import fire_event
 from dbt.graph import Graph
 import time
@@ -50,7 +39,7 @@ def write_manifest(self):
     def write_perf_info(self):
         path = os.path.join(self.config.target_path, PERF_INFO_FILE_NAME)
         write_file(path, json.dumps(self.loader._perf_info, cls=dbt.utils.JSONEncoder, indent=4))
-        fire_event(ParseCmdPerfInfoPath(path=path))
+        fire_event(ParseCmdOut(msg=f"Performance info: {path}"))
     # This method takes code that normally exists in other files
     # and pulls it in here, to simplify logging and make the
@@ -68,20 +57,20 @@ def get_full_manifest(self):
         with PARSING_STATE:
             start_load_all = time.perf_counter()
             projects = root_config.load_dependencies()
-            fire_event(ManifestDependenciesLoaded())
+            fire_event(ParseCmdOut(msg="Dependencies loaded"))
             loader = ManifestLoader(root_config, projects, macro_hook)
-            fire_event(ManifestLoaderCreated())
+            fire_event(ParseCmdOut(msg="ManifestLoader created"))
             manifest = loader.load()
-            fire_event(ManifestLoaded())
+            fire_event(ParseCmdOut(msg="Manifest loaded"))
             _check_manifest(manifest, root_config)
-            fire_event(ManifestChecked())
+            fire_event(ParseCmdOut(msg="Manifest checked"))
             manifest.build_flat_graph()
-            fire_event(ManifestFlatGraphBuilt())
+            fire_event(ParseCmdOut(msg="Flat graph built"))
             loader._perf_info.load_all_elapsed = time.perf_counter() - start_load_all
         self.loader = loader
         self.manifest = manifest
-        fire_event(ManifestLoaded())
+        fire_event(ParseCmdOut(msg="Manifest finished loading"))
     def compile_manifest(self):
         adapter = get_adapter(self.config)
@@ -89,14 +78,14 @@ def compile_manifest(self):
         self.graph = compiler.compile(self.manifest)
     def run(self):
-        fire_event(ParseCmdStart())
+        fire_event(ParseCmdOut(msg="Start parsing."))
         self.get_full_manifest()
         if self.args.compile:
-            fire_event(ParseCmdCompiling())
+            fire_event(ParseCmdOut(msg="Compiling."))
             self.compile_manifest()
         if self.args.write_manifest:
-            fire_event(ParseCmdWritingManifest())
+            fire_event(ParseCmdOut(msg="Writing manifest."))
             self.write_manifest()
         self.write_perf_info()
-        fire_event(ParseCmdDone())
+        fire_event(ParseCmdOut(msg="Done."))
diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py
index bc8f9a2de75..411c57af663 100644
--- a/core/dbt/task/run.py
+++ b/core/dbt/task/run.py
@@ -21,23 +21,24 @@
 from dbt.contracts.graph.nodes import HookNode, ResultNode
 from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus, BaseResult
 from dbt.exceptions import (
-    CompilationException,
-    InternalException,
-    MissingMaterialization,
-    RuntimeException,
-    ValidationException,
+    CompilationError,
+    DbtInternalError,
+    MissingMaterializationError,
+    DbtRuntimeError,
+    DbtValidationError,
 )
-from dbt.events.functions import fire_event, get_invocation_id, info
+from dbt.events.functions import fire_event, get_invocation_id
 from dbt.events.types import (
     DatabaseErrorRunningHook,
     EmptyLine,
     HooksRunning,
-    HookFinished,
+    FinishedRunningStats,
     LogModelResult,
     LogStartLine,
     LogHookEndLine,
     LogHookStartLine,
 )
+from dbt.events.base_types import EventLevel
 from dbt.logger import (
     TextOnly,
     HookMetadata,
@@ -105,7 +106,7 @@ def get_hook(source, index):
 def track_model_run(index, num_nodes, run_model_result):
     if tracking.active_user is None:
-        raise InternalException("cannot track model run with no active user")
+        raise DbtInternalError("cannot track model run with no active user")
     invocation_id = get_invocation_id()
     tracking.track_model_run(
         {
@@ -134,14 +135,14 @@ def _validate_materialization_relations_dict(inp: Dict[Any, Any], model) -> List
             'Invalid return value from materialization, "relations" '
             "not found, got keys: {}".format(list(inp))
         )
-        raise CompilationException(msg, node=model) from None
+        raise CompilationError(msg, node=model) from None
     if not isinstance(relations_value, list):
         msg = (
             'Invalid return value from materialization, "relations" '
             "not a list, got: {}".format(relations_value)
         )
-        raise CompilationException(msg, node=model) from None
+        raise CompilationError(msg, node=model) from None
     relations: List[BaseRelation] = []
     for relation in relations_value:
@@ -150,7 +151,7 @@ def _validate_materialization_relations_dict(inp: Dict[Any, Any], model) -> List
                 "Invalid return value from materialization, "
                 '"relations" contains non-Relation: {}'.format(relation)
             )
-            raise CompilationException(msg, node=model)
+            raise CompilationError(msg, node=model)
         assert isinstance(relation, BaseRelation)
         relations.append(relation)
@@ -186,10 +187,10 @@ def print_result_line(self, result):
         description = self.describe_node()
         if result.status == NodeStatus.Error:
             status = result.status
-            level = "error"
+            level = EventLevel.ERROR
         else:
             status = result.message
-            level = "info"
+            level = EventLevel.INFO
         fire_event(
             LogModelResult(
                 description=description,
@@ -198,8 +199,8 @@ def print_result_line(self, result):
                 total=self.num_nodes,
                 execution_time=result.execution_time,
                 node_info=self.node.node_info,
-                info=info(level=level),
-            )
+            ),
+            level=level,
         )
     def before_execute(self):
@@ -212,7 +213,7 @@ def after_execute(self, result):
     def _build_run_model_result(self, model, context):
         result = context["load_result"]("main")
         if not result:
-            raise RuntimeException("main is not being called during running model")
+            raise DbtRuntimeError("main is not being called during running model")
         adapter_response = {}
         if isinstance(result.response, dbtClassMixin):
             adapter_response = result.response.to_dict(omit_none=True)
@@ -233,7 +234,7 @@ def _materialization_relations(self, result: Any, model) -> List[BaseRelation]:
             'The materialization ("{}") did not explicitly return a '
             "list of relations to add to the cache.".format(str(model.get_materialization()))
         )
-        raise CompilationException(msg, node=model)
+        raise CompilationError(msg, node=model)
     if isinstance(result, dict):
         return _validate_materialization_relations_dict(result, model)
@@ -242,7 +243,7 @@ def _materialization_relations(self, result: Any, model) -> List[BaseRelation]:
         "Invalid return value from materialization, expected a dict "
         'with key "relations", got: {}'.format(str(result))
     )
-    raise CompilationException(msg, node=model)
+    raise CompilationError(msg, node=model)
 def execute(self, model, manifest):
     context = generate_runtime_model_context(model, self.config, manifest)
@@ -252,10 +253,12 @@ def execute(self, model, manifest):
     )
     if materialization_macro is None:
-        raise MissingMaterialization(model=model, adapter_type=self.adapter.type())
+        raise MissingMaterializationError(
+            materialization=model.get_materialization(), adapter_type=self.adapter.type()
+        )
     if "config" not in context:
-        raise InternalException(
+        raise DbtInternalError(
            "Invalid materialization context generated, missing config: {}".format(context)
        )
     context_config = context["config"]
@@ -264,7 +267,7 @@ def execute(self, model, manifest):
     model_lang_supported = model.language in materialization_macro.supported_languages
     if mat_has_supported_langs and not model_lang_supported:
         str_langs = [str(lang) for lang in materialization_macro.supported_languages]
-        raise ValidationException(
+        raise DbtValidationError(
             f'Materialization "{materialization_macro.name}" only supports languages {str_langs}; '
             f'got "{model.language}"'
         )
@@ -312,7 +315,7 @@ def _hook_keyfunc(self, hook: HookNode) -> Tuple[str, Optional[int]]:
     def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]:
         if self.manifest is None:
-            raise InternalException("self.manifest was None in get_hooks_by_type")
+            raise DbtInternalError("self.manifest was None in get_hooks_by_type")
         nodes = self.manifest.nodes.values()
         # find all hooks defined in the manifest (could be multiple projects)
@@ -392,7 +395,7 @@ def safe_run_hooks(
     ) -> None:
         try:
             self.run_hooks(adapter, hook_type, extra_context)
-        except RuntimeException as exc:
+        except DbtRuntimeError as exc:
             fire_event(DatabaseErrorRunningHook(hook_type=hook_type.value))
             self.node_results.append(
                 BaseResult(
@@ -418,7 +421,9 @@ def print_results_line(self, results, execution_time):
         with TextOnly():
             fire_event(EmptyLine())
         fire_event(
-            HookFinished(stat_line=stat_line, execution=execution, execution_time=execution_time)
+            FinishedRunningStats(
+                stat_line=stat_line, execution=execution, execution_time=execution_time
+            )
         )
     def before_run(self, adapter, selected_uids: AbstractSet[str]):
@@ -454,7 +459,7 @@ def after_run(self, adapter, results):
     def get_node_selector(self) -> ResourceTypeSelector:
         if self.manifest is None or self.graph is None:
-            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
         return ResourceTypeSelector(
             graph=self.graph,
             manifest=self.manifest,
diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py
index e510c70c37d..63384f1c21f 100644
--- a/core/dbt/task/run_operation.py
+++ b/core/dbt/task/run_operation.py
@@ -10,7 +10,7 @@
 from dbt.adapters.factory import get_adapter
 from dbt.config.utils import parse_cli_vars
 from dbt.contracts.results import RunOperationResultsArtifact
-from dbt.exceptions import InternalException
+from dbt.exceptions import DbtInternalError
 from dbt.events.functions import fire_event
 from dbt.events.types import (
     RunningOperationCaughtError,
@@ -34,7 +34,7 @@ def _get_kwargs(self) -> Dict[str, Any]:
     def compile_manifest(self) -> None:
         if self.manifest is None:
-            raise InternalException("manifest was None in compile_manifest")
+            raise DbtInternalError("manifest was None in compile_manifest")
     def _run_unsafe(self) -> agate.Table:
         adapter = get_adapter(self.config)
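The run.py hunks above replace the old info=info(level=...) payload with an explicit level= argument to fire_event, using the new dbt.events.base_types.EventLevel enum. The status-to-level rule those runners share reduces to the sketch below; the enums here are stand-ins for the real types in dbt.events.base_types and dbt.contracts.results:

from enum import Enum

class EventLevel(str, Enum):  # stand-in for dbt.events.base_types.EventLevel
    INFO = "info"
    ERROR = "error"

class NodeStatus(str, Enum):  # stand-in for dbt.contracts.results.NodeStatus
    Success = "success"
    Error = "error"

def result_level(status: NodeStatus) -> EventLevel:
    # errors log at ERROR; every other terminal status logs at INFO
    return EventLevel.ERROR if status == NodeStatus.Error else EventLevel.INFO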
diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py
index 14005203296..fee5fadc891 100644
--- a/core/dbt/task/runnable.py
+++ b/core/dbt/task/runnable.py
@@ -44,10 +44,10 @@
 from dbt.contracts.results import NodeStatus, RunExecutionResult, RunningStatus
 from dbt.contracts.state import PreviousState
 from dbt.exceptions import (
-    InternalException,
-    NotImplementedException,
-    RuntimeException,
-    FailFastException,
+    DbtInternalError,
+    NotImplementedError,
+    DbtRuntimeError,
+    FailFastError,
 )
 from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, parse_difference, Graph
@@ -83,7 +83,7 @@ def load_manifest(self):
     def compile_manifest(self):
         if self.manifest is None:
-            raise InternalException("compile_manifest called before manifest was loaded")
+            raise DbtInternalError("compile_manifest called before manifest was loaded")
         # we cannot get adapter in init since it will break rpc #5579
         adapter = get_adapter(self.config)
@@ -150,7 +150,11 @@ def get_selection_spec(self) -> SelectionSpec:
     @abstractmethod
     def get_node_selector(self) -> NodeSelector:
-        raise NotImplementedException(f"get_node_selector not implemented for task {type(self)}")
+        raise NotImplementedError(f"get_node_selector not implemented for task {type(self)}")
+
+    @abstractmethod
+    def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
+        raise NotImplementedError(f"defer_to_manifest not implemented for task {type(self)}")
     def get_graph_queue(self) -> GraphQueue:
         selector = self.get_node_selector()
@@ -160,7 +164,7 @@ def _runtime_initialize(self):
         super()._runtime_initialize()
         if self.manifest is None or self.graph is None:
-            raise InternalException("_runtime_initialize never loaded the manifest and graph!")
+            raise DbtInternalError("_runtime_initialize never loaded the manifest and graph!")
         self.job_queue = self.get_graph_queue()
@@ -172,7 +176,7 @@ def _runtime_initialize(self):
             elif uid in self.manifest.sources:
                 self._flattened_nodes.append(self.manifest.sources[uid])
             else:
-                raise InternalException(
+                raise DbtInternalError(
                     f"Node selection returned {uid}, expected a node or a source"
                 )
@@ -182,7 +186,7 @@ def raise_on_first_error(self):
         return False
     def get_runner_type(self, node):
-        raise NotImplementedException("Not Implemented")
+        raise NotImplementedError("Not Implemented")
     def result_path(self):
         return os.path.join(self.config.target_path, RESULT_FILE_NAME)
@@ -222,10 +226,6 @@ def call_runner(self, runner):
         status: Dict[str, str] = {}
         try:
             result = runner.run_with_hooks(self.manifest)
-            status = runner.get_result_status(result)
-            runner.node.update_event_status(
-                node_status=result.status, finished_at=datetime.utcnow().isoformat()
-            )
         finally:
             finishctx = TimestampNamed("finished_at")
             with finishctx, DbtModelState(status):
@@ -242,7 +242,7 @@ def call_runner(self, runner):
         fail_fast = flags.FAIL_FAST
         if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast:
-            self._raise_next_tick = FailFastException(
+            self._raise_next_tick = FailFastError(
                 msg="Failing early due to test failure or runtime error",
                 result=result,
                 node=getattr(result, "node", None),
@@ -251,7 +251,7 @@ def call_runner(self, runner):
             # if we raise inside a thread, it'll just get silently swallowed.
             # stash the error message we want here, and it will check the
             # next 'tick' - should be soon since our thread is about to finish!
-            self._raise_next_tick = RuntimeException(result.message)
+            self._raise_next_tick = DbtRuntimeError(result.message)
         return result
@@ -276,7 +276,7 @@ def _raise_set_error(self):
     def run_queue(self, pool):
         """Given a pool, submit jobs from the queue to the pool."""
         if self.job_queue is None:
-            raise InternalException("Got to run_queue with no job queue set")
+            raise DbtInternalError("Got to run_queue with no job queue set")
         def callback(result):
             """Note: mark_done, at a minimum, must happen here or dbt will
@@ -285,7 +285,7 @@ def callback(result):
             self._handle_result(result)
             if self.job_queue is None:
-                raise InternalException("Got to run_queue callback with no job queue set")
+                raise DbtInternalError("Got to run_queue callback with no job queue set")
             self.job_queue.mark_done(result.node.unique_id)
         while not self.job_queue.empty():
@@ -327,7 +327,7 @@ def _handle_result(self, result):
         node = result.node
         if self.manifest is None:
-            raise InternalException("manifest was None in _handle_result")
+            raise DbtInternalError("manifest was None in _handle_result")
         if isinstance(node, SourceDefinition):
             self.manifest.update_source(node)
@@ -383,7 +383,7 @@ def execute_nodes(self):
         try:
             self.run_queue(pool)
-        except FailFastException as failure:
+        except FailFastError as failure:
             self._cancel_connections(pool)
             print_run_result_error(failure.result)
             raise
@@ -400,7 +400,7 @@ def execute_nodes(self):
     def _mark_dependent_errors(self, node_id, result, cause):
         if self.graph is None:
-            raise InternalException("graph is None in _mark_dependent_errors")
+            raise DbtInternalError("graph is None in _mark_dependent_errors")
         for dep_node_id in self.graph.get_dependent_nodes(node_id):
             self._skipped_children[dep_node_id] = cause
@@ -419,6 +419,7 @@ def populate_adapter_cache(self, adapter, required_schemas: Set[BaseRelation] =
     def before_run(self, adapter, selected_uids: AbstractSet[str]):
         with adapter.connection_named("master"):
             self.populate_adapter_cache(adapter)
+            self.defer_to_manifest(adapter, selected_uids)
     def after_run(self, adapter, results):
         pass
@@ -453,7 +454,7 @@ def run(self):
         self._runtime_initialize()
         if self._flattened_nodes is None:
-            raise InternalException("after _runtime_initialize, _flattened_nodes was still None")
+            raise DbtInternalError("after _runtime_initialize, _flattened_nodes was still None")
         if len(self._flattened_nodes) == 0:
             with TextOnly():
@@ -509,7 +510,7 @@ def interpret_results(cls, results):
     def get_model_schemas(self, adapter, selected_uids: Iterable[str]) -> Set[BaseRelation]:
         if self.manifest is None:
-            raise InternalException("manifest was None in get_model_schemas")
+            raise DbtInternalError("manifest was None in get_model_schemas")
         result: Set[BaseRelation] = set()
         for node in self.manifest.nodes.values():
diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py
index 5c922a5ba90..58b6aa25bda 100644
--- a/core/dbt/task/seed.py
+++ b/core/dbt/task/seed.py
@@ -6,10 +6,10 @@
 )
 from dbt.contracts.results import RunStatus
-from dbt.exceptions import InternalException
+from dbt.exceptions import DbtInternalError
 from dbt.graph import ResourceTypeSelector
 from dbt.logger import TextOnly
-from dbt.events.functions import fire_event, info
+from dbt.events.functions import fire_event
 from dbt.events.types import (
     SeedHeader,
     SeedHeaderSeparator,
@@ -17,6 +17,7 @@
     LogSeedResult,
     LogStartLine,
 )
+from dbt.events.base_types import EventLevel
 from dbt.node_types import NodeType
 from dbt.contracts.results import NodeStatus
@@ -46,10 +47,9 @@ def compile(self, manifest):
     def print_result_line(self, result):
         model = result.node
-        level = "error" if result.status == NodeStatus.Error else "info"
+        level = EventLevel.ERROR if result.status == NodeStatus.Error else EventLevel.INFO
         fire_event(
             LogSeedResult(
-                info=info(level=level),
                 status=result.status,
                 result_message=result.message,
                 index=self.node_index,
@@ -58,7 +58,8 @@ def print_result_line(self, result):
                 schema=self.node.schema,
                 relation=model.alias,
                 node_info=model.node_info,
-            )
+            ),
+            level=level,
         )
@@ -72,7 +73,7 @@ def raise_on_first_error(self):
     def get_node_selector(self):
         if self.manifest is None or self.graph is None:
-            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
         return ResourceTypeSelector(
             graph=self.graph,
             manifest=self.manifest,
diff --git a/core/dbt/task/snapshot.py b/core/dbt/task/snapshot.py
index 44ccbd88361..f5e8a549bb2 100644
--- a/core/dbt/task/snapshot.py
+++ b/core/dbt/task/snapshot.py
@@ -1,7 +1,8 @@
 from .run import ModelRunner, RunTask
-from dbt.exceptions import InternalException
-from dbt.events.functions import fire_event, info
+from dbt.exceptions import DbtInternalError
+from dbt.events.functions import fire_event
+from dbt.events.base_types import EventLevel
 from dbt.events.types import LogSnapshotResult
 from dbt.graph import ResourceTypeSelector
 from dbt.node_types import NodeType
@@ -15,10 +16,9 @@ def describe_node(self):
     def print_result_line(self, result):
         model = result.node
         cfg = model.config.to_dict(omit_none=True)
-        level = "error" if result.status == NodeStatus.Error else "info"
+        level = EventLevel.ERROR if result.status == NodeStatus.Error else EventLevel.INFO
         fire_event(
             LogSnapshotResult(
-                info=info(level=level),
                 status=result.status,
                 description=self.get_node_representation(),
                 cfg=cfg,
@@ -26,7 +26,8 @@ def print_result_line(self, result):
                 total=self.num_nodes,
                 execution_time=result.execution_time,
                 node_info=model.node_info,
-            )
+            ),
+            level=level,
         )
@@ -36,7 +37,7 @@ def raise_on_first_error(self):
     def get_node_selector(self):
         if self.manifest is None or self.graph is None:
-            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
         return ResourceTypeSelector(
             graph=self.graph,
             manifest=self.manifest,
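runnable.py above makes defer_to_manifest an abstract method on GraphRunnableTask and calls it from before_run, so tasks that never defer (freshness, list) override it with a no-op. A reduced sketch of that shape, with placeholder classes standing in for the real task hierarchy:

from abc import ABC, abstractmethod

class GraphRunnableTask(ABC):
    @abstractmethod
    def defer_to_manifest(self, adapter, selected_uids):
        raise NotImplementedError(f"defer_to_manifest not implemented for task {type(self)}")

    def before_run(self, adapter, selected_uids):
        # populate_adapter_cache elided; deferral now always happens here
        self.defer_to_manifest(adapter, selected_uids)

class FreshnessTask(GraphRunnableTask):
    def defer_to_manifest(self, adapter, selected_uids):
        # freshness tasks don't defer
        return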
diff --git a/core/dbt/task/sql.py b/core/dbt/task/sql.py
index 4a267bd91bf..4f662383d74 100644
--- a/core/dbt/task/sql.py
+++ b/core/dbt/task/sql.py
@@ -25,7 +25,7 @@ def __init__(self, config, adapter, node, node_index, num_nodes):
     def handle_exception(self, e, ctx):
         fire_event(SQLRunnerException(exc=str(e), exc_info=traceback.format_exc()))
         if isinstance(e, dbt.exceptions.Exception):
-            if isinstance(e, dbt.exceptions.RuntimeException):
+            if isinstance(e, dbt.exceptions.DbtRuntimeError):
                 e.add_node(ctx.node)
             return e
@@ -51,7 +51,7 @@ def error_result(self, node, error, start_time, timing_info):
         raise error
     def ephemeral_result(self, node, start_time, timing_info):
-        raise dbt.exceptions.NotImplementedException("cannot execute ephemeral nodes remotely!")
+        raise dbt.exceptions.NotImplementedError("cannot execute ephemeral nodes remotely!")
 class SqlCompileRunner(GenericSqlRunner[RemoteCompileResult]):
diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py
index 26d6d46f028..e7f449873aa 100644
--- a/core/dbt/task/test.py
+++ b/core/dbt/task/test.py
@@ -16,15 +16,15 @@
 from dbt.contracts.results import TestStatus, PrimitiveDict, RunResult
 from dbt.context.providers import generate_runtime_model_context
 from dbt.clients.jinja import MacroGenerator
-from dbt.events.functions import fire_event, info
+from dbt.events.functions import fire_event
 from dbt.events.types import (
     LogTestResult,
     LogStartLine,
 )
 from dbt.exceptions import (
-    InternalException,
-    InvalidBoolean,
-    MissingMaterialization,
+    DbtInternalError,
+    BooleanError,
+    MissingMaterializationError,
 )
 from dbt.graph import (
     ResourceTypeSelector,
@@ -51,7 +51,7 @@ def convert_bool_type(field) -> bool:
         try:
             return bool(strtobool(field))  # type: ignore
         except ValueError:
-            raise InvalidBoolean(field, "get_test_sql")
+            raise BooleanError(field, "get_test_sql")
     # need this so we catch both true bools and 0/1
     return bool(field)
@@ -68,14 +68,14 @@ def print_result_line(self, result):
         fire_event(
             LogTestResult(
                 name=model.name,
-                info=info(level=LogTestResult.status_to_level(str(result.status))),
                 status=str(result.status),
                 index=self.node_index,
                 num_models=self.num_nodes,
                 execution_time=result.execution_time,
                 node_info=model.node_info,
                 num_failures=result.failures,
-            )
+            ),
+            level=LogTestResult.status_to_level(str(result.status)),
         )
     def print_start_line(self):
@@ -91,9 +91,7 @@ def print_start_line(self):
     def before_execute(self):
         self.print_start_line()
-    def execute_test(
-        self, test: TestNode, manifest: Manifest
-    ) -> TestResultData:
+    def execute_test(self, test: TestNode, manifest: Manifest) -> TestResultData:
         context = generate_runtime_model_context(test, self.config, manifest)
         materialization_macro = manifest.find_materialization_macro_by_name(
@@ -101,10 +99,12 @@ def execute_test(
         )
         if materialization_macro is None:
-            raise MissingMaterialization(model=test, adapter_type=self.adapter.type())
+            raise MissingMaterializationError(
+                materialization=test.get_materialization(), adapter_type=self.adapter.type()
+            )
         if "config" not in context:
-            raise InternalException(
+            raise DbtInternalError(
                 "Invalid materialization context generated, missing config: {}".format(context)
             )
@@ -118,14 +118,14 @@ def execute_test(
         table = result["table"]
         num_rows = len(table.rows)
         if num_rows != 1:
-            raise InternalException(
+            raise DbtInternalError(
                 f"dbt internally failed to execute {test.unique_id}: "
                 f"Returned {num_rows} rows, but expected "
                 f"1 row"
             )
         num_cols = len(table.columns)
         if num_cols != 3:
-            raise InternalException(
+            raise DbtInternalError(
                 f"dbt internally failed to execute {test.unique_id}: "
                 f"Returned {num_cols} columns, but expected "
                 f"3 columns"
@@ -203,7 +203,7 @@ def raise_on_first_error(self):
     def get_node_selector(self) -> TestSelector:
         if self.manifest is None or self.graph is None:
-            raise InternalException("manifest and graph must be set to get perform node selection")
+            raise DbtInternalError("manifest and graph must be set to perform node selection")
         return TestSelector(
             graph=self.graph,
             manifest=self.manifest,
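execute_test above enforces that the compiled test query hands back exactly one row with exactly three columns before it is converted into a TestResultData. Extracted as a standalone sketch of that invariant (the row/column containers are assumed to be plain sequences here, and ValueError stands in for DbtInternalError):

def check_test_result_shape(unique_id, rows, columns):
    # Mirrors the shape checks in TestRunner.execute_test above.
    if len(rows) != 1:
        raise ValueError(
            f"dbt internally failed to execute {unique_id}: "
            f"Returned {len(rows)} rows, but expected 1 row"
        )
    if len(columns) != 3:
        raise ValueError(
            f"dbt internally failed to execute {unique_id}: "
            f"Returned {len(columns)} columns, but expected 3 columns"
        )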
diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py
index 2d7ae5ded67..5fcdca408c9 100644
--- a/core/dbt/tests/fixtures/project.py
+++ b/core/dbt/tests/fixtures/project.py
@@ -6,7 +6,7 @@
 import warnings
 import yaml
-from dbt.exceptions import CompilationException, DatabaseException
+from dbt.exceptions import CompilationError, DbtDatabaseError
 import dbt.flags as flags
 from dbt.config.runtime import RuntimeConfig
 from dbt.adapters.factory import get_adapter, register_adapter, reset_adapters, get_adapter_by_type
@@ -249,7 +249,9 @@ def clean_up_logging():
 # otherwise this will fail. So to test errors in those areas, you need to copy the files
 # into the project in the tests instead of putting them in the fixtures.
 @pytest.fixture(scope="class")
-def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging):
+def adapter(
+    unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging
+):
     # The profiles.yml and dbt_project.yml should already be written out
     args = Namespace(
         profiles_dir=str(profiles_root), project_dir=str(project_root), target=None, profile=None
@@ -494,10 +496,10 @@ def project(
    # a `load_dependencies` method.
    # Macros gets executed as part of drop_scheme in core/dbt/adapters/sql/impl.py. When
    # the macros have errors (which is what we're actually testing for...) they end up
-    # throwing CompilationExceptionss or DatabaseExceptions
+    # throwing CompilationErrors or DbtDatabaseErrors
    try:
        project.drop_test_schema()
-    except (KeyError, AttributeError, CompilationException, DatabaseException):
+    except (KeyError, AttributeError, CompilationError, DbtDatabaseError):
        pass
    os.chdir(orig_cwd)
    cleanup_event_logger()
diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py
index a159b605d80..03e752405ad 100644
--- a/core/dbt/tests/util.py
+++ b/core/dbt/tests/util.py
@@ -15,7 +15,12 @@
 from dbt.main import handle_and_check
 from dbt.logger import log_manager
 from dbt.contracts.graph.manifest import Manifest
-from dbt.events.functions import fire_event, capture_stdout_logs, stop_capture_stdout_logs, reset_metadata_vars
+from dbt.events.functions import (
+    fire_event,
+    capture_stdout_logs,
+    stop_capture_stdout_logs,
+    reset_metadata_vars,
+)
 from dbt.events.test_types import IntegrationTestDebug
 # =============================================================================
diff --git a/core/dbt/utils.py b/core/dbt/utils.py
index 987371b6b02..3f31a806156 100644
--- a/core/dbt/utils.py
+++ b/core/dbt/utils.py
@@ -15,7 +15,7 @@
 from pathlib import PosixPath, WindowsPath
 from contextlib import contextmanager
-from dbt.exceptions import ConnectionException, DuplicateAlias
+from dbt.exceptions import ConnectionError, DuplicateAliasError
 from dbt.events.functions import fire_event
 from dbt.events.types import RetryExternalCall, RecordRetryException
 from dbt import flags
@@ -92,13 +92,13 @@ def get_model_name_or_none(model):
 def get_dbt_macro_name(name):
     if name is None:
-        raise dbt.exceptions.InternalException("Got None for a macro name!")
+        raise dbt.exceptions.DbtInternalError("Got None for a macro name!")
     return f"{MACRO_PREFIX}{name}"
 def get_dbt_docs_name(name):
     if name is None:
-        raise dbt.exceptions.InternalException("Got None for a doc name!")
+        raise dbt.exceptions.DbtInternalError("Got None for a doc name!")
     return f"{DOCS_PREFIX}{name}"
@@ -228,7 +228,7 @@ def deep_map_render(func: Callable[[Any, Tuple[Union[str, int], ...]], Any], val
         return _deep_map_render(func, value, ())
     except RuntimeError as exc:
         if "maximum recursion depth exceeded" in str(exc):
-            raise dbt.exceptions.RecursionException("Cycle detected in deep_map_render")
+            raise dbt.exceptions.RecursionError("Cycle detected in deep_map_render")
         raise
@@ -365,7 +365,7 @@ def translate_mapping(self, kwargs: Mapping[str, Any]) -> Dict[str, Any]:
         for key, value in kwargs.items():
             canonical_key = self.aliases.get(key, key)
             if canonical_key in result:
-                raise DuplicateAlias(kwargs, self.aliases, canonical_key)
+                raise DuplicateAliasError(kwargs, self.aliases, canonical_key)
             result[canonical_key] = self.translate_value(value)
         return result
@@ -385,7 +385,7 @@ def translate(self, value: Mapping[str, Any]) -> Dict[str, Any]:
             return self.translate_mapping(value)
         except RuntimeError as exc:
             if "maximum recursion depth exceeded" in str(exc):
-                raise dbt.exceptions.RecursionException(
+                raise dbt.exceptions.RecursionError(
                     "Cycle detected in a value passed to translate!"
                 )
             raise
@@ -403,7 +403,7 @@ def translate_aliases(
     :returns: A dict containing all the values in kwargs referenced by their
        canonical key.
-    :raises: `AliasException`, if a canonical key is defined more than once.
+    :raises: `AliasError`, if a canonical key is defined more than once.
     """
     translator = Translator(aliases, recurse)
     return translator.translate(kwargs)
@@ -624,7 +624,7 @@ def _connection_exception_retry(fn, max_attempts: int, attempt: int = 0):
             time.sleep(1)
             return _connection_exception_retry(fn, max_attempts, attempt + 1)
         else:
-            raise ConnectionException("External connection exception occurred: " + str(exc))
+            raise ConnectionError("External connection exception occurred: " + str(exc))
 # This is used to serialize the args in the run_results and in the logs.
@@ -657,9 +657,10 @@ def args_to_dict(args):
         "store_failures",
         "use_experimental_parser",
     )
+    default_empty_yaml_dict_keys = ("vars", "warn_error_options")
         if key in default_false_keys and var_args[key] is False:
             continue
-        if key == "vars" and var_args[key] == "{}":
+        if key in default_empty_yaml_dict_keys and var_args[key] == "{}":
             continue
         # this was required for a test case
         if isinstance(var_args[key], PosixPath) or isinstance(var_args[key], WindowsPath):
@@ -683,3 +684,10 @@ def cast_to_int(integer: Optional[int]) -> int:
         return 0
     else:
         return integer
+
+
+def cast_dict_to_dict_of_strings(dct):
+    new_dct = {}
+    for k, v in dct.items():
+        new_dct[str(k)] = str(v)
+    return new_dct
diff --git a/core/dbt/version.py b/core/dbt/version.py
index d668a902ae6..d836e2b4a43 100644
--- a/core/dbt/version.py
+++ b/core/dbt/version.py
@@ -71,7 +71,7 @@ def _get_core_msg_lines(installed, latest) -> Tuple[List[List[str]], str]:
     latest_line = ["latest", latest_s, green("Up to date!")]
     if installed > latest:
-        latest_line[2] = green("Ahead of latest version!")
+        latest_line[2] = yellow("Ahead of latest version!")
     elif installed < latest:
         latest_line[2] = yellow("Update available!")
         update_info = (
@@ -145,7 +145,7 @@ def _get_plugin_msg_info(
         compatibility_msg = yellow("Update available!")
         needs_update = True
     elif plugin > latest_plugin:
-        compatibility_msg = green("Ahead of latest version!")
+        compatibility_msg = yellow("Ahead of latest version!")
     else:
         compatibility_msg = green("Up to date!")
@@ -235,5 +235,5 @@ def _get_adapter_plugin_names() -> Iterator[str]:
     yield plugin_name
-__version__ = "1.4.0b1"
+__version__ = "1.5.0a1"
 installed = get_installed_version()
diff --git a/core/setup.py b/core/setup.py
index c2c04458ace..b5c43cc184a 100644
--- a/core/setup.py
+++ b/core/setup.py
@@ -25,7 +25,7 @@
 package_name = "dbt-core"
-package_version = "1.4.0b1"
+package_version = "1.5.0a1"
 description = """With dbt, data analysts and engineers can build analytics \
 the way engineers build applications."""
@@ -54,7 +54,7 @@
     "hologram>=0.0.14,<=0.0.15",
     "isodate>=0.6,<0.7",
     "logbook>=1.5,<1.6",
-    "mashumaro[msgpack]==3.2",
+    "mashumaro[msgpack]==3.3.1",
     "minimal-snowplow-tracker==0.0.2",
     "networkx>=2.3,<2.8.1;python_version<'3.8'",
     "networkx>=2.3,<3;python_version>='3.8'",
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 72332c35de9..4061e1e9746 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -14,12 +14,12 @@ FROM --platform=$build_for python:3.10.7-slim-bullseye as base
 # N.B. The refs updated automagically every release via bumpversion
 # N.B. dbt-postgres is currently found in the core codebase so a value of dbt-core@ is correct
-ARG dbt_core_ref=dbt-core@v1.4.0b1
-ARG dbt_postgres_ref=dbt-core@v1.4.0b1
-ARG dbt_redshift_ref=dbt-redshift@v1.4.0b1
-ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0b1
-ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0b1
-ARG dbt_spark_ref=dbt-spark@v1.4.0b1
+ARG dbt_core_ref=dbt-core@v1.5.0a1
+ARG dbt_postgres_ref=dbt-core@v1.5.0a1
+ARG dbt_redshift_ref=dbt-redshift@v1.5.0a1
+ARG dbt_bigquery_ref=dbt-bigquery@v1.5.0a1
+ARG dbt_snowflake_ref=dbt-snowflake@v1.5.0a1
+ARG dbt_spark_ref=dbt-spark@v1.5.0a1
 # special case args
 ARG dbt_spark_version=all
 ARG dbt_third_party
diff --git a/plugins/postgres/dbt/adapters/postgres/__version__.py b/plugins/postgres/dbt/adapters/postgres/__version__.py
index 27cfeecd9e8..219c289b1bf 100644
--- a/plugins/postgres/dbt/adapters/postgres/__version__.py
+++ b/plugins/postgres/dbt/adapters/postgres/__version__.py
@@ -1 +1 @@
-version = "1.4.0b1"
+version = "1.5.0a1"
diff --git a/plugins/postgres/dbt/adapters/postgres/connections.py b/plugins/postgres/dbt/adapters/postgres/connections.py
index df24b0f9118..afa74a46339 100644
--- a/plugins/postgres/dbt/adapters/postgres/connections.py
+++ b/plugins/postgres/dbt/adapters/postgres/connections.py
@@ -73,19 +73,19 @@ def exception_handler(self, sql):
                 logger.debug("Failed to release connection!")
                 pass
-            raise dbt.exceptions.DatabaseException(str(e).strip()) from e
+            raise dbt.exceptions.DbtDatabaseError(str(e).strip()) from e
         except Exception as e:
             logger.debug("Error running SQL: {}", sql)
             logger.debug("Rolling back transaction.")
             self.rollback_if_open()
-            if isinstance(e, dbt.exceptions.RuntimeException):
+            if isinstance(e, dbt.exceptions.DbtRuntimeError):
                 # during a sql query, an internal to dbt exception was raised.
                 # this sounds a lot like a signal handler and probably has
                 # useful information, so raise it without modification.
                 raise
-            raise dbt.exceptions.RuntimeException(e) from e
+            raise dbt.exceptions.DbtRuntimeError(e) from e
     @classmethod
     def open(cls, connection):
diff --git a/plugins/postgres/dbt/adapters/postgres/impl.py b/plugins/postgres/dbt/adapters/postgres/impl.py
index 78b86234eae..9a5d5d3f8f6 100644
--- a/plugins/postgres/dbt/adapters/postgres/impl.py
+++ b/plugins/postgres/dbt/adapters/postgres/impl.py
@@ -9,11 +9,11 @@
 from dbt.adapters.postgres import PostgresRelation
 from dbt.dataclass_schema import dbtClassMixin, ValidationError
 from dbt.exceptions import (
-    CrossDbReferenceProhibited,
-    IndexConfigNotDict,
-    InvalidIndexConfig,
-    RuntimeException,
-    UnexpectedDbReference,
+    CrossDbReferenceProhibitedError,
+    IndexConfigNotDictError,
+    IndexConfigError,
+    DbtRuntimeError,
+    UnexpectedDbReferenceError,
 )
 import dbt.utils
@@ -46,9 +46,9 @@ def parse(cls, raw_index) -> Optional["PostgresIndexConfig"]:
         cls.validate(raw_index)
         return cls.from_dict(raw_index)
     except ValidationError as exc:
-        raise InvalidIndexConfig(exc)
+        raise IndexConfigError(exc)
     except TypeError:
-        raise IndexConfigNotDict(raw_index)
+        raise IndexConfigNotDictError(raw_index)
 @dataclass
@@ -74,7 +74,7 @@ def verify_database(self, database):
         database = database.strip('"')
     expected = self.config.credentials.database
     if database.lower() != expected.lower():
-        raise UnexpectedDbReference(self.type(), database, expected)
+        raise UnexpectedDbReferenceError(self.type(), database, expected)
     # return an empty string on success so macros can call this
     return ""
@@ -107,8 +107,8 @@ def _get_catalog_schemas(self, manifest):
     schemas = super()._get_catalog_schemas(manifest)
     try:
         return schemas.flatten()
-    except RuntimeException as exc:
-        raise CrossDbReferenceProhibited(self.type(), exc.msg)
+    except DbtRuntimeError as exc:
+        raise CrossDbReferenceProhibitedError(self.type(), exc.msg)
 def _link_cached_relations(self, manifest):
     schemas: Set[str] = set()
diff --git a/plugins/postgres/dbt/adapters/postgres/relation.py b/plugins/postgres/dbt/adapters/postgres/relation.py
index 0f3296c1818..43c8c724a74 100644
--- a/plugins/postgres/dbt/adapters/postgres/relation.py
+++ b/plugins/postgres/dbt/adapters/postgres/relation.py
@@ -1,7 +1,7 @@
 from dbt.adapters.base import Column
 from dataclasses import dataclass
 from dbt.adapters.base.relation import BaseRelation
-from dbt.exceptions import RuntimeException
+from dbt.exceptions import DbtRuntimeError
 @dataclass(frozen=True, eq=False, repr=False)
@@ -14,7 +14,7 @@ def __post_init__(self):
         and self.type is not None
         and len(self.identifier) > self.relation_max_name_length()
     ):
-        raise RuntimeException(
+        raise DbtRuntimeError(
             f"Relation name '{self.identifier}' "
             f"is longer than {self.relation_max_name_length()} characters"
         )
diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py
index 00a91759aec..ade5f95121b 100644
--- a/plugins/postgres/setup.py
+++ b/plugins/postgres/setup.py
@@ -41,7 +41,7 @@ def _dbt_psycopg2_name():
 package_name = "dbt-postgres"
-package_version = "1.4.0b1"
+package_version = "1.5.0a1"
 description = """The postgres adapter plugin for dbt (data build tool)"""
 this_directory = os.path.abspath(os.path.dirname(__file__))
diff --git a/pyproject.toml b/pyproject.toml
index 4d9d26d4ff5..bcf52f2414c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,6 +6,6 @@ namespace_packages = true
 [tool.black]
 # TODO: remove global exclusion of tests when testing overhaul is complete
-force-exclude = 'test'
+force-exclude = 'test/'
 line-length = 99
 target-version = ['py38']
diff
--git a/test/integration/018_adapter_ddl_tests/models/materialized.sql b/test/integration/018_adapter_ddl_tests/models/materialized.sql deleted file mode 100644 index edd9c8e04bf..00000000000 --- a/test/integration/018_adapter_ddl_tests/models/materialized.sql +++ /dev/null @@ -1,9 +0,0 @@ -{{ - config( - materialized = "table", - sort = 'first_name', - dist = 'first_name' - ) -}} - -select * from {{ this.schema }}.seed diff --git a/test/integration/018_adapter_ddl_tests/seed.sql b/test/integration/018_adapter_ddl_tests/seed.sql deleted file mode 100644 index 695cfbeffdf..00000000000 --- a/test/integration/018_adapter_ddl_tests/seed.sql +++ /dev/null @@ -1,110 +0,0 @@ -create table {schema}.seed ( - id BIGSERIAL PRIMARY KEY, - first_name VARCHAR(50), - last_name VARCHAR(50), - email VARCHAR(50), - gender VARCHAR(50), - ip_address VARCHAR(20) -); - - -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jack', 'Hunter', 'jhunter0@pbs.org', 'Male', '59.80.20.168'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kathryn', 'Walker', 'kwalker1@ezinearticles.com', 'Female', '194.121.179.35'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Ryan', 'gryan2@com.com', 'Male', '11.3.212.243'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Bonnie', 'Spencer', 'bspencer3@ameblo.jp', 'Female', '216.32.196.175'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Harold', 'Taylor', 'htaylor4@people.com.cn', 'Male', '253.10.246.136'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jacqueline', 'Griffin', 'jgriffin5@t.co', 'Female', '16.13.192.220'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Wanda', 'Arnold', 'warnold6@google.nl', 'Female', '232.116.150.64'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Craig', 'Ortiz', 'cortiz7@sciencedaily.com', 'Male', '199.126.106.13'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gary', 'Day', 'gday8@nih.gov', 'Male', '35.81.68.186'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Rose', 'Wright', 'rwright9@yahoo.co.jp', 'Female', '236.82.178.100'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Raymond', 'Kelley', 'rkelleya@fc2.com', 'Male', '213.65.166.67'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Robinson', 'grobinsonb@disqus.com', 'Male', '72.232.194.193'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Mildred', 'Martinez', 'mmartinezc@samsung.com', 'Female', '198.29.112.5'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Dennis', 'Arnold', 'darnoldd@google.com', 'Male', '86.96.3.250'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Judy', 'Gray', 'jgraye@opensource.org', 'Female', '79.218.162.245'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Theresa', 'Garza', 'tgarzaf@epa.gov', 'Female', '21.59.100.54'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Robertson', 'grobertsong@csmonitor.com', 'Male', '131.134.82.96'); -insert into {schema}.seed (first_name, 
last_name, email, gender, ip_address) values ('Philip', 'Hernandez', 'phernandezh@adobe.com', 'Male', '254.196.137.72'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Julia', 'Gonzalez', 'jgonzalezi@cam.ac.uk', 'Female', '84.240.227.174'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrew', 'Davis', 'adavisj@patch.com', 'Male', '9.255.67.25'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kimberly', 'Harper', 'kharperk@foxnews.com', 'Female', '198.208.120.253'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Mark', 'Martin', 'mmartinl@marketwatch.com', 'Male', '233.138.182.153'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Cynthia', 'Ruiz', 'cruizm@google.fr', 'Female', '18.178.187.201'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Samuel', 'Carroll', 'scarrolln@youtu.be', 'Male', '128.113.96.122'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jennifer', 'Larson', 'jlarsono@vinaora.com', 'Female', '98.234.85.95'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ashley', 'Perry', 'aperryp@rakuten.co.jp', 'Female', '247.173.114.52'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Howard', 'Rodriguez', 'hrodriguezq@shutterfly.com', 'Male', '231.188.95.26'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Amy', 'Brooks', 'abrooksr@theatlantic.com', 'Female', '141.199.174.118'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Louise', 'Warren', 'lwarrens@adobe.com', 'Female', '96.105.158.28'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Tina', 'Watson', 'twatsont@myspace.com', 'Female', '251.142.118.177'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Janice', 'Kelley', 'jkelleyu@creativecommons.org', 'Female', '239.167.34.233'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Terry', 'Mccoy', 'tmccoyv@bravesites.com', 'Male', '117.201.183.203'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jeffrey', 'Morgan', 'jmorganw@surveymonkey.com', 'Male', '78.101.78.149'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Louis', 'Harvey', 'lharveyx@sina.com.cn', 'Male', '51.50.0.167'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Philip', 'Miller', 'pmillery@samsung.com', 'Male', '103.255.222.110'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Willie', 'Marshall', 'wmarshallz@ow.ly', 'Male', '149.219.91.68'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Patrick', 'Lopez', 'plopez10@redcross.org', 'Male', '250.136.229.89'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Adam', 'Jenkins', 'ajenkins11@harvard.edu', 'Male', '7.36.112.81'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Benjamin', 'Cruz', 'bcruz12@linkedin.com', 'Male', '32.38.98.15'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ruby', 'Hawkins', 
'rhawkins13@gmpg.org', 'Female', '135.171.129.255'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carlos', 'Barnes', 'cbarnes14@a8.net', 'Male', '240.197.85.140'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ruby', 'Griffin', 'rgriffin15@bravesites.com', 'Female', '19.29.135.24'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Sean', 'Mason', 'smason16@icq.com', 'Male', '159.219.155.249'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Anthony', 'Payne', 'apayne17@utexas.edu', 'Male', '235.168.199.218'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Steve', 'Cruz', 'scruz18@pcworld.com', 'Male', '238.201.81.198'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Anthony', 'Garcia', 'agarcia19@flavors.me', 'Male', '25.85.10.18'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Doris', 'Lopez', 'dlopez1a@sphinn.com', 'Female', '245.218.51.238'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Susan', 'Nichols', 'snichols1b@freewebs.com', 'Female', '199.99.9.61'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Wanda', 'Ferguson', 'wferguson1c@yahoo.co.jp', 'Female', '236.241.135.21'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrea', 'Pierce', 'apierce1d@google.co.uk', 'Female', '132.40.10.209'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Lawrence', 'Phillips', 'lphillips1e@jugem.jp', 'Male', '72.226.82.87'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Judy', 'Gilbert', 'jgilbert1f@multiply.com', 'Female', '196.250.15.142'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Eric', 'Williams', 'ewilliams1g@joomla.org', 'Male', '222.202.73.126'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ralph', 'Romero', 'rromero1h@sogou.com', 'Male', '123.184.125.212'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jean', 'Wilson', 'jwilson1i@ocn.ne.jp', 'Female', '176.106.32.194'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Lori', 'Reynolds', 'lreynolds1j@illinois.edu', 'Female', '114.181.203.22'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Moreno', 'dmoreno1k@bbc.co.uk', 'Male', '233.249.97.60'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Steven', 'Berry', 'sberry1l@eepurl.com', 'Male', '186.193.50.50'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Theresa', 'Shaw', 'tshaw1m@people.com.cn', 'Female', '120.37.71.222'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('John', 'Stephens', 'jstephens1n@nationalgeographic.com', 'Male', '191.87.127.115'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Richard', 'Jacobs', 'rjacobs1o@state.tx.us', 'Male', '66.210.83.155'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrew', 'Lawson', 'alawson1p@over-blog.com', 'Male', '54.98.36.94'); -insert into {schema}.seed 
(first_name, last_name, email, gender, ip_address) values ('Peter', 'Morgan', 'pmorgan1q@rambler.ru', 'Male', '14.77.29.106'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Nicole', 'Garrett', 'ngarrett1r@zimbio.com', 'Female', '21.127.74.68'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Joshua', 'Kim', 'jkim1s@edublogs.org', 'Male', '57.255.207.41'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ralph', 'Roberts', 'rroberts1t@people.com.cn', 'Male', '222.143.131.109'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('George', 'Montgomery', 'gmontgomery1u@smugmug.com', 'Male', '76.75.111.77'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Alvarez', 'galvarez1v@flavors.me', 'Male', '58.157.186.194'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Olson', 'dolson1w@whitehouse.gov', 'Male', '69.65.74.135'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carlos', 'Morgan', 'cmorgan1x@pbs.org', 'Male', '96.20.140.87'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Aaron', 'Stanley', 'astanley1y@webnode.com', 'Male', '163.119.217.44'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Virginia', 'Long', 'vlong1z@spiegel.de', 'Female', '204.150.194.182'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Robert', 'Berry', 'rberry20@tripadvisor.com', 'Male', '104.19.48.241'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Antonio', 'Brooks', 'abrooks21@unesco.org', 'Male', '210.31.7.24'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ruby', 'Garcia', 'rgarcia22@ovh.net', 'Female', '233.218.162.214'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jack', 'Hanson', 'jhanson23@blogtalkradio.com', 'Male', '31.55.46.199'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kathryn', 'Nelson', 'knelson24@walmart.com', 'Female', '14.189.146.41'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jason', 'Reed', 'jreed25@printfriendly.com', 'Male', '141.189.89.255'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('George', 'Coleman', 'gcoleman26@people.com.cn', 'Male', '81.189.221.144'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Rose', 'King', 'rking27@ucoz.com', 'Female', '212.123.168.231'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Johnny', 'Holmes', 'jholmes28@boston.com', 'Male', '177.3.93.188'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Katherine', 'Gilbert', 'kgilbert29@altervista.org', 'Female', '199.215.169.61'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Joshua', 'Thomas', 'jthomas2a@ustream.tv', 'Male', '0.8.205.30'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Julie', 'Perry', 'jperry2b@opensource.org', 'Female', '60.116.114.192'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Richard', 'Perry', 
'rperry2c@oracle.com', 'Male', '181.125.70.232'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kenneth', 'Ruiz', 'kruiz2d@wikimedia.org', 'Male', '189.105.137.109'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jose', 'Morgan', 'jmorgan2e@webnode.com', 'Male', '101.134.215.156'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Campbell', 'dcampbell2f@goo.ne.jp', 'Male', '102.120.215.84'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Debra', 'Collins', 'dcollins2g@uol.com.br', 'Female', '90.13.153.235'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jesse', 'Johnson', 'jjohnson2h@stumbleupon.com', 'Male', '225.178.125.53'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Elizabeth', 'Stone', 'estone2i@histats.com', 'Female', '123.184.126.221'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Angela', 'Rogers', 'arogers2j@goodreads.com', 'Female', '98.104.132.187'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Emily', 'Dixon', 'edixon2k@mlb.com', 'Female', '39.190.75.57'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Albert', 'Scott', 'ascott2l@tinypic.com', 'Male', '40.209.13.189'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Barbara', 'Peterson', 'bpeterson2m@ow.ly', 'Female', '75.249.136.180'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Adam', 'Greene', 'agreene2n@fastcompany.com', 'Male', '184.173.109.144'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Earl', 'Sanders', 'esanders2o@hc360.com', 'Male', '247.34.90.117'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Angela', 'Brooks', 'abrooks2p@mtv.com', 'Female', '10.63.249.126'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Harold', 'Foster', 'hfoster2q@privacy.gov.au', 'Male', '139.214.40.244'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carl', 'Meyer', 'cmeyer2r@disqus.com', 'Male', '204.117.7.88'); diff --git a/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py b/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py deleted file mode 100644 index 99162efde67..00000000000 --- a/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py +++ /dev/null @@ -1,23 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - -class TestAdapterDDL(DBTIntegrationTest): - - def setUp(self): - DBTIntegrationTest.setUp(self) - - self.run_sql_file("seed.sql") - - @property - def schema(self): - return "adaper_ddl_018" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_sort_and_dist_keys_are_nops_on_postgres(self): - results = self.run_dbt(['run']) - self.assertEqual(len(results), 1) - - self.assertTablesEqual("seed","materialized") diff --git a/test/integration/022_timezones_tests/models/timezones.sql b/test/integration/022_timezones_tests/models/timezones.sql deleted file mode 100644 index 87d565487e1..00000000000 --- a/test/integration/022_timezones_tests/models/timezones.sql +++ /dev/null @@ -1,10 +0,0 @@ - -{{ - config( - materialized='table' - ) -}} - 
-select - '{{ run_started_at.astimezone(modules.pytz.timezone("America/New_York")) }}' as run_started_at_est, - '{{ run_started_at }}' as run_started_at_utc diff --git a/test/integration/022_timezones_tests/test_timezones.py b/test/integration/022_timezones_tests/test_timezones.py deleted file mode 100644 index 993f9dcb83f..00000000000 --- a/test/integration/022_timezones_tests/test_timezones.py +++ /dev/null @@ -1,52 +0,0 @@ -from freezegun import freeze_time -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestTimezones(DBTIntegrationTest): - - @property - def schema(self): - return "timezones_022" - - @property - def models(self): - return "models" - - @property - def profile_config(self): - return { - 'test': { - 'outputs': { - 'dev': { - 'type': 'postgres', - 'threads': 1, - 'host': self.database_host, - 'port': 5432, - 'user': "root", - 'pass': "password", - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - }, - 'target': 'dev' - } - } - - @property - def query(self): - return """ - select - run_started_at_est, - run_started_at_utc - from {schema}.timezones - """.format(schema=self.unique_schema()) - - @freeze_time("2017-01-01 03:00:00", tz_offset=0) - @use_profile('postgres') - def test_postgres_run_started_at(self): - results = self.run_dbt(['run']) - self.assertEqual(len(results), 1) - result = self.run_sql(self.query, fetch='all')[0] - est, utc = result - self.assertEqual(utc, '2017-01-01 03:00:00+00:00') - self.assertEqual(est, '2016-12-31 22:00:00-05:00') diff --git a/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql b/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql deleted file mode 100644 index 06dd3b0d29c..00000000000 --- a/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('does_not_exist') }} diff --git a/test/integration/033_event_tracking_tests/models/example.sql b/test/integration/033_event_tracking_tests/models/example.sql deleted file mode 100644 index 2cd691ea7b4..00000000000 --- a/test/integration/033_event_tracking_tests/models/example.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select 1 as id diff --git a/test/integration/033_event_tracking_tests/models/example_2.sql b/test/integration/033_event_tracking_tests/models/example_2.sql deleted file mode 100644 index 6e892d91c47..00000000000 --- a/test/integration/033_event_tracking_tests/models/example_2.sql +++ /dev/null @@ -1,4 +0,0 @@ - -select * from {{ ref('example') }} -union all -select * from {{ ref('example') }} diff --git a/test/integration/033_event_tracking_tests/models/model_error.sql b/test/integration/033_event_tracking_tests/models/model_error.sql deleted file mode 100644 index 45c65306faf..00000000000 --- a/test/integration/033_event_tracking_tests/models/model_error.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from a_thing_that_does_not_exist diff --git a/test/integration/033_event_tracking_tests/models/schema.yml b/test/integration/033_event_tracking_tests/models/schema.yml deleted file mode 100644 index 5ac3436dc22..00000000000 --- a/test/integration/033_event_tracking_tests/models/schema.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 2 -models: -- name: example - columns: - - name: id - tests: - - unique -- name: example_2 - columns: - - name: id - tests: - - unique diff --git a/test/integration/033_event_tracking_tests/models/snapshottable.sql b/test/integration/033_event_tracking_tests/models/snapshottable.sql deleted file mode 
100644 index 3c9a65a221a..00000000000 --- a/test/integration/033_event_tracking_tests/models/snapshottable.sql +++ /dev/null @@ -1,4 +0,0 @@ - -select - 1 as id, - '2018-07-15T00:00:00Z'::timestamp as updated_at diff --git a/test/integration/033_event_tracking_tests/seeds/example_seed.csv b/test/integration/033_event_tracking_tests/seeds/example_seed.csv deleted file mode 100644 index bfde6bfa0b8..00000000000 --- a/test/integration/033_event_tracking_tests/seeds/example_seed.csv +++ /dev/null @@ -1,2 +0,0 @@ -a,b,c -1,2,3 diff --git a/test/integration/033_event_tracking_tests/snapshots/a.sql b/test/integration/033_event_tracking_tests/snapshots/a.sql deleted file mode 100644 index dd90278e560..00000000000 --- a/test/integration/033_event_tracking_tests/snapshots/a.sql +++ /dev/null @@ -1,4 +0,0 @@ -{% snapshot snapshotted %} - {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}} - select * from {{ schema }}.snapshottable -{% endsnapshot %} diff --git a/test/integration/033_event_tracking_tests/test_events.py b/test/integration/033_event_tracking_tests/test_events.py deleted file mode 100644 index 1bcbbcec688..00000000000 --- a/test/integration/033_event_tracking_tests/test_events.py +++ /dev/null @@ -1,986 +0,0 @@ -# NOTE: turning off event tracking tests! [#3631](https://github.com/dbt-labs/dbt-core/issues/3631) -# from test.integration.base import DBTIntegrationTest, use_profile -# import hashlib -# import os - -# from unittest.mock import call, ANY, patch - -# import dbt.exceptions -# import dbt.version -# import dbt.tracking -# import dbt.utils - - -# # immutably creates a new array with the value inserted at the index -# def inserted(value, index, arr): -# x = [] -# for i in range(0, len(arr)): -# if i == index: -# x.append(value) -# x.append(arr[i]) -# else: -# x.append(arr[i]) -# return x - -# class TestEventTracking(DBTIntegrationTest): -# maxDiff = None - -# @property -# def profile_config(self): -# return { -# 'config': { -# 'send_anonymous_usage_stats': True -# } -# } - -# @property -# def schema(self): -# return "event_tracking_033" - -# @staticmethod -# def dir(path): -# return path.lstrip("/") - -# @property -# def models(self): -# return self.dir("models") - -# # TODO : Handle the subject. Should be the same every time! -# # TODO : Regex match a uuid for user_id, invocation_id? 
- -# @patch('dbt.tracking.tracker.track_struct_event') -# def run_event_test( -# self, -# cmd, -# expected_calls, -# expected_contexts, -# track_fn, -# expect_pass=True, -# expect_raise=False -# ): -# self.run_dbt(["deps"]) -# track_fn.reset_mock() - -# project_id = hashlib.md5( -# self.config.project_name.encode('utf-8')).hexdigest() -# version = str(dbt.version.get_installed_version()) - -# if expect_raise: -# with self.assertRaises(BaseException): -# self.run_dbt(cmd, expect_pass=expect_pass) -# else: -# self.run_dbt(cmd, expect_pass=expect_pass) - -# user_id = dbt.tracking.active_user.id -# invocation_id = dbt.tracking.active_user.invocation_id - -# self.assertTrue(len(user_id) > 0) -# self.assertTrue(len(invocation_id) > 0) - -# track_fn.assert_has_calls(expected_calls) - -# ordered_contexts = [] - -# for (args, kwargs) in track_fn.call_args_list: -# ordered_contexts.append( -# [context.__dict__ for context in kwargs['context']] -# ) - -# populated_contexts = [] - -# for context in expected_contexts: -# if callable(context): -# populated_contexts.append(context( -# project_id, user_id, invocation_id, version)) -# else: -# populated_contexts.append(context) - -# return ordered_contexts == populated_contexts - -# def load_context(self): - -# def populate(project_id, user_id, invocation_id, version): -# return [{ -# 'schema': 'iglu:com.dbt/load_all_timing/jsonschema/1-0-3', -# 'data': { -# 'invocation_id': invocation_id, -# 'project_id': project_id, -# 'parsed_path_count': ANY, -# 'path_count': ANY, -# 'is_partial_parse_enabled': ANY, -# 'is_static_analysis_enabled': ANY, -# 'static_analysis_path_count': ANY, -# 'static_analysis_parsed_path_count': ANY, -# 'load_all_elapsed': ANY, -# 'read_files_elapsed': ANY, -# 'load_macros_elapsed': ANY, -# 'parse_project_elapsed': ANY, -# 'patch_sources_elapsed': ANY, -# 'process_manifest_elapsed': ANY, -# }, -# }] -# return populate - -# def resource_counts_context(self): -# return [ -# { -# 'schema': 'iglu:com.dbt/resource_counts/jsonschema/1-0-0', -# 'data': { -# 'models': ANY, -# 'tests': ANY, -# 'snapshots': ANY, -# 'analyses': ANY, -# 'macros': ANY, -# 'operations': ANY, -# 'seeds': ANY, -# 'sources': ANY, -# 'exposures': ANY, -# } -# } -# ] - -# def build_context( -# self, -# command, -# progress, -# result_type=None, -# adapter_type='postgres' -# ): - -# def populate( -# project_id, -# user_id, -# invocation_id, -# version -# ): -# return [ -# { -# 'schema': 'iglu:com.dbt/invocation/jsonschema/1-0-1', -# 'data': { -# 'project_id': project_id, -# 'user_id': user_id, -# 'invocation_id': invocation_id, -# 'command': command, -# 'options': None, # TODO : Add options to compile cmd! 
-# 'version': version, - -# 'run_type': 'regular', -# 'adapter_type': adapter_type, -# 'progress': progress, - -# 'result_type': result_type, -# 'result': None, -# } -# }, -# { -# 'schema': 'iglu:com.dbt/platform/jsonschema/1-0-0', -# 'data': ANY -# }, -# { -# 'schema': 'iglu:com.dbt/invocation_env/jsonschema/1-0-0', -# 'data': ANY -# } -# ] - -# return populate - -# def run_context( -# self, -# materialization, -# hashed_contents, -# model_id, -# index, -# total, -# status, -# ): -# timing = [] -# error = False - -# if status != 'ERROR': -# timing = [ANY, ANY] -# else: -# error = True - -# def populate(project_id, user_id, invocation_id, version): -# return [{ -# 'schema': 'iglu:com.dbt/run_model/jsonschema/1-0-1', -# 'data': { -# 'invocation_id': invocation_id, - -# 'model_materialization': materialization, - -# 'execution_time': ANY, -# 'hashed_contents': hashed_contents, -# 'model_id': model_id, - -# 'index': index, -# 'total': total, - -# 'run_status': status, -# 'run_error': error, -# 'run_skipped': False, - -# 'timing': timing, -# }, -# }] - -# return populate - - -# class TestEventTrackingSuccess(TestEventTracking): -# @property -# def packages_config(self): -# return { -# 'packages': [ -# { -# 'git': 'https://github.com/dbt-labs/dbt-integration-project', -# 'revision': 'dbt/1.0.0', -# }, -# ], -# } - -# @property -# def project_config(self): -# return { -# 'config-version': 2, -# "seed-paths": [self.dir("data")], -# "test-paths": [self.dir("test")], -# 'seeds': { -# 'quote_columns': False, -# } -# } - -# @use_profile("postgres") -# def test__postgres_event_tracking_compile(self): -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('compile', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.build_context('compile', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["compile", "--vars", "sensitive_thing: abc"], -# expected_calls_A, -# expected_contexts -# ) - -# test_result_B = self.run_event_test( -# ["compile", "--vars", "sensitive_thing: abc"], -# expected_calls_B, -# expected_contexts -# ) - -# self.assertTrue(test_result_A or test_result_B) - -# @use_profile("postgres") -# def test__postgres_event_tracking_deps(self): -# package_context = [ -# { -# 'schema': 'iglu:com.dbt/invocation/jsonschema/1-0-1', -# 'data': { -# 'project_id': '098f6bcd4621d373cade4e832627b4f6', -# 'user_id': ANY, -# 'invocation_id': ANY, -# 'version': ANY, -# 'command': 'deps', -# 'run_type': 'regular', -# 'options': None, -# 'adapter_type': 'postgres' -# } -# }, -# { -# 'schema': 'iglu:com.dbt/package_install/jsonschema/1-0-0', -# 'data': { -# 'name': 'c5552991412d1cd86e5c20a87f3518d5', -# 'source': 'git', -# 'version': '6deb95629194572d44ca26c4bc25b573' -# } -# } -# ] - -# expected_calls = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='package', -# label=ANY, -# property_='install', -# context=ANY -# ), -# call( -# 
category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_contexts = [ -# self.build_context('deps', 'start'), -# package_context, -# self.build_context('deps', 'end', result_type='ok') -# ] - -# test_result = self.run_event_test(["deps"], expected_calls, expected_contexts) -# self.assertTrue(test_result) - -# @use_profile("postgres") -# def test__postgres_event_tracking_seed(self): -# def seed_context(project_id, user_id, invocation_id, version): -# return [{ -# 'schema': 'iglu:com.dbt/run_model/jsonschema/1-0-1', -# 'data': { -# 'invocation_id': invocation_id, - -# 'model_materialization': 'seed', - -# 'execution_time': ANY, -# 'hashed_contents': 'd41d8cd98f00b204e9800998ecf8427e', -# 'model_id': '39bc2cd707d99bd3e600d2faaafad7ae', - -# 'index': 1, -# 'total': 1, - -# 'run_status': 'SUCCESS', -# 'run_error': False, -# 'run_skipped': False, - -# 'timing': [ANY, ANY], -# }, -# }] - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('seed', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# seed_context, -# self.build_context('seed', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test(["seed"], expected_calls_A, expected_contexts) -# test_result_B = self.run_event_test(["seed"], expected_calls_B, expected_contexts) - -# self.assertTrue(test_result_A or test_result_B) - -# @use_profile("postgres") -# def test__postgres_event_tracking_models(self): -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# hashed = '20ff78afb16c8b3b8f83861b1d3b99bd' -# # this hashed contents field changes on azure postgres tests, I believe -# # due to newlines again -# if os.name == 'nt': -# hashed = '52cf9d1db8f0a18ca64ef64681399746' - -# expected_contexts = [ -# self.build_context('run', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.run_context( -# hashed_contents='1e5789d34cddfbd5da47d7713aa9191c', -# model_id='4fbacae0e1b69924b22964b457148fb8', -# index=1, -# total=2, -# status='SUCCESS', -# materialization='view' -# ), -# self.run_context( -# hashed_contents=hashed, -# model_id='57994a805249953b31b738b1af7a1eeb', -# index=2, -# total=2, -# status='SUCCESS', -# materialization='view' -# ), -# self.build_context('run', 
'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["run", "--model", "example", "example_2"], -# expected_calls_A, -# expected_contexts -# ) - -# test_result_B = self.run_event_test( -# ["run", "--model", "example", "example_2"], -# expected_calls_A, -# expected_contexts -# ) - -# self.assertTrue(test_result_A or test_result_B) - -# @use_profile("postgres") -# def test__postgres_event_tracking_model_error(self): -# # cmd = ["run", "--model", "model_error"] -# # self.run_event_test(cmd, event_run_model_error, expect_pass=False) - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('run', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.run_context( -# hashed_contents='4419e809ce0995d99026299e54266037', -# model_id='576c3d4489593f00fad42b97c278641e', -# index=1, -# total=1, -# status='ERROR', -# materialization='view' -# ), -# self.build_context('run', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["run", "--model", "model_error"], -# expected_calls_A, -# expected_contexts, -# expect_pass=False -# ) - -# test_result_B = self.run_event_test( -# ["run", "--model", "model_error"], -# expected_calls_B, -# expected_contexts, -# expect_pass=False -# ) - -# self.assertTrue(test_result_A or test_result_B) - -# @use_profile("postgres") -# def test__postgres_event_tracking_tests(self): -# # TODO: dbt does not track events for tests, but it should! 
-# self.run_dbt(["deps"]) -# self.run_dbt(["run", "--model", "example", "example_2"]) - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('test', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.build_context('test', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["test"], -# expected_calls_A, -# expected_contexts, -# expect_pass=False -# ) - -# test_result_B = self.run_event_test( -# ["test"], -# expected_calls_A, -# expected_contexts, -# expect_pass=False -# ) - -# self.assertTrue(test_result_A or test_result_B) - - -# class TestEventTrackingCompilationError(TestEventTracking): -# @property -# def project_config(self): -# return { -# 'config-version': 2, -# "model-paths": [self.dir("model-compilation-error")], -# } - -# @use_profile("postgres") -# def test__postgres_event_tracking_with_compilation_error(self): -# expected_calls = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_contexts = [ -# self.build_context('compile', 'start'), -# self.build_context('compile', 'end', result_type='error') -# ] - -# test_result = self.run_event_test( -# ["compile"], -# expected_calls, -# expected_contexts, -# expect_pass=False, -# expect_raise=True -# ) - -# self.assertTrue(test_result) - - -# class TestEventTrackingUnableToConnect(TestEventTracking): - -# @property -# def profile_config(self): -# return { -# 'config': { -# 'send_anonymous_usage_stats': True -# }, -# 'test': { -# 'outputs': { -# 'default2': { -# 'type': 'postgres', -# 'threads': 4, -# 'host': self.database_host, -# 'port': 5432, -# 'user': 'root', -# 'pass': 'password', -# 'dbname': 'dbt', -# 'schema': self.unique_schema() -# }, -# 'noaccess': { -# 'type': 'postgres', -# 'threads': 4, -# 'host': self.database_host, -# 'port': 5432, -# 'user': 'BAD', -# 'pass': 'bad_password', -# 'dbname': 'dbt', -# 'schema': self.unique_schema() -# } -# }, -# 'target': 'default2' -# } -# } - -# @use_profile("postgres") -# def test__postgres_event_tracking_unable_to_connect(self): -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('run', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.build_context('run', 'end', result_type='error') -# ] - -# test_result_A = self.run_event_test( -# ["run", "--target", "noaccess", "--models", "example"], -# 
expected_calls_A, -# expected_contexts, -# expect_pass=False -# ) - -# test_result_B = self.run_event_test( -# ["run", "--target", "noaccess", "--models", "example"], -# expected_calls_B, -# expected_contexts, -# expect_pass=False -# ) - -# self.assertTrue(test_result_A or test_result_B) - - -# class TestEventTrackingSnapshot(TestEventTracking): -# @property -# def project_config(self): -# return { -# 'config-version': 2, -# "snapshot-paths": ['snapshots'] -# } - -# @use_profile("postgres") -# def test__postgres_event_tracking_snapshot(self): -# self.run_dbt(["run", "--models", "snapshottable"]) - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# # the model here has a raw_code that contains the schema, which changes -# expected_contexts = [ -# self.build_context('snapshot', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.run_context( -# hashed_contents=ANY, -# model_id='820793a4def8d8a38d109a9709374849', -# index=1, -# total=1, -# status='SUCCESS', -# materialization='snapshot' -# ), -# self.build_context('snapshot', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["snapshot"], -# expected_calls_A, -# expected_contexts -# ) - -# test_result_B = self.run_event_test( -# ["snapshot"], -# expected_calls_B, -# expected_contexts -# ) - -# self.assertTrue(test_result_A or test_result_B) - - -# class TestEventTrackingCatalogGenerate(TestEventTracking): -# @use_profile("postgres") -# def test__postgres_event_tracking_catalog_generate(self): -# # create a model for the catalog -# self.run_dbt(["run", "--models", "example"]) - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY, -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY, -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('generate', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.build_context('generate', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["docs", "generate"], -# expected_calls_A, -# expected_contexts -# ) - -# test_result_B = self.run_event_test( -# ["docs", "generate"], -# expected_calls_B, -# expected_contexts -# ) - -# self.assertTrue(test_result_A or test_result_B) diff --git a/test/integration/035_docs_blocks_tests/test_docs_blocks.py b/test/integration/035_docs_blocks_tests/test_docs_blocks.py deleted file mode 100644 index dacddf394f9..00000000000 --- a/test/integration/035_docs_blocks_tests/test_docs_blocks.py +++ /dev/null @@ -1,184 +0,0 @@ -import json -import os - -from 
test.integration.base import DBTIntegrationTest, use_profile - -import dbt.exceptions - -class TestGoodDocsBlocks(DBTIntegrationTest): - @property - def schema(self): - return 'docs_blocks_035' - - @staticmethod - def dir(path): - return os.path.normpath(path) - - @property - def models(self): - return self.dir("models") - - @use_profile('postgres') - def test_postgres_valid_doc_ref(self): - self.assertEqual(len(self.run_dbt()), 1) - - self.assertTrue(os.path.exists('./target/manifest.json')) - - with open('./target/manifest.json') as fp: - manifest = json.load(fp) - - model_data = manifest['nodes']['model.test.model'] - self.assertEqual( - model_data['description'], - 'My model is just a copy of the seed' - ) - self.assertEqual( - { - 'name': 'id', - 'description': 'The user ID number', - 'data_type': None, - 'meta': {}, - 'quote': None, - 'tags': [], - }, - model_data['columns']['id'] - ) - self.assertEqual( - { - 'name': 'first_name', - 'description': "The user's first name", - 'data_type': None, - 'meta': {}, - 'quote': None, - 'tags': [], - }, - model_data['columns']['first_name'] - ) - - self.assertEqual( - { - 'name': 'last_name', - 'description': "The user's last name", - 'data_type': None, - 'meta': {}, - 'quote': None, - 'tags': [], - }, - model_data['columns']['last_name'] - ) - self.assertEqual(len(model_data['columns']), 3) - - @use_profile('postgres') - def test_postgres_alternative_docs_path(self): - self.use_default_project({"docs-paths": [self.dir("docs")]}) - self.assertEqual(len(self.run_dbt()), 1) - - self.assertTrue(os.path.exists('./target/manifest.json')) - - with open('./target/manifest.json') as fp: - manifest = json.load(fp) - - model_data = manifest['nodes']['model.test.model'] - self.assertEqual( - model_data['description'], - 'Alt text about the model' - ) - self.assertEqual( - { - 'name': 'id', - 'description': 'The user ID number with alternative text', - 'data_type': None, - 'meta': {}, - 'quote': None, - 'tags': [], - }, - model_data['columns']['id'] - ) - self.assertEqual( - { - 'name': 'first_name', - 'description': "The user's first name", - 'data_type': None, - 'meta': {}, - 'quote': None, - 'tags': [], - }, - model_data['columns']['first_name'] - ) - - self.assertEqual( - { - 'name': 'last_name', - 'description': "The user's last name in this other file", - 'data_type': None, - 'meta': {}, - 'quote': None, - 'tags': [], - }, - model_data['columns']['last_name'] - ) - self.assertEqual(len(model_data['columns']), 3) - - @use_profile('postgres') - def test_postgres_alternative_docs_path_missing(self): - self.use_default_project({"docs-paths": [self.dir("not-docs")]}) - with self.assertRaises(dbt.exceptions.CompilationException): - self.run_dbt() - - -class TestMissingDocsBlocks(DBTIntegrationTest): - @property - def schema(self): - return 'docs_blocks_035' - - @staticmethod - def dir(path): - return os.path.normpath(path) - - @property - def models(self): - return self.dir("missing_docs_models") - - @use_profile('postgres') - def test_postgres_missing_doc_ref(self): - # The run should fail since we could not find the docs reference. 
- with self.assertRaises(dbt.exceptions.CompilationException): - self.run_dbt() - - -class TestBadDocsBlocks(DBTIntegrationTest): - @property - def schema(self): - return 'docs_blocks_035' - - @staticmethod - def dir(path): - return os.path.normpath(path) - - @property - def models(self): - return self.dir("invalid_name_models") - - @use_profile('postgres') - def test_postgres_invalid_doc_ref(self): - # The run should fail since we could not find the docs reference. - with self.assertRaises(dbt.exceptions.CompilationException): - self.run_dbt(expect_pass=False) - -class TestDuplicateDocsBlock(DBTIntegrationTest): - @property - def schema(self): - return 'docs_blocks_035' - - @staticmethod - def dir(path): - return os.path.normpath(path) - - @property - def models(self): - return self.dir("duplicate_docs") - - @use_profile('postgres') - def test_postgres_duplicate_doc_ref(self): - with self.assertRaises(dbt.exceptions.CompilationException): - self.run_dbt(expect_pass=False) diff --git a/test/integration/037_external_reference_tests/models/my_model.sql b/test/integration/037_external_reference_tests/models/my_model.sql deleted file mode 100644 index 5d10e607ed7..00000000000 --- a/test/integration/037_external_reference_tests/models/my_model.sql +++ /dev/null @@ -1,7 +0,0 @@ -{{ - config( - materialized = "view" - ) -}} - -select * from "{{ this.schema + 'z' }}"."external" diff --git a/test/integration/037_external_reference_tests/standalone_models/my_model.sql b/test/integration/037_external_reference_tests/standalone_models/my_model.sql deleted file mode 100644 index 2cd691ea7b4..00000000000 --- a/test/integration/037_external_reference_tests/standalone_models/my_model.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select 1 as id diff --git a/test/integration/037_external_reference_tests/test_external_reference.py b/test/integration/037_external_reference_tests/test_external_reference.py deleted file mode 100644 index d5a7e129e3a..00000000000 --- a/test/integration/037_external_reference_tests/test_external_reference.py +++ /dev/null @@ -1,78 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - -class TestExternalReference(DBTIntegrationTest): - @property - def schema(self): - return "external_reference_037" - - @property - def models(self): - return "models" - - def setUp(self): - super().setUp() - self.use_default_project() - self.external_schema = self.unique_schema()+'z' - self.run_sql( - 'create schema "{}"'.format(self.external_schema) - ) - self.run_sql( - 'create table "{}"."external" (id integer)' - .format(self.external_schema) - ) - self.run_sql( - 'insert into "{}"."external" values (1), (2)' - .format(self.external_schema) - ) - - def tearDown(self): - # This has to happen before we drop the external schema, because - # otherwise postgres hangs forever. 
- self._drop_schemas() - with self.get_connection(): - self._drop_schema_named(self.default_database, self.external_schema) - super().tearDown() - - @use_profile('postgres') - def test__postgres__external_reference(self): - self.assertEqual(len(self.run_dbt()), 1) - # running it again should succeed - self.assertEqual(len(self.run_dbt()), 1) - - -# The opposite of the test above -- check that external relations that -# depend on a dbt model do not create issues with caching -class TestExternalDependency(DBTIntegrationTest): - @property - def schema(self): - return "external_dependency_037" - - @property - def models(self): - return "standalone_models" - - def tearDown(self): - # This has to happen before we drop the external schema, because - # otherwise postgres hangs forever. - self._drop_schemas() - with self.get_connection(): - self._drop_schema_named(self.default_database, self.external_schema) - super().tearDown() - - @use_profile('postgres') - def test__postgres__external_reference(self): - self.assertEqual(len(self.run_dbt()), 1) - - # create a view outside of the dbt schema that depends on this model - self.external_schema = self.unique_schema()+'zz' - self.run_sql( - 'create schema "{}"'.format(self.external_schema) - ) - self.run_sql( - 'create view "{}"."external" as (select * from {}.my_model)' - .format(self.external_schema, self.unique_schema()) - ) - - # running it again should succeed - self.assertEqual(len(self.run_dbt()), 1) - diff --git a/test/integration/038_caching_tests/test_caching.py b/test/integration/038_caching_tests/test_caching.py deleted file mode 100644 index 1967e912628..00000000000 --- a/test/integration/038_caching_tests/test_caching.py +++ /dev/null @@ -1,67 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -from dbt.adapters.factory import FACTORY - -class TestBaseCaching(DBTIntegrationTest): - @property - def schema(self): - return "caching_038" - - @property - def project_config(self): - return { - 'config-version': 2, - 'quoting': { - 'identifier': False, - 'schema': False, - } - } - - def run_and_get_adapter(self): - # we want to inspect the adapter that dbt used for the run, which is - # not self.adapter. You can't do this until after you've run dbt once. 
- self.run_dbt(['run']) - return FACTORY.adapters[self.adapter_type] - - def cache_run(self): - adapter = self.run_and_get_adapter() - self.assertEqual(len(adapter.cache.relations), 1) - relation = next(iter(adapter.cache.relations.values())) - self.assertEqual(relation.inner.schema, self.unique_schema()) - self.assertEqual(relation.schema, self.unique_schema().lower()) - - self.run_dbt(['run']) - self.assertEqual(len(adapter.cache.relations), 1) - second_relation = next(iter(adapter.cache.relations.values())) - self.assertEqual(relation, second_relation) - -class TestCachingLowercaseModel(TestBaseCaching): - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_cache(self): - self.cache_run() - -class TestCachingUppercaseModel(TestBaseCaching): - @property - def models(self): - return "shouting_models" - - @use_profile('postgres') - def test_postgres_cache(self): - self.cache_run() - -class TestCachingSelectedSchemaOnly(TestBaseCaching): - @property - def models(self): - return "models_multi_schemas" - - def run_and_get_adapter(self): - # select only the 'model' in the default schema - self.run_dbt(['--cache-selected-only', 'run', '--select', 'model']) - return FACTORY.adapters[self.adapter_type] - - @use_profile('postgres') - def test_postgres_cache(self): - self.cache_run() diff --git a/test/integration/043_custom_aliases_tests/macros-configs/macros.sql b/test/integration/043_custom_aliases_tests/macros-configs/macros.sql deleted file mode 100644 index a50044ea09f..00000000000 --- a/test/integration/043_custom_aliases_tests/macros-configs/macros.sql +++ /dev/null @@ -1,17 +0,0 @@ - -{#-- Verify that the config['alias'] key is present #} -{% macro generate_alias_name(custom_alias_name, node) -%} - {%- if custom_alias_name is none -%} - {{ node.name }} - {%- else -%} - custom_{{ node.config['alias'] if 'alias' in node.config else '' | trim }} - {%- endif -%} -{%- endmacro %} - -{% macro string_literal(s) -%} - {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }} -{%- endmacro %} - -{% macro default__string_literal(s) %} - '{{ s }}'::text -{% endmacro %} diff --git a/test/integration/043_custom_aliases_tests/macros/macros.sql b/test/integration/043_custom_aliases_tests/macros/macros.sql deleted file mode 100644 index a29f223b075..00000000000 --- a/test/integration/043_custom_aliases_tests/macros/macros.sql +++ /dev/null @@ -1,17 +0,0 @@ - -{% macro generate_alias_name(custom_alias_name, node) -%} - {%- if custom_alias_name is none -%} - {{ node.name }} - {%- else -%} - custom_{{ custom_alias_name | trim }} - {%- endif -%} -{%- endmacro %} - - -{% macro string_literal(s) -%} - {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }} -{%- endmacro %} - -{% macro default__string_literal(s) %} - '{{ s }}'::text -{% endmacro %} diff --git a/test/integration/043_custom_aliases_tests/models/model1.sql b/test/integration/043_custom_aliases_tests/models/model1.sql deleted file mode 100644 index 000ce2ed6c5..00000000000 --- a/test/integration/043_custom_aliases_tests/models/model1.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table', alias='alias') }} - -select {{ string_literal(this.name) }} as model_name diff --git a/test/integration/043_custom_aliases_tests/models/model2.sql b/test/integration/043_custom_aliases_tests/models/model2.sql deleted file mode 100644 index a2de8f099ea..00000000000 --- a/test/integration/043_custom_aliases_tests/models/model2.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ 
config(materialized='table') }} - -select {{ string_literal(this.name) }} as model_name diff --git a/test/integration/043_custom_aliases_tests/models/schema.yml b/test/integration/043_custom_aliases_tests/models/schema.yml deleted file mode 100644 index 4d43836e482..00000000000 --- a/test/integration/043_custom_aliases_tests/models/schema.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: 2 - -models: - - name: model1 - columns: - - name: model_name - tests: - - accepted_values: - values: ['custom_alias'] - - name: model2 - columns: - - name: model_name - tests: - - accepted_values: - values: ['model2'] diff --git a/test/integration/043_custom_aliases_tests/test_custom_aliases.py b/test/integration/043_custom_aliases_tests/test_custom_aliases.py deleted file mode 100644 index 1acc9dd5224..00000000000 --- a/test/integration/043_custom_aliases_tests/test_custom_aliases.py +++ /dev/null @@ -1,39 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestAliases(DBTIntegrationTest): - @property - def schema(self): - return "custom_aliases_043" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "macro-paths": ['macros'], - } - - @use_profile('postgres') - def test_postgres_customer_alias_name(self): - results = self.run_dbt(['run']) - self.assertEqual(len(results), 2) - self.run_dbt(['test']) - - -class TestAliasesWithConfig(TestAliases): - @property - def project_config(self): - return { - 'config-version': 2, - "macro-paths": ['macros-configs'], - } - - @use_profile('postgres') - def test_postgres_customer_alias_name(self): - results = self.run_dbt(['run']) - self.assertEqual(len(results), 2) - self.run_dbt(['test']) diff --git a/test/integration/055_ref_override_tests/macros/ref_override_macro.sql b/test/integration/055_ref_override_tests/macros/ref_override_macro.sql deleted file mode 100644 index a4a85b50324..00000000000 --- a/test/integration/055_ref_override_tests/macros/ref_override_macro.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Macro to override ref and always return the same result -{% macro ref(modelname) %} -{% do return(builtins.ref(modelname).replace_path(identifier='seed_2')) %} -{% endmacro %} \ No newline at end of file diff --git a/test/integration/055_ref_override_tests/models/ref_override.sql b/test/integration/055_ref_override_tests/models/ref_override.sql deleted file mode 100644 index 3bbf936ae2e..00000000000 --- a/test/integration/055_ref_override_tests/models/ref_override.sql +++ /dev/null @@ -1,3 +0,0 @@ -select - * -from {{ ref('seed_1') }} \ No newline at end of file diff --git a/test/integration/055_ref_override_tests/seeds/seed_1.csv b/test/integration/055_ref_override_tests/seeds/seed_1.csv deleted file mode 100644 index 4de2771bdac..00000000000 --- a/test/integration/055_ref_override_tests/seeds/seed_1.csv +++ /dev/null @@ -1,4 +0,0 @@ -a,b -1,2 -2,4 -3,6 \ No newline at end of file diff --git a/test/integration/055_ref_override_tests/seeds/seed_2.csv b/test/integration/055_ref_override_tests/seeds/seed_2.csv deleted file mode 100644 index eeadef9495c..00000000000 --- a/test/integration/055_ref_override_tests/seeds/seed_2.csv +++ /dev/null @@ -1,4 +0,0 @@ -a,b -6,2 -12,4 -18,6 \ No newline at end of file diff --git a/test/integration/055_ref_override_tests/test_ref_override.py b/test/integration/055_ref_override_tests/test_ref_override.py deleted file mode 100644 index 748379b447c..00000000000 --- a/test/integration/055_ref_override_tests/test_ref_override.py +++ 
/dev/null @@ -1,30 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestRefOverride(DBTIntegrationTest): - @property - def schema(self): - return "dbt_ref_override_055" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds'], - "macro-paths": ["macros"], - 'seeds': { - 'quote_columns': False, - }, - } - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_ref_override(self): - self.run_dbt(['seed']) - self.run_dbt(['run']) - # We want it to equal seed_2 and not seed_1. If it's - # still pointing at seed_1 then the override hasn't worked. - self.assertTablesEqual('ref_override', 'seed_2') diff --git a/test/integration/057_run_query_tests/test_pg_types.py b/test/integration/057_run_query_tests/test_pg_types.py deleted file mode 100644 index d6553bb9e8e..00000000000 --- a/test/integration/057_run_query_tests/test_pg_types.py +++ /dev/null @@ -1,25 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile -import json - -class TestPostgresTypes(DBTIntegrationTest): - - @property - def schema(self): - return "pg_query_types_057" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'macro-paths': ['macros'], - } - - @use_profile('postgres') - def test__postgres_nested_types(self): - result = self.run_dbt(['run-operation', 'test_array_results']) - self.assertTrue(result.success) diff --git a/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql b/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql deleted file mode 100644 index 642b0f14a19..00000000000 --- a/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='table') }} -select 1 as id, 'Ed' as name diff --git a/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml b/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml deleted file mode 100644 index aa7b4f88820..00000000000 --- a/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -models: - - name: missing_column - columns: - - name: id - description: "test id column description" - - name: column_that_does_not_exist - description: "comment that cannot be created" diff --git a/test/integration/060_persist_docs_tests/models/my_fun_docs.md b/test/integration/060_persist_docs_tests/models/my_fun_docs.md deleted file mode 100644 index f3c0fbf55ec..00000000000 --- a/test/integration/060_persist_docs_tests/models/my_fun_docs.md +++ /dev/null @@ -1,10 +0,0 @@ -{% docs my_fun_doc %} -name Column description "with double quotes" -and with 'single quotes' as welll as other; -'''abc123''' -reserved -- characters --- -/* comment */ -Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting - -{% enddocs %} diff --git a/test/integration/060_persist_docs_tests/models/no_docs_model.sql b/test/integration/060_persist_docs_tests/models/no_docs_model.sql deleted file mode 100644 index e39a7a1566f..00000000000 --- a/test/integration/060_persist_docs_tests/models/no_docs_model.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as id, 'Alice' as name diff --git a/test/integration/060_persist_docs_tests/models/table_model.sql b/test/integration/060_persist_docs_tests/models/table_model.sql deleted file mode 100644 index c0e93c3f307..00000000000 --- 
a/test/integration/060_persist_docs_tests/models/table_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='table') }} -select 1 as id, 'Joe' as name diff --git a/test/integration/060_persist_docs_tests/models/view_model.sql b/test/integration/060_persist_docs_tests/models/view_model.sql deleted file mode 100644 index a6f96a16d5d..00000000000 --- a/test/integration/060_persist_docs_tests/models/view_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='view') }} -select 2 as id, 'Bob' as name diff --git a/test/integration/060_persist_docs_tests/seeds/seed.csv b/test/integration/060_persist_docs_tests/seeds/seed.csv deleted file mode 100644 index 1a728c8ab74..00000000000 --- a/test/integration/060_persist_docs_tests/seeds/seed.csv +++ /dev/null @@ -1,3 +0,0 @@ -id,name -1,Alice -2,Bob diff --git a/test/integration/060_persist_docs_tests/test_persist_docs.py b/test/integration/060_persist_docs_tests/test_persist_docs.py deleted file mode 100644 index 89fecf6383e..00000000000 --- a/test/integration/060_persist_docs_tests/test_persist_docs.py +++ /dev/null @@ -1,126 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os - -import json - - -class BasePersistDocsTest(DBTIntegrationTest): - @property - def schema(self): - return "persist_docs_060" - - @property - def models(self): - return "models" - - def _assert_common_comments(self, *comments): - for comment in comments: - assert '"with double quotes"' in comment - assert """'''abc123'''""" in comment - assert '\n' in comment - assert 'Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting' in comment - assert '/* comment */' in comment - if os.name == 'nt': - assert '--\r\n' in comment or '--\n' in comment - else: - assert '--\n' in comment - - def _assert_has_table_comments(self, table_node): - table_comment = table_node['metadata']['comment'] - assert table_comment.startswith('Table model description') - - table_id_comment = table_node['columns']['id']['comment'] - assert table_id_comment.startswith('id Column description') - - table_name_comment = table_node['columns']['name']['comment'] - assert table_name_comment.startswith( - 'Some stuff here and then a call to') - - self._assert_common_comments( - table_comment, table_id_comment, table_name_comment - ) - - def _assert_has_view_comments(self, view_node, has_node_comments=True, - has_column_comments=True): - view_comment = view_node['metadata']['comment'] - if has_node_comments: - assert view_comment.startswith('View model description') - self._assert_common_comments(view_comment) - else: - assert view_comment is None - - view_id_comment = view_node['columns']['id']['comment'] - if has_column_comments: - assert view_id_comment.startswith('id Column description') - self._assert_common_comments(view_id_comment) - else: - assert view_id_comment is None - - view_name_comment = view_node['columns']['name']['comment'] - assert view_name_comment is None - - -class TestPersistDocs(BasePersistDocsTest): - @property - def project_config(self): - return { - 'config-version': 2, - 'models': { - 'test': { - '+persist_docs': { - "relation": True, - "columns": True, - }, - } - } - } - - def run_has_comments_pglike(self): - self.run_dbt() - self.run_dbt(['docs', 'generate']) - with open('target/catalog.json') as fp: - catalog_data = json.load(fp) - assert 'nodes' in catalog_data - assert len(catalog_data['nodes']) == 3 - table_node = catalog_data['nodes']['model.test.table_model'] - view_node = self._assert_has_table_comments(table_node) - - 
view_node = catalog_data['nodes']['model.test.view_model'] - self._assert_has_view_comments(view_node) - - no_docs_node = catalog_data['nodes']['model.test.no_docs_model'] - self._assert_has_view_comments(no_docs_node, False, False) - - @use_profile('postgres') - def test_postgres_comments(self): - self.run_has_comments_pglike() - -class TestPersistDocsColumnMissing(BasePersistDocsTest): - @property - def project_config(self): - return { - 'config-version': 2, - 'models': { - 'test': { - '+persist_docs': { - "columns": True, - }, - } - } - } - - @property - def models(self): - return 'models-column-missing' - - @use_profile('postgres') - def test_postgres_missing_column(self): - self.run_dbt() - self.run_dbt(['docs', 'generate']) - with open('target/catalog.json') as fp: - catalog_data = json.load(fp) - assert 'nodes' in catalog_data - - table_node = catalog_data['nodes']['model.test.missing_column'] - table_id_comment = table_node['columns']['id']['comment'] - assert table_id_comment.startswith('test id column description') diff --git a/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql b/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql deleted file mode 100644 index 2f976e3a9b5..00000000000 --- a/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='ephemeral') }} -select * from {{ ref('view_model') }} diff --git a/test/integration/062_defer_state_tests/changed_models/schema.yml b/test/integration/062_defer_state_tests/changed_models/schema.yml deleted file mode 100644 index 1ec506d3d19..00000000000 --- a/test/integration/062_defer_state_tests/changed_models/schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 -models: - - name: view_model - columns: - - name: id - tests: - - unique - - not_null - - name: name diff --git a/test/integration/062_defer_state_tests/changed_models/table_model.sql b/test/integration/062_defer_state_tests/changed_models/table_model.sql deleted file mode 100644 index 65909318bab..00000000000 --- a/test/integration/062_defer_state_tests/changed_models/table_model.sql +++ /dev/null @@ -1,5 +0,0 @@ -{{ config(materialized='table') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} \ No newline at end of file diff --git a/test/integration/062_defer_state_tests/changed_models/view_model.sql b/test/integration/062_defer_state_tests/changed_models/view_model.sql deleted file mode 100644 index bddbbb23cc2..00000000000 --- a/test/integration/062_defer_state_tests/changed_models/view_model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from no.such.table diff --git a/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql b/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql deleted file mode 100644 index 5155dfa475e..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='ephemeral') }} -select * from no.such.table diff --git a/test/integration/062_defer_state_tests/changed_models_bad/schema.yml b/test/integration/062_defer_state_tests/changed_models_bad/schema.yml deleted file mode 100644 index 1ec506d3d19..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_bad/schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 -models: - - name: view_model - columns: - - name: id - tests: - - unique - - not_null 
- - name: name diff --git a/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql b/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql deleted file mode 100644 index 65909318bab..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql +++ /dev/null @@ -1,5 +0,0 @@ -{{ config(materialized='table') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} \ No newline at end of file diff --git a/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql b/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql deleted file mode 100644 index bddbbb23cc2..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from no.such.table diff --git a/test/integration/062_defer_state_tests/changed_models_missing/schema.yml b/test/integration/062_defer_state_tests/changed_models_missing/schema.yml deleted file mode 100644 index 1ec506d3d19..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_missing/schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 -models: - - name: view_model - columns: - - name: id - tests: - - unique - - not_null - - name: name diff --git a/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql b/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql deleted file mode 100644 index 22b040d2c8b..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='table') }} -select 1 as fun diff --git a/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql b/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql deleted file mode 100644 index 4b91aa0f2fa..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('seed') }} diff --git a/test/integration/062_defer_state_tests/macros/infinite_macros.sql b/test/integration/062_defer_state_tests/macros/infinite_macros.sql deleted file mode 100644 index 81d2083d3bb..00000000000 --- a/test/integration/062_defer_state_tests/macros/infinite_macros.sql +++ /dev/null @@ -1,13 +0,0 @@ -{# trigger infinite recursion if not handled #} - -{% macro my_infinitely_recursive_macro() %} - {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }} -{% endmacro %} - -{% macro default__my_infinitely_recursive_macro() %} - {% if unmet_condition %} - {{ my_infinitely_recursive_macro() }} - {% else %} - {{ return('') }} - {% endif %} -{% endmacro %} diff --git a/test/integration/062_defer_state_tests/macros/macros.sql b/test/integration/062_defer_state_tests/macros/macros.sql deleted file mode 100644 index 79519c1b60b..00000000000 --- a/test/integration/062_defer_state_tests/macros/macros.sql +++ /dev/null @@ -1,3 +0,0 @@ -{% macro my_macro() %} - {% do log('in a macro' ) %} -{% endmacro %} diff --git a/test/integration/062_defer_state_tests/models/ephemeral_model.sql b/test/integration/062_defer_state_tests/models/ephemeral_model.sql deleted file mode 100644 index 2f976e3a9b5..00000000000 --- a/test/integration/062_defer_state_tests/models/ephemeral_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='ephemeral') }} -select * from {{ ref('view_model') }} diff --git 
a/test/integration/062_defer_state_tests/models/exposures.yml b/test/integration/062_defer_state_tests/models/exposures.yml deleted file mode 100644 index 489dec3c3c4..00000000000 --- a/test/integration/062_defer_state_tests/models/exposures.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -exposures: - - name: my_exposure - type: application - depends_on: - - ref('view_model') - owner: - email: test@example.com diff --git a/test/integration/062_defer_state_tests/models/schema.yml b/test/integration/062_defer_state_tests/models/schema.yml deleted file mode 100644 index 342335148bf..00000000000 --- a/test/integration/062_defer_state_tests/models/schema.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 -models: - - name: view_model - columns: - - name: id - tests: - - unique: - severity: error - - not_null - - name: name diff --git a/test/integration/062_defer_state_tests/models/table_model.sql b/test/integration/062_defer_state_tests/models/table_model.sql deleted file mode 100644 index 65909318bab..00000000000 --- a/test/integration/062_defer_state_tests/models/table_model.sql +++ /dev/null @@ -1,5 +0,0 @@ -{{ config(materialized='table') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} \ No newline at end of file diff --git a/test/integration/062_defer_state_tests/models/view_model.sql b/test/integration/062_defer_state_tests/models/view_model.sql deleted file mode 100644 index 72cb07a5ef4..00000000000 --- a/test/integration/062_defer_state_tests/models/view_model.sql +++ /dev/null @@ -1,4 +0,0 @@ -select * from {{ ref('seed') }} - --- establish a macro dependency that trips infinite recursion if not handled --- depends on: {{ my_infinitely_recursive_macro() }} \ No newline at end of file diff --git a/test/integration/062_defer_state_tests/previous_state/manifest.json b/test/integration/062_defer_state_tests/previous_state/manifest.json deleted file mode 100644 index 6ab63f3f563..00000000000 --- a/test/integration/062_defer_state_tests/previous_state/manifest.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "metadata": { - "dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v3.json", - "dbt_version": "0.21.1" - } -} diff --git a/test/integration/062_defer_state_tests/seeds/seed.csv b/test/integration/062_defer_state_tests/seeds/seed.csv deleted file mode 100644 index 1a728c8ab74..00000000000 --- a/test/integration/062_defer_state_tests/seeds/seed.csv +++ /dev/null @@ -1,3 +0,0 @@ -id,name -1,Alice -2,Bob diff --git a/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql b/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql deleted file mode 100644 index 6a7d2b31bfa..00000000000 --- a/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql +++ /dev/null @@ -1,14 +0,0 @@ -{% snapshot my_cool_snapshot %} - - {{ - config( - target_database=database, - target_schema=schema, - unique_key='id', - strategy='check', - check_cols=['id'], - ) - }} - select * from {{ ref('view_model') }} - -{% endsnapshot %} diff --git a/test/integration/062_defer_state_tests/test_defer_state.py b/test/integration/062_defer_state_tests/test_defer_state.py deleted file mode 100644 index 058e43ef05f..00000000000 --- a/test/integration/062_defer_state_tests/test_defer_state.py +++ /dev/null @@ -1,344 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import copy -import json -import os -import shutil - -import pytest -import dbt.exceptions - - -class 
TestDeferState(DBTIntegrationTest): - @property - def schema(self): - return "defer_state_062" - - @property - def models(self): - return "models" - - def setUp(self): - self.other_schema = None - super().setUp() - self._created_schemas.add(self.other_schema) - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'test': { - 'quote_columns': False, - } - } - } - - def get_profile(self, adapter_type): - if self.other_schema is None: - self.other_schema = self.unique_schema() + '_other' - profile = super().get_profile(adapter_type) - default_name = profile['test']['target'] - profile['test']['outputs']['otherschema'] = copy.deepcopy(profile['test']['outputs'][default_name]) - profile['test']['outputs']['otherschema']['schema'] = self.other_schema - return profile - - def copy_state(self): - assert not os.path.exists('state') - os.makedirs('state') - shutil.copyfile('target/manifest.json', 'state/manifest.json') - - def run_and_compile_defer(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['run']) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['test']) - assert len(results) == 2 - - # copy files - self.copy_state() - - # defer test, it succeeds - results, success = self.run_dbt_and_check(['compile', '--state', 'state', '--defer']) - self.assertEqual(len(results.results), 6) - self.assertEqual(results.results[0].node.name, "seed") - self.assertTrue(success) - - def run_and_snapshot_defer(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['run']) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['test']) - assert len(results) == 2 - - # snapshot succeeds without --defer - results = self.run_dbt(['snapshot']) - - # no state, snapshot fails - with pytest.raises(dbt.exceptions.RuntimeException): - results = self.run_dbt(['snapshot', '--state', 'state', '--defer']) - - # copy files - self.copy_state() - - # defer test, it succeeds - results = self.run_dbt(['snapshot', '--state', 'state', '--defer']) - - # favor_state test, it succeeds - results = self.run_dbt(['snapshot', '--state', 'state', '--defer', '--favor-state']) - - def run_and_defer(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['run']) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['test']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - # test tests first, because run will change things - # no state, wrong schema, failure. 
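The deleted `TestDeferState` cases above all follow one shape: build in a primary target, snapshot `target/manifest.json` into a `state/` directory, then rerun against a second target with `--state` and `--defer` so unbuilt upstream refs resolve against the snapshot. A minimal standalone sketch of that loop (illustrative, not from this changeset; assumes the dbt CLI on PATH and a profile with a second target named `otherschema`, as in these tests):

```python
import shutil
import subprocess
from pathlib import Path

def dbt(*args: str) -> subprocess.CompletedProcess:
    # Thin wrapper; raises if the dbt invocation fails.
    return subprocess.run(["dbt", *args], check=True)

# Build everything in the primary target, then snapshot the manifest
# that --state comparisons and deferral will resolve against.
dbt("seed")
dbt("run")
Path("state").mkdir(exist_ok=True)
shutil.copyfile("target/manifest.json", "state/manifest.json")

# In the other target, defer unbuilt parents (e.g. the seed) to the
# snapshotted manifest instead of failing on missing relations.
# Adding --favor-state would prefer the state-side relation even when
# a local one exists, per the *_favor_state variants of these tests.
dbt("run", "--select", "view_model",
    "--state", "state", "--defer", "--target", "otherschema")
```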
- self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False) - - # no state, run also fails - self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False) - - # defer test, it succeeds - results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--target', 'otherschema']) - - # with state it should work though - results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema']) - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - with open('target/manifest.json') as fp: - data = json.load(fp) - assert data['nodes']['seed.test.seed']['deferred'] - - assert len(results) == 1 - - def run_and_defer_favor_state(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['run']) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['test']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - # test tests first, because run will change things - # no state, wrong schema, failure. - self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False) - - # no state, run also fails - self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False) - - # defer test, it succeeds - results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) - - # with state it should work though - results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - with open('target/manifest.json') as fp: - data = json.load(fp) - assert data['nodes']['seed.test.seed']['deferred'] - - assert len(results) == 1 - - def run_switchdirs_defer(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - self.use_default_project({'model-paths': ['changed_models']}) - # the sql here is just wrong, so it should fail - self.run_dbt( - ['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'], - expect_pass=False, - ) - # but this should work since we just use the old happy model - self.run_dbt( - ['run', '-m', 'table_model', '--state', 'state', '--defer', '--target', 'otherschema'], - expect_pass=True, - ) - - self.use_default_project({'model-paths': ['changed_models_bad']}) - # this should fail because the table model refs a broken ephemeral - # model, which it should see - self.run_dbt( - ['run', '-m', 'table_model', '--state', 'state', '--defer', '--target', 'otherschema'], - expect_pass=False, - ) - - def run_switchdirs_defer_favor_state(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - self.use_default_project({'model-paths': ['changed_models']}) - # the sql here is just wrong, so it should fail - self.run_dbt( - ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], - expect_pass=False, - ) - # but this 
should work since we just use the old happy model - self.run_dbt( - ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], - expect_pass=True, - ) - - self.use_default_project({'model-paths': ['changed_models_bad']}) - # this should fail because the table model refs a broken ephemeral - # model, which it should see - self.run_dbt( - ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], - expect_pass=False, - ) - - def run_defer_iff_not_exists(self): - results = self.run_dbt(['seed', '--target', 'otherschema']) - assert len(results) == 1 - results = self.run_dbt(['run', '--target', 'otherschema']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run', '--state', 'state', '--defer']) - assert len(results) == 2 - - # because the seed now exists in our schema, we shouldn't defer it - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - def run_defer_iff_not_exists_favor_state(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) - assert len(results) == 2 - - # because the seed exists in other schema, we should defer it - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - def run_defer_deleted_upstream(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - self.use_default_project({'model-paths': ['changed_models_missing']}) - # ephemeral_model is now gone. 
previously this caused a - # keyerror (dbt#2875), now it should pass - self.run_dbt( - ['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'], - expect_pass=True, - ) - - # despite deferral, test should use models just created in our schema - results = self.run_dbt(['test', '--state', 'state', '--defer']) - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - def run_defer_deleted_upstream_favor_state(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - self.use_default_project({'model-paths': ['changed_models_missing']}) - - self.run_dbt( - ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], - expect_pass=True, - ) - - # despite deferral, test should use models just created in our schema - results = self.run_dbt(['test', '--state', 'state', '--defer', '--favor-state']) - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - @use_profile('postgres') - def test_postgres_state_changetarget(self): - self.run_and_defer() - - # make sure these commands don't work with --defer - with pytest.raises(SystemExit): - self.run_dbt(['seed', '--defer']) - - @use_profile('postgres') - def test_postgres_state_changetarget_favor_state(self): - self.run_and_defer_favor_state() - - # make sure these commands don't work with --defer - with pytest.raises(SystemExit): - self.run_dbt(['seed', '--defer']) - - @use_profile('postgres') - def test_postgres_state_changedir(self): - self.run_switchdirs_defer() - - @use_profile('postgres') - def test_postgres_state_changedir_favor_state(self): - self.run_switchdirs_defer_favor_state() - - @use_profile('postgres') - def test_postgres_state_defer_iffnotexists(self): - self.run_defer_iff_not_exists() - - @use_profile('postgres') - def test_postgres_state_defer_iffnotexists_favor_state(self): - self.run_defer_iff_not_exists_favor_state() - - @use_profile('postgres') - def test_postgres_state_defer_deleted_upstream(self): - self.run_defer_deleted_upstream() - - @use_profile('postgres') - def test_postgres_state_defer_deleted_upstream_favor_state(self): - self.run_defer_deleted_upstream_favor_state() - - @use_profile('postgres') - def test_postgres_state_snapshot_defer(self): - self.run_and_snapshot_defer() - - @use_profile('postgres') - def test_postgres_state_compile_defer(self): - self.run_and_compile_defer() diff --git a/test/integration/062_defer_state_tests/test_modified_state.py b/test/integration/062_defer_state_tests/test_modified_state.py deleted file mode 100644 index 5f64cd66ae1..00000000000 --- a/test/integration/062_defer_state_tests/test_modified_state.py +++ /dev/null @@ -1,211 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os -import random -import shutil -import string - -import pytest - -from dbt.exceptions import CompilationException, IncompatibleSchemaException - - -class TestModifiedState(DBTIntegrationTest): - @property - def schema(self): - return "modified_state_062" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'macro-paths': ['macros'], - 'seeds': { - 'test': { - 'quote_columns': True, - } - } - } - - def _symlink_test_folders(self): 
- # dbt's normal symlink behavior breaks this test. Copy the files - # so we can freely modify them. - for entry in os.listdir(self.test_original_source_path): - src = os.path.join(self.test_original_source_path, entry) - tst = os.path.join(self.test_root_dir, entry) - if entry in {'models', 'seeds', 'macros', 'previous_state'}: - shutil.copytree(src, tst) - elif os.path.isdir(entry) or entry.endswith('.sql'): - os.symlink(src, tst) - - def copy_state(self): - assert not os.path.exists('state') - os.makedirs('state') - shutil.copyfile('target/manifest.json', 'state/manifest.json') - - def setUp(self): - super().setUp() - self.run_dbt(['seed']) - self.run_dbt(['run']) - self.copy_state() - - @use_profile('postgres') - def test_postgres_changed_seed_contents_state(self): - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) - assert len(results) == 0 - with open('seeds/seed.csv') as fp: - fp.readline() - newline = fp.newlines - with open('seeds/seed.csv', 'a') as fp: - fp.write(f'3,carl{newline}') - - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'state:modified+', '--state', './state']) - assert len(results) == 7 - assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - shutil.rmtree('./state') - self.copy_state() - - with open('seeds/seed.csv', 'a') as fp: - # assume each line is ~2 bytes + len(name) - target_size = 1*1024*1024 - line_size = 64 - - num_lines = target_size // line_size - - maxlines = num_lines + 4 - - for idx in range(4, maxlines): - value = ''.join(random.choices(string.ascii_letters, k=62)) - fp.write(f'{idx},{value}{newline}') - - # now if we run again, we should get a warning - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - with pytest.raises(CompilationException) as exc: - self.run_dbt(['--warn-error', 'ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) - assert '>1MB' in str(exc.value) - - shutil.rmtree('./state') - self.copy_state() - - # once it's in path mode, we don't mark it as modified if it changes - with open('seeds/seed.csv', 'a') as fp: - fp.write(f'{random},test{newline}') - - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) - assert len(results) == 0 - - @use_profile('postgres') - def test_postgres_changed_seed_config(self): - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) - assert len(results) == 0 - - self.use_default_project({'seeds': {'test': {'quote_columns': False}}}) - - # quoting change -> seed changed - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - @use_profile('postgres') - def test_postgres_unrendered_config_same(self): - results = self.run_dbt(['ls', '--resource-type', 
'model', '--select', 'state:modified', '--state', './state'], expect_pass=True) - assert len(results) == 0 - - # although this is the default value, dbt will recognize it as a change - # for previously-unconfigured models, because it's been explicitly set - self.use_default_project({'models': {'test': {'materialized': 'view'}}}) - results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.view_model' - - @use_profile('postgres') - def test_postgres_changed_model_contents(self): - results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 0 - - with open('models/table_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/table_model.sql', 'w') as fp: - fp.write("{{ config(materialized='table') }}") - fp.write(newline) - fp.write("select * from {{ ref('seed') }}") - fp.write(newline) - - results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0].node.name == 'table_model' - - @use_profile('postgres') - def test_postgres_new_macro(self): - with open('macros/macros.sql') as fp: - fp.readline() - newline = fp.newlines - - new_macro = '{% macro my_other_macro() %}{% endmacro %}' + newline - - # add a new macro to a new file - with open('macros/second_macro.sql', 'w') as fp: - fp.write(new_macro) - - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 0 - - os.remove('macros/second_macro.sql') - # add a new macro to the existing file - with open('macros/macros.sql', 'a') as fp: - fp.write(new_macro) - - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 0 - - @use_profile('postgres') - def test_postgres_changed_macro_contents(self): - with open('macros/macros.sql') as fp: - fp.readline() - newline = fp.newlines - - # modify an existing macro - with open('macros/macros.sql', 'w') as fp: - fp.write("{% macro my_macro() %}") - fp.write(newline) - fp.write(" {% do log('in a macro', info=True) %}") - fp.write(newline) - fp.write('{% endmacro %}') - fp.write(newline) - - # table_model calls this macro - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 1 - - @use_profile('postgres') - def test_postgres_changed_exposure(self): - with open('models/exposures.yml', 'a') as fp: - fp.write(' name: John Doe\n') - - results, stdout = self.run_dbt_and_capture(['run', '--models', '+state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0].node.name == 'view_model' - - @use_profile('postgres') - def test_postgres_previous_version_manifest(self): - # This tests that a different schema version in the file throws an error - with self.assertRaises(IncompatibleSchemaException) as exc: - results = self.run_dbt(['ls', '-s', 'state:modified', '--state', './previous_state']) - self.assertEqual(exc.CODE, 10014) diff --git a/test/integration/062_defer_state_tests/test_run_results_state.py b/test/integration/062_defer_state_tests/test_run_results_state.py deleted file mode 100644 index 4f59c6faa75..00000000000 --- a/test/integration/062_defer_state_tests/test_run_results_state.py +++ /dev/null @@ -1,436 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os -import random -import 
shutil -import string - -import pytest - -from dbt.exceptions import CompilationException - - -class TestRunResultsState(DBTIntegrationTest): - @property - def schema(self): - return "run_results_state_062" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'macro-paths': ['macros'], - 'seeds': { - 'test': { - 'quote_columns': True, - } - } - } - - def _symlink_test_folders(self): - # dbt's normal symlink behavior breaks this test. Copy the files - # so we can freely modify them. - for entry in os.listdir(self.test_original_source_path): - src = os.path.join(self.test_original_source_path, entry) - tst = os.path.join(self.test_root_dir, entry) - if entry in {'models', 'seeds', 'macros'}: - shutil.copytree(src, tst) - elif os.path.isdir(entry) or entry.endswith('.sql'): - os.symlink(src, tst) - - def copy_state(self): - assert not os.path.exists('state') - os.makedirs('state') - shutil.copyfile('target/manifest.json', 'state/manifest.json') - shutil.copyfile('target/run_results.json', 'state/run_results.json') - - def setUp(self): - super().setUp() - self.run_dbt(['build']) - self.copy_state() - - def rebuild_run_dbt(self, expect_pass=True): - shutil.rmtree('./state') - self.run_dbt(['build'], expect_pass=expect_pass) - self.copy_state() - - @use_profile('postgres') - def test_postgres_seed_run_results_state(self): - shutil.rmtree('./state') - self.run_dbt(['seed']) - self.copy_state() - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:success', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:success', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:success+', '--state', './state']) - assert len(results) == 7 - assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - with open('seeds/seed.csv') as fp: - fp.readline() - newline = fp.newlines - with open('seeds/seed.csv', 'a') as fp: - fp.write(f'\"\'\'3,carl{newline}') - shutil.rmtree('./state') - self.run_dbt(['seed'], expect_pass=False) - self.copy_state() - - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:error', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state']) - assert len(results) == 7 - assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - - with open('seeds/seed.csv') as fp: - fp.readline() - newline = fp.newlines - with open('seeds/seed.csv', 'a') as fp: - # assume each line is ~2 bytes + len(name) - target_size = 1*1024*1024 - line_size = 64 - - num_lines = target_size // line_size - - maxlines = num_lines + 4 - - for idx in range(4, maxlines): - value = ''.join(random.choices(string.ascii_letters, k=62)) - fp.write(f'{idx},{value}{newline}') - shutil.rmtree('./state') - self.run_dbt(['seed'], expect_pass=False) - self.copy_state() - - results = 
self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:error', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state']) - assert len(results) == 7 - assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - @use_profile('postgres') - def test_postgres_build_run_results_state(self): - results = self.run_dbt(['build', '--select', 'result:error', '--state', './state']) - assert len(results) == 0 - - with open('models/view_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - self.rebuild_run_dbt(expect_pass=False) - - results = self.run_dbt(['build', '--select', 'result:error', '--state', './state'], expect_pass=False) - assert len(results) == 3 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'view_model', 'not_null_view_model_id','unique_view_model_id'} - - results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state']) - assert len(results) == 3 - assert set(results) == {'test.view_model', 'test.not_null_view_model_id', 'test.unique_view_model_id'} - - results = self.run_dbt(['build', '--select', 'result:error+', '--state', './state'], expect_pass=False) - assert len(results) == 4 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'table_model','view_model', 'not_null_view_model_id','unique_view_model_id'} - - results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state']) - assert len(results) == 6 # includes exposure - assert set(results) == {'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - # test failure on build tests - # fail the unique test - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select 1 as id union all select 1 as id") - fp.write(newline) - - self.rebuild_run_dbt(expect_pass=False) - - results = self.run_dbt(['build', '--select', 'result:fail', '--state', './state'], expect_pass=False) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - results = self.run_dbt(['ls', '--select', 'result:fail', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.unique_view_model_id' - - results = self.run_dbt(['build', '--select', 'result:fail+', '--state', './state'], expect_pass=False) - assert len(results) == 2 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'table_model', 'unique_view_model_id'} - - results = self.run_dbt(['ls', '--select', 'result:fail+', '--state', './state']) - assert len(results) == 1 - assert set(results) == {'test.unique_view_model_id'} - - # change the unique test severity from error to warn and reuse the same view_model.sql changes above - f = open('models/schema.yml', 'r') - filedata = f.read() - f.close() - newdata = filedata.replace('error','warn') - f = open('models/schema.yml', 'w') - f.write(newdata) - f.close() - - self.rebuild_run_dbt(expect_pass=True) - - results = self.run_dbt(['build', '--select', 
'result:warn', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - results = self.run_dbt(['ls', '--select', 'result:warn', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.unique_view_model_id' - - results = self.run_dbt(['build', '--select', 'result:warn+', '--state', './state'], expect_pass=True) - assert len(results) == 2 # includes table_model to be run - nodes = set([elem.node.name for elem in results]) - assert nodes == {'table_model', 'unique_view_model_id'} - - results = self.run_dbt(['ls', '--select', 'result:warn+', '--state', './state']) - assert len(results) == 1 - assert set(results) == {'test.unique_view_model_id'} - - @use_profile('postgres') - def test_postgres_run_run_results_state(self): - results = self.run_dbt(['run', '--select', 'result:success', '--state', './state'], expect_pass=True) - assert len(results) == 2 - assert results[0].node.name == 'view_model' - assert results[1].node.name == 'table_model' - - # clear state and rerun upstream view model to test + operator - shutil.rmtree('./state') - self.run_dbt(['run', '--select', 'view_model'], expect_pass=True) - self.copy_state() - results = self.run_dbt(['run', '--select', 'result:success+', '--state', './state'], expect_pass=True) - assert len(results) == 2 - assert results[0].node.name == 'view_model' - assert results[1].node.name == 'table_model' - - # check we are starting from a place with 0 errors - results = self.run_dbt(['run', '--select', 'result:error', '--state', './state']) - assert len(results) == 0 - - # force an error in the view model to test error and skipped states - with open('models/view_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - shutil.rmtree('./state') - self.run_dbt(['run'], expect_pass=False) - self.copy_state() - - # test single result selector on error - results = self.run_dbt(['run', '--select', 'result:error', '--state', './state'], expect_pass=False) - assert len(results) == 1 - assert results[0].node.name == 'view_model' - - # test + operator selection on error - results = self.run_dbt(['run', '--select', 'result:error+', '--state', './state'], expect_pass=False) - assert len(results) == 2 - assert results[0].node.name == 'view_model' - assert results[1].node.name == 'table_model' - - # single result selector on skipped. 
Expect this to pass because the underlying view is already defined above - results = self.run_dbt(['run', '--select', 'result:skipped', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0].node.name == 'table_model' - - # add a downstream model that depends on table_model for skipped+ selector - with open('models/table_model_downstream.sql', 'w') as fp: - fp.write("select * from {{ref('table_model')}}") - - shutil.rmtree('./state') - self.run_dbt(['run'], expect_pass=False) - self.copy_state() - - results = self.run_dbt(['run', '--select', 'result:skipped+', '--state', './state'], expect_pass=True) - assert len(results) == 2 - assert results[0].node.name == 'table_model' - assert results[1].node.name == 'table_model_downstream' - - - @use_profile('postgres') - def test_postgres_test_run_results_state(self): - # run passed nodes - results = self.run_dbt(['test', '--select', 'result:pass', '--state', './state'], expect_pass=True) - assert len(results) == 2 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'unique_view_model_id', 'not_null_view_model_id'} - - # run passed nodes with + operator - results = self.run_dbt(['test', '--select', 'result:pass+', '--state', './state'], expect_pass=True) - assert len(results) == 2 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'unique_view_model_id', 'not_null_view_model_id'} - - # update view model to generate a failure case - os.remove('./models/view_model.sql') - with open('models/view_model.sql', 'w') as fp: - fp.write("select 1 as id union all select 1 as id") - - self.rebuild_run_dbt(expect_pass=False) - - # test with failure selector - results = self.run_dbt(['test', '--select', 'result:fail', '--state', './state'], expect_pass=False) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - # test with failure selector and + operator - results = self.run_dbt(['test', '--select', 'result:fail+', '--state', './state'], expect_pass=False) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - # change the unique test severity from error to warn and reuse the same view_model.sql changes above - with open('models/schema.yml', 'r+') as f: - filedata = f.read() - newdata = filedata.replace('error','warn') - f.seek(0) - f.write(newdata) - f.truncate() - - # rebuild - expect_pass = True because we changed the error to a warning this time around - self.rebuild_run_dbt(expect_pass=True) - - # test with warn selector - results = self.run_dbt(['test', '--select', 'result:warn', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - # test with warn selector and + operator - results = self.run_dbt(['test', '--select', 'result:warn+', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - - @use_profile('postgres') - def test_postgres_concurrent_selectors_run_run_results_state(self): - results = self.run_dbt(['run', '--select', 'state:modified+', 'result:error+', '--state', './state']) - assert len(results) == 0 - - # force an error on a dbt model - with open('models/view_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - shutil.rmtree('./state') - self.run_dbt(['run'], expect_pass=False) - self.copy_state() - - # modify another dbt model -
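These `TestRunResultsState` cases hinge on one detail visible in their `copy_state` helper: `result:*` selectors need the previous `run_results.json` snapshotted alongside `manifest.json`. A sketch of the resulting retry-from-state loop (illustrative, not from this changeset; assumes the dbt CLI on PATH):

```python
import shutil
import subprocess
from pathlib import Path

def copy_state() -> None:
    # state:modified compares against the old manifest.json, while
    # result:error / result:fail / result:warn compare against the
    # old run_results.json, so both artifacts get snapshotted.
    Path("state").mkdir(exist_ok=True)
    for artifact in ("manifest.json", "run_results.json"):
        shutil.copyfile(f"target/{artifact}", f"state/{artifact}")

first = subprocess.run(["dbt", "build"])
copy_state()
if first.returncode != 0:
    # Retry only the nodes that errored or failed, plus everything
    # downstream of them, mirroring the selector unions tested above.
    subprocess.run(
        ["dbt", "build", "--select", "result:error+", "result:fail+",
         "--state", "./state"],
        check=True,
    )
```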
with open('models/table_model_modified_example.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - results = self.run_dbt(['run', '--select', 'state:modified+', 'result:error+', '--state', './state'], expect_pass=False) - assert len(results) == 3 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'view_model', 'table_model_modified_example', 'table_model'} - - - @use_profile('postgres') - def test_postgres_concurrent_selectors_test_run_results_state(self): - # create failure test case for result:fail selector - os.remove('./models/view_model.sql') - with open('./models/view_model.sql', 'w') as f: - f.write('select 1 as id union all select 1 as id union all select null as id') - - # run dbt build again to trigger test errors - self.rebuild_run_dbt(expect_pass=False) - - # get the failures from - results = self.run_dbt(['test', '--select', 'result:fail', '--exclude', 'not_null_view_model_id', '--state', './state'], expect_pass=False) - assert len(results) == 1 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'unique_view_model_id'} - - - @use_profile('postgres') - def test_postgres_concurrent_selectors_build_run_results_state(self): - results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', '--state', './state']) - assert len(results) == 0 - - # force an error on a dbt model - with open('models/view_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - self.rebuild_run_dbt(expect_pass=False) - - # modify another dbt model - with open('models/table_model_modified_example.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', '--state', './state'], expect_pass=False) - assert len(results) == 5 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'table_model_modified_example', 'view_model', 'table_model', 'not_null_view_model_id', 'unique_view_model_id'} - - # create failure test case for result:fail selector - os.remove('./models/view_model.sql') - with open('./models/view_model.sql', 'w') as f: - f.write('select 1 as id union all select 1 as id') - - # create error model case for result:error selector - with open('./models/error_model.sql', 'w') as f: - f.write('select 1 as id from not_exists') - - # create something downstream from the error model to rerun - with open('./models/downstream_of_error_model.sql', 'w') as f: - f.write('select * from {{ ref("error_model") }} )') - - # regenerate build state - self.rebuild_run_dbt(expect_pass=False) - - # modify model again to trigger the state:modified selector - with open('models/table_model_modified_example.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_another_error") - fp.write(newline) - - results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', 'result:fail+', '--state', './state'], expect_pass=False) - assert len(results) == 5 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'error_model', 'downstream_of_error_model', 'table_model_modified_example', 'table_model', 'unique_view_model_id'} diff --git a/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml b/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml deleted file mode 
100644 index d56280a5577..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml +++ /dev/null @@ -1,23 +0,0 @@ - -name: 'local_dep' -version: '1.0' -config-version: 2 - -profile: 'default' - -model-paths: ["models"] -analysis-paths: ["analyses"] -test-paths: ["tests"] -seed-paths: ["seeds"] -macro-paths: ["macros"] - -require-dbt-version: '>=0.1.0' - -target-path: "target" # directory which will store compiled SQL files -clean-targets: # directories to be removed by `dbt clean` - - "target" - - "dbt_packages" - - -seeds: - quote_columns: False diff --git a/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql b/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql deleted file mode 100644 index 81e9a0faeef..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql +++ /dev/null @@ -1,3 +0,0 @@ -{% macro some_overridden_macro() -%} -100 -{%- endmacro %} diff --git a/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql b/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql deleted file mode 100644 index 4b91aa0f2fa..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('seed') }} diff --git a/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml b/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml deleted file mode 100644 index 3d804a7c153..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 -sources: - - name: seed_source - schema: "{{ var('schema_override', target.schema) }}" - tables: - - name: "seed" - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv b/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv deleted file mode 100644 index 3ff3deb87eb..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv +++ /dev/null @@ -1,2 +0,0 @@ -id -1 diff --git a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql b/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql deleted file mode 100644 index 0f64eb17c0d..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql +++ /dev/null @@ -1,19 +0,0 @@ -{% test type_one(model) %} - - select * from ( - - select * from {{ model }} - union all - select * from {{ ref('model_b') }} - - ) as Foo - -{% endtest %} - -{% test type_two(model) %} - - {{ config(severity = "WARN") }} - - select * from {{ model }} - -{% endtest %} diff --git a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql b/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql deleted file mode 100644 index ba5b53fa5a9..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql +++ /dev/null @@ -1,19 +0,0 @@ -{% test type_one(model) %} - - select * from ( - - select * from {{ model }} - union all - select * from {{ ref('model_b') }} - - ) as Foo - -{% endtest %} - -{% test type_two(model) %} - - {{ config(severity = "ERROR") }} - - select * from {{ model }} - -{% endtest %} diff --git 
a/test/integration/068_partial_parsing_tests/test-files/customers.sql b/test/integration/068_partial_parsing_tests/test-files/customers.sql deleted file mode 100644 index 98e19b557eb..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/customers.sql +++ /dev/null @@ -1,19 +0,0 @@ -with source as ( - - select * from {{ source('seed_sources', 'raw_customers') }} - -), - -renamed as ( - - select - id as customer_id, - first_name, - last_name, - email - - from source - -) - -select * from renamed diff --git a/test/integration/068_partial_parsing_tests/test-files/customers1.md b/test/integration/068_partial_parsing_tests/test-files/customers1.md deleted file mode 100644 index bba48335825..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/customers1.md +++ /dev/null @@ -1,5 +0,0 @@ -{% docs customer_table %} - -This table contains customer data - -{% enddocs %} diff --git a/test/integration/068_partial_parsing_tests/test-files/customers2.md b/test/integration/068_partial_parsing_tests/test-files/customers2.md deleted file mode 100644 index f8306f34e49..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/customers2.md +++ /dev/null @@ -1,5 +0,0 @@ -{% docs customer_table %} - -LOTS of customer data - -{% enddocs %} diff --git a/test/integration/068_partial_parsing_tests/test-files/empty_schema.yml b/test/integration/068_partial_parsing_tests/test-files/empty_schema.yml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml b/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml deleted file mode 100644 index 22817d2a9c7..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml +++ /dev/null @@ -1 +0,0 @@ -version: 2 diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml b/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml deleted file mode 100644 index 2b5809b1cb9..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 2 -sources: - - name: seed_sources - schema: "{{ target.schema }}" - database: "{{ env_var('ENV_VAR_DATABASE') }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ env_var('ENV_VAR_SEVERITY') }}" - - unique - - name: first_name - - name: last_name - - name: email - - diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql deleted file mode 100644 index 0bf3eda6c07..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% macro do_something(foo2, bar2) %} - - select - '{{ foo2 }}' as foo2, - '{{ bar2 }}' as bar2 - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml deleted file mode 100644 index 8888f65237d..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml +++ /dev/null @@ -1,7 +0,0 @@ -version: 2 -macros: - - name: do_something - description: "This is a test macro" - meta: - some_key: "{{ env_var('ENV_VAR_SOME_KEY') }}" - diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml 
b/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml deleted file mode 100644 index b8112fea010..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2 - -metrics: - - - model: "ref('people')" - name: number_of_people - description: Total count of people - label: "Number of people" - calculation_method: count - expression: "*" - timestamp: created_at - time_grains: [day, week, month] - dimensions: - - favorite_color - - loves_dbt - meta: - my_meta: '{{ env_var("ENV_VAR_METRICS") }}' - - - model: "ref('people')" - name: collective_tenure - description: Total number of years of team experience - label: "Collective tenure" - calculation_method: sum - expression: tenure - timestamp: created_at - time_grains: [day] - filters: - - field: loves_dbt - operator: is - value: 'true' diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql deleted file mode 100644 index a926d16d9d8..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql +++ /dev/null @@ -1 +0,0 @@ -select '{{ env_var('ENV_VAR_TEST') }}' as vartest diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql deleted file mode 100644 index e1875231d2e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql +++ /dev/null @@ -1 +0,0 @@ -select 'blue' as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml deleted file mode 100644 index 147b96de1b6..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -models: - - name: model_color - columns: - - name: fun - tests: - - unique: - enabled: "{{ env_var('ENV_VAR_ENABLED', True) }}" diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml deleted file mode 100644 index f8cf1ed9d67..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml +++ /dev/null @@ -1,6 +0,0 @@ -version: 2 - -models: - - name: model_one - config: - materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml deleted file mode 100644 index b1f3f079f6a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - -models: - - name: model_one - config: - materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" - tests: - - check_color: - column_name: fun - color: "env_var('ENV_VAR_COLOR')" - diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml deleted file mode 100644 index 3b0409637db..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: 2 - -models: - - name: model_one - config: - materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" - tests: - - check_color: - column_name: fun - color: "env_var('ENV_VAR_COLOR')" 
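These `env_var_*` fixtures drive partial-parsing tests: dbt must re-parse exactly the files whose `env_var()` inputs changed between invocations. The contract the fixtures lean on is small; a toy re-implementation follows (simplified, with a hypothetical exception name and only roughly dbt's error wording; real dbt additionally records each `env_var` read so the partial parser knows which files to invalidate):

```python
import os

class ParsingError(Exception):
    """Stand-in for dbt's parse-time error; hypothetical name."""

def env_var(name, default=None):
    # A set variable wins, then the optional default, else a parse
    # error, matching calls like env_var('ENV_VAR_ENABLED', True)
    # in the fixtures above.
    value = os.getenv(name)
    if value is not None:
        return value
    if default is not None:
        return default
    raise ParsingError(f"Env var required but not provided: '{name}'")

os.environ["ENV_VAR_COLOR"] = "blue"
assert env_var("ENV_VAR_COLOR") == "blue"
assert env_var("ENV_VAR_UNSET_EXAMPLE", "fallback") == "fallback"
```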
- -exposures: - - name: proxy_for_dashboard - description: "This is for the XXX dashboard" - type: "dashboard" - owner: - name: "{{ env_var('ENV_VAR_OWNER') }}" - email: "tester@dashboard.com" - depends_on: - - ref("model_color") - - source("seed_sources", "raw_customers") diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml b/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml deleted file mode 100644 index 9a44074728a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 - -models: - - name: orders - description: "Some order data" - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test.sql b/test/integration/068_partial_parsing_tests/test-files/generic_test.sql deleted file mode 100644 index ca09beaadb7..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/generic_test.sql +++ /dev/null @@ -1,26 +0,0 @@ -{% test is_odd(model, column_name) %} - -with validation as ( - - select - {{ column_name }} as odd_field - - from {{ model }} - -), - -validation_errors as ( - - select - odd_field - - from validation - -- if this is true, then odd_field is actually even! - where (odd_field % 2) = 0 - -) - -select * -from validation_errors - -{% endtest %} \ No newline at end of file diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql b/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql deleted file mode 100644 index 5a3b611ff7a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql +++ /dev/null @@ -1,26 +0,0 @@ -{% test is_odd(model, column_name) %} - -with validation as ( - - select - {{ column_name }} as odd_field2 - - from {{ model }} - -), - -validation_errors as ( - - select - odd_field2 - - from validation - -- if this is true, then odd_field is actually even! 
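`generic_test.sql` and its `_edited` twin are both zero-rows-means-pass queries; only the column alias changes, which is exactly the kind of macro edit the partial parser must pick up. How a rendered test's output is interpreted, sketched with a hypothetical helper (the returned row count is the failure count, and the configured severity decides fail versus warn, as in the `type_two` variants earlier in this diff):

```python
def interpret_test(failure_rows: int, severity: str = "ERROR") -> str:
    # Zero rows back from the rendered query means the test passes;
    # otherwise severity ERROR fails the run and WARN only warns.
    if failure_rows == 0:
        return "pass"
    return "fail" if severity.upper() == "ERROR" else "warn"

assert interpret_test(0) == "pass"
assert interpret_test(3, severity="WARN") == "warn"   # type_two in custom_schema_tests1.sql
assert interpret_test(3, severity="ERROR") == "fail"  # after the edit in custom_schema_tests2.sql
```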
- where (odd_field2 % 2) = 0 - -) - -select * -from validation_errors - -{% endtest %} \ No newline at end of file diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml b/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml deleted file mode 100644 index c8307bc1021..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 - -models: - - name: orders - description: "Some order data" - columns: - - name: id - tests: - - unique - - is_odd diff --git a/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql b/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql deleted file mode 100644 index 46c7a39ddaa..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql +++ /dev/null @@ -1,6 +0,0 @@ -- custom macro -{% macro generate_schema_name(schema_name, node) %} - - {{ schema_name }}_{{ target.schema }} - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql b/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql deleted file mode 100644 index 1bfddb9dadb..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql +++ /dev/null @@ -1,6 +0,0 @@ -- custom macro xxxx -{% macro generate_schema_name(schema_name, node) %} - - {{ schema_name }}_{{ target.schema }} - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml b/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml deleted file mode 100644 index cf221dec670..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml +++ /dev/null @@ -1,8 +0,0 @@ - -version: 2 - -models: - - name: model_a - tests: - - type_one - - type_two diff --git a/test/integration/068_partial_parsing_tests/test-files/macros.yml b/test/integration/068_partial_parsing_tests/test-files/macros.yml deleted file mode 100644 index 9ee72fad0ea..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/macros.yml +++ /dev/null @@ -1,4 +0,0 @@ -version: 2 -macros: - - name: do_something - description: "This is a test macro" diff --git a/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql b/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql deleted file mode 100644 index 010a0c29a02..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql +++ /dev/null @@ -1,21 +0,0 @@ -{% - set metric_list = [ - metric('number_of_people'), - metric('collective_tenure') - ] -%} - -{% if not execute %} - - {% set metric_names = [] %} - {% for m in metric_list %} - {% do metric_names.append(m.metric_name) %} - {% endfor %} - - -- this config does nothing, but it lets us check these values - {{ config(metric_names = metric_names) }} - -{% endif %} - - -select 1 as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_a.sql b/test/integration/068_partial_parsing_tests/test-files/model_a.sql deleted file mode 100644 index 3bd54a4c1b6..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_a.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_b.sql b/test/integration/068_partial_parsing_tests/test-files/model_b.sql deleted file mode 100644 index 01f38b0698e..00000000000 --- 
a/test/integration/068_partial_parsing_tests/test-files/model_b.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as notfun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_color.sql b/test/integration/068_partial_parsing_tests/test-files/model_color.sql deleted file mode 100644 index e1875231d2e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_color.sql +++ /dev/null @@ -1 +0,0 @@ -select 'blue' as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_four1.sql b/test/integration/068_partial_parsing_tests/test-files/model_four1.sql deleted file mode 100644 index 97c5b226d8c..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_four1.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('model_three') }} diff --git a/test/integration/068_partial_parsing_tests/test-files/model_four2.sql b/test/integration/068_partial_parsing_tests/test-files/model_four2.sql deleted file mode 100644 index c38a4c9194f..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_four2.sql +++ /dev/null @@ -1 +0,0 @@ -select fun from {{ ref('model_one') }} diff --git a/test/integration/068_partial_parsing_tests/test-files/model_one.sql b/test/integration/068_partial_parsing_tests/test-files/model_one.sql deleted file mode 100644 index 3bd54a4c1b6..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_one.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three.sql b/test/integration/068_partial_parsing_tests/test-files/model_three.sql deleted file mode 100644 index 45aa2b750f7..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_three.sql +++ /dev/null @@ -1,12 +0,0 @@ -{{ config(materialized='table') }} - -with source_data as ( - - select 1 as id - union all - select null as id - -) - -select * -from source_data diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql deleted file mode 100644 index a338a2ef4d2..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql +++ /dev/null @@ -1,12 +0,0 @@ -{{ config(materialized='table', enabled=False) }} - -with source_data as ( - - select 1 as id - union all - select null as id - -) - -select * -from source_data diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql deleted file mode 100644 index 4d416ab516e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql +++ /dev/null @@ -1,13 +0,0 @@ -- Disabled model -{{ config(materialized='table', enabled=False) }} - -with source_data as ( - - select 1 as id - union all - select null as id - -) - -select * -from source_data diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql deleted file mode 100644 index e2d2df486c5..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql +++ /dev/null @@ -1,14 +0,0 @@ -{{ config(materialized='table') }} - -with source_data as ( - - {#- This is model three #} - - select 1 as id - union all - select null as id - -) - -select * -from source_data diff --git 
a/test/integration/068_partial_parsing_tests/test-files/model_two.sql b/test/integration/068_partial_parsing_tests/test-files/model_two.sql deleted file mode 100644 index 01f38b0698e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_two.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as notfun diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml deleted file mode 100644 index 36e5ce68a6e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml deleted file mode 100644 index 7c9a890a481..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml deleted file mode 100644 index c9369126ffc..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - columns: - - name: id - tests: - - not_null diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml deleted file mode 100644 index 11e4468d248..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - tests: - - unique -macros: - - name: do_something - description: "This is a test macro" diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml deleted file mode 100644 index 8087615fe49..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - config: - enabled: false - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml deleted file mode 100644 index e73ffcef1de..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - config: - enabled: true - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql b/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql deleted file mode 100644 index ec6959e9a68..00000000000 --- 
a/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql +++ /dev/null @@ -1 +0,0 @@ -select * from customers diff --git a/test/integration/068_partial_parsing_tests/test-files/my_macro.sql b/test/integration/068_partial_parsing_tests/test-files/my_macro.sql deleted file mode 100644 index 0bf3eda6c07..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/my_macro.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% macro do_something(foo2, bar2) %} - - select - '{{ foo2 }}' as foo2, - '{{ bar2 }}' as bar2 - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql b/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql deleted file mode 100644 index e64aafa5ab5..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% macro do_something(foo2, bar2) %} - - select - 'foo' as foo2, - 'var' as bar2 - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/my_metric.yml b/test/integration/068_partial_parsing_tests/test-files/my_metric.yml deleted file mode 100644 index 521bc92290f..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/my_metric.yml +++ /dev/null @@ -1,23 +0,0 @@ -version: 2 -metrics: - - name: new_customers - label: New Customers - model: customers - description: "The number of paid customers who are using the product" - calculation_method: count - expression: user_id - timestamp: signup_date - time_grains: [day, week, month] - dimensions: - - plan - - country - filters: - - field: is_paying - value: True - operator: '=' - +meta: - is_okr: True - tags: - - okrs - - diff --git a/test/integration/068_partial_parsing_tests/test-files/my_test.sql b/test/integration/068_partial_parsing_tests/test-files/my_test.sql deleted file mode 100644 index fbfb738bc9a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/my_test.sql +++ /dev/null @@ -1,2 +0,0 @@ -select - * from {{ ref('customers') }} where first_name = '{{ macro_something() }}' diff --git a/test/integration/068_partial_parsing_tests/test-files/orders.sql b/test/integration/068_partial_parsing_tests/test-files/orders.sql deleted file mode 100644 index ef61d616cc1..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/orders.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as id, 101 as user_id, 'pending' as status diff --git a/test/integration/068_partial_parsing_tests/test-files/people.sql b/test/integration/068_partial_parsing_tests/test-files/people.sql deleted file mode 100644 index ce58d41a599..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/people.sql +++ /dev/null @@ -1,3 +0,0 @@ -select 1 as id, 'Drew' as first_name, 'Banin' as last_name, 'yellow' as favorite_color, true as loves_dbt, 5 as tenure, current_timestamp as created_at -union all -select 1 as id, 'Jeremy' as first_name, 'Cohen' as last_name, 'indigo' as favorite_color, true as loves_dbt, 4 as tenure, current_timestamp as created_at diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml deleted file mode 100644 index 99d31a4e632..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2 - -metrics: - - - model: "ref('people')" - name: number_of_people - description: Total count of people - label: "Number of people" - calculation_method: count - 
expression: "*" - timestamp: created_at - time_grains: [day, week, month] - dimensions: - - favorite_color - - loves_dbt - meta: - my_meta: 'testing' - - - model: "ref('people')" - name: collective_tenure - description: Total number of years of team experience - label: "Collective tenure" - calculation_method: sum - expression: tenure - timestamp: created_at - time_grains: [day] - filters: - - field: loves_dbt - operator: is - value: 'true' diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml deleted file mode 100644 index 5f826e66e85..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2 - -metrics: - - - model: "ref('people')" - name: number_of_people - description: Total count of people - label: "Number of people" - calculation_method: count - expression: "*" - timestamp: created_at - time_grains: [day, week, month] - dimensions: - - favorite_color - - loves_dbt - meta: - my_meta: 'replaced' - - - model: "ref('people')" - name: collective_tenure - description: Total number of years of team experience - label: "Collective tenure" - calculation_method: sum - expression: tenure - timestamp: created_at - time_grains: [day] - filters: - - field: loves_dbt - operator: is - value: 'true' diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml deleted file mode 100644 index b9c640591fc..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 - -metrics: - - - model: "ref('people')" - name: number_of_people - description: Total count of people - label: "Number of people" - calculation_method: count - expression: "*" - timestamp: created_at - time_grains: [day, week, month] - dimensions: - - favorite_color - - loves_dbt - meta: - my_meta: 'replaced' diff --git a/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv b/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv deleted file mode 100644 index 2315be73844..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,first_name,last_name,email -1,Michael,Perez,mperez0@chronoengine.com -2,Shawn,Mccoy,smccoy1@reddit.com -3,Kathleen,Payne,kpayne2@cargocollective.com -4,Jimmy,Cooper,jcooper3@cargocollective.com -5,Katherine,Rice,krice4@typepad.com -6,Sarah,Ryan,sryan5@gnu.org -7,Martin,Mcdonald,mmcdonald6@opera.com -8,Frank,Robinson,frobinson7@wunderground.com -9,Jennifer,Franklin,jfranklin8@mail.ru -10,Henry,Welch,hwelch9@list-manage.com diff --git a/test/integration/068_partial_parsing_tests/test-files/ref_override.sql b/test/integration/068_partial_parsing_tests/test-files/ref_override.sql deleted file mode 100644 index cd16793d3c4..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/ref_override.sql +++ /dev/null @@ -1,4 +0,0 @@ -- Macro to override ref -{% macro ref(modelname) %} -{% do return(builtins.ref(modelname)) %} -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql b/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql deleted file mode 100644 index 2e8027d8e80..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql +++ /dev/null @@ -1,4 +0,0 
@@ -- Macro to override ref xxxx -{% macro ref(modelname) %} -{% do return(builtins.ref(modelname)) %} -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml b/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml deleted file mode 100644 index 432b5e0efe3..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: 2 - -sources: - - name: seed_source - description: "This is a source override" - overrides: local_dep - schema: "{{ var('schema_override', target.schema) }}" - tables: - - name: "seed" - columns: - - name: id - tests: - - unique - - not_null diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml deleted file mode 100644 index 30363115e09..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - name: first_name - - name: last_name - - name: email - - diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml deleted file mode 100644 index 5927952917f..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: 2 - -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - name: first_name - - name: last_name - - name: email - -exposures: - - name: proxy_for_dashboard - description: "This is for the XXX dashboard" - type: "dashboard" - owner: - name: "Dashboard Tester" - email: "tester@dashboard.com" - depends_on: - - ref("model_one") - - ref("raw_customers") - - source("seed_sources", "raw_customers") - diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml deleted file mode 100644 index 54133a9a2f5..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: 2 - -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - name: first_name - - name: last_name - - name: email - -exposures: - - name: proxy_for_dashboard - description: "This is for the XXX dashboard" - type: "dashboard" - owner: - name: "Dashboard Tester" - email: "tester@dashboard.com" - depends_on: - - ref("model_one") - - source("seed_sources", "raw_customers") - diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml deleted file mode 100644 index af76a0f315a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2 - -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: 
raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - every_value_is_blue - - name: first_name - - name: last_name - - name: email - -seeds: - - name: raw_customers - description: "Raw customer data" - columns: - - name: id - tests: - - unique - - not_null - - name: first_name - - name: last_name - - name: email - diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml deleted file mode 100644 index 57818771b71..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: 2 - -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - name: first_name - - name: last_name - - name: email - -seeds: - - name: rad_customers - description: "Raw customer data" - columns: - - name: id - tests: - - unique - - not_null - - name: first_name - - name: last_name - - name: email - diff --git a/test/integration/068_partial_parsing_tests/test-files/snapshot.sql b/test/integration/068_partial_parsing_tests/test-files/snapshot.sql deleted file mode 100644 index c82a2fa5906..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/snapshot.sql +++ /dev/null @@ -1,29 +0,0 @@ -{% snapshot orders_snapshot %} - -{{ - config( - target_schema=schema, - strategy='check', - unique_key='id', - check_cols=['status'], - ) -}} - -select * from {{ ref('orders') }} - -{% endsnapshot %} - -{% snapshot orders2_snapshot %} - -{{ - config( - target_schema=schema, - strategy='check', - unique_key='id', - check_cols=['order_date'], - ) -}} - -select * from {{ ref('orders') }} - -{% endsnapshot %} diff --git a/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql b/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql deleted file mode 100644 index 27d320618c9..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql +++ /dev/null @@ -1,30 +0,0 @@ -- add a comment -{% snapshot orders_snapshot %} - -{{ - config( - target_schema=schema, - strategy='check', - unique_key='id', - check_cols=['status'], - ) -}} - -select * from {{ ref('orders') }} - -{% endsnapshot %} - -{% snapshot orders2_snapshot %} - -{{ - config( - target_schema=schema, - strategy='check', - unique_key='id', - check_cols=['order_date'], - ) -}} - -select * from {{ ref('orders') }} - -{% endsnapshot %} diff --git a/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql b/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql deleted file mode 100644 index dd8710f0556..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql +++ /dev/null @@ -1,9 +0,0 @@ - -{% test every_value_is_blue(model, column_name) %} - - select * - from {{ model }} - where {{ column_name }} = 9999 - -{% endtest %} - diff --git a/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql b/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql deleted file mode 100644 index 3abcf30a658..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql +++ /dev/null @@ -1,9 +0,0 @@ - -{% test every_value_is_blue(model, column_name) %} - - select * - from {{ model }} - where 
{{ column_name }} != 99 - -{% endtest %} - diff --git a/test/integration/068_partial_parsing_tests/test-files/test-macro.sql b/test/integration/068_partial_parsing_tests/test-files/test-macro.sql deleted file mode 100644 index f2b1ecfc86b..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/test-macro.sql +++ /dev/null @@ -1,5 +0,0 @@ -{% macro macro_something() %} - - {% do return('macro_something') %} - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql b/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql deleted file mode 100644 index 52b4469cd01..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql +++ /dev/null @@ -1,5 +0,0 @@ -{% macro macro_something() %} - - {% do return('some_name') %} - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/test_color.sql b/test/integration/068_partial_parsing_tests/test-files/test_color.sql deleted file mode 100644 index 0bb1cdcd96c..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/test_color.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% test check_color(model, column_name, color) %} - - select * - from {{ model }} - where {{ column_name }} = '{{ color }}' - -{% endtest %} diff --git a/test/integration/068_partial_parsing_tests/test_partial_parsing.py b/test/integration/068_partial_parsing_tests/test_partial_parsing.py deleted file mode 100644 index fce32b42cf1..00000000000 --- a/test/integration/068_partial_parsing_tests/test_partial_parsing.py +++ /dev/null @@ -1,580 +0,0 @@ -from dbt.exceptions import CompilationException, ParsingException -from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.files import ParseFileType -from dbt.contracts.results import TestStatus -from dbt.parser.partial import special_override_macros -from test.integration.base import DBTIntegrationTest, use_profile, normalize, get_manifest -import shutil -import os - - -# Note: every test case needs to have separate directories, otherwise -# they will interfere with each other when tests are multi-threaded - -class BasePPTest(DBTIntegrationTest): - - @property - def schema(self): - return "test_068A" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds'], - 'test-paths': ['tests'], - 'macro-paths': ['macros'], - 'analysis-paths': ['analyses'], - 'snapshot-paths': ['snapshots'], - 'seeds': { - 'quote_columns': False, - }, - } - - def setup_directories(self): - # Create the directories for the test in the `self.test_root_dir` - # directory after everything else is symlinked. We can copy to and - # delete files in this directory without tests interfering with each other. 
- os.mkdir(os.path.join(self.test_root_dir, 'models')) - os.mkdir(os.path.join(self.test_root_dir, 'tests')) - os.mkdir(os.path.join(self.test_root_dir, 'tests', 'generic')) - os.mkdir(os.path.join(self.test_root_dir, 'seeds')) - os.mkdir(os.path.join(self.test_root_dir, 'macros')) - os.mkdir(os.path.join(self.test_root_dir, 'analyses')) - os.mkdir(os.path.join(self.test_root_dir, 'snapshots')) - os.environ['DBT_PP_TEST'] = 'true' - - - -class ModelTest(BasePPTest): - - @use_profile('postgres') - def test_postgres_pp_models(self): - self.setup_directories() - self.copy_file('test-files/model_one.sql', 'models/model_one.sql') - # initial run - self.run_dbt(['clean']) - results = self.run_dbt(["run"]) - self.assertEqual(len(results), 1) - - # add a model file - self.copy_file('test-files/model_two.sql', 'models/model_two.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - - # add a schema file - self.copy_file('test-files/models-schema1.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - self.assertIn('model.test.model_one', manifest.nodes) - model_one_node = manifest.nodes['model.test.model_one'] - self.assertEqual(model_one_node.description, 'The first model') - self.assertEqual(model_one_node.patch_path, 'test://' + normalize('models/schema.yml')) - - # add a model and a schema file (with a test) at the same time - self.copy_file('test-files/models-schema2.yml', 'models/schema.yml') - self.copy_file('test-files/model_three.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "test"], expect_pass=False) - self.assertEqual(len(results), 1) - manifest = get_manifest() - project_files = [f for f in manifest.files if f.startswith('test://')] - self.assertEqual(len(project_files), 4) - model_3_file_id = 'test://' + normalize('models/model_three.sql') - self.assertIn(model_3_file_id, manifest.files) - model_three_file = manifest.files[model_3_file_id] - self.assertEqual(model_three_file.parse_file_type, ParseFileType.Model) - self.assertEqual(type(model_three_file).__name__, 'SourceFile') - model_three_node = manifest.nodes[model_three_file.nodes[0]] - schema_file_id = 'test://' + normalize('models/schema.yml') - self.assertEqual(model_three_node.patch_path, schema_file_id) - self.assertEqual(model_three_node.description, 'The third model') - schema_file = manifest.files[schema_file_id] - self.assertEqual(type(schema_file).__name__, 'SchemaSourceFile') - self.assertEqual(len(schema_file.tests), 1) - tests = schema_file.get_all_test_ids() - self.assertEqual(tests, ['test.test.unique_model_three_id.6776ac8160']) - unique_test_id = tests[0] - self.assertIn(unique_test_id, manifest.nodes) - - # modify model sql file, ensure description still there - self.copy_file('test-files/model_three_modified.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - model_id = 'model.test.model_three' - self.assertIn(model_id, manifest.nodes) - model_three_node = manifest.nodes[model_id] - self.assertEqual(model_three_node.description, 'The third model') - - # Change the model 3 test from unique to not_null - self.copy_file('test-files/models-schema2b.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "test"], expect_pass=False) - manifest = get_manifest() - schema_file_id = 'test://' + normalize('models/schema.yml') - schema_file = manifest.files[schema_file_id] - tests = 
schema_file.get_all_test_ids() - self.assertEqual(tests, ['test.test.not_null_model_three_id.3162ce0a6f']) - not_null_test_id = tests[0] - self.assertIn(not_null_test_id, manifest.nodes.keys()) - self.assertNotIn(unique_test_id, manifest.nodes.keys()) - self.assertEqual(len(results), 1) - - # go back to previous version of schema file, removing patch, test, and model for model three - self.copy_file('test-files/models-schema1.yml', 'models/schema.yml') - self.rm_file(normalize('models/model_three.sql')) - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - - # remove schema file, still have 3 models - self.copy_file('test-files/model_three.sql', 'models/model_three.sql') - self.rm_file(normalize('models/schema.yml')) - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - manifest = get_manifest() - schema_file_id = 'test://' + normalize('models/schema.yml') - self.assertNotIn(schema_file_id, manifest.files) - project_files = [f for f in manifest.files if f.startswith('test://')] - self.assertEqual(len(project_files), 3) - - # Put schema file back and remove a model - # referred to in schema file - self.copy_file('test-files/models-schema2.yml', 'models/schema.yml') - self.rm_file('models/model_three.sql') - with self.assertRaises(CompilationException): - results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) - - # Put model back again - self.copy_file('test-files/model_three.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Add model four refing model three - self.copy_file('test-files/model_four1.sql', 'models/model_four.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 4) - - # Remove model_three and change model_four to ref model_one - # and change schema file to remove model_three - self.rm_file('models/model_three.sql') - self.copy_file('test-files/model_four2.sql', 'models/model_four.sql') - self.copy_file('test-files/models-schema1.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Remove model four, put back model three, put back schema file - self.copy_file('test-files/model_three.sql', 'models/model_three.sql') - self.copy_file('test-files/models-schema2.yml', 'models/schema.yml') - self.rm_file('models/model_four.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # disable model three in the schema file - self.copy_file('test-files/models-schema4.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - - # update enabled config to be true for model three in the schema file - self.copy_file('test-files/models-schema4b.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # disable model three in the schema file again - self.copy_file('test-files/models-schema4.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - - # remove disabled config for model three in the schema file to check it gets enabled - self.copy_file('test-files/models-schema3.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Add a macro - self.copy_file('test-files/my_macro.sql', 'macros/my_macro.sql') - results = 
self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - manifest = get_manifest() - macro_id = 'macro.test.do_something' - self.assertIn(macro_id, manifest.macros) - - # Modify the macro - self.copy_file('test-files/my_macro2.sql', 'macros/my_macro.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Add a macro patch - self.copy_file('test-files/models-schema3.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Remove the macro - self.rm_file('macros/my_macro.sql') - with self.assertRaises(CompilationException): - results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) - - # put back macro file, got back to schema file with no macro - # add separate macro patch schema file - self.copy_file('test-files/models-schema2.yml', 'models/schema.yml') - self.copy_file('test-files/my_macro.sql', 'macros/my_macro.sql') - self.copy_file('test-files/macros.yml', 'macros/macros.yml') - results = self.run_dbt(["--partial-parse", "run"]) - - # delete macro and schema file - self.rm_file('macros/my_macro.sql') - self.rm_file('macros/macros.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Add an empty schema file - self.copy_file('test-files/empty_schema.yml', 'models/eschema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Add version to empty schema file - self.copy_file('test-files/empty_schema_with_version.yml', 'models/eschema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Disable model_three - self.copy_file('test-files/model_three_disabled.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - model_id = 'model.test.model_three' - self.assertIn(model_id, manifest.disabled) - self.assertNotIn(model_id, manifest.nodes) - - # Edit disabled model three - self.copy_file('test-files/model_three_disabled2.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - model_id = 'model.test.model_three' - self.assertIn(model_id, manifest.disabled) - self.assertNotIn(model_id, manifest.nodes) - - # Remove disabled from model three - self.copy_file('test-files/model_three.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - manifest = get_manifest() - model_id = 'model.test.model_three' - self.assertIn(model_id, manifest.nodes) - self.assertNotIn(model_id, manifest.disabled) - - -class TestSources(BasePPTest): - - @use_profile('postgres') - def test_postgres_pp_sources(self): - self.setup_directories() - # initial run - self.copy_file('test-files/model_one.sql', 'models/model_one.sql') - self.run_dbt(['clean']) - self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv') - self.copy_file('test-files/sources-tests1.sql', 'macros/tests.sql') - results = self.run_dbt(["run"]) - self.assertEqual(len(results), 1) - - # Partial parse running 'seed' - self.run_dbt(['--partial-parse', 'seed']) - manifest = get_manifest() - seed_file_id = 'test://' + normalize('seeds/raw_customers.csv') - self.assertIn(seed_file_id, manifest.files) - - # Add another seed file - self.copy_file('test-files/raw_customers.csv', 'seeds/more_customers.csv') - 
self.run_dbt(['--partial-parse', 'run']) - seed_file_id = 'test://' + normalize('seeds/more_customers.csv') - manifest = get_manifest() - self.assertIn(seed_file_id, manifest.files) - seed_id = 'seed.test.more_customers' - self.assertIn(seed_id, manifest.nodes) - - # Remove seed file and add a schema file with a source referring to raw_customers - self.rm_file(normalize('seeds/more_customers.csv')) - self.copy_file('test-files/schema-sources1.yml', 'models/sources.yml') - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - self.assertEqual(len(manifest.sources), 1) - file_id = 'test://' + normalize('models/sources.yml') - self.assertIn(file_id, manifest.files) - - # add a model referring to raw_customers source - self.copy_file('test-files/customers.sql', 'models/customers.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - - # remove sources schema file - self.rm_file(normalize('models/sources.yml')) - with self.assertRaises(CompilationException): - results = self.run_dbt(["--partial-parse", "run"]) - - # put back sources and add an exposures file - self.copy_file('test-files/schema-sources2.yml', 'models/sources.yml') - results = self.run_dbt(["--partial-parse", "run"]) - - # remove seed referenced in exposures file - self.rm_file(normalize('seeds/raw_customers.csv')) - with self.assertRaises(CompilationException): - results = self.run_dbt(["--partial-parse", "run"]) - - # put back seed and remove depends_on from exposure - self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv') - self.copy_file('test-files/schema-sources3.yml', 'models/sources.yml') - results = self.run_dbt(["--partial-parse", "run"]) - - # Add seed config with test to schema.yml, remove exposure - self.copy_file('test-files/schema-sources4.yml', 'models/sources.yml') - results = self.run_dbt(["--partial-parse", "run"]) - - # Change seed name to wrong name - self.copy_file('test-files/schema-sources5.yml', 'models/sources.yml') - with self.assertRaises(CompilationException): - results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) - - # Put back seed name to right name - self.copy_file('test-files/schema-sources4.yml', 'models/sources.yml') - results = self.run_dbt(["--partial-parse", "run"]) - - # Add docs file customers.md - self.copy_file('test-files/customers1.md', 'models/customers.md') - results = self.run_dbt(["--partial-parse", "run"]) - - # Change docs file customers.md - self.copy_file('test-files/customers2.md', 'models/customers.md') - results = self.run_dbt(["--partial-parse", "run"]) - - # Delete docs file - self.rm_file(normalize('models/customers.md')) - results = self.run_dbt(["--partial-parse", "run"]) - - # Add a data test - self.copy_file('test-files/test-macro.sql', 'macros/test-macro.sql') - self.copy_file('test-files/my_test.sql', 'tests/my_test.sql') - results = self.run_dbt(["--partial-parse", "test"]) - manifest = get_manifest() - self.assertEqual(len(manifest.nodes), 9) - test_id = 'test.test.my_test' - self.assertIn(test_id, manifest.nodes) - - # Change macro that data test depends on - self.copy_file('test-files/test-macro2.sql', 'macros/test-macro.sql') - results = self.run_dbt(["--partial-parse", "test"]) - manifest = get_manifest() - - # Add an analysis - self.copy_file('test-files/my_analysis.sql', 'analyses/my_analysis.sql') - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - - # Remove data test - self.rm_file(normalize('tests/my_test.sql')) - results
= self.run_dbt(["--partial-parse", "test"]) - manifest = get_manifest() - self.assertEqual(len(manifest.nodes), 9) - - # Remove analysis - self.rm_file(normalize('analyses/my_analysis.sql')) - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - self.assertEqual(len(manifest.nodes), 8) - - # Change source test - self.copy_file('test-files/sources-tests2.sql', 'macros/tests.sql') - results = self.run_dbt(["--partial-parse", "run"]) - - -class TestPartialParsingDependency(BasePPTest): - - @property - def packages_config(self): - return { - "packages": [ - { - 'local': 'local_dependency' - } - ] - } - - @use_profile("postgres") - def test_postgres_parsing_with_dependency(self): - self.setup_directories() - self.copy_file('test-files/model_one.sql', 'models/model_one.sql') - self.run_dbt(["clean"]) - self.run_dbt(["deps"]) - self.run_dbt(["seed"]) - self.run_dbt(["run"]) - - # Add a source override - self.copy_file('test-files/schema-models-c.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - self.assertEqual(len(manifest.sources), 1) - source_id = 'source.local_dep.seed_source.seed' - self.assertIn(source_id, manifest.sources) - # We have 1 root model, 1 local_dep model, 1 local_dep seed, 1 local_dep source test, 2 root source tests - self.assertEqual(len(manifest.nodes), 5) - test_id = 'test.local_dep.source_unique_seed_source_seed_id.afa94935ed' - test_node = manifest.nodes[test_id] - - - # Remove a source override - self.rm_file(normalize('models/schema.yml')) - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - self.assertEqual(len(manifest.sources), 1) - - -class TestMacros(BasePPTest): - - @use_profile('postgres') - def test_postgres_nested_macros(self): - self.setup_directories() - self.copy_file('test-files/model_a.sql', 'models/model_a.sql') - self.copy_file('test-files/model_b.sql', 'models/model_b.sql') - self.copy_file('test-files/macros-schema.yml', 'models/schema.yml') - self.copy_file('test-files/custom_schema_tests1.sql', 'macros/custom_schema_tests.sql') - results = self.run_dbt() - self.assertEqual(len(results), 2) - manifest = get_manifest() - macro_child_map = manifest.build_macro_child_map() - macro_unique_id = 'macro.test.test_type_two' - - results = self.run_dbt(['test'], expect_pass=False) - results = sorted(results, key=lambda r: r.node.name) - self.assertEqual(len(results), 2) - # type_one_model_a_ - self.assertEqual(results[0].status, TestStatus.Fail) - self.assertRegex(results[0].node.compiled_code, r'union all') - # type_two_model_a_ - self.assertEqual(results[1].status, TestStatus.Warn) - self.assertEqual(results[1].node.config.severity, 'WARN') - - self.copy_file('test-files/custom_schema_tests2.sql', 'macros/custom_schema_tests.sql') - results = self.run_dbt(["--partial-parse", "test"], expect_pass=False) - manifest = get_manifest() - test_node_id = 'test.test.type_two_model_a_.842bc6c2a7' - self.assertIn(test_node_id, manifest.nodes) - results = sorted(results, key=lambda r: r.node.name) - self.assertEqual(len(results), 2) - # type_two_model_a_ - self.assertEqual(results[1].status, TestStatus.Fail) - self.assertEqual(results[1].node.config.severity, 'ERROR') - - @use_profile('postgres') - def test_postgres_skip_macros(self): - expected_special_override_macros = [ - 'ref', 'source', 'config', 'generate_schema_name', - 'generate_database_name', 'generate_alias_name' - ] - 
self.assertEqual(special_override_macros, expected_special_override_macros) - - # initial run so we have a msgpack file - self.setup_directories() - self.copy_file('test-files/model_one.sql', 'models/model_one.sql') - # use empty_schema file for bug #4850 - self.copy_file('test-files/empty_schema.yml', 'models/eschema.yml') - results = self.run_dbt() - - # add a new ref override macro - self.copy_file('test-files/ref_override.sql', 'macros/ref_override.sql') - results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run']) - self.assertTrue('Starting full parse.' in log_output) - - # modify a ref override macro - self.copy_file('test-files/ref_override2.sql', 'macros/ref_override.sql') - results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run']) - self.assertTrue('Starting full parse.' in log_output) - - # remove a ref override macro - self.rm_file(normalize('macros/ref_override.sql')) - results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run']) - self.assertTrue('Starting full parse.' in log_output) - - # custom generate_schema_name macro - self.copy_file('test-files/gsm_override.sql', 'macros/gsm_override.sql') - results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run']) - self.assertTrue('Starting full parse.' in log_output) - - # change generate_schema_name macro - self.copy_file('test-files/gsm_override2.sql', 'macros/gsm_override.sql') - results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run']) - self.assertTrue('Starting full parse.' in log_output) - - -class TestSnapshots(BasePPTest): - - @use_profile('postgres') - def test_postgres_pp_snapshots(self): - - # initial run - self.setup_directories() - self.copy_file('test-files/orders.sql', 'models/orders.sql') - results = self.run_dbt() - self.assertEqual(len(results), 1) - - # add snapshot - self.copy_file('test-files/snapshot.sql', 'snapshots/snapshot.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 1) - manifest = get_manifest() - snapshot_id = 'snapshot.test.orders_snapshot' - self.assertIn(snapshot_id, manifest.nodes) - snapshot2_id = 'snapshot.test.orders2_snapshot' - self.assertIn(snapshot2_id, manifest.nodes) - - # run snapshot - results = self.run_dbt(["--partial-parse", "snapshot"]) - self.assertEqual(len(results), 2) - - # modify snapshot - self.copy_file('test-files/snapshot2.sql', 'snapshots/snapshot.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 1) - - # delete snapshot - self.rm_file(normalize('snapshots/snapshot.sql')) - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 1) - - -class TestTests(BasePPTest): - - @use_profile('postgres') - def test_postgres_pp_generic_tests(self): - - # initial run - self.setup_directories() - self.copy_file('test-files/orders.sql', 'models/orders.sql') - self.copy_file('test-files/generic_schema.yml', 'models/schema.yml') - results = self.run_dbt() - self.assertEqual(len(results), 1) - manifest = get_manifest() - expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e'] - self.assertCountEqual(expected_nodes, list(manifest.nodes.keys())) - - # add generic test in test-path - self.copy_file('test-files/generic_test.sql', 'tests/generic/generic_test.sql') - self.copy_file('test-files/generic_test_schema.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 1) - manifest = get_manifest() - test_id = 
'test.test.is_odd_orders_id.82834fdc5b' - self.assertIn(test_id, manifest.nodes) - expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e', 'test.test.is_odd_orders_id.82834fdc5b'] - self.assertCountEqual(expected_nodes, list(manifest.nodes.keys())) - - # edit generic test in test-path - self.copy_file('test-files/generic_test_edited.sql', 'tests/generic/generic_test.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 1) - manifest = get_manifest() - test_id = 'test.test.is_odd_orders_id.82834fdc5b' - self.assertIn(test_id, manifest.nodes) - expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e', 'test.test.is_odd_orders_id.82834fdc5b'] - self.assertCountEqual(expected_nodes, list(manifest.nodes.keys())) diff --git a/test/integration/068_partial_parsing_tests/test_pp_metrics.py b/test/integration/068_partial_parsing_tests/test_pp_metrics.py deleted file mode 100644 index b9cbc69e3aa..00000000000 --- a/test/integration/068_partial_parsing_tests/test_pp_metrics.py +++ /dev/null @@ -1,106 +0,0 @@ -from dbt.exceptions import CompilationException, UndefinedMacroException -from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.files import ParseFileType -from dbt.contracts.results import TestStatus -from dbt.parser.partial import special_override_macros -from test.integration.base import DBTIntegrationTest, use_profile, normalize, get_manifest -import shutil -import os - - -# Note: every test case needs to have separate directories, otherwise -# they will interfere with each other when tests are multi-threaded - -class BasePPTest(DBTIntegrationTest): - - @property - def schema(self): - return "test_068A" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'data-paths': ['seeds'], - 'test-paths': ['tests'], - 'macro-paths': ['macros'], - 'analysis-paths': ['analyses'], - 'snapshot-paths': ['snapshots'], - 'seeds': { - 'quote_columns': False, - }, - } - - def setup_directories(self): - # Create the directories for the test in the `self.test_root_dir` - # directory after everything else is symlinked. We can copy to and - # delete files in this directory without tests interfering with each other. 
- os.mkdir(os.path.join(self.test_root_dir, 'models')) - os.mkdir(os.path.join(self.test_root_dir, 'tests')) - os.mkdir(os.path.join(self.test_root_dir, 'seeds')) - os.mkdir(os.path.join(self.test_root_dir, 'macros')) - os.mkdir(os.path.join(self.test_root_dir, 'analyses')) - os.mkdir(os.path.join(self.test_root_dir, 'snapshots')) - os.environ['DBT_PP_TEST'] = 'true' - - - -class MetricsTest(BasePPTest): - - @use_profile('postgres') - def test_postgres_metrics(self): - self.setup_directories() - # initial run - self.copy_file('test-files/people.sql', 'models/people.sql') - results = self.run_dbt(["run"]) - self.assertEqual(len(results), 1) - manifest = get_manifest() - self.assertEqual(len(manifest.nodes), 1) - - # Add metrics yaml file - self.copy_file('test-files/people_metrics.yml', 'models/people_metrics.yml') - results = self.run_dbt(["run"]) - self.assertEqual(len(results), 1) - manifest = get_manifest() - self.assertEqual(len(manifest.metrics), 2) - metric_people_id = 'metric.test.number_of_people' - metric_tenure_id = 'metric.test.collective_tenure' - metric_people = manifest.metrics[metric_people_id] - metric_tenure = manifest.metrics[metric_tenure_id] - expected_meta = {'my_meta': 'testing'} - self.assertEqual(metric_people.meta, expected_meta) - self.assertEqual(metric_people.refs, [['people']]) - self.assertEqual(metric_tenure.refs, [['people']]) - expected_depends_on_nodes = ['model.test.people'] - self.assertEqual(metric_people.depends_on.nodes, expected_depends_on_nodes) - - # Change metrics yaml files - self.copy_file('test-files/people_metrics2.yml', 'models/people_metrics.yml') - results = self.run_dbt(["run"]) - self.assertEqual(len(results), 1) - manifest = get_manifest() - metric_people = manifest.metrics[metric_people_id] - expected_meta = {'my_meta': 'replaced'} - self.assertEqual(metric_people.meta, expected_meta) - expected_depends_on_nodes = ['model.test.people'] - self.assertEqual(metric_people.depends_on.nodes, expected_depends_on_nodes) - - # Add model referring to metric - self.copy_file('test-files/metric_model_a.sql', 'models/metric_model_a.sql') - results = self.run_dbt(["run"]) - manifest = get_manifest() - model_a = manifest.nodes['model.test.metric_model_a'] - expected_depends_on_nodes = ['metric.test.number_of_people', 'metric.test.collective_tenure'] - self.assertEqual(model_a.depends_on.nodes, expected_depends_on_nodes) - - # Then delete a metric - self.copy_file('test-files/people_metrics3.yml', 'models/people_metrics.yml') - with self.assertRaises(CompilationException): - # We use "parse" here and not "run" because we're checking that the CompilationException - # occurs at parse time, not compilation - results = self.run_dbt(["parse"]) - diff --git a/test/integration/068_partial_parsing_tests/test_pp_vars.py b/test/integration/068_partial_parsing_tests/test_pp_vars.py deleted file mode 100644 index e5f0752f6a9..00000000000 --- a/test/integration/068_partial_parsing_tests/test_pp_vars.py +++ /dev/null @@ -1,416 +0,0 @@ -from dbt.exceptions import CompilationException, ParsingException -from dbt.constants import SECRET_ENV_PREFIX -from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.files import ParseFileType -from dbt.contracts.results import TestStatus -from dbt.parser.partial import special_override_macros -from test.integration.base import DBTIntegrationTest, use_profile, normalize, get_manifest -import shutil -import os - - -# Note: every test case needs to have separate directories, otherwise -# they will interfere with each 
other when tests are multi-threaded - -class BasePPTest(DBTIntegrationTest): - - @property - def schema(self): - return "test_068A" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds'], - 'test-paths': ['tests'], - 'macro-paths': ['macros'], - 'seeds': { - 'quote_columns': False, - }, - } - - def setup_directories(self): - # Create the directories for the test in the `self.test_root_dir` - # directory after everything else is symlinked. We can copy to and - # delete files in this directory without tests interfering with each other. - os.mkdir(os.path.join(self.test_root_dir, 'models')) - os.mkdir(os.path.join(self.test_root_dir, 'tests')) - os.mkdir(os.path.join(self.test_root_dir, 'macros')) - os.mkdir(os.path.join(self.test_root_dir, 'seeds')) - os.environ['DBT_PP_TEST'] = 'true' - - -class EnvVarTest(BasePPTest): - - @use_profile('postgres') - def test_postgres_env_vars_models(self): - self.setup_directories() - self.copy_file('test-files/model_color.sql', 'models/model_color.sql') - # initial run - self.run_dbt(['clean']) - results = self.run_dbt(["run"]) - self.assertEqual(len(results), 1) - - # copy a file with an env_var call without an env_var - self.copy_file('test-files/env_var_model.sql', 'models/env_var_model.sql') - with self.assertRaises(ParsingException): - results = self.run_dbt(["--partial-parse", "run"]) - - # set the env var - os.environ['ENV_VAR_TEST'] = 'TestingEnvVars' - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - expected_env_vars = {"ENV_VAR_TEST": "TestingEnvVars"} - self.assertEqual(expected_env_vars, manifest.env_vars) - model_id = 'model.test.env_var_model' - model = manifest.nodes[model_id] - model_created_at = model.created_at - - # change the env var - os.environ['ENV_VAR_TEST'] = 'second' - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - expected_env_vars = {"ENV_VAR_TEST": "second"} - self.assertEqual(expected_env_vars, manifest.env_vars) - self.assertNotEqual(model_created_at, manifest.nodes[model_id].created_at) - - # set an env_var in a schema file - self.copy_file('test-files/env_var_schema.yml', 'models/schema.yml') - self.copy_file('test-files/env_var_model_one.sql', 'models/model_one.sql') - with self.assertRaises(ParsingException): - results = self.run_dbt(["--partial-parse", "run"]) - - # actually set the env_var - os.environ['TEST_SCHEMA_VAR'] = 'view' - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view"} - self.assertEqual(expected_env_vars, manifest.env_vars) - - # env vars in a source - os.environ['ENV_VAR_DATABASE'] = 'dbt' - os.environ['ENV_VAR_SEVERITY'] = 'warn' - self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv') - self.copy_file('test-files/env_var-sources.yml', 'models/sources.yml') - self.run_dbt(['--partial-parse', 'seed']) - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - manifest = get_manifest() - expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "dbt", "ENV_VAR_SEVERITY": "warn"} - self.assertEqual(expected_env_vars, manifest.env_vars) - self.assertEqual(len(manifest.sources), 1) - source_id = 'source.test.seed_sources.raw_customers' - source = manifest.sources[source_id] - 
self.assertEqual(source.database, 'dbt') - schema_file = manifest.files[source.file_id] - test_id = 'test.test.source_not_null_seed_sources_raw_customers_id.e39ee7bf0d' - test_node = manifest.nodes[test_id] - self.assertEqual(test_node.config.severity, 'WARN') - - # Change severity env var - os.environ['ENV_VAR_SEVERITY'] = 'error' - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "dbt", "ENV_VAR_SEVERITY": "error"} - self.assertEqual(expected_env_vars, manifest.env_vars) - source_id = 'source.test.seed_sources.raw_customers' - source = manifest.sources[source_id] - schema_file = manifest.files[source.file_id] - expected_schema_file_env_vars = {'sources': {'seed_sources': ['ENV_VAR_DATABASE', 'ENV_VAR_SEVERITY']}} - self.assertEqual(expected_schema_file_env_vars, schema_file.env_vars) - test_node = manifest.nodes[test_id] - self.assertEqual(test_node.config.severity, 'ERROR') - - # Change database env var - os.environ['ENV_VAR_DATABASE'] = 'test_dbt' - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "test_dbt", "ENV_VAR_SEVERITY": "error"} - self.assertEqual(expected_env_vars, manifest.env_vars) - source = manifest.sources[source_id] - self.assertEqual(source.database, 'test_dbt') - - # Delete database env var - del os.environ['ENV_VAR_DATABASE'] - with self.assertRaises(ParsingException): - results = self.run_dbt(["--partial-parse", "run"]) - os.environ['ENV_VAR_DATABASE'] = 'test_dbt' - - # Add generic test with test kwarg that's rendered late (no curly brackets) - os.environ['ENV_VAR_DATABASE'] = 'dbt' - self.copy_file('test-files/test_color.sql', 'macros/test_color.sql') - results = self.run_dbt(["--partial-parse", "run"]) - # Add source test using test_color and an env_var for color - self.copy_file('test-files/env_var_schema2.yml', 'models/schema.yml') - with self.assertRaises(ParsingException): - results = self.run_dbt(["--partial-parse", "run"]) - os.environ['ENV_VAR_COLOR'] = 'green' - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - test_color_id = 'test.test.check_color_model_one_env_var_ENV_VAR_COLOR___fun.89638de387' - test_node = manifest.nodes[test_color_id] - # kwarg was rendered but not changed (it will be rendered again when compiled) - self.assertEqual(test_node.test_metadata.kwargs['color'], "env_var('ENV_VAR_COLOR')") - results = self.run_dbt(["--partial-parse", "test"]) - - # Add an exposure with an env_var - os.environ['ENV_VAR_OWNER'] = "John Doe" - self.copy_file('test-files/env_var_schema3.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - expected_env_vars = { - "ENV_VAR_TEST": "second", - "TEST_SCHEMA_VAR": "view", - "ENV_VAR_DATABASE": "dbt", - "ENV_VAR_SEVERITY": "error", - "ENV_VAR_COLOR": 'green', - "ENV_VAR_OWNER": "John Doe", - } - self.assertEqual(expected_env_vars, manifest.env_vars) - exposure = list(manifest.exposures.values())[0] - schema_file = manifest.files[exposure.file_id] - expected_sf_env_vars = { - 'models': { - 'model_one': ['TEST_SCHEMA_VAR', 'ENV_VAR_COLOR'] - }, - 'exposures': { - 'proxy_for_dashboard': ['ENV_VAR_OWNER'] - } - } - self.assertEqual(expected_sf_env_vars, schema_file.env_vars) - - # add a macro and a macro schema file - os.environ['ENV_VAR_SOME_KEY'] = 'toodles' - 
self.copy_file('test-files/env_var_macro.sql', 'macros/env_var_macro.sql') - self.copy_file('test-files/env_var_macros.yml', 'macros/env_var_macros.yml') - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - expected_env_vars = { - "ENV_VAR_TEST": "second", - "TEST_SCHEMA_VAR": "view", - "ENV_VAR_DATABASE": "dbt", - "ENV_VAR_SEVERITY": "error", - "ENV_VAR_COLOR": 'green', - "ENV_VAR_OWNER": "John Doe", - "ENV_VAR_SOME_KEY": "toodles", - } - self.assertEqual(expected_env_vars, manifest.env_vars) - macro_id = 'macro.test.do_something' - macro = manifest.macros[macro_id] - self.assertEqual(macro.meta, {"some_key": "toodles"}) - # change the env var - os.environ['ENV_VAR_SOME_KEY'] = 'dumdedum' - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - macro = manifest.macros[macro_id] - self.assertEqual(macro.meta, {"some_key": "dumdedum"}) - - # Add a schema file with a test on model_color and env_var in test enabled config - self.copy_file('test-files/env_var_model_test.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - manifest = get_manifest() - model_color = manifest.nodes['model.test.model_color'] - schema_file = manifest.files[model_color.patch_path] - expected_env_vars = {'models': {'model_one': ['TEST_SCHEMA_VAR', 'ENV_VAR_COLOR'], 'model_color': ['ENV_VAR_ENABLED']}, 'exposures': {'proxy_for_dashboard': ['ENV_VAR_OWNER']}} - self.assertEqual(expected_env_vars, schema_file.env_vars) - - # Add a metrics file with env_vars - os.environ['ENV_VAR_METRICS'] = 'TeStInG' - self.copy_file('test-files/people.sql', 'models/people.sql') - self.copy_file('test-files/env_var_metrics.yml', 'models/metrics.yml') - results = self.run_dbt(["run"]) - manifest = get_manifest() - self.assertIn('ENV_VAR_METRICS', manifest.env_vars) - self.assertEqual(manifest.env_vars['ENV_VAR_METRICS'], 'TeStInG') - metric_node = manifest.metrics['metric.test.number_of_people'] - self.assertEqual(metric_node.meta, {'my_meta': 'TeStInG'}) - - # Change metrics env var - os.environ['ENV_VAR_METRICS'] = 'Changed!' - results = self.run_dbt(["run"]) - manifest = get_manifest() - metric_node = manifest.metrics['metric.test.number_of_people'] - self.assertEqual(metric_node.meta, {'my_meta': 'Changed!'}) - - # delete the env vars to cleanup - del os.environ['ENV_VAR_TEST'] - del os.environ['ENV_VAR_SEVERITY'] - del os.environ['ENV_VAR_DATABASE'] - del os.environ['TEST_SCHEMA_VAR'] - del os.environ['ENV_VAR_COLOR'] - del os.environ['ENV_VAR_SOME_KEY'] - del os.environ['ENV_VAR_OWNER'] - del os.environ['ENV_VAR_METRICS'] - - -class ProjectEnvVarTest(BasePPTest): - - @property - def project_config(self): - # Need to set the environment variable here initially because - # the unittest setup does a load_config. 
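That early assignment matters because the rendered project config feeds state_check.project_env_vars_hash, and the test below asserts the checksum moves when ENV_VAR_NAME changes. One plausible shape for such a checksum, sketched with hashlib and json (dbt's actual hashing may differ):

import hashlib
import json


def env_vars_hash(env_vars: dict) -> str:
    # deterministic digest over the env vars the project config consumed
    payload = json.dumps(env_vars, sort_keys=True)
    return hashlib.md5(payload.encode("utf-8")).hexdigest()


before = env_vars_hash({"ENV_VAR_NAME": "Jane Smith"})
after = env_vars_hash({"ENV_VAR_NAME": "Jane Doe"})
assert before != after  # a changed value invalidates the stored parsing state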
-        os.environ['ENV_VAR_NAME'] = "Jane Smith"
-        return {
-            'config-version': 2,
-            'seed-paths': ['seeds'],
-            'test-paths': ['tests'],
-            'macro-paths': ['macros'],
-            'seeds': {
-                'quote_columns': False,
-            },
-            'models': {
-                '+meta': {
-                    'meta_name': "{{ env_var('ENV_VAR_NAME') }}"
-                }
-            }
-        }
-
-    @use_profile('postgres')
-    def test_postgres_project_env_vars(self):
-
-        # Initial run
-        self.setup_directories()
-        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
-        self.run_dbt(['clean'])
-        results = self.run_dbt(["run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        state_check = manifest.state_check
-        model_id = 'model.test.model_one'
-        model = manifest.nodes[model_id]
-        self.assertEqual(model.config.meta['meta_name'], 'Jane Smith')
-        env_vars_hash_checksum = state_check.project_env_vars_hash.checksum
-
-        # Change the environment variable
-        os.environ['ENV_VAR_NAME'] = "Jane Doe"
-        results = self.run_dbt(["run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        model = manifest.nodes[model_id]
-        self.assertEqual(model.config.meta['meta_name'], 'Jane Doe')
-        self.assertNotEqual(env_vars_hash_checksum, manifest.state_check.project_env_vars_hash.checksum)
-
-        # cleanup
-        del os.environ['ENV_VAR_NAME']
-
-
-class ProfileEnvVarTest(BasePPTest):
-
-    @property
-    def profile_config(self):
-        # Need to set these here because the base integration test class
-        # calls 'load_config' before the tests are run.
-        # Note: only the specified profile is rendered, so there's no
-        # point in setting env_vars in unused profiles.
-        os.environ['ENV_VAR_USER'] = 'root'
-        os.environ['ENV_VAR_PASS'] = 'password'
-        return {
-            'config': {
-                'send_anonymous_usage_stats': False
-            },
-            'test': {
-                'outputs': {
-                    'dev': {
-                        'type': 'postgres',
-                        'threads': 1,
-                        'host': self.database_host,
-                        'port': 5432,
-                        'user': "root",
-                        'pass': "password",
-                        'user': "{{ env_var('ENV_VAR_USER') }}",
-                        'pass': "{{ env_var('ENV_VAR_PASS') }}",
-                        'dbname': 'dbt',
-                        'schema': self.unique_schema()
-                    },
-                },
-                'target': 'dev'
-            }
-        }
-
-    @use_profile('postgres')
-    def test_postgres_profile_env_vars(self):
-
-        # Initial run
-        os.environ['ENV_VAR_USER'] = 'root'
-        os.environ['ENV_VAR_PASS'] = 'password'
-        self.setup_directories()
-        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
-        results = self.run_dbt(["run"])
-        manifest = get_manifest()
-        env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum
-
-        # Change env_vars; the user doesn't exist, so this should fail
-        os.environ['ENV_VAR_USER'] = 'fake_user'
-        (results, log_output) = self.run_dbt_and_capture(["run"], expect_pass=False)
-        self.assertTrue('env vars used in profiles.yml have changed' in log_output)
-        manifest = get_manifest()
-        self.assertNotEqual(env_vars_checksum, manifest.state_check.profile_env_vars_hash.checksum)
-
-
-class ProfileSecretEnvVarTest(BasePPTest):
-
-    @property
-    def profile_config(self):
-        # Need to set these here because the base integration test class
-        # calls 'load_config' before the tests are run.
-        # Note: only the specified profile is rendered, so there's no
-        # point in setting env_vars in unused profiles.
-
-        # user is secret and password is not. postgres on macos doesn't care if the password
-        # changes so we have to change the user.
related: https://github.com/dbt-labs/dbt-core/pull/4250 - os.environ[SECRET_ENV_PREFIX + 'USER'] = 'root' - os.environ['ENV_VAR_PASS'] = 'password' - return { - 'config': { - 'send_anonymous_usage_stats': False - }, - 'test': { - 'outputs': { - 'dev': { - 'type': 'postgres', - 'threads': 1, - 'host': self.database_host, - 'port': 5432, - 'user': "root", - 'pass': "password", - 'user': "{{ env_var('DBT_ENV_SECRET_USER') }}", - 'pass': "{{ env_var('ENV_VAR_PASS') }}", - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - }, - 'target': 'dev' - } - } - - @use_profile('postgres') - def test_postgres_profile_secret_env_vars(self): - - # Initial run - os.environ[SECRET_ENV_PREFIX + 'USER'] = 'root' - os.environ['ENV_VAR_PASS'] = 'password' - self.setup_directories() - self.copy_file('test-files/model_one.sql', 'models/model_one.sql') - results = self.run_dbt(["run"]) - manifest = get_manifest() - env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum - - # Change a secret var, it shouldn't register because we shouldn't save secrets. - os.environ[SECRET_ENV_PREFIX + 'USER'] = 'boop' - # this dbt run is going to fail because the password isn't actually the right one, - # but that doesn't matter because we just want to see if the manifest has included - # the secret in the hash of environment variables. - (results, log_output) = self.run_dbt_and_capture(["run"], expect_pass=False) - # I020 is the event code for "env vars used in profiles.yml have changed" - self.assertFalse('I020' in log_output) - manifest = get_manifest() - self.assertEqual(env_vars_checksum, manifest.state_check.profile_env_vars_hash.checksum) - diff --git a/test/unit/test_adapter_connection_manager.py b/test/unit/test_adapter_connection_manager.py index 47db6b67ab0..b270f6a5d19 100644 --- a/test/unit/test_adapter_connection_manager.py +++ b/test/unit/test_adapter_connection_manager.py @@ -64,7 +64,7 @@ def test_retry_connection_fails_unhandled(self): * The Connection state should be "fail" and the handle None. * The resulting attempt count should be 1 as we are not explicitly configured to handle a ValueError. - * retry_connection should raise a FailedToConnectException with the Exception message. + * retry_connection should raise a FailedToConnectError with the Exception message. """ conn = self.postgres_connection attempts = 0 @@ -75,7 +75,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "Something went horribly wrong" + dbt.exceptions.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( @@ -99,7 +99,7 @@ def test_retry_connection_fails_handled(self): As a result: * The Connection state should be "fail" and the handle None. * The resulting attempt count should be 2 as we are configured to handle a ValueError. - * retry_connection should raise a FailedToConnectException with the Exception message. + * retry_connection should raise a FailedToConnectError with the Exception message. 
""" conn = self.postgres_connection attempts = 0 @@ -110,7 +110,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "Something went horribly wrong" + dbt.exceptions.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( @@ -173,7 +173,7 @@ def test_retry_connection_attempts(self): * The Connection state should be "fail" and the handle None, as connect never returns. * The resulting attempt count should be 11 as we are configured to handle a ValueError. - * retry_connection should raise a FailedToConnectException with the Exception message. + * retry_connection should raise a FailedToConnectError with the Exception message. """ conn = self.postgres_connection attempts = 0 @@ -185,7 +185,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "Something went horribly wrong" + dbt.exceptions.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( conn, @@ -208,7 +208,7 @@ def test_retry_connection_fails_handling_all_exceptions(self): * The Connection state should be "fail" and the handle None, as connect never returns. * The resulting attempt count should be 11 as we are configured to handle all Exceptions. - * retry_connection should raise a FailedToConnectException with the Exception message. + * retry_connection should raise a FailedToConnectError with the Exception message. """ conn = self.postgres_connection attempts = 0 @@ -220,7 +220,7 @@ def connect(): raise TypeError("An unhandled thing went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "An unhandled thing went horribly wrong" + dbt.exceptions.FailedToConnectError, "An unhandled thing went horribly wrong" ): BaseConnectionManager.retry_connection( conn, @@ -338,7 +338,7 @@ def connect(): return True with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "retry_limit cannot be negative" + dbt.exceptions.FailedToConnectError, "retry_limit cannot be negative" ): BaseConnectionManager.retry_connection( conn, @@ -365,7 +365,7 @@ def connect(): for retry_timeout in [-10, -2.5, lambda _: -100, lambda _: -10.1]: with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, + dbt.exceptions.FailedToConnectError, "retry_timeout cannot be negative or return a negative time", ): BaseConnectionManager.retry_connection( @@ -392,7 +392,7 @@ def connect(): return True with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, + dbt.exceptions.FailedToConnectError, "retry_limit cannot be negative", ): BaseConnectionManager.retry_connection( diff --git a/test/unit/test_cache.py b/test/unit/test_cache.py index f69b4783ee1..3f9c6e4f6bf 100644 --- a/test/unit/test_cache.py +++ b/test/unit/test_cache.py @@ -121,7 +121,7 @@ def test_dest_exists_error(self): self.cache.add(bar) self.assert_relations_exist('DBT', 'schema', 'foo', 'bar') - with self.assertRaises(dbt.exceptions.InternalException): + with self.assertRaises(dbt.exceptions.DbtInternalError): self.cache.rename(foo, bar) self.assert_relations_exist('DBT', 'schema', 'foo', 'bar') diff --git a/test/unit/test_config.py b/test/unit/test_config.py index 2f4c7b45ca1..4c1707d28b9 100644 --- a/test/unit/test_config.py +++ b/test/unit/test_config.py @@ -928,7 +928,7 @@ def test_run_operation_task(self): def test_run_operation_task_with_bad_path(self): 
self.args.project_dir = 'bad_path' - with self.assertRaises(dbt.exceptions.RuntimeException): + with self.assertRaises(dbt.exceptions.DbtRuntimeError): new_task = RunOperationTask.from_args(self.args) @@ -1150,8 +1150,8 @@ def test__warn_for_unused_resource_config_paths(self): project.warn_for_unused_resource_config_paths(self.used, []) warn_or_error_patch.assert_called_once() event = warn_or_error_patch.call_args[0][0] - assert event.info.name == 'UnusedResourceConfigPath' - msg = event.info.msg + assert type(event).__name__ == 'UnusedResourceConfigPath' + msg = event.message() expected_msg = "- models.my_test_project.baz" assert expected_msg in msg diff --git a/test/unit/test_context.py b/test/unit/test_context.py index a567e032f55..34c8562402f 100644 --- a/test/unit/test_context.py +++ b/test/unit/test_context.py @@ -89,7 +89,7 @@ def test_var_not_defined(self): var = providers.RuntimeVar(self.context, self.config, self.model) self.assertEqual(var("foo", "bar"), "bar") - with self.assertRaises(dbt.exceptions.CompilationException): + with self.assertRaises(dbt.exceptions.CompilationError): var("foo") def test_parser_var_default_something(self): @@ -464,7 +464,7 @@ def test_macro_namespace_duplicates(config_postgres, manifest_fx): mn.add_macros(manifest_fx.macros.values(), {}) # same pkg, same name: error - with pytest.raises(dbt.exceptions.CompilationException): + with pytest.raises(dbt.exceptions.CompilationError): mn.add_macro(mock_macro("macro_a", "root"), {}) # different pkg, same name: no error diff --git a/test/unit/test_core_dbt_utils.py b/test/unit/test_core_dbt_utils.py index 1deb8a77552..546e4f6ca00 100644 --- a/test/unit/test_core_dbt_utils.py +++ b/test/unit/test_core_dbt_utils.py @@ -2,7 +2,7 @@ import tarfile import unittest -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionError from dbt.utils import _connection_exception_retry as connection_exception_retry @@ -19,7 +19,7 @@ def test_connection_exception_retry_success_requests_exception(self): def test_connection_exception_retry_max(self): Counter._reset() - with self.assertRaises(ConnectionException): + with self.assertRaises(ConnectionError): connection_exception_retry(lambda: Counter._add_with_exception(), 5) self.assertEqual(6, counter) # 6 = original attempt plus 5 retries diff --git a/test/unit/test_deps.py b/test/unit/test_deps.py index 650722ef6f4..27c6f66e015 100644 --- a/test/unit/test_deps.py +++ b/test/unit/test_deps.py @@ -133,7 +133,7 @@ def test_resolve_fail(self): self.assertEqual(c.git, 'http://example.com') self.assertEqual(c.revisions, ['0.0.1', '0.0.2']) - with self.assertRaises(dbt.exceptions.DependencyException): + with self.assertRaises(dbt.exceptions.DependencyError): c.resolved() def test_default_revision(self): @@ -264,7 +264,7 @@ def test_resolve_missing_package(self): package='dbt-labs-test/b', version='0.1.2' )) - with self.assertRaises(dbt.exceptions.DependencyException) as exc: + with self.assertRaises(dbt.exceptions.DependencyError) as exc: a.resolved() msg = 'Package dbt-labs-test/b was not found in the package index' @@ -276,7 +276,7 @@ def test_resolve_missing_version(self): version='0.1.4' )) - with self.assertRaises(dbt.exceptions.DependencyException) as exc: + with self.assertRaises(dbt.exceptions.DependencyError) as exc: a.resolved() msg = ( "Could not find a matching compatible version for package " @@ -298,7 +298,7 @@ def test_resolve_conflict(self): b = RegistryUnpinnedPackage.from_contract(b_contract) c = a.incorporate(b) - with 
self.assertRaises(dbt.exceptions.DependencyException) as exc: + with self.assertRaises(dbt.exceptions.DependencyError) as exc: c.resolved() msg = ( "Version error for package dbt-labs-test/a: Could not " diff --git a/test/unit/test_exceptions.py b/test/unit/test_exceptions.py index 6a47255e13c..e66e913b1a6 100644 --- a/test/unit/test_exceptions.py +++ b/test/unit/test_exceptions.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import raise_duplicate_macro_name, CompilationException +from dbt.exceptions import raise_duplicate_macro_name, CompilationError from .utils import MockMacro @@ -8,7 +8,7 @@ def test_raise_duplicate_macros_different_package(): macro_1 = MockMacro(package='dbt', name='some_macro') macro_2 = MockMacro(package='dbt-myadapter', name='some_macro') - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: raise_duplicate_macro_name( node_1=macro_1, node_2=macro_2, @@ -24,7 +24,7 @@ def test_raise_duplicate_macros_same_package(): macro_1 = MockMacro(package='dbt', name='some_macro') macro_2 = MockMacro(package='dbt', name='some_macro') - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: raise_duplicate_macro_name( node_1=macro_1, node_2=macro_2, diff --git a/test/unit/test_flags.py b/test/unit/test_flags.py index 4be866338a2..6f03ec22e92 100644 --- a/test/unit/test_flags.py +++ b/test/unit/test_flags.py @@ -1,8 +1,8 @@ import os -from unittest import mock, TestCase +from unittest import TestCase from argparse import Namespace +import pytest -from .utils import normalize from dbt import flags from dbt.contracts.project import UserConfig from dbt.graph.selector_spec import IndirectSelection @@ -63,6 +63,21 @@ def test__flags(self): flags.WARN_ERROR = False self.user_config.warn_error = None + # warn_error_options + self.user_config.warn_error_options = '{"include": "all"}' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": "all"}') + os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": []}') + setattr(self.args, 'warn_error_options', '{"include": "all"}') + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": "all"}') + # cleanup + os.environ.pop('DBT_WARN_ERROR_OPTIONS') + delattr(self.args, 'warn_error_options') + self.user_config.warn_error_options = None + # write_json self.user_config.write_json = True flags.set_from_args(self.args, self.user_config) @@ -206,6 +221,9 @@ def test__flags(self): self.user_config.indirect_selection = 'cautious' flags.set_from_args(self.args, self.user_config) self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Cautious) + self.user_config.indirect_selection = 'buildable' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Buildable) self.user_config.indirect_selection = None flags.set_from_args(self.args, self.user_config) self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Eager) @@ -261,3 +279,59 @@ def test__flags(self): # cleanup os.environ.pop('DBT_LOG_PATH') delattr(self.args, 'log_path') + + def test__flags_are_mutually_exclusive(self): + # options from user config + self.user_config.warn_error = False + self.user_config.warn_error_options = '{"include":"all}' + with pytest.raises(ValueError): + flags.set_from_args(self.args, self.user_config) + 
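set_from_args refuses the warn_error/warn_error_options combination regardless of where each value originates, and the remaining cases below repeat the probe for args, environment, and mixed sources. The rule itself, sketched independently of dbt's flags module (resolve and check_mutually_exclusive are illustrative helpers, not dbt APIs):

import os
from argparse import Namespace


def resolve(name, args, user_config, env_name):
    # precedence: CLI args, then environment, then user config
    if getattr(args, name, None) is not None:
        return getattr(args, name)
    if env_name in os.environ:
        return os.environ[env_name]
    return getattr(user_config, name, None)


def check_mutually_exclusive(args, user_config):
    warn_error = resolve("warn_error", args, user_config, "DBT_WARN_ERROR")
    options = resolve("warn_error_options", args, user_config, "DBT_WARN_ERROR_OPTIONS")
    if warn_error is not None and options is not None:
        raise ValueError("warn_error and warn_error_options are mutually exclusive")


class UserConfig:
    warn_error = False  # even False counts as "set", as in the test above
    warn_error_options = '{"include": []}'


try:
    check_mutually_exclusive(Namespace(), UserConfig())
except ValueError as err:
    print(err)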
#cleanup + self.user_config.warn_error = None + self.user_config.warn_error_options = None + + # options from args + setattr(self.args, 'warn_error', False) + setattr(self.args, 'warn_error_options', '{"include":"all}') + with pytest.raises(ValueError): + flags.set_from_args(self.args, self.user_config) + # cleanup + delattr(self.args, 'warn_error') + delattr(self.args, 'warn_error_options') + + # options from environment + os.environ['DBT_WARN_ERROR'] = 'false' + os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}' + with pytest.raises(ValueError): + flags.set_from_args(self.args, self.user_config) + #cleanup + os.environ.pop('DBT_WARN_ERROR') + os.environ.pop('DBT_WARN_ERROR_OPTIONS') + + # options from user config + args + self.user_config.warn_error = False + setattr(self.args, 'warn_error_options', '{"include":"all}') + with pytest.raises(ValueError): + flags.set_from_args(self.args, self.user_config) + # cleanup + self.user_config.warn_error = None + delattr(self.args, 'warn_error_options') + + # options from user config + environ + self.user_config.warn_error = False + os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}' + with pytest.raises(ValueError): + flags.set_from_args(self.args, self.user_config) + # cleanup + self.user_config.warn_error = None + os.environ.pop('DBT_WARN_ERROR_OPTIONS') + + # options from args + environ + setattr(self.args, 'warn_error', False) + os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}' + with pytest.raises(ValueError): + flags.set_from_args(self.args, self.user_config) + # cleanup + delattr(self.args, 'warn_error') + os.environ.pop('DBT_WARN_ERROR_OPTIONS') + diff --git a/test/unit/test_graph_selection.py b/test/unit/test_graph_selection.py index a0da5b490e9..4c40c1dff82 100644 --- a/test/unit/test_graph_selection.py +++ b/test/unit/test_graph_selection.py @@ -200,5 +200,5 @@ def test_parse_specs(spec, parents, parents_depth, children, children_depth, fil @pytest.mark.parametrize('invalid', invalid_specs, ids=lambda k: str(k)) def test_invalid_specs(invalid): - with pytest.raises(dbt.exceptions.RuntimeException): + with pytest.raises(dbt.exceptions.DbtRuntimeError): graph_selector.SelectionCriteria.from_single_spec(invalid) diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py index 0497d5da02a..769199e841f 100644 --- a/test/unit/test_graph_selector_methods.py +++ b/test/unit/test_graph_selector_methods.py @@ -898,11 +898,11 @@ def test_select_state_no_change(manifest, previous_state): def test_select_state_nothing(manifest, previous_state): previous_state.manifest = None method = statemethod(manifest, previous_state) - with pytest.raises(dbt.exceptions.RuntimeException) as exc: + with pytest.raises(dbt.exceptions.DbtRuntimeError) as exc: search_manifest_using_method(manifest, method, 'modified') assert 'no comparison manifest' in str(exc.value) - with pytest.raises(dbt.exceptions.RuntimeException) as exc: + with pytest.raises(dbt.exceptions.DbtRuntimeError) as exc: search_manifest_using_method(manifest, method, 'new') assert 'no comparison manifest' in str(exc.value) @@ -973,8 +973,8 @@ def test_select_state_changed_seed_checksum_path_to_path(manifest, previous_stat assert not search_manifest_using_method(manifest, method, 'modified') warn_or_error_patch.assert_called_once() event = warn_or_error_patch.call_args[0][0] - assert event.info.name == 'SeedExceedsLimitSamePath' - msg = event.info.msg + assert type(event).__name__ == 'SeedExceedsLimitSamePath' + msg = event.message() assert 
msg.startswith('Found a seed (pkg.seed) >1MB in size') with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') @@ -990,8 +990,8 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_called_once() event = warn_or_error_patch.call_args[0][0] - assert event.info.name == 'SeedIncreased' - msg = event.info.msg + assert type(event).__name__ == 'SeedIncreased' + msg = event.message() assert msg.startswith('Found a seed (pkg.seed) >1MB in size') with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') diff --git a/test/unit/test_graph_selector_spec.py b/test/unit/test_graph_selector_spec.py index 68c8611ccac..d72325affc2 100644 --- a/test/unit/test_graph_selector_spec.py +++ b/test/unit/test_graph_selector_spec.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError from dbt.graph.selector_spec import ( SelectionCriteria, SelectionIntersection, @@ -111,10 +111,10 @@ def test_raw_parse_weird(): def test_raw_parse_invalid(): - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): SelectionCriteria.from_single_spec('invalid_method:something') - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): SelectionCriteria.from_single_spec('@foo+') diff --git a/test/unit/test_jinja.py b/test/unit/test_jinja.py index 6b8c939de64..5213f8d7d8c 100644 --- a/test/unit/test_jinja.py +++ b/test/unit/test_jinja.py @@ -6,7 +6,7 @@ from dbt.clients.jinja import get_rendered from dbt.clients.jinja import get_template from dbt.clients.jinja import extract_toplevel_blocks -from dbt.exceptions import CompilationException, JinjaRenderingException +from dbt.exceptions import CompilationError, JinjaRenderingError @contextmanager @@ -55,12 +55,12 @@ def expected_id(arg): ( '''foo: "{{ 'bar' | as_bool }}"''', returns('bar'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ 'bar' | as_number }}"''', returns('bar'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ 'bar' | as_native }}"''', @@ -116,7 +116,7 @@ def expected_id(arg): ( '''foo: "{{ 1 | as_bool }}"''', returns('1'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ 1 | as_number }}"''', @@ -136,7 +136,7 @@ def expected_id(arg): ( '''foo: "{{ '1' | as_bool }}"''', returns('1'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ '1' | as_number }}"''', @@ -171,7 +171,7 @@ def expected_id(arg): ( '''foo: "{{ True | as_number }}"''', returns('True'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ True | as_native }}"''', @@ -197,7 +197,7 @@ def expected_id(arg): ( '''foo: "{{ true | as_number }}"''', returns("True"), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ true | as_native }}"''', @@ -254,7 +254,7 @@ def expected_id(arg): ( '''foo: "{{ True | as_number }}"''', returns("True"), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ True | as_native }}"''', @@ -552,24 +552,24 @@ def test_materialization_parse(self): def test_nested_not_ok(self): # we don't allow nesting same blocks body = '{% myblock a %} {% myblock b %} {% endmyblock %} {% endmyblock 
%}' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body, allowed_blocks={'myblock'}) def test_incomplete_block_failure(self): fullbody = '{% myblock foo %} {% endmyblock %}' for length in range(len('{% myblock foo %}'), len(fullbody)-1): body = fullbody[:length] - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body, allowed_blocks={'myblock'}) def test_wrong_end_failure(self): body = '{% myblock foo %} {% endotherblock %}' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body, allowed_blocks={'myblock', 'otherblock'}) def test_comment_no_end_failure(self): body = '{# ' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body) def test_comment_only(self): @@ -698,7 +698,7 @@ def test_unclosed_model_quotes(self): def test_if(self): # if you conditionally define your macros/models, don't body = '{% if true %}{% macro my_macro() %} adsf {% endmacro %}{% endif %}' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body) def test_if_innocuous(self): @@ -710,7 +710,7 @@ def test_if_innocuous(self): def test_for(self): # no for-loops over macros. body = '{% for x in range(10) %}{% macro my_macro() %} adsf {% endmacro %}{% endfor %}' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body) def test_for_innocuous(self): @@ -722,19 +722,19 @@ def test_for_innocuous(self): def test_endif(self): body = '{% snapshot foo %}select * from thing{% endsnapshot%}{% endif %}' - with self.assertRaises(CompilationException) as err: + with self.assertRaises(CompilationError) as err: extract_toplevel_blocks(body) self.assertIn('Got an unexpected control flow end tag, got endif but never saw a preceeding if (@ 1:53)', str(err.exception)) def test_if_endfor(self): body = '{% if x %}...{% endfor %}{% endif %}' - with self.assertRaises(CompilationException) as err: + with self.assertRaises(CompilationError) as err: extract_toplevel_blocks(body) self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 1:13)', str(err.exception)) def test_if_endfor_newlines(self): body = '{% if x %}\n ...\n {% endfor %}\n{% endif %}' - with self.assertRaises(CompilationException) as err: + with self.assertRaises(CompilationError) as err: extract_toplevel_blocks(body) self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 3:4)', str(err.exception)) diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 38e439a696f..0699253417b 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -18,7 +18,7 @@ ModelNode, Macro, DependsOn, SingularTestNode, SnapshotNode, AnalysisNode, UnpatchedSourceDefinition ) -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError from dbt.node_types import NodeType from dbt.parser import ( ModelParser, MacroParser, SingularTestParser, GenericTestParser, @@ -664,7 +664,7 @@ def test_basic(self): def test_sql_model_parse_error(self): block = self.file_block_for(sql_model_parse_error, 'nested/model_1.sql') - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): self.parser.parse_file(block) def 
test_python_model_parse(self): @@ -724,31 +724,31 @@ def test_python_model_config_with_defaults(self): def test_python_model_single_argument(self): block = self.file_block_for(python_model_single_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_no_argument(self): block = self.file_block_for(python_model_no_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_incorrect_argument_name(self): block = self.file_block_for(python_model_incorrect_argument_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_multiple_models(self): block = self.file_block_for(python_model_multiple_models, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_incorrect_function_name(self): block = self.file_block_for(python_model_incorrect_function_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_empty_file(self): @@ -759,13 +759,13 @@ def test_python_model_empty_file(self): def test_python_model_multiple_returns(self): block = self.file_block_for(python_model_multiple_returns, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_no_return(self): block = self.file_block_for(python_model_no_return, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_single_return(self): @@ -776,7 +776,7 @@ def test_python_model_single_return(self): def test_python_model_incorrect_ref(self): block = self.file_block_for(python_model_incorrect_ref, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_default_materialization(self): @@ -1027,7 +1027,7 @@ def file_block_for(self, data, filename): def test_parse_error(self): block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', 'nested/snap_1.sql') - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): self.parser.parse_file(block) def test_single_block(self): diff --git a/test/unit/test_postgres_adapter.py b/test/unit/test_postgres_adapter.py index 06a2ed7c497..0d56ff9ff63 100644 --- a/test/unit/test_postgres_adapter.py +++ b/test/unit/test_postgres_adapter.py @@ -12,7 +12,7 @@ from dbt.contracts.files import FileHash from dbt.contracts.graph.manifest import ManifestStateCheck from dbt.clients import agate_helper -from dbt.exceptions import ValidationException, DbtConfigError 
+from dbt.exceptions import DbtValidationError, DbtConfigError from psycopg2 import extensions as psycopg2_extensions from psycopg2 import DatabaseError @@ -58,8 +58,8 @@ def adapter(self): def test_acquire_connection_validations(self, psycopg2): try: connection = self.adapter.acquire_connection('dummy') - except ValidationException as e: - self.fail('got ValidationException: {}'.format(str(e))) + except DbtValidationError as e: + self.fail('got DbtValidationError: {}'.format(str(e))) except BaseException as e: self.fail('acquiring connection failed with unknown exception: {}' .format(str(e))) diff --git a/test/unit/test_registry_get_request_exception.py b/test/unit/test_registry_get_request_exception.py index 44033fe0546..3029971cad4 100644 --- a/test/unit/test_registry_get_request_exception.py +++ b/test/unit/test_registry_get_request_exception.py @@ -1,9 +1,9 @@ import unittest -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionError from dbt.clients.registry import _get_with_retries class testRegistryGetRequestException(unittest.TestCase): def test_registry_request_error_catching(self): # using non routable IP to test connection error logic in the _get_with_retries function - self.assertRaises(ConnectionException, _get_with_retries, '', 'http://0.0.0.0') + self.assertRaises(ConnectionError, _get_with_retries, '', 'http://0.0.0.0') diff --git a/test/unit/test_semver.py b/test/unit/test_semver.py index eff7603a2f6..b36c403e3a7 100644 --- a/test/unit/test_semver.py +++ b/test/unit/test_semver.py @@ -2,7 +2,7 @@ import itertools from typing import List -from dbt.exceptions import VersionsNotCompatibleException +from dbt.exceptions import VersionsNotCompatibleError from dbt.semver import VersionSpecifier, UnboundedVersionSpecifier, \ VersionRange, reduce_versions, versions_compatible, \ resolve_to_specific_version, filter_installable @@ -40,7 +40,7 @@ def assertVersionSetResult(self, inputs, output_range): def assertInvalidVersionSet(self, inputs): for permutation in itertools.permutations(inputs): - with self.assertRaises(VersionsNotCompatibleException): + with self.assertRaises(VersionsNotCompatibleError): reduce_versions(*permutation) def test__versions_compatible(self): diff --git a/tests/adapter/dbt/tests/adapter/__version__.py b/tests/adapter/dbt/tests/adapter/__version__.py index 27cfeecd9e8..219c289b1bf 100644 --- a/tests/adapter/dbt/tests/adapter/__version__.py +++ b/tests/adapter/dbt/tests/adapter/__version__.py @@ -1 +1 @@ -version = "1.4.0b1" +version = "1.5.0a1" diff --git a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py index a9f846e2ca4..d9ff6b5b28f 100644 --- a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py +++ b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py @@ -50,10 +50,7 @@ def models(self): @pytest.fixture(scope="class") def macros(self): - return { - "cast.sql": MACROS__CAST_SQL, - "expect_value.sql": MACROS__EXPECT_VALUE_SQL - } + return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} def test_alias_model_name(self, project): results = run_dbt(["run"]) @@ -71,10 +68,7 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return { - "cast.sql": MACROS__CAST_SQL, - "expect_value.sql": MACROS__EXPECT_VALUE_SQL - } + return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} @pytest.fixture(scope="class") def models(self): @@ -100,10 +94,7 @@ def 
project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return { - "cast.sql": MACROS__CAST_SQL, - "expect_value.sql": MACROS__EXPECT_VALUE_SQL - } + return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} @pytest.fixture(scope="class") def models(self): @@ -130,19 +121,14 @@ def project_config_update(self, unique_schema): "models": { "test": { "alias": "duped_alias", - "model_b": { - "schema": unique_schema + "_alt" - }, + "model_b": {"schema": unique_schema + "_alt"}, }, }, } @pytest.fixture(scope="class") def macros(self): - return { - "cast.sql": MACROS__CAST_SQL, - "expect_value.sql": MACROS__EXPECT_VALUE_SQL - } + return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} @pytest.fixture(scope="class") def models(self): diff --git a/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py b/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py new file mode 100644 index 00000000000..279152d6985 --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py @@ -0,0 +1,96 @@ +import pytest + +from dbt.tests.util import run_dbt, check_relations_equal + + +seeds__seed_csv = """id,first_name,last_name,email,gender,ip_address +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 +8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 +11,Raymond,Kelley,rkelleya@fc2.com,Male,213.65.166.67 +12,Gerald,Robinson,grobinsonb@disqus.com,Male,72.232.194.193 +13,Mildred,Martinez,mmartinezc@samsung.com,Female,198.29.112.5 +14,Dennis,Arnold,darnoldd@google.com,Male,86.96.3.250 +15,Judy,Gray,jgraye@opensource.org,Female,79.218.162.245 +16,Theresa,Garza,tgarzaf@epa.gov,Female,21.59.100.54 +17,Gerald,Robertson,grobertsong@csmonitor.com,Male,131.134.82.96 +18,Philip,Hernandez,phernandezh@adobe.com,Male,254.196.137.72 +19,Julia,Gonzalez,jgonzalezi@cam.ac.uk,Female,84.240.227.174 +20,Andrew,Davis,adavisj@patch.com,Male,9.255.67.25 +21,Kimberly,Harper,kharperk@foxnews.com,Female,198.208.120.253 +22,Mark,Martin,mmartinl@marketwatch.com,Male,233.138.182.153 +23,Cynthia,Ruiz,cruizm@google.fr,Female,18.178.187.201 +24,Samuel,Carroll,scarrolln@youtu.be,Male,128.113.96.122 +25,Jennifer,Larson,jlarsono@vinaora.com,Female,98.234.85.95 +26,Ashley,Perry,aperryp@rakuten.co.jp,Female,247.173.114.52 +27,Howard,Rodriguez,hrodriguezq@shutterfly.com,Male,231.188.95.26 +28,Amy,Brooks,abrooksr@theatlantic.com,Female,141.199.174.118 +29,Louise,Warren,lwarrens@adobe.com,Female,96.105.158.28 +30,Tina,Watson,twatsont@myspace.com,Female,251.142.118.177 +31,Janice,Kelley,jkelleyu@creativecommons.org,Female,239.167.34.233 +32,Terry,Mccoy,tmccoyv@bravesites.com,Male,117.201.183.203 +33,Jeffrey,Morgan,jmorganw@surveymonkey.com,Male,78.101.78.149 +34,Louis,Harvey,lharveyx@sina.com.cn,Male,51.50.0.167 +35,Philip,Miller,pmillery@samsung.com,Male,103.255.222.110 +36,Willie,Marshall,wmarshallz@ow.ly,Male,149.219.91.68 +37,Patrick,Lopez,plopez10@redcross.org,Male,250.136.229.89 +38,Adam,Jenkins,ajenkins11@harvard.edu,Male,7.36.112.81 
+39,Benjamin,Cruz,bcruz12@linkedin.com,Male,32.38.98.15 +40,Ruby,Hawkins,rhawkins13@gmpg.org,Female,135.171.129.255 +41,Carlos,Barnes,cbarnes14@a8.net,Male,240.197.85.140 +42,Ruby,Griffin,rgriffin15@bravesites.com,Female,19.29.135.24 +43,Sean,Mason,smason16@icq.com,Male,159.219.155.249 +44,Anthony,Payne,apayne17@utexas.edu,Male,235.168.199.218 +45,Steve,Cruz,scruz18@pcworld.com,Male,238.201.81.198 +46,Anthony,Garcia,agarcia19@flavors.me,Male,25.85.10.18 +47,Doris,Lopez,dlopez1a@sphinn.com,Female,245.218.51.238 +48,Susan,Nichols,snichols1b@freewebs.com,Female,199.99.9.61 +49,Wanda,Ferguson,wferguson1c@yahoo.co.jp,Female,236.241.135.21 +50,Andrea,Pierce,apierce1d@google.co.uk,Female,132.40.10.209 +""" + +model_sql = """ +{{ + config( + materialized = "table", + sort = 'first_name', + dist = 'first_name' + ) +}} + +select * from {{ this.schema }}.seed +""" + + +class BaseTableMaterialization: + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": seeds__seed_csv} + + @pytest.fixture(scope="class") + def models(self): + return {"materialized.sql": model_sql} + + def test_table_materialization_sort_dist_no_op(self, project): + # basic table materialization test, sort and dist is not supported by postgres so the result table would still be same as input + + # check seed + results = run_dbt(["seed"]) + assert len(results) == 1 + + # check run + results = run_dbt(["run"]) + assert len(results) == 1 + + check_relations_equal(project.adapter, ["seed", "materialized"]) + + +class TestTableMat(BaseTableMaterialization): + pass diff --git a/tests/adapter/dbt/tests/adapter/caching/test_caching.py b/tests/adapter/dbt/tests/adapter/caching/test_caching.py new file mode 100644 index 00000000000..9cf02309c4c --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/caching/test_caching.py @@ -0,0 +1,103 @@ +import pytest + +from dbt.tests.util import run_dbt + +model_sql = """ +{{ + config( + materialized='table' + ) +}} +select 1 as id +""" + +another_schema_model_sql = """ +{{ + config( + materialized='table', + schema='another_schema' + ) +}} +select 1 as id +""" + + +class BaseCachingTest: + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "quoting": { + "identifier": False, + "schema": False, + }, + } + + def run_and_inspect_cache(self, project, run_args=None): + run_dbt(run_args) + + # the cache was empty at the start of the run. + # the model materialization returned an unquoted relation and added to the cache. + adapter = project.adapter + assert len(adapter.cache.relations) == 1 + relation = list(adapter.cache.relations).pop() + assert relation.schema == project.test_schema + assert relation.schema == project.test_schema.lower() + + # on the second run, dbt will find a relation in the database during cache population. + # this relation will be quoted, because list_relations_without_caching (by default) uses + # quote_policy = {"database": True, "schema": True, "identifier": True} + # when adding relations to the cache. 
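That quoting asymmetry is why the comparison in the test code below strips both case and quotes before comparing the two cached relations. Sketched standalone with a namedtuple; dbt's Relation objects carry a quote_policy rather than literal quote characters:

from collections import namedtuple

Relation = namedtuple("Relation", ["database", "schema", "identifier"])


def same_relation(a: Relation, b: Relation) -> bool:
    # case-insensitive and quote-insensitive, mirroring the assertion loop
    return all(
        getattr(a, part).strip('"').lower() == getattr(b, part).strip('"').lower()
        for part in ("database", "schema", "identifier")
    )


assert same_relation(
    Relation("dbt", "test_schema", "model"),
    Relation('"DBT"', '"test_schema"', '"MODEL"'),
)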
+ run_dbt(run_args) + adapter = project.adapter + assert len(adapter.cache.relations) == 1 + second_relation = list(adapter.cache.relations).pop() + + # perform a case-insensitive + quote-insensitive comparison + for key in ["database", "schema", "identifier"]: + assert getattr(relation, key).lower() == getattr(second_relation, key).lower() + + def test_cache(self, project): + self.run_and_inspect_cache(project, run_args=["run"]) + + +class BaseCachingLowercaseModel(BaseCachingTest): + @pytest.fixture(scope="class") + def models(self): + return { + "model.sql": model_sql, + } + + +class BaseCachingUppercaseModel(BaseCachingTest): + @pytest.fixture(scope="class") + def models(self): + return { + "MODEL.sql": model_sql, + } + + +class BaseCachingSelectedSchemaOnly(BaseCachingTest): + @pytest.fixture(scope="class") + def models(self): + return { + "model.sql": model_sql, + "another_schema_model.sql": another_schema_model_sql, + } + + def test_cache(self, project): + # this should only cache the schema containing the selected model + run_args = ["--cache-selected-only", "run", "--select", "model"] + self.run_and_inspect_cache(project, run_args) + + +class TestCachingLowerCaseModel(BaseCachingLowercaseModel): + pass + + +class TestCachingUppercaseModel(BaseCachingUppercaseModel): + pass + + +class TestCachingSelectedSchemaOnly(BaseCachingSelectedSchemaOnly): + pass diff --git a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py index b7b0ff9ac17..8d3fd7751f2 100644 --- a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py +++ b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py @@ -21,7 +21,7 @@ def capsys(self, capsys): def assertGotValue(self, linepat, result): found = False output = self.capsys.readouterr().out - for line in output.split('\n'): + for line in output.split("\n"): if linepat.match(line): found = True assert result in line @@ -41,10 +41,7 @@ def check_project(self, splitout, msg="ERROR invalid"): class BaseDebugProfileVariable(BaseDebug): @pytest.fixture(scope="class") def project_config_update(self): - return { - "config-version": 2, - "profile": '{{ "te" ~ "st" }}' - } + return {"config-version": 2, "profile": '{{ "te" ~ "st" }}'} class TestDebugPostgres(BaseDebug): @@ -70,7 +67,6 @@ class TestDebugProfileVariablePostgres(BaseDebugProfileVariable): class TestDebugInvalidProjectPostgres(BaseDebug): - def test_empty_project(self, project): with open("dbt_project.yml", "w") as f: # noqa: F841 pass @@ -96,9 +92,7 @@ def test_not_found_project(self, project): def test_invalid_project_outside_current_dir(self, project): # create a dbt_project.yml - project_config = { - "invalid-key": "not a valid key in this project" - } + project_config = {"invalid-key": "not a valid key in this project"} os.makedirs("custom", exist_ok=True) with open("custom/dbt_project.yml", "w") as f: yaml.safe_dump(project_config, f, default_flow_style=True) diff --git a/tests/functional/incremental_schema_tests/fixtures.py b/tests/adapter/dbt/tests/adapter/incremental/fixtures.py similarity index 73% rename from tests/functional/incremental_schema_tests/fixtures.py rename to tests/adapter/dbt/tests/adapter/incremental/fixtures.py index c6eebc5e183..6e130266df2 100644 --- a/tests/functional/incremental_schema_tests/fixtures.py +++ b/tests/adapter/dbt/tests/adapter/incremental/fixtures.py @@ -1,61 +1,3 @@ - -# -# Properties -# -_PROPERTIES__SCHEMA = """ -version: 2 - -models: - - name: model_a - columns: - - name: id - tags: 
[column_level_tag] - tests: - - unique - - - name: incremental_ignore - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns_target - columns: - - name: id - tags: [column_leveL_tag] - tests: - - unique -""" - # # Models # @@ -71,7 +13,7 @@ WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} {% if is_incremental() %} @@ -124,7 +66,7 @@ ) -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} select id ,cast(field1 as {{string_type}}) as field1 @@ -185,7 +127,7 @@ WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} {% if is_incremental() %} @@ -216,7 +158,7 @@ ) }} -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) @@ -268,7 +210,7 @@ config(materialized='table') }} -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} with source_data as ( @@ -294,7 +236,7 @@ ) }} -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) @@ -328,7 +270,7 @@ ) -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} select id ,cast(field1 as {{string_type}}) as field1 @@ -345,7 +287,7 @@ config(materialized='table') }} -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} with source_data as ( @@ -361,35 +303,3 @@ from source_data """ - -# -# Tests -# - -_TESTS__SELECT_FROM_INCREMENTAL_IGNORE = """ -select * from {{ ref('incremental_ignore') }} where false -""" - -_TESTS__SELECT_FROM_A = """ -select * from {{ ref('model_a') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """ -select * from {{ ref('incremental_append_new_columns_target') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS = """ -select * from {{ ref('incremental_sync_all_columns') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """ -select * from {{ ref('incremental_sync_all_columns_target') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET = """ -select * from {{ ref('incremental_ignore_target') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS = """ -select * from {{ ref('incremental_append_new_columns') }} where false -""" diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py new file mode 100644 index 00000000000..4fbefbe7651 --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py @@ -0,0 +1,104 @@ +import pytest + +from dbt.tests.util import ( + check_relations_equal, + run_dbt, +) + +from dbt.tests.adapter.incremental.fixtures import ( + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, + 
_MODELS__INCREMENTAL_IGNORE, + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + _MODELS__INCREMENTAL_IGNORE_TARGET, + _MODELS__INCREMENTAL_FAIL, + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + _MODELS__A, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, +) + + +class BaseIncrementalOnSchemaChangeSetup: + @pytest.fixture(scope="class") + def models(self): + return { + "incremental_sync_remove_only.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, + "incremental_ignore.sql": _MODELS__INCREMENTAL_IGNORE, + "incremental_sync_remove_only_target.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + "incremental_ignore_target.sql": _MODELS__INCREMENTAL_IGNORE_TARGET, + "incremental_fail.sql": _MODELS__INCREMENTAL_FAIL, + "incremental_sync_all_columns.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, + "incremental_append_new_columns_remove_one.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + "model_a.sql": _MODELS__A, + "incremental_append_new_columns_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "incremental_append_new_columns.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, + "incremental_sync_all_columns_target.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "incremental_append_new_columns_remove_one_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, + } + + def run_twice_and_assert(self, include, compare_source, compare_target, project): + + # dbt run (twice) + run_args = ["run"] + if include: + run_args.extend(("--select", include)) + results_one = run_dbt(run_args) + assert len(results_one) == 3 + + results_two = run_dbt(run_args) + assert len(results_two) == 3 + + check_relations_equal(project.adapter, [compare_source, compare_target]) + + def run_incremental_append_new_columns(self, project): + select = "model_a incremental_append_new_columns incremental_append_new_columns_target" + compare_source = "incremental_append_new_columns" + compare_target = "incremental_append_new_columns_target" + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_append_new_columns_remove_one(self, project): + select = "model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target" + compare_source = "incremental_append_new_columns_remove_one" + compare_target = "incremental_append_new_columns_remove_one_target" + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_sync_all_columns(self, project): + select = "model_a incremental_sync_all_columns incremental_sync_all_columns_target" + compare_source = "incremental_sync_all_columns" + compare_target = "incremental_sync_all_columns_target" + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_sync_remove_only(self, project): + select = "model_a incremental_sync_remove_only incremental_sync_remove_only_target" + compare_source = "incremental_sync_remove_only" + compare_target = "incremental_sync_remove_only_target" + self.run_twice_and_assert(select, compare_source, compare_target, project) + + +class BaseIncrementalOnSchemaChange(BaseIncrementalOnSchemaChangeSetup): + def test_run_incremental_ignore(self, project): + select = "model_a incremental_ignore incremental_ignore_target" + compare_source = "incremental_ignore" + compare_target = 
"incremental_ignore_target" + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def test_run_incremental_append_new_columns(self, project): + self.run_incremental_append_new_columns(project) + self.run_incremental_append_new_columns_remove_one(project) + + def test_run_incremental_sync_all_columns(self, project): + self.run_incremental_sync_all_columns(project) + self.run_incremental_sync_remove_only(project) + + def test_run_incremental_fail_on_schema_change(self, project): + select = "model_a incremental_fail" + run_dbt(["run", "--models", select, "--full-refresh"]) + results_two = run_dbt(["run", "--models", select], expect_pass=False) + assert "Compilation Error" in results_two[1].message + + +class TestIncrementalOnSchemaChange(BaseIncrementalOnSchemaChange): + pass diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py index 11a4b6c0384..2060e9eb6d4 100644 --- a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py @@ -64,10 +64,8 @@ def seeds(self): def project_config_update(self): return { "models": { - "+incremental_predicates": [ - "id != 2" - ], - "+incremental_strategy": "delete+insert" + "+incremental_predicates": ["id != 2"], + "+incremental_strategy": "delete+insert", } } @@ -123,16 +121,21 @@ def get_expected_fields(self, relation, seed_rows, opt_model_count=None): inc_test_model_count=1, seed_rows=seed_rows, opt_model_count=opt_model_count, - relation=relation + relation=relation, ) # no unique_key test def test__incremental_predicates(self, project): """seed should match model after two incremental runs""" - expected_fields = self.get_expected_fields(relation="expected_delete_insert_incremental_predicates", seed_rows=4) + expected_fields = self.get_expected_fields( + relation="expected_delete_insert_incremental_predicates", seed_rows=4 + ) test_case_fields = self.get_test_fields( - project, seed="expected_delete_insert_incremental_predicates", incremental_model="delete_insert_incremental_predicates", update_sql_file=None + project, + seed="expected_delete_insert_incremental_predicates", + incremental_model="delete_insert_incremental_predicates", + update_sql_file=None, ) self.check_scenario_correctness(expected_fields, test_case_fields, project) @@ -144,11 +147,4 @@ class TestIncrementalPredicatesDeleteInsert(BaseIncrementalPredicates): class TestPredicatesDeleteInsert(BaseIncrementalPredicates): @pytest.fixture(scope="class") def project_config_update(self): - return { - "models": { - "+predicates": [ - "id != 2" - ], - "+incremental_strategy": "delete+insert" - } - } + return {"models": {"+predicates": ["id != 2"], "+incremental_strategy": "delete+insert"}} diff --git a/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py b/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py index b764568fe16..053fcc506c8 100644 --- a/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py +++ b/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py @@ -1,6 +1,6 @@ import pytest import json -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError from dbt.version import __version__ as dbt_version from dbt.tests.util import run_dbt_and_capture from dbt.tests.adapter.query_comment.fixtures import MACROS__MACRO_SQL, MODELS__X_SQL @@ -77,7 +77,7 @@ def 
project_config_update(self): return {"query-comment": "{{ invalid_query_header() }}"} def run_assert_comments(self): - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): self.run_get_json(expect_pass=False) diff --git a/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py b/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py index 38515bc0206..2eeb5aea64d 100644 --- a/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py +++ b/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py @@ -1,5 +1,3 @@ - - from typing import List, Optional import pytest @@ -20,12 +18,10 @@ class BaseChangeRelationTypeValidator: @pytest.fixture(scope="class") def models(self): - return { - "model_mc_modelface.sql": _DEFAULT_CHANGE_RELATION_TYPE_MODEL - } + return {"model_mc_modelface.sql": _DEFAULT_CHANGE_RELATION_TYPE_MODEL} def _run_and_check_materialization(self, materialization, extra_args: Optional[List] = None): - run_args = ["run", '--vars', f'materialized: {materialization}'] + run_args = ["run", "--vars", f"materialized: {materialization}"] if extra_args: run_args.extend(extra_args) results = run_dbt(run_args) @@ -33,11 +29,11 @@ def _run_and_check_materialization(self, materialization, extra_args: Optional[L assert len(results) == 1 def test_changing_materialization_changes_relation_type(self, project): - self._run_and_check_materialization('view') - self._run_and_check_materialization('table') - self._run_and_check_materialization('view') - self._run_and_check_materialization('incremental') - self._run_and_check_materialization('table', extra_args=['--full-refresh']) + self._run_and_check_materialization("view") + self._run_and_check_materialization("table") + self._run_and_check_materialization("view") + self._run_and_check_materialization("incremental") + self._run_and_check_materialization("table", extra_args=["--full-refresh"]) class TestChangeRelationTypes(BaseChangeRelationTypeValidator): diff --git a/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py b/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py index d7e7148b886..aeaaaa44193 100644 --- a/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py +++ b/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py @@ -1,15 +1,37 @@ # escape_single_quotes models__test_escape_single_quotes_quote_sql = """ -select '{{ escape_single_quotes("they're") }}' as actual, 'they''re' as expected union all -select '{{ escape_single_quotes("they are") }}' as actual, 'they are' as expected +select + '{{ escape_single_quotes("they're") }}' as actual, + 'they''re' as expected, + {{ length(string_literal(escape_single_quotes("they're"))) }} as actual_length, + 7 as expected_length + +union all + +select + '{{ escape_single_quotes("they are") }}' as actual, + 'they are' as expected, + {{ length(string_literal(escape_single_quotes("they are"))) }} as actual_length, + 8 as expected_length """ # The expected literal is 'they\'re'. The second backslash is to escape it from Python. 
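# Worked example (illustrative note, not in the original fixture): "they're" is seven
# characters, so the new actual_length column asserts that
# length(string_literal(escape_single_quotes("they're"))) returns 7 once the database
# collapses the escaped quote back down to a single character.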
models__test_escape_single_quotes_backslash_sql = """ -select '{{ escape_single_quotes("they're") }}' as actual, 'they\\'re' as expected union all -select '{{ escape_single_quotes("they are") }}' as actual, 'they are' as expected +select + '{{ escape_single_quotes("they're") }}' as actual, + 'they\\'re' as expected, + {{ length(string_literal(escape_single_quotes("they're"))) }} as actual_length, + 7 as expected_length + +union all + +select + '{{ escape_single_quotes("they are") }}' as actual, + 'they are' as expected, + {{ length(string_literal(escape_single_quotes("they are"))) }} as actual_length, + 8 as expected_length """ @@ -21,4 +43,7 @@ - assert_equal: actual: actual expected: expected + - assert_equal: + actual: actual_length + expected: expected_length """ diff --git a/tests/adapter/setup.py b/tests/adapter/setup.py index f9ac627e445..c4c1e393483 100644 --- a/tests/adapter/setup.py +++ b/tests/adapter/setup.py @@ -20,7 +20,7 @@ package_name = "dbt-tests-adapter" -package_version = "1.4.0b1" +package_version = "1.5.0a1" description = """The dbt adapter tests for adapter plugins""" this_directory = os.path.abspath(os.path.dirname(__file__)) diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 51a6b633e40..6e1e8e89af5 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -1061,9 +1061,7 @@ def expected_references_manifest(project): "unique_id": "seed.test.seed", "checksum": checksum_file(seed_path), "unrendered_config": get_unrendered_seed_config(), - "relation_name": '"{0}"."{1}".seed'.format( - project.database, my_schema_name - ), + "relation_name": '"{0}"."{1}".seed'.format(project.database, my_schema_name), }, "snapshot.test.snapshot_seed": { "alias": "snapshot_seed", @@ -1244,9 +1242,7 @@ def expected_references_manifest(project): "unique_id": "doc.test.table_info", }, "doc.test.view_summary": { - "block_contents": ( - "A view of the summary of the ephemeral copy of the seed data" - ), + "block_contents": ("A view of the summary of the ephemeral copy of the seed data"), "resource_type": "doc", "name": "view_summary", "original_file_path": docs_path, diff --git a/tests/functional/artifacts/test_override.py b/tests/functional/artifacts/test_override.py index 46a037bdcc5..a7b689a3670 100644 --- a/tests/functional/artifacts/test_override.py +++ b/tests/functional/artifacts/test_override.py @@ -1,6 +1,6 @@ import pytest from dbt.tests.util import run_dbt -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError model_sql = """ select 1 as id @@ -30,6 +30,6 @@ def test_override_used( results = run_dbt(["run"]) assert len(results) == 1 # this should pick up our failure macro and raise a compilation exception - with pytest.raises(CompilationException) as excinfo: + with pytest.raises(CompilationError) as excinfo: run_dbt(["--warn-error", "docs", "generate"]) assert "rejected: no catalogs for you" in str(excinfo.value) diff --git a/tests/functional/artifacts/test_previous_version_state.py b/tests/functional/artifacts/test_previous_version_state.py index a7a7ed5417c..84fd8bab360 100644 --- a/tests/functional/artifacts/test_previous_version_state.py +++ b/tests/functional/artifacts/test_previous_version_state.py @@ -2,7 +2,7 @@ import os import shutil from dbt.tests.util import run_dbt -from dbt.exceptions import IncompatibleSchemaException +from dbt.exceptions import IncompatibleSchemaError from dbt.contracts.graph.manifest 
import WritableManifest # This is a *very* simple project, with just one model in it. @@ -84,7 +84,7 @@ def compare_previous_state( results = run_dbt(cli_args, expect_pass=expect_pass) assert len(results) == 0 else: - with pytest.raises(IncompatibleSchemaException): + with pytest.raises(IncompatibleSchemaError): run_dbt(cli_args, expect_pass=expect_pass) def test_compare_state_current(self, project): diff --git a/tests/functional/basic/test_invalid_reference.py b/tests/functional/basic/test_invalid_reference.py index 8a516027940..1c54d1b906a 100644 --- a/tests/functional/basic/test_invalid_reference.py +++ b/tests/functional/basic/test_invalid_reference.py @@ -1,6 +1,6 @@ import pytest from dbt.tests.util import run_dbt -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError descendant_sql = """ @@ -24,5 +24,5 @@ def models(): def test_undefined_value(project): # Tests that a project with an invalid reference fails - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["compile"]) diff --git a/tests/functional/colors/test_colors.py b/tests/functional/colors/test_colors.py index 7e92e039506..f42591c2b6a 100644 --- a/tests/functional/colors/test_colors.py +++ b/tests/functional/colors/test_colors.py @@ -16,7 +16,7 @@ def models(): @pytest.fixture(scope="class") def project_config_update(): - return {'config-version': 2} + return {"config-version": 2} class TestColors: diff --git a/tests/functional/configs/test_configs.py b/tests/functional/configs/test_configs.py index 489b60fbbb1..086ef455f18 100644 --- a/tests/functional/configs/test_configs.py +++ b/tests/functional/configs/test_configs.py @@ -1,9 +1,8 @@ - from hologram import ValidationError import pytest import os -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.tests.util import run_dbt, update_config_file, write_file, check_relations_equal from tests.functional.configs.fixtures import BaseConfigProject, simple_snapshot @@ -94,7 +93,11 @@ def test_seeds_materialization_proj_config(self, project): class TestInvalidSeedsMaterializationSchema(object): def test_seeds_materialization_schema_config(self, project): seeds_dir = os.path.join(project.project_root, "seeds") - write_file("version: 2\nseeds:\n - name: myseed\n config:\n materialized: table", seeds_dir, "schema.yml") + write_file( + "version: 2\nseeds:\n - name: myseed\n config:\n materialized: table", + seeds_dir, + "schema.yml", + ) write_file("id1, id2\n1, 2", seeds_dir, "myseed.csv") with pytest.raises(ValidationError): @@ -109,14 +112,18 @@ def test_snapshots_materialization_proj_config(self, project): snapshots_dir = os.path.join(project.project_root, "snapshots") write_file(simple_snapshot, snapshots_dir, "mysnapshot.sql") - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt() class TestInvalidSnapshotsMaterializationSchema(object): def test_snapshots_materialization_schema_config(self, project): snapshots_dir = os.path.join(project.project_root, "snapshots") - write_file("version: 2\nsnapshots:\n - name: mysnapshot\n config:\n materialized: table", snapshots_dir, "schema.yml") + write_file( + "version: 2\nsnapshots:\n - name: mysnapshot\n config:\n materialized: table", + snapshots_dir, + "schema.yml", + ) write_file(simple_snapshot, snapshots_dir, "mysnapshot.sql") with pytest.raises(ValidationError): diff --git a/tests/functional/configs/test_configs_in_schema_files.py 
b/tests/functional/configs/test_configs_in_schema_files.py index 0d702615474..a04b9ed43aa 100644 --- a/tests/functional/configs/test_configs_in_schema_files.py +++ b/tests/functional/configs/test_configs_in_schema_files.py @@ -2,7 +2,7 @@ from dbt.tests.util import run_dbt, get_manifest, check_relations_equal, write_file -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError models_alt__schema_yml = """ version: 2 @@ -242,11 +242,11 @@ def test_config_layering( # copy a schema file with multiple metas # shutil.copyfile('extra-alt/untagged.yml', 'models-alt/untagged.yml') write_file(extra_alt__untagged_yml, project.project_root, "models", "untagged.yml") - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) # copy a schema file with config key in top-level of test and in config dict # shutil.copyfile('extra-alt/untagged2.yml', 'models-alt/untagged.yml') write_file(extra_alt__untagged2_yml, project.project_root, "models", "untagged.yml") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["run"]) diff --git a/tests/functional/configs/test_disabled_model.py b/tests/functional/configs/test_disabled_model.py index 5ca56512e14..4b6e74adffd 100644 --- a/tests/functional/configs/test_disabled_model.py +++ b/tests/functional/configs/test_disabled_model.py @@ -2,7 +2,7 @@ from hologram import ValidationError from dbt.tests.util import run_dbt, get_manifest -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError from tests.functional.configs.fixtures import ( schema_all_disabled_yml, @@ -47,7 +47,7 @@ def models(self): } def test_disabled_config(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "which is disabled" @@ -209,7 +209,7 @@ def models(self): } def test_disabled_config(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "Found 3 matching disabled nodes for model 'my_model_2'" diff --git a/tests/functional/configs/test_unused_configs.py b/tests/functional/configs/test_unused_configs.py index 7796472fea9..1bc887b03f1 100644 --- a/tests/functional/configs/test_unused_configs.py +++ b/tests/functional/configs/test_unused_configs.py @@ -1,7 +1,7 @@ import pytest from dbt.tests.util import run_dbt -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError seeds__seed_csv = """id,value 4,2 @@ -41,7 +41,7 @@ def test_warn_unused_configuration_paths( self, project, ): - with pytest.raises(CompilationException) as excinfo: + with pytest.raises(CompilationError) as excinfo: run_dbt(["--warn-error", "seed"]) assert "Configuration paths exist" in str(excinfo.value) diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py index 529087c851a..562118f946f 100644 --- a/tests/functional/context_methods/test_builtin_functions.py +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -3,7 +3,7 @@ import os from dbt.tests.util import run_dbt, run_dbt_and_capture, write_file -from dbt.exceptions import CompilationException +from dbt.exceptions import 
CompilationError macros__validate_set_sql = """ {% macro validate_set() %} @@ -112,7 +112,17 @@ def test_builtin_invocation_args_dict_function(self, project): expected = "invocation_result: {'debug': True, 'log_format': 'json', 'write_json': True, 'use_colors': True, 'printer_width': 80, 'version_check': True, 'partial_parse': True, 'static_parser': True, 'profiles_dir': " assert expected in str(result) - expected = ("'send_anonymous_usage_stats': False", "'quiet': False", "'no_print': False", "'cache_selected_only': False", "'macro': 'validate_invocation'", "'args': '{my_variable: test_variable}'", "'which': 'run-operation'", "'rpc_method': 'run-operation'", "'indirect_selection': 'eager'") + expected = ( + "'send_anonymous_usage_stats': False", + "'quiet': False", + "'no_print': False", + "'cache_selected_only': False", + "'macro': 'validate_invocation'", + "'args': '{my_variable: test_variable}'", + "'which': 'run-operation'", + "'rpc_method': 'run-operation'", + "'indirect_selection': 'eager'", + ) for element in expected: assert element in str(result) @@ -142,9 +152,9 @@ class TestContextBuiltinExceptions: # Assert compilation errors are raised with _strict equivalents def test_builtin_function_exception(self, project): write_file(models__set_exception_sql, project.project_root, "models", "raise.sql") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["compile"]) write_file(models__zip_exception_sql, project.project_root, "models", "raise.sql") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["compile"]) diff --git a/tests/functional/context_methods/test_cli_vars.py b/tests/functional/context_methods/test_cli_vars.py index 3e548b6f402..353d96d777b 100644 --- a/tests/functional/context_methods/test_cli_vars.py +++ b/tests/functional/context_methods/test_cli_vars.py @@ -5,7 +5,7 @@ from dbt.tests.util import run_dbt, get_artifact, write_config_file from dbt.tests.fixtures.project import write_project_files -from dbt.exceptions import RuntimeException, CompilationException +from dbt.exceptions import DbtRuntimeError, CompilationError models_complex__schema_yml = """ @@ -114,7 +114,7 @@ def test_cli_vars_in_profile(self, project, dbt_profile_data): profile = dbt_profile_data profile["test"]["outputs"]["default"]["host"] = "{{ var('db_host') }}" write_config_file(profile, project.profiles_dir, "profiles.yml") - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): results = run_dbt(["run"]) results = run_dbt(["run", "--vars", "db_host: localhost"]) assert len(results) == 1 @@ -148,7 +148,7 @@ def test_cli_vars_in_packages(self, project, packages_config): write_config_file(packages, project.project_root, "packages.yml") # Without vars args deps fails - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): run_dbt(["deps"]) # With vars arg deps succeeds @@ -200,7 +200,7 @@ def test_vars_in_selectors(self, project): # Update the selectors.yml file to have a var write_config_file(var_selectors_yml, project.project_root, "selectors.yml") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["run"]) # Var in cli_vars works diff --git a/tests/functional/context_methods/test_custom_env_vars.py b/tests/functional/context_methods/test_custom_env_vars.py index 413789c7676..e74a5dcee09 100644 --- a/tests/functional/context_methods/test_custom_env_vars.py +++ b/tests/functional/context_methods/test_custom_env_vars.py @@ -27,7 +27,9 @@ 
def setup(self): del os.environ["DBT_ENV_CUSTOM_ENV_SOME_VAR"] def test_extra_filled(self, project): - _, log_output = run_dbt_and_capture(['--log-format=json', 'deps'],) + _, log_output = run_dbt_and_capture( + ["--log-format=json", "deps"], + ) logs = parse_json_logs(log_output) for log in logs: - assert log['info'].get('extra') == {"SOME_VAR": "value"} + assert log["info"].get("extra") == {"SOME_VAR": "value"} diff --git a/tests/functional/context_methods/test_secret_env_vars.py b/tests/functional/context_methods/test_secret_env_vars.py index 9cd4c2eacac..710c104f551 100644 --- a/tests/functional/context_methods/test_secret_env_vars.py +++ b/tests/functional/context_methods/test_secret_env_vars.py @@ -2,7 +2,7 @@ import os from dbt.constants import SECRET_ENV_PREFIX -from dbt.exceptions import ParsingException, InternalException +from dbt.exceptions import ParsingError, DbtInternalError from tests.functional.context_methods.first_dependency import FirstDependencyProject from dbt.tests.util import run_dbt, run_dbt_and_capture @@ -30,7 +30,7 @@ def models(self): return {"context.sql": secret_bad__context_sql} def test_disallow_secret(self, project): - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["compile"]) @@ -130,7 +130,7 @@ def packages(self): } def test_fail_clone_with_scrubbing(self, project): - with pytest.raises(InternalException) as excinfo: + with pytest.raises(DbtInternalError) as excinfo: _, log_output = run_dbt_and_capture(["deps"]) assert "abc123" not in str(excinfo.value) @@ -149,7 +149,7 @@ def packages(self): } def test_fail_clone_with_scrubbing(self, project): - with pytest.raises(InternalException) as excinfo: + with pytest.raises(DbtInternalError) as excinfo: _, log_output = run_dbt_and_capture(["deps"]) # we should not see any manipulated form of the secret value (abc123) here diff --git a/tests/functional/context_methods/test_var_in_generate_name.py b/tests/functional/context_methods/test_var_in_generate_name.py index 5025cb8fede..2bbba457e58 100644 --- a/tests/functional/context_methods/test_var_in_generate_name.py +++ b/tests/functional/context_methods/test_var_in_generate_name.py @@ -1,7 +1,7 @@ import pytest from dbt.tests.util import run_dbt, update_config_file -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError model_sql = """ select 1 as id @@ -27,7 +27,7 @@ def models(self): def test_generate_schema_name_var(self, project): # var isn't set, so generate_name macro fails - with pytest.raises(CompilationException) as excinfo: + with pytest.raises(CompilationError) as excinfo: run_dbt(["compile"]) assert "Required var 'somevar' not found in config" in str(excinfo.value) diff --git a/tests/functional/custom_aliases/fixtures.py b/tests/functional/custom_aliases/fixtures.py new file mode 100644 index 00000000000..6324e1249e4 --- /dev/null +++ b/tests/functional/custom_aliases/fixtures.py @@ -0,0 +1,68 @@ +model1_sql = """ +{{ config(materialized='table', alias='alias') }} + +select {{ string_literal(this.name) }} as model_name +""" + +model2_sql = """ +{{ config(materialized='table') }} + +select {{ string_literal(this.name) }} as model_name +""" + +macros_sql = """ +{% macro generate_alias_name(custom_alias_name, node) -%} + {%- if custom_alias_name is none -%} + {{ node.name }} + {%- else -%} + custom_{{ custom_alias_name | trim }} + {%- endif -%} +{%- endmacro %} + + +{% macro string_literal(s) -%} + {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }} +{%- endmacro %} + 
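+{#-- Illustrative note (not in the original fixture): adapter.dispatch resolves to the
+     most specific implementation available for the target adapter (e.g. a hypothetical
+     postgres__string_literal) and falls back to default__string_literal below. #}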
+{% macro default__string_literal(s) %} + '{{ s }}'::text +{% endmacro %} +""" + +macros_config_sql = """ +{#-- Verify that the config['alias'] key is present #} +{% macro generate_alias_name(custom_alias_name, node) -%} + {%- if custom_alias_name is none -%} + {{ node.name }} + {%- else -%} + custom_{{ node.config['alias'] if 'alias' in node.config else '' | trim }} + {%- endif -%} +{%- endmacro %} + +{% macro string_literal(s) -%} + {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }} +{%- endmacro %} + +{% macro default__string_literal(s) %} + '{{ s }}'::text +{% endmacro %} +""" + +schema_yml = """ +version: 2 + +models: + - name: model1 + columns: + - name: model_name + tests: + - accepted_values: + values: ['custom_alias'] + - name: model2 + columns: + - name: model_name + tests: + - accepted_values: + values: ['model2'] + +""" diff --git a/tests/functional/custom_aliases/test_custom_aliases.py b/tests/functional/custom_aliases/test_custom_aliases.py new file mode 100644 index 00000000000..86b44c3b3f0 --- /dev/null +++ b/tests/functional/custom_aliases/test_custom_aliases.py @@ -0,0 +1,49 @@ +import pytest + +from dbt.tests.util import run_dbt + +from tests.functional.custom_aliases.fixtures import ( + model1_sql, + model2_sql, + macros_sql, + macros_config_sql, + schema_yml, +) + + +class TestAliases: + @pytest.fixture(scope="class") + def models(self): + return {"model1.sql": model1_sql, "model2.sql": model2_sql, "schema.yml": schema_yml} + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + } + + def test_customer_alias_name(self, project): + results = run_dbt(["run"]) + assert len(results) == 2 + + results = run_dbt(["test"]) + assert len(results) == 2 + + +class TestAliasesWithConfig: + @pytest.fixture(scope="class") + def models(self): + return {"model1.sql": model1_sql, "model2.sql": model2_sql, "schema.yml": schema_yml} + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_config_sql, + } + + def test_customer_alias_name(self, project): + results = run_dbt(["run"]) + assert len(results) == 2 + + results = run_dbt(["test"]) + assert len(results) == 2 diff --git a/tests/functional/cycles/test_cycles.py b/tests/functional/cycles/test_cycles.py index 0e2cdcaf911..6d2eb3fd0cc 100644 --- a/tests/functional/cycles/test_cycles.py +++ b/tests/functional/cycles/test_cycles.py @@ -36,10 +36,7 @@ class TestSimpleCycle: @pytest.fixture(scope="class") def models(self): - return { - "model_a.sql": model_a_sql, - "model_b.sql": model_b_sql - } + return {"model_a.sql": model_a_sql, "model_b.sql": model_b_sql} def test_simple_cycle(self, project): with pytest.raises(RuntimeError) as exc: diff --git a/tests/functional/defer_state/fixtures.py b/tests/functional/defer_state/fixtures.py new file mode 100644 index 00000000000..17f46f842d9 --- /dev/null +++ b/tests/functional/defer_state/fixtures.py @@ -0,0 +1,101 @@ +seed_csv = """id,name +1,Alice +2,Bob +""" + +table_model_sql = """ +{{ config(materialized='table') }} +select * from {{ ref('ephemeral_model') }} + +-- establish a macro dependency to trigger state:modified.macros +-- depends on: {{ my_macro() }} +""" + +changed_table_model_sql = """ +{{ config(materialized='table') }} +select 1 as fun +""" + +view_model_sql = """ +select * from {{ ref('seed') }} + +-- establish a macro dependency that trips infinite recursion if not handled +-- depends on: {{ my_infinitely_recursive_macro() }} +""" + +changed_view_model_sql = """ +select * from no.such.table 
+""" + +ephemeral_model_sql = """ +{{ config(materialized='ephemeral') }} +select * from {{ ref('view_model') }} +""" + +changed_ephemeral_model_sql = """ +{{ config(materialized='ephemeral') }} +select * from no.such.table +""" + +schema_yml = """ +version: 2 +models: + - name: view_model + columns: + - name: id + tests: + - unique: + severity: error + - not_null + - name: name +""" + +exposures_yml = """ +version: 2 +exposures: + - name: my_exposure + type: application + depends_on: + - ref('view_model') + owner: + email: test@example.com +""" + +macros_sql = """ +{% macro my_macro() %} + {% do log('in a macro' ) %} +{% endmacro %} +""" + +infinite_macros_sql = """ +{# trigger infinite recursion if not handled #} + +{% macro my_infinitely_recursive_macro() %} + {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }} +{% endmacro %} + +{% macro default__my_infinitely_recursive_macro() %} + {% if unmet_condition %} + {{ my_infinitely_recursive_macro() }} + {% else %} + {{ return('') }} + {% endif %} +{% endmacro %} +""" + +snapshot_sql = """ +{% snapshot my_cool_snapshot %} + + {{ + config( + target_database=database, + target_schema=schema, + unique_key='id', + strategy='check', + check_cols=['id'], + ) + }} + select * from {{ ref('view_model') }} + +{% endsnapshot %} +""" diff --git a/tests/functional/defer_state/test_defer_state.py b/tests/functional/defer_state/test_defer_state.py new file mode 100644 index 00000000000..134cae1c626 --- /dev/null +++ b/tests/functional/defer_state/test_defer_state.py @@ -0,0 +1,273 @@ +import json +import os +import shutil +from copy import deepcopy + +import pytest + +from dbt.tests.util import run_dbt, write_file, rm_file + +from dbt.exceptions import DbtRuntimeError + +from tests.functional.defer_state.fixtures import ( + seed_csv, + table_model_sql, + changed_table_model_sql, + view_model_sql, + changed_view_model_sql, + ephemeral_model_sql, + changed_ephemeral_model_sql, + schema_yml, + exposures_yml, + macros_sql, + infinite_macros_sql, + snapshot_sql, +) + + +class BaseDeferState: + @pytest.fixture(scope="class") + def models(self): + return { + "table_model.sql": table_model_sql, + "view_model.sql": view_model_sql, + "ephemeral_model.sql": ephemeral_model_sql, + "schema.yml": schema_yml, + "exposures.yml": exposures_yml, + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + "infinite_macros.sql": infinite_macros_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "seed.csv": seed_csv, + } + + @pytest.fixture(scope="class") + def snapshots(self): + return { + "snapshot.sql": snapshot_sql, + } + + @pytest.fixture(scope="class") + def other_schema(self, unique_schema): + return unique_schema + "_other" + + @property + def project_config_update(self): + return { + "seeds": { + "test": { + "quote_columns": False, + } + } + } + + @pytest.fixture(scope="class") + def profiles_config_update(self, dbt_profile_target, unique_schema, other_schema): + outputs = {"default": dbt_profile_target, "otherschema": deepcopy(dbt_profile_target)} + outputs["default"]["schema"] = unique_schema + outputs["otherschema"]["schema"] = other_schema + return {"test": {"outputs": outputs, "target": "default"}} + + def copy_state(self): + if not os.path.exists("state"): + os.makedirs("state") + shutil.copyfile("target/manifest.json", "state/manifest.json") + + def run_and_save_state(self): + results = run_dbt(["seed"]) + assert len(results) == 1 + assert not any(r.node.deferred for r in results) + 
results = run_dbt(["run"]) + assert len(results) == 2 + assert not any(r.node.deferred for r in results) + results = run_dbt(["test"]) + assert len(results) == 2 + + # copy files + self.copy_state() + + +class TestDeferStateUnsupportedCommands(BaseDeferState): + def test_unsupported_commands(self, project): + # make sure these commands don"t work with --defer + with pytest.raises(SystemExit): + run_dbt(["seed", "--defer"]) + + def test_no_state(self, project): + # no "state" files present, snapshot fails + with pytest.raises(DbtRuntimeError): + run_dbt(["snapshot", "--state", "state", "--defer"]) + + +class TestRunCompileState(BaseDeferState): + def test_run_and_compile_defer(self, project): + self.run_and_save_state() + + # defer test, it succeeds + results = run_dbt(["compile", "--state", "state", "--defer"]) + assert len(results.results) == 6 + assert results.results[0].node.name == "seed" + + +class TestSnapshotState(BaseDeferState): + def test_snapshot_state_defer(self, project): + self.run_and_save_state() + # snapshot succeeds without --defer + run_dbt(["snapshot"]) + # copy files + self.copy_state() + # defer test, it succeeds + run_dbt(["snapshot", "--state", "state", "--defer"]) + # favor_state test, it succeeds + run_dbt(["snapshot", "--state", "state", "--defer", "--favor-state"]) + + +class TestRunDeferState(BaseDeferState): + def test_run_and_defer(self, project, unique_schema, other_schema): + project.create_test_schema(other_schema) + self.run_and_save_state() + + # test tests first, because run will change things + # no state, wrong schema, failure. + run_dbt(["test", "--target", "otherschema"], expect_pass=False) + + # test generate docs + # no state, wrong schema, empty nodes + catalog = run_dbt(["docs", "generate", "--target", "otherschema"]) + assert not catalog.nodes + + # no state, run also fails + run_dbt(["run", "--target", "otherschema"], expect_pass=False) + + # defer test, it succeeds + results = run_dbt( + ["test", "-m", "view_model+", "--state", "state", "--defer", "--target", "otherschema"] + ) + + # defer docs generate with state, catalog refers schema from the happy times + catalog = run_dbt( + [ + "docs", + "generate", + "-m", + "view_model+", + "--state", + "state", + "--defer", + "--target", + "otherschema", + ] + ) + assert other_schema not in catalog.nodes["seed.test.seed"].metadata.schema + assert unique_schema in catalog.nodes["seed.test.seed"].metadata.schema + + # with state it should work though + results = run_dbt( + ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"] + ) + assert other_schema not in results[0].node.compiled_code + assert unique_schema in results[0].node.compiled_code + + with open("target/manifest.json") as fp: + data = json.load(fp) + assert data["nodes"]["seed.test.seed"]["deferred"] + + assert len(results) == 1 + + +class TestRunDeferStateChangedModel(BaseDeferState): + def test_run_defer_state_changed_model(self, project): + self.run_and_save_state() + + # change "view_model" + write_file(changed_view_model_sql, "models", "view_model.sql") + + # the sql here is just wrong, so it should fail + run_dbt( + ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"], + expect_pass=False, + ) + # but this should work since we just use the old happy model + run_dbt( + ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"], + expect_pass=True, + ) + + # change "ephemeral_model" + write_file(changed_ephemeral_model_sql, "models", 
"ephemeral_model.sql") + # this should fail because the table model refs a broken ephemeral + # model, which it should see + run_dbt( + ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"], + expect_pass=False, + ) + + +class TestRunDeferStateIFFNotExists(BaseDeferState): + def test_run_defer_iff_not_exists(self, project, unique_schema, other_schema): + project.create_test_schema(other_schema) + self.run_and_save_state() + + results = run_dbt(["seed", "--target", "otherschema"]) + assert len(results) == 1 + results = run_dbt(["run", "--state", "state", "--defer", "--target", "otherschema"]) + assert len(results) == 2 + + # because the seed now exists in our "other" schema, we should prefer it over the one + # available from state + assert other_schema in results[0].node.compiled_code + + # this time with --favor-state: even though the seed now exists in our "other" schema, + # we should still favor the one available from state + results = run_dbt( + ["run", "--state", "state", "--defer", "--favor-state", "--target", "otherschema"] + ) + assert len(results) == 2 + assert other_schema not in results[0].node.compiled_code + + +class TestDeferStateDeletedUpstream(BaseDeferState): + def test_run_defer_deleted_upstream(self, project, unique_schema, other_schema): + project.create_test_schema(other_schema) + self.run_and_save_state() + + # remove "ephemeral_model" + change "table_model" + rm_file("models", "ephemeral_model.sql") + write_file(changed_table_model_sql, "models", "table_model.sql") + + # ephemeral_model is now gone. previously this caused a + # keyerror (dbt#2875), now it should pass + run_dbt( + ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"], + expect_pass=True, + ) + + # despite deferral, we should use models just created in our schema + results = run_dbt(["test", "--state", "state", "--defer", "--target", "otherschema"]) + assert other_schema in results[0].node.compiled_code + + # this time with --favor-state: prefer the models in the "other" schema, even though they exist in ours + run_dbt( + [ + "run", + "-m", + "view_model", + "--state", + "state", + "--defer", + "--favor-state", + "--target", + "otherschema", + ], + expect_pass=True, + ) + results = run_dbt(["test", "--state", "state", "--defer", "--favor-state"]) + assert other_schema not in results[0].node.compiled_code diff --git a/tests/functional/defer_state/test_modified_state.py b/tests/functional/defer_state/test_modified_state.py new file mode 100644 index 00000000000..80e3d455da1 --- /dev/null +++ b/tests/functional/defer_state/test_modified_state.py @@ -0,0 +1,263 @@ +import os +import random +import shutil +import string + +import pytest + +from dbt.tests.util import run_dbt, update_config_file, write_file + +from dbt.exceptions import CompilationError + +from tests.functional.defer_state.fixtures import ( + seed_csv, + table_model_sql, + view_model_sql, + ephemeral_model_sql, + schema_yml, + exposures_yml, + macros_sql, + infinite_macros_sql, +) + + +class BaseModifiedState: + @pytest.fixture(scope="class") + def models(self): + return { + "table_model.sql": table_model_sql, + "view_model.sql": view_model_sql, + "ephemeral_model.sql": ephemeral_model_sql, + "schema.yml": schema_yml, + "exposures.yml": exposures_yml, + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + "infinite_macros.sql": infinite_macros_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "seed.csv": 
seed_csv, + } + + @property + def project_config_update(self): + return { + "seeds": { + "test": { + "quote_columns": False, + } + } + } + + def copy_state(self): + if not os.path.exists("state"): + os.makedirs("state") + shutil.copyfile("target/manifest.json", "state/manifest.json") + + def run_and_save_state(self): + run_dbt(["seed"]) + run_dbt(["run"]) + self.copy_state() + + +class TestChangedSeedContents(BaseModifiedState): + def test_changed_seed_contents_state(self, project): + self.run_and_save_state() + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], + expect_pass=True, + ) + assert len(results) == 0 + + # add a new row to the seed + changed_seed_contents = seed_csv + "\n" + "3,carl" + write_file(changed_seed_contents, "seeds", "seed.csv") + + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] + ) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "state:modified", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "state:modified+", "--state", "./state"]) + assert len(results) == 7 + assert set(results) == { + "test.seed", + "test.table_model", + "test.view_model", + "test.ephemeral_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + "exposure:test.my_exposure", + } + + shutil.rmtree("./state") + self.copy_state() + + # make a very big seed + # assume each line is ~2 bytes + len(name) + target_size = 1 * 1024 * 1024 + line_size = 64 + num_lines = target_size // line_size + maxlines = num_lines + 4 + seed_lines = [seed_csv] + for idx in range(4, maxlines): + value = "".join(random.choices(string.ascii_letters, k=62)) + seed_lines.append(f"{idx},{value}") + seed_contents = "\n".join(seed_lines) + write_file(seed_contents, "seeds", "seed.csv") + + # now if we run again, we should get a warning + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] + ) + assert len(results) == 1 + assert results[0] == "test.seed" + + with pytest.raises(CompilationError) as exc: + run_dbt( + [ + "--warn-error", + "ls", + "--resource-type", + "seed", + "--select", + "state:modified", + "--state", + "./state", + ] + ) + assert ">1MB" in str(exc.value) + + shutil.rmtree("./state") + self.copy_state() + + # once it's in path mode (the seed is too large to hash), we don't mark it as modified if it changes + write_file(seed_contents + "\n1,test", "seeds", "seed.csv") + + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], + expect_pass=True, + ) + assert len(results) == 0 + + +class TestChangedSeedConfig(BaseModifiedState): + def test_changed_seed_config(self, project): + self.run_and_save_state() + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], + expect_pass=True, + ) + assert len(results) == 0 + + update_config_file({"seeds": {"test": {"quote_columns": False}}}, "dbt_project.yml") + + # quoting change -> seed changed + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] + ) + assert len(results) == 1 + assert results[0] == "test.seed" + + +class TestUnrenderedConfigSame(BaseModifiedState): + def test_unrendered_config_same(self, project): + self.run_and_save_state() + results = run_dbt( + ["ls", "--resource-type", "model", "--select", "state:modified", 
"--state", "./state"], + expect_pass=True, + ) + assert len(results) == 0 + + # although this is the default value, dbt will recognize it as a change + # for previously-unconfigured models, because it"s been explicitly set + update_config_file({"models": {"test": {"materialized": "view"}}}, "dbt_project.yml") + results = run_dbt( + ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"] + ) + assert len(results) == 1 + assert results[0] == "test.view_model" + + +class TestChangedModelContents(BaseModifiedState): + def test_changed_model_contents(self, project): + self.run_and_save_state() + results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) + assert len(results) == 0 + + table_model_update = """ + {{ config(materialized="table") }} + + select * from {{ ref("seed") }} + """ + + write_file(table_model_update, "models", "table_model.sql") + + results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) + assert len(results) == 1 + assert results[0].node.name == "table_model" + + +class TestNewMacro(BaseModifiedState): + def test_new_macro(self, project): + self.run_and_save_state() + + new_macro = """ + {% macro my_other_macro() %} + {% endmacro %} + """ + + # add a new macro to a new file + write_file(new_macro, "macros", "second_macro.sql") + + results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) + assert len(results) == 0 + + os.remove("macros/second_macro.sql") + # add a new macro to the existing file + with open("macros/macros.sql", "a") as fp: + fp.write(new_macro) + + results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) + assert len(results) == 0 + + +class TestChangedMacroContents(BaseModifiedState): + def test_changed_macro_contents(self, project): + self.run_and_save_state() + + # modify an existing macro + updated_macro = """ + {% macro my_macro() %} + {% do log("in a macro", info=True) %} + {% endmacro %} + """ + write_file(updated_macro, "macros", "macros.sql") + + # table_model calls this macro + results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) + assert len(results) == 1 + + +class TestChangedExposure(BaseModifiedState): + def test_changed_exposure(self, project): + self.run_and_save_state() + + # add an "owner.name" to existing exposure + updated_exposure = exposures_yml + "\n name: John Doe\n" + write_file(updated_exposure, "models", "exposures.yml") + + results = run_dbt(["run", "--models", "+state:modified", "--state", "./state"]) + assert len(results) == 1 + assert results[0].node.name == "view_model" diff --git a/tests/functional/defer_state/test_run_results_state.py b/tests/functional/defer_state/test_run_results_state.py new file mode 100644 index 00000000000..aa1dc549272 --- /dev/null +++ b/tests/functional/defer_state/test_run_results_state.py @@ -0,0 +1,494 @@ +import os +import shutil + +import pytest + +from dbt.tests.util import run_dbt, write_file + +from tests.functional.defer_state.fixtures import ( + seed_csv, + table_model_sql, + view_model_sql, + ephemeral_model_sql, + schema_yml, + exposures_yml, + macros_sql, + infinite_macros_sql, +) + + +class BaseRunResultsState: + @pytest.fixture(scope="class") + def models(self): + return { + "table_model.sql": table_model_sql, + "view_model.sql": view_model_sql, + "ephemeral_model.sql": ephemeral_model_sql, + "schema.yml": schema_yml, + "exposures.yml": exposures_yml, + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": 
macros_sql, + "infinite_macros.sql": infinite_macros_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "seed.csv": seed_csv, + } + + @property + def project_config_update(self): + return { + "seeds": { + "test": { + "quote_columns": False, + } + } + } + + def clear_state(self): + shutil.rmtree("./state") + + def copy_state(self): + if not os.path.exists("state"): + os.makedirs("state") + shutil.copyfile("target/manifest.json", "state/manifest.json") + shutil.copyfile("target/run_results.json", "state/run_results.json") + + def run_and_save_state(self): + run_dbt(["build"]) + self.copy_state() + + def rebuild_run_dbt(self, expect_pass=True): + self.clear_state() + run_dbt(["build"], expect_pass=expect_pass) + self.copy_state() + + def update_view_model_bad_sql(self): + # update view model to generate a failure case + not_unique_sql = "select * from forced_error" + write_file(not_unique_sql, "models", "view_model.sql") + + def update_view_model_failing_tests(self, with_dupes=True, with_nulls=False): + # test failure on build tests + # fail the unique test + select_1 = "select 1 as id" + select_stmts = [select_1] + if with_dupes: + select_stmts.append(select_1) + if with_nulls: + select_stmts.append("select null as id") + failing_tests_sql = " union all ".join(select_stmts) + write_file(failing_tests_sql, "models", "view_model.sql") + + def update_unique_test_severity_warn(self): + # change the unique test severity from error to warn and reuse the same view_model.sql changes above + new_config = schema_yml.replace("error", "warn") + write_file(new_config, "models", "schema.yml") + + +class TestSeedRunResultsState(BaseRunResultsState): + def test_seed_run_results_state(self, project): + self.run_and_save_state() + self.clear_state() + run_dbt(["seed"]) + self.copy_state() + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "result:success", "--state", "./state"], + expect_pass=True, + ) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "result:success", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "result:success+", "--state", "./state"]) + assert len(results) == 7 + assert set(results) == { + "test.seed", + "test.table_model", + "test.view_model", + "test.ephemeral_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + "exposure:test.my_exposure", + } + + # add a new faulty row to the seed + changed_seed_contents = seed_csv + "\n" + "\\\3,carl" + write_file(changed_seed_contents, "seeds", "seed.csv") + + self.clear_state() + run_dbt(["seed"], expect_pass=False) + self.copy_state() + + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "result:error", "--state", "./state"], + expect_pass=True, + ) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "result:error", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"]) + assert len(results) == 7 + assert set(results) == { + "test.seed", + "test.table_model", + "test.view_model", + "test.ephemeral_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + "exposure:test.my_exposure", + } + + +class TestBuildRunResultsState(BaseRunResultsState): + def test_build_run_results_state(self, project): + self.run_and_save_state() + results = run_dbt(["build", "--select", 
"result:error", "--state", "./state"]) + assert len(results) == 0 + + self.update_view_model_bad_sql() + self.rebuild_run_dbt(expect_pass=False) + + results = run_dbt( + ["build", "--select", "result:error", "--state", "./state"], expect_pass=False + ) + assert len(results) == 3 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"view_model", "not_null_view_model_id", "unique_view_model_id"} + + results = run_dbt(["ls", "--select", "result:error", "--state", "./state"]) + assert len(results) == 3 + assert set(results) == { + "test.view_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + } + + results = run_dbt( + ["build", "--select", "result:error+", "--state", "./state"], expect_pass=False + ) + assert len(results) == 4 + nodes = set([elem.node.name for elem in results]) + assert nodes == { + "table_model", + "view_model", + "not_null_view_model_id", + "unique_view_model_id", + } + + results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"]) + assert len(results) == 6 # includes exposure + assert set(results) == { + "test.table_model", + "test.view_model", + "test.ephemeral_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + "exposure:test.my_exposure", + } + + self.update_view_model_failing_tests() + self.rebuild_run_dbt(expect_pass=False) + + results = run_dbt( + ["build", "--select", "result:fail", "--state", "./state"], expect_pass=False + ) + assert len(results) == 1 + assert results[0].node.name == "unique_view_model_id" + + results = run_dbt(["ls", "--select", "result:fail", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.unique_view_model_id" + + results = run_dbt( + ["build", "--select", "result:fail+", "--state", "./state"], expect_pass=False + ) + assert len(results) == 2 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"table_model", "unique_view_model_id"} + + results = run_dbt(["ls", "--select", "result:fail+", "--state", "./state"]) + assert len(results) == 1 + assert set(results) == {"test.unique_view_model_id"} + + self.update_unique_test_severity_warn() + self.rebuild_run_dbt(expect_pass=True) + + results = run_dbt( + ["build", "--select", "result:warn", "--state", "./state"], expect_pass=True + ) + assert len(results) == 1 + assert results[0].node.name == "unique_view_model_id" + + results = run_dbt(["ls", "--select", "result:warn", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.unique_view_model_id" + + results = run_dbt( + ["build", "--select", "result:warn+", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 # includes table_model to be run + nodes = set([elem.node.name for elem in results]) + assert nodes == {"table_model", "unique_view_model_id"} + + results = run_dbt(["ls", "--select", "result:warn+", "--state", "./state"]) + assert len(results) == 1 + assert set(results) == {"test.unique_view_model_id"} + + +class TestRunRunResultsState(BaseRunResultsState): + def test_run_run_results_state(self, project): + self.run_and_save_state() + results = run_dbt( + ["run", "--select", "result:success", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 + assert results[0].node.name == "view_model" + assert results[1].node.name == "table_model" + + # clear state and rerun upstream view model to test + operator + self.clear_state() + run_dbt(["run", "--select", "view_model"], expect_pass=True) + self.copy_state() + results = run_dbt( + ["run", "--select", 
"result:success+", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 + assert results[0].node.name == "view_model" + assert results[1].node.name == "table_model" + + # check we are starting from a place with 0 errors + results = run_dbt(["run", "--select", "result:error", "--state", "./state"]) + assert len(results) == 0 + + self.update_view_model_bad_sql() + self.clear_state() + run_dbt(["run"], expect_pass=False) + self.copy_state() + + # test single result selector on error + results = run_dbt( + ["run", "--select", "result:error", "--state", "./state"], expect_pass=False + ) + assert len(results) == 1 + assert results[0].node.name == "view_model" + + # test + operator selection on error + results = run_dbt( + ["run", "--select", "result:error+", "--state", "./state"], expect_pass=False + ) + assert len(results) == 2 + assert results[0].node.name == "view_model" + assert results[1].node.name == "table_model" + + # single result selector on skipped. Expect this to pass becase underlying view already defined above + results = run_dbt( + ["run", "--select", "result:skipped", "--state", "./state"], expect_pass=True + ) + assert len(results) == 1 + assert results[0].node.name == "table_model" + + # add a downstream model that depends on table_model for skipped+ selector + downstream_model_sql = "select * from {{ref('table_model')}}" + write_file(downstream_model_sql, "models", "table_model_downstream.sql") + + self.clear_state() + run_dbt(["run"], expect_pass=False) + self.copy_state() + + results = run_dbt( + ["run", "--select", "result:skipped+", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 + assert results[0].node.name == "table_model" + assert results[1].node.name == "table_model_downstream" + + +class TestTestRunResultsState(BaseRunResultsState): + def test_test_run_results_state(self, project): + self.run_and_save_state() + # run passed nodes + results = run_dbt( + ["test", "--select", "result:pass", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"unique_view_model_id", "not_null_view_model_id"} + + # run passed nodes with + operator + results = run_dbt( + ["test", "--select", "result:pass+", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"unique_view_model_id", "not_null_view_model_id"} + + self.update_view_model_failing_tests() + self.rebuild_run_dbt(expect_pass=False) + + # test with failure selector + results = run_dbt( + ["test", "--select", "result:fail", "--state", "./state"], expect_pass=False + ) + assert len(results) == 1 + assert results[0].node.name == "unique_view_model_id" + + # test with failure selector and + operator + results = run_dbt( + ["test", "--select", "result:fail+", "--state", "./state"], expect_pass=False + ) + assert len(results) == 1 + assert results[0].node.name == "unique_view_model_id" + + self.update_unique_test_severity_warn() + # rebuild - expect_pass = True because we changed the error to a warning this time around + self.rebuild_run_dbt(expect_pass=True) + + # test with warn selector + results = run_dbt( + ["test", "--select", "result:warn", "--state", "./state"], expect_pass=True + ) + assert len(results) == 1 + assert results[0].node.name == "unique_view_model_id" + + # test with warn selector and + operator + results = run_dbt( + ["test", "--select", "result:warn+", "--state", "./state"], expect_pass=True + ) + 
assert len(results) == 1 + assert results[0].node.name == "unique_view_model_id" + + +class TestConcurrentSelectionRunResultsState(BaseRunResultsState): + def test_concurrent_selection_run_run_results_state(self, project): + self.run_and_save_state() + results = run_dbt( + ["run", "--select", "state:modified+", "result:error+", "--state", "./state"] + ) + assert len(results) == 0 + + self.update_view_model_bad_sql() + self.clear_state() + run_dbt(["run"], expect_pass=False) + self.copy_state() + + # add a new failing dbt model + bad_sql = "select * from forced_error" + write_file(bad_sql, "models", "table_model_modified_example.sql") + + results = run_dbt( + ["run", "--select", "state:modified+", "result:error+", "--state", "./state"], + expect_pass=False, + ) + assert len(results) == 3 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"view_model", "table_model_modified_example", "table_model"} + + +class TestConcurrentSelectionTestRunResultsState(BaseRunResultsState): + def test_concurrent_selection_test_run_results_state(self, project): + self.run_and_save_state() + # create failure test case for result:fail selector + self.update_view_model_failing_tests(with_nulls=True) + + # run dbt build again to trigger test errors + self.rebuild_run_dbt(expect_pass=False) + + # get the failures from the saved run results, excluding the failing not_null test + results = run_dbt( + [ + "test", + "--select", + "result:fail", + "--exclude", + "not_null_view_model_id", + "--state", + "./state", + ], + expect_pass=False, + ) + assert len(results) == 1 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"unique_view_model_id"} + + +class TestConcurrentSelectionBuildRunResultsState(BaseRunResultsState): + def test_concurrent_selectors_build_run_results_state(self, project): + self.run_and_save_state() + results = run_dbt( + ["build", "--select", "state:modified+", "result:error+", "--state", "./state"] + ) + assert len(results) == 0 + + self.update_view_model_bad_sql() + self.rebuild_run_dbt(expect_pass=False) + + # add a new failing dbt model + bad_sql = "select * from forced_error" + write_file(bad_sql, "models", "table_model_modified_example.sql") + + results = run_dbt( + ["build", "--select", "state:modified+", "result:error+", "--state", "./state"], + expect_pass=False, + ) + assert len(results) == 5 + nodes = set([elem.node.name for elem in results]) + assert nodes == { + "table_model_modified_example", + "view_model", + "table_model", + "not_null_view_model_id", + "unique_view_model_id", + } + + self.update_view_model_failing_tests() + + # create error model case for result:error selector + more_bad_sql = "select 1 as id from not_exists" + write_file(more_bad_sql, "models", "error_model.sql") + + # create something downstream from the error model to rerun + downstream_model_sql = "select * from {{ ref('error_model') }} )" + write_file(downstream_model_sql, "models", "downstream_of_error_model.sql") + + # regenerate build state + self.rebuild_run_dbt(expect_pass=False) + + # modify model again to trigger the state:modified selector + bad_again_sql = "select * from forced_anothererror" + write_file(bad_again_sql, "models", "table_model_modified_example.sql") + + results = run_dbt( + [ + "build", + "--select", + "state:modified+", + "result:error+", + "result:fail+", + "--state", + "./state", + ], + expect_pass=False, + ) + assert len(results) == 5 + nodes = set([elem.node.name for elem in results]) + assert nodes == { + "error_model", + "downstream_of_error_model", + "table_model_modified_example", + "table_model", + 
"unique_view_model_id", + } diff --git a/tests/functional/dependencies/test_local_dependency.py b/tests/functional/dependencies/test_local_dependency.py index 3e0bc5efdb7..13605028519 100644 --- a/tests/functional/dependencies/test_local_dependency.py +++ b/tests/functional/dependencies/test_local_dependency.py @@ -184,7 +184,7 @@ def models(self): def test_missing_dependency(self, project): # dbt should raise a runtime exception - with pytest.raises(dbt.exceptions.RuntimeException): + with pytest.raises(dbt.exceptions.DbtRuntimeError): run_dbt(["compile"]) @@ -335,12 +335,12 @@ def prepare_dependencies(self, project): ) def test_local_dependency_same_name(self, prepare_dependencies, project): - with pytest.raises(dbt.exceptions.DependencyException): + with pytest.raises(dbt.exceptions.DependencyError): run_dbt(["deps"], expect_pass=False) def test_local_dependency_same_name_sneaky(self, prepare_dependencies, project): shutil.copytree("duplicate_dependency", "./dbt_packages/duplicate_dependency") - with pytest.raises(dbt.exceptions.CompilationException): + with pytest.raises(dbt.exceptions.CompilationError): run_dbt(["compile"]) # needed to avoid compilation errors from duplicate package names in test autocleanup diff --git a/tests/functional/deprecations/test_deprecations.py b/tests/functional/deprecations/test_deprecations.py index fc76289b2ee..a70b3687c69 100644 --- a/tests/functional/deprecations/test_deprecations.py +++ b/tests/functional/deprecations/test_deprecations.py @@ -63,7 +63,7 @@ def test_data_path(self, project): def test_data_path_fail(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt(["--warn-error", "debug"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "The `data-paths` config has been renamed" @@ -107,7 +107,7 @@ def test_package_path(self, project): def test_package_path_not_set(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt(["--warn-error", "clean"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "path has changed from `dbt_modules` to `dbt_packages`." 
@@ -134,7 +134,7 @@ def test_package_redirect(self, project): def test_package_redirect_fail(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt(["--warn-error", "deps"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "The `fishtown-analytics/dbt_utils` package is deprecated in favor of `dbt-labs/dbt_utils`" @@ -159,7 +159,7 @@ def test_metric_handle_rename(self, project): def test_metric_handle_rename_fail(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: # turn off partial parsing to ensure that the metric is re-parsed run_dbt(["--warn-error", "--no-partial-parse", "parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace @@ -182,7 +182,7 @@ def test_exposure_name(self, project): def test_exposure_name_fail(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt(["--warn-error", "--no-partial-parse", "parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "Starting in v1.3, the 'name' of an exposure should contain only letters, numbers, and underscores." diff --git a/tests/functional/docs/test_duplicate_docs_block.py b/tests/functional/docs/test_duplicate_docs_block.py new file mode 100644 index 00000000000..2ff9459e4b3 --- /dev/null +++ b/tests/functional/docs/test_duplicate_docs_block.py @@ -0,0 +1,36 @@ +import pytest + +from dbt.tests.util import run_dbt +import dbt.exceptions + + +duplicate_doc_blocks_model_sql = "select 1 as id, 'joe' as first_name" + +duplicate_doc_blocks_docs_md = """{% docs my_model_doc %} + a doc string +{% enddocs %} + +{% docs my_model_doc %} + duplicate doc string +{% enddocs %}""" + +duplicate_doc_blocks_schema_yml = """version: 2 + +models: + - name: model + description: "{{ doc('my_model_doc') }}" +""" + + +class TestDuplicateDocsBlock: + @pytest.fixture(scope="class") + def models(self): + # docs.md must be in the project so the duplicate docs blocks are parsed + return { + "model.sql": duplicate_doc_blocks_model_sql, + "docs.md": duplicate_doc_blocks_docs_md, + "schema.yml": duplicate_doc_blocks_schema_yml, + } + + def test_duplicate_doc_ref(self, project): + with pytest.raises(dbt.exceptions.CompilationError): + run_dbt(expect_pass=False) diff --git a/tests/functional/docs/test_good_docs_blocks.py b/tests/functional/docs/test_good_docs_blocks.py new file mode 100644 index 00000000000..9fc9a7f0bb5 --- /dev/null +++ b/tests/functional/docs/test_good_docs_blocks.py @@ -0,0 +1,171 @@ +import json +import os +from pathlib import Path +import pytest + +from dbt.tests.util import run_dbt, update_config_file, write_file + + +good_docs_blocks_model_sql = "select 1 as id, 'joe' as first_name" + +good_docs_blocks_docs_md = """{% docs my_model_doc %} +My model is just a copy of the seed +{% enddocs %} + +{% docs my_model_doc__id %} +The user ID number +{% enddocs %} + +The following doc is never used, which should be fine. +{% docs my_model_doc__first_name %} +The user's first name (should not be shown!) 
+{% enddocs %} + +This doc is referenced by its full name +{% docs my_model_doc__last_name %} +The user's last name +{% enddocs %} +""" + +good_doc_blocks_alt_docs_md = """{% docs my_model_doc %} +Alt text about the model +{% enddocs %} + +{% docs my_model_doc__id %} +The user ID number with alternative text +{% enddocs %} + +The following doc is never used, which should be fine. +{% docs my_model_doc__first_name %} +The user's first name - don't show this text! +{% enddocs %} + +This doc is referenced by its full name +{% docs my_model_doc__last_name %} +The user's last name in this other file +{% enddocs %} +""" + +good_docs_blocks_schema_yml = """version: 2 + +models: + - name: model + description: "{{ doc('my_model_doc') }}" + columns: + - name: id + description: "{{ doc('my_model_doc__id') }}" + - name: first_name + description: The user's first name + - name: last_name + description: "{{ doc('test', 'my_model_doc__last_name') }}" +""" + + +class TestGoodDocsBlocks: + @pytest.fixture(scope="class") + def models(self): + return { + "model.sql": good_docs_blocks_model_sql, + "schema.yml": good_docs_blocks_schema_yml, + "docs.md": good_docs_blocks_docs_md, + } + + def test_valid_doc_ref(self, project): + result = run_dbt() + assert len(result.results) == 1 + + assert os.path.exists("./target/manifest.json") + + with open("./target/manifest.json") as fp: + manifest = json.load(fp) + + model_data = manifest["nodes"]["model.test.model"] + + assert model_data["description"] == "My model is just a copy of the seed" + + assert { + "name": "id", + "description": "The user ID number", + "data_type": None, + "meta": {}, + "quote": None, + "tags": [], + } == model_data["columns"]["id"] + + assert { + "name": "first_name", + "description": "The user's first name", + "data_type": None, + "meta": {}, + "quote": None, + "tags": [], + } == model_data["columns"]["first_name"] + + assert { + "name": "last_name", + "description": "The user's last name", + "data_type": None, + "meta": {}, + "quote": None, + "tags": [], + } == model_data["columns"]["last_name"] + + assert len(model_data["columns"]) == 3 + + +class TestGoodDocsBlocksAltPath: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": good_docs_blocks_model_sql, "schema.yml": good_docs_blocks_schema_yml} + + def test_alternative_docs_path(self, project): + # self.use_default_project({"docs-paths": [self.dir("docs")]}) + docs_path = Path(project.project_root, "alt-docs") + docs_path.mkdir() + write_file(good_doc_blocks_alt_docs_md, project.project_root, "alt-docs", "docs.md") + + update_config_file( + {"docs-paths": [str(docs_path)]}, project.project_root, "dbt_project.yml" + ) + + result = run_dbt() + + assert len(result.results) == 1 + + assert os.path.exists("./target/manifest.json") + + with open("./target/manifest.json") as fp: + manifest = json.load(fp) + + model_data = manifest["nodes"]["model.test.model"] + + assert model_data["description"] == "Alt text about the model" + + assert { + "name": "id", + "description": "The user ID number with alternative text", + "data_type": None, + "meta": {}, + "quote": None, + "tags": [], + } == model_data["columns"]["id"] + + assert { + "name": "first_name", + "description": "The user's first name", + "data_type": None, + "meta": {}, + "quote": None, + "tags": [], + } == model_data["columns"]["first_name"] + + assert { + "name": "last_name", + "description": "The user's last name in this other file", + "data_type": None, + "meta": {}, + "quote": None, + "tags": [], + } == 
model_data["columns"]["last_name"] + + assert len(model_data["columns"]) == 3 diff --git a/tests/functional/docs/test_invalid_doc_ref.py b/tests/functional/docs/test_invalid_doc_ref.py new file mode 100644 index 00000000000..7c486938124 --- /dev/null +++ b/tests/functional/docs/test_invalid_doc_ref.py @@ -0,0 +1,47 @@ +import pytest + +from dbt.tests.util import run_dbt +import dbt.exceptions + + +invalid_doc_ref_model_sql = "select 1 as id, 'joe' as first_name" + +invalid_doc_ref_docs_md = """{% docs my_model_doc %} +My model is just a copy of the seed +{% enddocs %} + +{% docs my_model_doc__id %} +The user ID number +{% enddocs %} + +The following doc is never used, which should be fine. +{% docs my_model_doc__first_name %} +The user's first name +{% enddocs %}""" + +invalid_doc_ref_schema_yml = """version: 2 + +models: + - name: model + description: "{{ doc('my_model_doc') }}" + columns: + - name: id + description: "{{ doc('my_model_doc__id') }}" + - name: first_name + description: "{{ doc('foo.bar.my_model_doc__id') }}" +""" + + +class TestInvalidDocRef: + @pytest.fixture(scope="class") + def models(self): + return { + "model.sql": invalid_doc_ref_model_sql, + "docs.md": invalid_doc_ref_docs_md, + "schema.yml": invalid_doc_ref_schema_yml, + } + + def test_invalid_doc_ref(self, project): + # The run should fail since we could not find the docs reference. + with pytest.raises(dbt.exceptions.CompilationError): + run_dbt(expect_pass=False) diff --git a/tests/functional/docs/test_missing_docs_blocks.py b/tests/functional/docs/test_missing_docs_blocks.py new file mode 100644 index 00000000000..3b6f4e540b9 --- /dev/null +++ b/tests/functional/docs/test_missing_docs_blocks.py @@ -0,0 +1,43 @@ +import pytest + +from dbt.tests.util import run_dbt +import dbt.exceptions + + +missing_docs_blocks_model_sql = "select 1 as id, 'joe' as first_name" + +missing_docs_blocks_docs_md = """{% docs my_model_doc %} +My model is just a copy of the seed +{% enddocs %} + +{% docs my_model_doc__id %} +The user ID number +{% enddocs %}""" + +missing_docs_blocks_schema_yml = """version: 2 + +models: + - name: model + description: "{{ doc('my_model_doc') }}" + columns: + - name: id + description: "{{ doc('my_model_doc__id') }}" + - name: first_name + # invalid reference + description: "{{ doc('my_model_doc__first_name') }}" +""" + + +class TestMissingDocsBlocks: + @pytest.fixture(scope="class") + def models(self): + return { + "model.sql": missing_docs_blocks_model_sql, + "schema.yml": missing_docs_blocks_schema_yml, + "docs.md": missing_docs_blocks_docs_md, + } + + def test_missing_doc_ref(self, project): + # The run should fail since we could not find the docs reference. 
+ with pytest.raises(dbt.exceptions.CompilationError): + run_dbt() diff --git a/tests/functional/duplicates/test_duplicate_analysis.py b/tests/functional/duplicates/test_duplicate_analysis.py index e9050860ad9..44dc4c6f167 100644 --- a/tests/functional/duplicates/test_duplicate_analysis.py +++ b/tests/functional/duplicates/test_duplicate_analysis.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -27,7 +27,7 @@ def analyses(self): def test_duplicate_model_enabled(self, project): message = "dbt found two analyses with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace assert message in exc_str diff --git a/tests/functional/duplicates/test_duplicate_exposure.py b/tests/functional/duplicates/test_duplicate_exposure.py index 6035da7c110..140db21cd07 100644 --- a/tests/functional/duplicates/test_duplicate_exposure.py +++ b/tests/functional/duplicates/test_duplicate_exposure.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -26,6 +26,6 @@ def models(self): def test_duplicate_exposure(self, project): message = "dbt found two exposures with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert message in str(exc.value) diff --git a/tests/functional/duplicates/test_duplicate_macro.py b/tests/functional/duplicates/test_duplicate_macro.py index 1fc7282808f..35b843f5891 100644 --- a/tests/functional/duplicates/test_duplicate_macro.py +++ b/tests/functional/duplicates/test_duplicate_macro.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -43,7 +43,7 @@ def macros(self): def test_duplicate_macros(self, project): message = 'dbt found two macros named "some_macro" in the project' - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace assert message in exc_str @@ -64,7 +64,7 @@ def macros(self): def test_duplicate_macros(self, project): message = 'dbt found two macros named "some_macro" in the project' - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace assert message in exc_str diff --git a/tests/functional/duplicates/test_duplicate_metric.py b/tests/functional/duplicates/test_duplicate_metric.py index e40295278b9..f8beca39c24 100644 --- a/tests/functional/duplicates/test_duplicate_metric.py +++ b/tests/functional/duplicates/test_duplicate_metric.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -46,6 +46,6 @@ def models(self): def test_duplicate_metric(self, project): message = "dbt found two metrics with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert message in str(exc.value) diff --git a/tests/functional/duplicates/test_duplicate_model.py 
b/tests/functional/duplicates/test_duplicate_model.py index fbcd1b79671..7a53fd6de63 100644 --- a/tests/functional/duplicates/test_duplicate_model.py +++ b/tests/functional/duplicates/test_duplicate_model.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException, DuplicateResourceName +from dbt.exceptions import CompilationError, DuplicateResourceNameError from dbt.tests.fixtures.project import write_project_files from dbt.tests.util import run_dbt, get_manifest @@ -54,7 +54,7 @@ def models(self): def test_duplicate_model_enabled(self, project): message = "dbt found two models with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace assert message in exc_str @@ -108,7 +108,7 @@ def packages(self): def test_duplicate_model_enabled_across_packages(self, project): run_dbt(["deps"]) message = "dbt found two models with the name" - with pytest.raises(DuplicateResourceName) as exc: + with pytest.raises(DuplicateResourceNameError) as exc: run_dbt(["run"]) assert message in str(exc.value) diff --git a/tests/functional/duplicates/test_duplicate_source.py b/tests/functional/duplicates/test_duplicate_source.py index 181aaf5d18e..1100345aabc 100644 --- a/tests/functional/duplicates/test_duplicate_source.py +++ b/tests/functional/duplicates/test_duplicate_source.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -22,6 +22,6 @@ def models(self): def test_duplicate_source_enabled(self, project): message = "dbt found two sources with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert message in str(exc.value) diff --git a/tests/functional/exit_codes/fixtures.py b/tests/functional/exit_codes/fixtures.py index 23a0bef3897..296e1a3f6c0 100644 --- a/tests/functional/exit_codes/fixtures.py +++ b/tests/functional/exit_codes/fixtures.py @@ -74,5 +74,5 @@ def models(self): "bad.sql": bad_sql, "dupe.sql": dupe_sql, "good.sql": good_sql, - "schema.yml": schema_yml + "schema.yml": schema_yml, } diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py index 54b5cb6865e..44672beecae 100644 --- a/tests/functional/exit_codes/test_exit_codes.py +++ b/tests/functional/exit_codes/test_exit_codes.py @@ -1,17 +1,13 @@ import pytest import dbt.exceptions -from dbt.tests.util import ( - check_table_does_exist, - check_table_does_not_exist, - run_dbt -) +from dbt.tests.util import check_table_does_exist, check_table_does_not_exist, run_dbt from tests.functional.exit_codes.fixtures import ( BaseConfigProject, snapshots_bad_sql, snapshots_good_sql, data_seed_bad_csv, - data_seed_good_csv + data_seed_good_csv, ) @@ -21,38 +17,38 @@ def snapshots(self): return {"g.sql": snapshots_good_sql} def test_exit_code_run_succeed(self, project): - results = run_dbt(['run', '--model', 'good']) + results = run_dbt(["run", "--model", "good"]) assert len(results) == 1 - check_table_does_exist(project.adapter, 'good') + check_table_does_exist(project.adapter, "good") def test_exit_code_run_fail(self, project): - results = run_dbt(['run', '--model', 'bad'], expect_pass=False) + results = run_dbt(["run", "--model", "bad"], expect_pass=False) assert len(results) == 1 - check_table_does_not_exist(project.adapter, 'bad') + 
check_table_does_not_exist(project.adapter, "bad") def test_schema_test_pass(self, project): - results = run_dbt(['run', '--model', 'good']) + results = run_dbt(["run", "--model", "good"]) assert len(results) == 1 - results = run_dbt(['test', '--model', 'good']) + results = run_dbt(["test", "--model", "good"]) assert len(results) == 1 def test_schema_test_fail(self, project): - results = run_dbt(['run', '--model', 'dupe']) + results = run_dbt(["run", "--model", "dupe"]) assert len(results) == 1 - results = run_dbt(['test', '--model', 'dupe'], expect_pass=False) + results = run_dbt(["test", "--model", "dupe"], expect_pass=False) assert len(results) == 1 def test_compile(self, project): - results = run_dbt(['compile']) + results = run_dbt(["compile"]) assert len(results) == 7 def test_snapshot_pass(self, project): run_dbt(["run", "--model", "good"]) - results = run_dbt(['snapshot']) + results = run_dbt(["snapshot"]) assert len(results) == 1 - check_table_does_exist(project.adapter, 'good_snapshot') + check_table_does_exist(project.adapter, "good_snapshot") class TestExitCodesSnapshotFail(BaseConfigProject): @@ -61,12 +57,12 @@ def snapshots(self): return {"b.sql": snapshots_bad_sql} def test_snapshot_fail(self, project): - results = run_dbt(['run', '--model', 'good']) + results = run_dbt(["run", "--model", "good"]) assert len(results) == 1 - results = run_dbt(['snapshot'], expect_pass=False) + results = run_dbt(["snapshot"], expect_pass=False) assert len(results) == 1 - check_table_does_not_exist(project.adapter, 'good_snapshot') + check_table_does_not_exist(project.adapter, "good_snapshot") class TestExitCodesDeps: @@ -75,14 +71,14 @@ def packages(self): return { "packages": [ { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'dbt/1.0.0', + "git": "https://github.com/dbt-labs/dbt-integration-project", + "revision": "dbt/1.0.0", } ] } def test_deps(self, project): - results = run_dbt(['deps']) + results = run_dbt(["deps"]) assert results is None @@ -92,15 +88,15 @@ def packages(self): return { "packages": [ { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'bad-branch', + "git": "https://github.com/dbt-labs/dbt-integration-project", + "revision": "bad-branch", }, ] } def test_deps_fail(self, project): with pytest.raises(dbt.exceptions.GitCheckoutError) as exc: - run_dbt(['deps']) + run_dbt(["deps"]) expected_msg = "Error checking out spec='bad-branch'" assert expected_msg in str(exc.value) @@ -111,7 +107,7 @@ def seeds(self): return {"good.csv": data_seed_good_csv} def test_seed(self, project): - results = run_dbt(['seed']) + results = run_dbt(["seed"]) assert len(results) == 1 @@ -121,4 +117,4 @@ def seeds(self): return {"bad.csv": data_seed_bad_csv} def test_seed(self, project): - run_dbt(['seed'], expect_pass=False) + run_dbt(["seed"], expect_pass=False) diff --git a/tests/functional/exposures/fixtures.py b/tests/functional/exposures/fixtures.py index 1d573b1a7b6..f02c5723f72 100644 --- a/tests/functional/exposures/fixtures.py +++ b/tests/functional/exposures/fixtures.py @@ -1,4 +1,3 @@ - models_sql = """ select 1 as id """ diff --git a/tests/functional/exposures/test_exposure_configs.py b/tests/functional/exposures/test_exposure_configs.py index a7018204952..199a6368a4a 100644 --- a/tests/functional/exposures/test_exposure_configs.py +++ b/tests/functional/exposures/test_exposure_configs.py @@ -12,7 +12,7 @@ enabled_yaml_level_exposure_yml, invalid_config_exposure_yml, source_schema_yml, - 
metrics_schema_yml + metrics_schema_yml, ) diff --git a/tests/functional/exposures/test_exposures.py b/tests/functional/exposures/test_exposures.py index 777a8e161c4..97849fa0835 100644 --- a/tests/functional/exposures/test_exposures.py +++ b/tests/functional/exposures/test_exposures.py @@ -6,7 +6,7 @@ second_model_sql, simple_exposure_yml, source_schema_yml, - metrics_schema_yml + metrics_schema_yml, ) @@ -37,8 +37,8 @@ def test_depends_on(self, project): manifest = get_manifest(project.project_root) exposure_depends_on = manifest.exposures["exposure.test.simple_exposure"].depends_on.nodes expected_exposure_depends_on = [ - 'source.test.test_source.test_table', - 'model.test.model', - 'metric.test.metric' + "source.test.test_source.test_table", + "model.test.model", + "metric.test.metric", ] assert sorted(exposure_depends_on) == sorted(expected_exposure_depends_on) diff --git a/tests/functional/external_reference/test_external_reference.py b/tests/functional/external_reference/test_external_reference.py new file mode 100644 index 00000000000..8b5294155d8 --- /dev/null +++ b/tests/functional/external_reference/test_external_reference.py @@ -0,0 +1,59 @@ +import pytest + +from dbt.tests.util import run_dbt + + +external_model_sql = """ +{{ + config( + materialized = "view" + ) +}} + +select * from "{{ this.schema + 'z' }}"."external" +""" + +model_sql = """ +select 1 as id +""" + + +class TestExternalReference: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": external_model_sql} + + def test_external_reference(self, project, unique_schema): + external_schema = unique_schema + "z" + project.run_sql(f'create schema "{external_schema}"') + project.run_sql(f'create table "{external_schema}"."external" (id integer)') + project.run_sql(f'insert into "{external_schema}"."external" values (1), (2)') + + results = run_dbt(["run"]) + assert len(results) == 1 + + # running it again should succeed + results = run_dbt(["run"]) + assert len(results) == 1 + + +# The opposite of the test above -- check that external relations that +# depend on a dbt model do not create issues with caching +class TestExternalDependency: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": model_sql} + + def test_external_reference(self, project, unique_schema): + results = run_dbt(["run"]) + assert len(results) == 1 + + external_schema = unique_schema + "z" + project.run_sql(f'create schema "{external_schema}"') + project.run_sql( + f'create view "{external_schema}"."external" as (select * from {unique_schema}.model)' + ) + + # running it again should succeed + results = run_dbt(["run"]) + assert len(results) == 1 diff --git a/tests/functional/fail_fast/test_fail_fast_run.py b/tests/functional/fail_fast/test_fail_fast_run.py index 3ea3c4bc0f0..5c0c8cf849d 100644 --- a/tests/functional/fail_fast/test_fail_fast_run.py +++ b/tests/functional/fail_fast/test_fail_fast_run.py @@ -2,7 +2,7 @@ from dbt.tests.util import run_dbt from tests.functional.fail_fast.fixtures import models, project_files # noqa: F401 -from dbt.exceptions import FailFastException +from dbt.exceptions import FailFastError def check_audit_table(project, count=1): @@ -43,7 +43,7 @@ def test_fail_fast_run( self, project, ): - with pytest.raises(FailFastException): + with pytest.raises(FailFastError): run_dbt(["run", "--threads", "1", "--fail-fast"]) check_audit_table(project) @@ -62,6 +62,6 @@ def test_fail_fast_run_user_config( self, project, ): - with pytest.raises(FailFastException): + with 
pytest.raises(FailFastError): run_dbt(["run", "--threads", "1"]) check_audit_table(project) diff --git a/tests/functional/hooks/test_model_hooks.py b/tests/functional/hooks/test_model_hooks.py index 097fa8af0c8..99a05c9c895 100644 --- a/tests/functional/hooks/test_model_hooks.py +++ b/tests/functional/hooks/test_model_hooks.py @@ -2,7 +2,7 @@ from pathlib import Path -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import ( run_dbt, @@ -170,6 +170,29 @@ def test_pre_and_post_run_hooks(self, project, dbt_profile_target): self.check_hooks("end", project, dbt_profile_target["host"]) +class TestPrePostModelHooksUnderscores(TestPrePostModelHooks): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "test": { + "pre_hook": [ + # inside transaction (runs second) + MODEL_PRE_HOOK, + # outside transaction (runs first) + {"sql": "vacuum {{ this.schema }}.on_model_hook", "transaction": False}, + ], + "post_hook": [ + # outside transaction (runs second) + {"sql": "vacuum {{ this.schema }}.on_model_hook", "transaction": False}, + # inside transaction (runs first) + MODEL_POST_HOOK, + ], + } + } + } + + class TestHookRefs(BaseTestPrePost): @pytest.fixture(scope="class") def project_config_update(self): @@ -399,7 +422,7 @@ def models(self): return {"hooks.sql": models__hooks_error} def test_run_duplicate_hook_defs(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "pre_hook" in str(exc.value) assert "pre-hook" in str(exc.value) diff --git a/tests/functional/incremental_schema_tests/test_incremental_schema.py b/tests/functional/incremental_schema_tests/test_incremental_schema.py deleted file mode 100644 index 3ee9e6477e4..00000000000 --- a/tests/functional/incremental_schema_tests/test_incremental_schema.py +++ /dev/null @@ -1,136 +0,0 @@ -import pytest - -from dbt.tests.util import ( - check_relations_equal, - run_dbt, -) - -from tests.functional.incremental_schema_tests.fixtures import ( - _PROPERTIES__SCHEMA, - _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, - _MODELS__INCREMENTAL_IGNORE, - _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, - _MODELS__INCREMENTAL_IGNORE_TARGET, - _MODELS__INCREMENTAL_FAIL, - _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, - _MODELS__A, - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, - _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, - _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, - _TESTS__SELECT_FROM_A, - _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, - _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, - _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, - _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, - _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, -) - - -class TestIncrementalSchemaChange: - @pytest.fixture(scope="class") - def properties(self): - return { - "schema.yml": _PROPERTIES__SCHEMA, - } - - @pytest.fixture(scope="class") - def models(self): - return { - "incremental_sync_remove_only.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, - "incremental_ignore.sql": _MODELS__INCREMENTAL_IGNORE, - "incremental_sync_remove_only_target.sql": - _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, - "incremental_ignore_target.sql": _MODELS__INCREMENTAL_IGNORE_TARGET, - "incremental_fail.sql": _MODELS__INCREMENTAL_FAIL, - 
"incremental_sync_all_columns.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, - "incremental_append_new_columns_remove_one.sql": - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, - "model_a.sql": _MODELS__A, - "incremental_append_new_columns_target.sql": - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, - "incremental_append_new_columns.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, - "incremental_sync_all_columns_target.sql": - _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, - "incremental_append_new_columns_remove_one_target.sql": - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, - } - - @pytest.fixture(scope="class") - def tests(self): - return { - "select_from_incremental.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, - "select_from_a.sql": _TESTS__SELECT_FROM_A, - "select_from_incremental_append_new_columns_target.sql": - _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, - "select_from_incremental_sync_all_columns.sql": - _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, - "select_from_incremental_sync_all_columns_target.sql": - _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, - "select_from_incremental_ignore_target.sql": - _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, - "select_from_incremental_append_new_columns.sql": - _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, - } - - def run_twice_and_assert( - self, include, compare_source, compare_target, project - ): - - # dbt run (twice) - run_args = ['run'] - if include: - run_args.extend(('--select', include)) - results_one = run_dbt(run_args) - assert len(results_one) == 3 - - results_two = run_dbt(run_args) - assert len(results_two) == 3 - - check_relations_equal(project.adapter, [compare_source, compare_target]) - - def run_incremental_append_new_columns(self, project): - select = 'model_a incremental_append_new_columns incremental_append_new_columns_target' - compare_source = 'incremental_append_new_columns' - compare_target = 'incremental_append_new_columns_target' - self.run_twice_and_assert(select, compare_source, compare_target, project) - - def run_incremental_append_new_columns_remove_one(self, project): - select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target' - compare_source = 'incremental_append_new_columns_remove_one' - compare_target = 'incremental_append_new_columns_remove_one_target' - self.run_twice_and_assert(select, compare_source, compare_target, project) - - def run_incremental_sync_all_columns(self, project): - select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target' - compare_source = 'incremental_sync_all_columns' - compare_target = 'incremental_sync_all_columns_target' - self.run_twice_and_assert(select, compare_source, compare_target, project) - - def run_incremental_sync_remove_only(self, project): - select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target' - compare_source = 'incremental_sync_remove_only' - compare_target = 'incremental_sync_remove_only_target' - self.run_twice_and_assert(select, compare_source, compare_target, project) - - def test_run_incremental_ignore(self, project): - select = 'model_a incremental_ignore incremental_ignore_target' - compare_source = 'incremental_ignore' - compare_target = 'incremental_ignore_target' - self.run_twice_and_assert(select, compare_source, compare_target, project) - - def test_run_incremental_append_new_columns(self, project): - self.run_incremental_append_new_columns(project) - 
self.run_incremental_append_new_columns_remove_one(project) - - def test_run_incremental_sync_all_columns(self, project): - self.run_incremental_sync_all_columns(project) - self.run_incremental_sync_remove_only(project) - - def test_run_incremental_fail_on_schema_change(self, project): - select = 'model_a incremental_fail' - run_dbt(['run', '--models', select, '--full-refresh']) - results_two = run_dbt(['run', '--models', select], expect_pass=False) - assert 'Compilation Error' in results_two[1].message diff --git a/tests/functional/invalid_model_tests/test_invalid_models.py b/tests/functional/invalid_model_tests/test_invalid_models.py index 29739dcac20..09db17bc325 100644 --- a/tests/functional/invalid_model_tests/test_invalid_models.py +++ b/tests/functional/invalid_model_tests/test_invalid_models.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError from dbt.tests.util import ( run_dbt, @@ -129,7 +129,7 @@ def models(self): } def test_view_disabled(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["seed"]) assert "enabled" in str(exc.value) @@ -146,7 +146,7 @@ def models(self): } def test_referencing_disabled_model(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "which is disabled" in str(exc.value) @@ -160,7 +160,7 @@ def models(self): return {"models__dependent_on_view.sql": models__dependent_on_view} def test_models_not_found(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "which was not found" in str(exc.value) @@ -176,7 +176,7 @@ def models(self): return {"models__with_bad_macro.sql": models__with_bad_macro} def test_with_invalid_macro_call(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert "macro 'dbt_macro__some_macro' takes no keyword argument 'invalid'" in str( @@ -207,7 +207,7 @@ def project_config_update(self): } def test_postgres_source_disabled(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "which is disabled" in str(exc.value) @@ -221,7 +221,7 @@ def models(self): return {"models__referencing_disabled_source.sql": models__referencing_disabled_source} def test_source_missing(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "which was not found" in str(exc.value) diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py index b0feea50809..afcd90d4afb 100644 --- a/tests/functional/logging/test_logging.py +++ b/tests/functional/logging/test_logging.py @@ -1,6 +1,7 @@ import pytest from dbt.tests.util import run_dbt, get_manifest, read_file import json +import os my_model_sql = """ @@ -26,7 +27,8 @@ def test_basic(project, logs_dir): assert log_file node_start = False node_finished = False - for log_line in log_file.split('\n'): + connection_reused_data = [] + for log_line in log_file.split("\n"): # skip empty lines if len(log_line) == 0: continue @@ -34,18 +36,30 @@ def test_basic(project, logs_dir): if "[debug]" in log_line: continue log_dct = json.loads(log_line) - log_event = log_dct['info']['name'] + log_data = log_dct["data"] + log_event = 
log_dct["info"]["name"] + if log_event == "ConnectionReused": + connection_reused_data.append(log_data) if log_event == "NodeStart": node_start = True if log_event == "NodeFinished": node_finished = True if node_start and not node_finished: - if log_event == 'NodeExecuting': - assert "node_info" in log_dct + if log_event == "NodeExecuting": + assert "node_info" in log_data if log_event == "JinjaLogDebug": - assert "node_info" in log_dct + assert "node_info" in log_data if log_event == "SQLQuery": - assert "node_info" in log_dct + assert "node_info" in log_data if log_event == "TimingInfoCollected": - assert "node_info" in log_dct - assert "timing_info" in log_dct + assert "node_info" in log_data + assert "timing_info" in log_data + + # windows doesn't have the same thread/connection flow so the ConnectionReused + # events don't show up + if os.name != "nt": + # Verify the ConnectionReused event occurs and has the right data + assert connection_reused_data + for data in connection_reused_data: + assert "conn_name" in data and data["conn_name"] + assert "orig_conn_name" in data and data["orig_conn_name"] diff --git a/tests/functional/logging/test_meta_logging.py b/tests/functional/logging/test_meta_logging.py new file mode 100644 index 00000000000..189562bba49 --- /dev/null +++ b/tests/functional/logging/test_meta_logging.py @@ -0,0 +1,44 @@ +import pytest +from dbt.tests.util import run_dbt, read_file +import json + +model1 = "select 1 as fun" +model2 = '{{ config(meta={"owners": ["team1", "team2"]})}} select 1 as fun' +model3 = '{{ config(meta={"key": 1})}} select 1 as fun' + + +@pytest.fixture(scope="class") # noqa +def models(): + return {"model1.sql": model1, "model2.sql": model2, "model3.sql": model3} + + +# This test checks that various events contain node_info, +# which is supplied by the log_contextvars context manager +def test_meta(project, logs_dir): + run_dbt(["--log-format=json", "run"]) + + # get log file + log_file = read_file(logs_dir, "dbt.log") + assert log_file + + for log_line in log_file.split("\n"): + # skip empty lines + if len(log_line) == 0: + continue + # The adapter logging also shows up, so skip non-json lines + if "[debug]" in log_line: + continue + + log_dct = json.loads(log_line) + if "node_info" not in log_dct["data"]: + continue + + print(f"--- log_dct: {log_dct}") + node_info = log_dct["data"]["node_info"] + node_path = node_info["node_path"] + if node_path == "model1.sql": + assert node_info["meta"] == {} + elif node_path == "model2.sql": + assert node_info["meta"] == {"owners": "['team1', 'team2']"} + elif node_path == "model3.sql": + assert node_info["meta"] == {"key": "1"} diff --git a/tests/functional/macros/test_macros.py b/tests/functional/macros/test_macros.py index 899be2453b1..e7f25acab3a 100644 --- a/tests/functional/macros/test_macros.py +++ b/tests/functional/macros/test_macros.py @@ -97,7 +97,7 @@ def macros(self): return {"my_macros.sql": macros__no_default_macros} def test_invalid_macro(self, project): - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt() assert "In dispatch: No macro named 'dispatch_to_nowhere' found" in str(exc.value) @@ -213,7 +213,7 @@ def macros(self): return {"macro.sql": macros__deprecated_adapter_macro} def test_invalid_macro(self, project): - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt() assert 'The "adapter_macro" macro has been deprecated' in 
str(exc.value) diff --git a/tests/functional/materializations/test_incremental.py b/tests/functional/materializations/test_incremental.py index f6ec8b2a3e9..7e8df9ea6f1 100644 --- a/tests/functional/materializations/test_incremental.py +++ b/tests/functional/materializations/test_incremental.py @@ -1,6 +1,6 @@ import pytest from dbt.tests.util import run_dbt, get_manifest -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError from dbt.context.providers import generate_runtime_model_context @@ -43,10 +43,10 @@ def test_basic(project): assert type(macro_func).__name__ == "MacroGenerator" # These two incremental strategies are not valid for Postgres - with pytest.raises(RuntimeException) as excinfo: + with pytest.raises(DbtRuntimeError) as excinfo: macro_func = project.adapter.get_incremental_strategy_macro(context, "merge") assert "merge" in str(excinfo.value) - with pytest.raises(RuntimeException) as excinfo: + with pytest.raises(DbtRuntimeError) as excinfo: macro_func = project.adapter.get_incremental_strategy_macro(context, "insert_overwrite") assert "insert_overwrite" in str(excinfo.value) diff --git a/tests/functional/metrics/test_metric_configs.py b/tests/functional/metrics/test_metric_configs.py index 88c39e0537d..6ad960ec11f 100644 --- a/tests/functional/metrics/test_metric_configs.py +++ b/tests/functional/metrics/test_metric_configs.py @@ -1,7 +1,7 @@ import pytest from hologram import ValidationError from dbt.contracts.graph.model_config import MetricConfig -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt, update_config_file, get_manifest @@ -11,7 +11,7 @@ disabled_metric_level_schema_yml, enabled_metric_level_schema_yml, models_people_metrics_sql, - invalid_config_metric_yml + invalid_config_metric_yml, ) @@ -106,7 +106,7 @@ def test_metrics_all_configs(self, project): assert config_test_table == pytest.expected_config -# Test CompilationException if a model references a disabled metric +# Test CompilationError if a model references a disabled metric class TestDisabledMetricRef(MetricConfigTests): @pytest.fixture(scope="class") def models(self): @@ -134,7 +134,7 @@ def test_disabled_metric_ref_model(self, project): } update_config_file(new_enabled_config, project.project_root, "dbt_project.yml") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["parse"]) diff --git a/tests/functional/metrics/test_metric_helper_functions.py b/tests/functional/metrics/test_metric_helper_functions.py index c1b7a3487b6..da9a0046ba4 100644 --- a/tests/functional/metrics/test_metric_helper_functions.py +++ b/tests/functional/metrics/test_metric_helper_functions.py @@ -3,10 +3,7 @@ from dbt.tests.util import run_dbt, get_manifest from dbt.contracts.graph.metrics import ResolvedMetricReference -from tests.functional.metrics.fixtures import ( - models_people_sql, - basic_metrics_yml -) +from tests.functional.metrics.fixtures import models_people_sql, basic_metrics_yml class TestMetricHelperFunctions: diff --git a/tests/functional/metrics/test_metrics.py b/tests/functional/metrics/test_metrics.py index de8c022f3d3..adc55c3b996 100644 --- a/tests/functional/metrics/test_metrics.py +++ b/tests/functional/metrics/test_metrics.py @@ -1,7 +1,7 @@ import pytest from dbt.tests.util import run_dbt, get_manifest -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from tests.functional.metrics.fixtures import ( @@ -21,7 +21,7 @@ 
derived_metric_old_attr_names_yml, metric_without_timestamp_or_timegrains_yml, invalid_metric_without_timestamp_with_time_grains_yml, - invalid_metric_without_timestamp_with_window_yml + invalid_metric_without_timestamp_with_window_yml, ) @@ -85,14 +85,14 @@ def models(self): "people.sql": models_people_sql, } - # tests that we get a ParsingException with an invalid model ref, where + # tests that we get a ParsingError with an invalid model ref, where # the model name does not have quotes def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -104,14 +104,14 @@ def models(self): "people.sql": models_people_sql, } - # tests that we get a ParsingException with an invalid model ref, where + # tests that we get a ParsingError with an invalid model ref, where # the model name does not have quotes def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -123,13 +123,13 @@ def models(self): "people.sql": models_people_sql, } - # tests that we get a ParsingException with a missing expression + # tests that we get a ParsingError with a missing expression def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -142,7 +142,7 @@ def models(self): } def test_names_with_spaces(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["run"]) assert "cannot contain spaces" in str(exc.value) @@ -156,7 +156,7 @@ def models(self): } def test_names_with_special_char(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["run"]) assert "must contain only letters, numbers and underscores" in str(exc.value) @@ -170,7 +170,7 @@ def models(self): } def test_names_with_leading_number(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["run"]) assert "must begin with a letter" in str(exc.value) @@ -184,7 +184,7 @@ def models(self): } def test_long_name(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["run"]) assert "cannot contain more than 250 characters" in str(exc.value) @@ -198,7 +198,7 @@ def models(self): } def test_invalid_derived_metrics(self, project): - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -294,14 +294,14 @@ def models(self): "people.sql": models_people_sql, } - # Tests that we get a ParsingException with an invalid metric definition. + # Tests that we get a ParsingError with an invalid metric definition. # This metric definition is missing timestamp but HAS a time_grains property def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -313,12 +313,12 @@ def models(self): "people.sql": models_people_sql, } - # Tests that we get a ParsingException with an invalid metric definition. + # Tests that we get a ParsingError with an invalid metric definition. 
# This metric definition is missing timestamp but HAS a window property def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) diff --git a/tests/functional/partial_parsing/fixtures.py b/tests/functional/partial_parsing/fixtures.py new file mode 100644 index 00000000000..7681b9dcb8c --- /dev/null +++ b/tests/functional/partial_parsing/fixtures.py @@ -0,0 +1,1126 @@ +local_dependency__dbt_project_yml = """ + +name: 'local_dep' +version: '1.0' +config-version: 2 + +profile: 'default' + +model-paths: ["models"] +analysis-paths: ["analyses"] +test-paths: ["tests"] +seed-paths: ["seeds"] +macro-paths: ["macros"] + +require-dbt-version: '>=0.1.0' + +target-path: "target" # directory which will store compiled SQL files +clean-targets: # directories to be removed by `dbt clean` + - "target" + - "dbt_packages" + + +seeds: + quote_columns: False + +""" + +local_dependency__models__schema_yml = """ +version: 2 +sources: + - name: seed_source + schema: "{{ var('schema_override', target.schema) }}" + tables: + - name: "seed" + columns: + - name: id + tests: + - unique + +""" + +local_dependency__models__model_to_import_sql = """ +select * from {{ ref('seed') }} + +""" + +local_dependency__macros__dep_macro_sql = """ +{% macro some_overridden_macro() -%} +100 +{%- endmacro %} + +""" + +local_dependency__seeds__seed_csv = """id +1 +""" + +empty_schema_with_version_yml = """ +version: 2 + +""" + +schema_sources5_yml = """ +version: 2 + +sources: + - name: seed_sources + schema: "{{ target.schema }}" + tables: + - name: raw_customers + columns: + - name: id + tests: + - not_null: + severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" + - unique + - name: first_name + - name: last_name + - name: email + +seeds: + - name: rad_customers + description: "Raw customer data" + columns: + - name: id + tests: + - unique + - not_null + - name: first_name + - name: last_name + - name: email + + +""" + +my_macro2_sql = """ +{% macro do_something(foo2, bar2) %} + + select + 'foo' as foo2, + 'var' as bar2 + +{% endmacro %} + +""" + +raw_customers_csv = """id,first_name,last_name,email +1,Michael,Perez,mperez0@chronoengine.com +2,Shawn,Mccoy,smccoy1@reddit.com +3,Kathleen,Payne,kpayne2@cargocollective.com +4,Jimmy,Cooper,jcooper3@cargocollective.com +5,Katherine,Rice,krice4@typepad.com +6,Sarah,Ryan,sryan5@gnu.org +7,Martin,Mcdonald,mmcdonald6@opera.com +8,Frank,Robinson,frobinson7@wunderground.com +9,Jennifer,Franklin,jfranklin8@mail.ru +10,Henry,Welch,hwelch9@list-manage.com +""" + +model_three_disabled2_sql = """ +- Disabled model +{{ config(materialized='table', enabled=False) }} + +with source_data as ( + + select 1 as id + union all + select null as id + +) + +select * +from source_data + +""" + +schema_sources4_yml = """ +version: 2 + +sources: + - name: seed_sources + schema: "{{ target.schema }}" + tables: + - name: raw_customers + columns: + - name: id + tests: + - not_null: + severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" + - unique + - every_value_is_blue + - name: first_name + - name: last_name + - name: email + +seeds: + - name: raw_customers + description: "Raw customer data" + columns: + - name: id + tests: + - unique + - not_null + - name: first_name + - name: last_name + - name: email + + +""" + +env_var_schema_yml = """ +version: 2 + +models: + - name: model_one + config: + materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" + +""" + +my_test_sql = """ +select + * from {{ 
ref('customers') }} where first_name = '{{ macro_something() }}' + +""" + +empty_schema_yml = """ + +""" + +schema_models_c_yml = """ +version: 2 + +sources: + - name: seed_source + description: "This is a source override" + overrides: local_dep + schema: "{{ var('schema_override', target.schema) }}" + tables: + - name: "seed" + columns: + - name: id + tests: + - unique + - not_null + +""" + +env_var_sources_yml = """ +version: 2 +sources: + - name: seed_sources + schema: "{{ target.schema }}" + database: "{{ env_var('ENV_VAR_DATABASE') }}" + tables: + - name: raw_customers + columns: + - name: id + tests: + - not_null: + severity: "{{ env_var('ENV_VAR_SEVERITY') }}" + - unique + - name: first_name + - name: last_name + - name: email + + + +""" + +generic_test_edited_sql = """ +{% test is_odd(model, column_name) %} + +with validation as ( + + select + {{ column_name }} as odd_field2 + + from {{ model }} + +), + +validation_errors as ( + + select + odd_field2 + + from validation + -- if this is true, then odd_field is actually even! + where (odd_field2 % 2) = 0 + +) + +select * +from validation_errors + +{% endtest %} +""" + +schema_sources1_yml = """ +version: 2 +sources: + - name: seed_sources + schema: "{{ target.schema }}" + tables: + - name: raw_customers + columns: + - name: id + tests: + - not_null: + severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" + - unique + - name: first_name + - name: last_name + - name: email + + + +""" + +schema_sources3_yml = """ +version: 2 + +sources: + - name: seed_sources + schema: "{{ target.schema }}" + tables: + - name: raw_customers + columns: + - name: id + tests: + - not_null: + severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" + - unique + - name: first_name + - name: last_name + - name: email + +exposures: + - name: proxy_for_dashboard + description: "This is for the XXX dashboard" + type: "dashboard" + owner: + name: "Dashboard Tester" + email: "tester@dashboard.com" + depends_on: + - ref("model_one") + - source("seed_sources", "raw_customers") + + +""" + +my_analysis_sql = """ +select * from customers + +""" + +schema_sources2_yml = """ +version: 2 + +sources: + - name: seed_sources + schema: "{{ target.schema }}" + tables: + - name: raw_customers + columns: + - name: id + tests: + - not_null: + severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" + - unique + - name: first_name + - name: last_name + - name: email + +exposures: + - name: proxy_for_dashboard + description: "This is for the XXX dashboard" + type: "dashboard" + owner: + name: "Dashboard Tester" + email: "tester@dashboard.com" + depends_on: + - ref("model_one") + - ref("raw_customers") + - source("seed_sources", "raw_customers") + + +""" + +model_color_sql = """ +select 'blue' as fun + +""" + +my_metric_yml = """ +version: 2 +metrics: + - name: new_customers + label: New Customers + model: customers + description: "The number of paid customers who are using the product" + calculation_method: count + expression: user_id + timestamp: signup_date + time_grains: [day, week, month] + dimensions: + - plan + - country + filters: + - field: is_paying + value: True + operator: '=' + +meta: + is_okr: True + tags: + - okrs + + + +""" + +env_var_schema2_yml = """ +version: 2 + +models: + - name: model_one + config: + materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" + tests: + - check_color: + column_name: fun + color: "env_var('ENV_VAR_COLOR')" + + +""" + +gsm_override_sql = """ +- custom macro +{% macro generate_schema_name(schema_name, node) %} + + {{ 
schema_name }}_{{ target.schema }} + +{% endmacro %} + +""" + +model_four1_sql = """ +select * from {{ ref('model_three') }} + +""" + +model_one_sql = """ +select 1 as fun + +""" + +env_var_schema3_yml = """ +version: 2 + +models: + - name: model_one + config: + materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" + tests: + - check_color: + column_name: fun + color: "env_var('ENV_VAR_COLOR')" + +exposures: + - name: proxy_for_dashboard + description: "This is for the XXX dashboard" + type: "dashboard" + owner: + name: "{{ env_var('ENV_VAR_OWNER') }}" + email: "tester@dashboard.com" + depends_on: + - ref("model_color") + - source("seed_sources", "raw_customers") + +""" + +env_var_metrics_yml = """ +version: 2 + +metrics: + + - model: "ref('people')" + name: number_of_people + description: Total count of people + label: "Number of people" + calculation_method: count + expression: "*" + timestamp: created_at + time_grains: [day, week, month] + dimensions: + - favorite_color + - loves_dbt + meta: + my_meta: '{{ env_var("ENV_VAR_METRICS") }}' + + - model: "ref('people')" + name: collective_tenure + description: Total number of years of team experience + label: "Collective tenure" + calculation_method: sum + expression: tenure + timestamp: created_at + time_grains: [day] + filters: + - field: loves_dbt + operator: is + value: 'true' + +""" + +customers_sql = """ +with source as ( + + select * from {{ source('seed_sources', 'raw_customers') }} + +), + +renamed as ( + + select + id as customer_id, + first_name, + last_name, + email + + from source + +) + +select * from renamed + +""" + +model_four2_sql = """ +select fun from {{ ref('model_one') }} + +""" + +env_var_model_sql = """ +select '{{ env_var('ENV_VAR_TEST') }}' as vartest + +""" + +env_var_model_one_sql = """ +select 'blue' as fun + +""" + +custom_schema_tests2_sql = """ +{% test type_one(model) %} + + select * from ( + + select * from {{ model }} + union all + select * from {{ ref('model_b') }} + + ) as Foo + +{% endtest %} + +{% test type_two(model) %} + + {{ config(severity = "ERROR") }} + + select * from {{ model }} + +{% endtest %} + +""" + +metric_model_a_sql = """ +{% + set metric_list = [ + metric('number_of_people'), + metric('collective_tenure') + ] +%} + +{% if not execute %} + + {% set metric_names = [] %} + {% for m in metric_list %} + {% do metric_names.append(m.metric_name) %} + {% endfor %} + + -- this config does nothing, but it lets us check these values + {{ config(metric_names = metric_names) }} + +{% endif %} + + +select 1 as fun + +""" + +model_b_sql = """ +select 1 as notfun + +""" + +customers2_md = """ +{% docs customer_table %} + +LOTS of customer data + +{% enddocs %} + +""" + +custom_schema_tests1_sql = """ +{% test type_one(model) %} + + select * from ( + + select * from {{ model }} + union all + select * from {{ ref('model_b') }} + + ) as Foo + +{% endtest %} + +{% test type_two(model) %} + + {{ config(severity = "WARN") }} + + select * from {{ model }} + +{% endtest %} + +""" + +people_metrics_yml = """ +version: 2 + +metrics: + + - model: "ref('people')" + name: number_of_people + description: Total count of people + label: "Number of people" + calculation_method: count + expression: "*" + timestamp: created_at + time_grains: [day, week, month] + dimensions: + - favorite_color + - loves_dbt + meta: + my_meta: 'testing' + + - model: "ref('people')" + name: collective_tenure + description: Total number of years of team experience + label: "Collective tenure" + calculation_method: sum + expression: tenure + 
timestamp: created_at + time_grains: [day] + filters: + - field: loves_dbt + operator: is + value: 'true' + +""" + +people_sql = """ +select 1 as id, 'Drew' as first_name, 'Banin' as last_name, 'yellow' as favorite_color, true as loves_dbt, 5 as tenure, current_timestamp as created_at +union all +select 1 as id, 'Jeremy' as first_name, 'Cohen' as last_name, 'indigo' as favorite_color, true as loves_dbt, 4 as tenure, current_timestamp as created_at + +""" + +orders_sql = """ +select 1 as id, 101 as user_id, 'pending' as status + +""" + +model_a_sql = """ +select 1 as fun + +""" + +model_three_disabled_sql = """ +{{ config(materialized='table', enabled=False) }} + +with source_data as ( + + select 1 as id + union all + select null as id + +) + +select * +from source_data + +""" + +models_schema2b_yml = """ +version: 2 + +models: + - name: model_one + description: "The first model" + - name: model_three + description: "The third model" + columns: + - name: id + tests: + - not_null + +""" + +env_var_macros_yml = """ +version: 2 +macros: + - name: do_something + description: "This is a test macro" + meta: + some_key: "{{ env_var('ENV_VAR_SOME_KEY') }}" + + +""" + +models_schema4_yml = """ +version: 2 + +models: + - name: model_one + description: "The first model" + - name: model_three + description: "The third model" + config: + enabled: false + columns: + - name: id + tests: + - unique + +""" + +model_two_sql = """ +select 1 as notfun + +""" + +generic_test_schema_yml = """ +version: 2 + +models: + - name: orders + description: "Some order data" + columns: + - name: id + tests: + - unique + - is_odd + +""" + +customers1_md = """ +{% docs customer_table %} + +This table contains customer data + +{% enddocs %} + +""" + +model_three_modified_sql = """ +{{ config(materialized='table') }} + +with source_data as ( + + {#- This is model three #} + + select 1 as id + union all + select null as id + +) + +select * +from source_data + +""" + +macros_yml = """ +version: 2 +macros: + - name: do_something + description: "This is a test macro" + +""" + +test_color_sql = """ +{% test check_color(model, column_name, color) %} + + select * + from {{ model }} + where {{ column_name }} = '{{ color }}' + +{% endtest %} + +""" + +models_schema2_yml = """ +version: 2 + +models: + - name: model_one + description: "The first model" + - name: model_three + description: "The third model" + columns: + - name: id + tests: + - unique + +""" + +gsm_override2_sql = """ +- custom macro xxxx +{% macro generate_schema_name(schema_name, node) %} + + {{ schema_name }}_{{ target.schema }} + +{% endmacro %} + +""" + +models_schema3_yml = """ +version: 2 + +models: + - name: model_one + description: "The first model" + - name: model_three + description: "The third model" + tests: + - unique +macros: + - name: do_something + description: "This is a test macro" + +""" + +generic_test_sql = """ +{% test is_odd(model, column_name) %} + +with validation as ( + + select + {{ column_name }} as odd_field + + from {{ model }} + +), + +validation_errors as ( + + select + odd_field + + from validation + -- if this is true, then odd_field is actually even! 
+ where (odd_field % 2) = 0 + +) + +select * +from validation_errors + +{% endtest %} +""" + +env_var_model_test_yml = """ +version: 2 +models: + - name: model_color + columns: + - name: fun + tests: + - unique: + enabled: "{{ env_var('ENV_VAR_ENABLED', True) }}" + +""" + +model_three_sql = """ +{{ config(materialized='table') }} + +with source_data as ( + + select 1 as id + union all + select null as id + +) + +select * +from source_data + +""" + +ref_override2_sql = """ +- Macro to override ref xxxx +{% macro ref(modelname) %} +{% do return(builtins.ref(modelname)) %} +{% endmacro %} + +""" + +models_schema1_yml = """ +version: 2 + +models: + - name: model_one + description: "The first model" + +""" + +macros_schema_yml = """ + +version: 2 + +models: + - name: model_a + tests: + - type_one + - type_two + +""" + +my_macro_sql = """ +{% macro do_something(foo2, bar2) %} + + select + '{{ foo2 }}' as foo2, + '{{ bar2 }}' as bar2 + +{% endmacro %} + +""" + +snapshot_sql = """ +{% snapshot orders_snapshot %} + +{{ + config( + target_schema=schema, + strategy='check', + unique_key='id', + check_cols=['status'], + ) +}} + +select * from {{ ref('orders') }} + +{% endsnapshot %} + +{% snapshot orders2_snapshot %} + +{{ + config( + target_schema=schema, + strategy='check', + unique_key='id', + check_cols=['order_date'], + ) +}} + +select * from {{ ref('orders') }} + +{% endsnapshot %} + +""" + +models_schema4b_yml = """ +version: 2 + +models: + - name: model_one + description: "The first model" + - name: model_three + description: "The third model" + config: + enabled: true + columns: + - name: id + tests: + - unique + +""" + +test_macro_sql = """ +{% macro macro_something() %} + + {% do return('macro_something') %} + +{% endmacro %} + +""" + +people_metrics2_yml = """ +version: 2 + +metrics: + + - model: "ref('people')" + name: number_of_people + description: Total count of people + label: "Number of people" + calculation_method: count + expression: "*" + timestamp: created_at + time_grains: [day, week, month] + dimensions: + - favorite_color + - loves_dbt + meta: + my_meta: 'replaced' + + - model: "ref('people')" + name: collective_tenure + description: Total number of years of team experience + label: "Collective tenure" + calculation_method: sum + expression: tenure + timestamp: created_at + time_grains: [day] + filters: + - field: loves_dbt + operator: is + value: 'true' + +""" + +generic_schema_yml = """ +version: 2 + +models: + - name: orders + description: "Some order data" + columns: + - name: id + tests: + - unique + +""" + +snapshot2_sql = """ +- add a comment +{% snapshot orders_snapshot %} + +{{ + config( + target_schema=schema, + strategy='check', + unique_key='id', + check_cols=['status'], + ) +}} + +select * from {{ ref('orders') }} + +{% endsnapshot %} + +{% snapshot orders2_snapshot %} + +{{ + config( + target_schema=schema, + strategy='check', + unique_key='id', + check_cols=['order_date'], + ) +}} + +select * from {{ ref('orders') }} + +{% endsnapshot %} + +""" + +sources_tests2_sql = """ + +{% test every_value_is_blue(model, column_name) %} + + select * + from {{ model }} + where {{ column_name }} != 99 + +{% endtest %} + + +""" + +people_metrics3_yml = """ +version: 2 + +metrics: + + - model: "ref('people')" + name: number_of_people + description: Total count of people + label: "Number of people" + calculation_method: count + expression: "*" + timestamp: created_at + time_grains: [day, week, month] + dimensions: + - favorite_color + - loves_dbt + meta: + my_meta: 'replaced' + 
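+ # note: this file intentionally keeps only number_of_people; swapping it in
+ # for the two-metric file is how test_pp_metrics deletes collective_tenure
+ # while metric_model_a still references it, which should fail at parse time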
+""" + +ref_override_sql = """ +- Macro to override ref +{% macro ref(modelname) %} +{% do return(builtins.ref(modelname)) %} +{% endmacro %} + +""" + +test_macro2_sql = """ +{% macro macro_something() %} + + {% do return('some_name') %} + +{% endmacro %} + +""" + +env_var_macro_sql = """ +{% macro do_something(foo2, bar2) %} + + select + '{{ foo2 }}' as foo2, + '{{ bar2 }}' as bar2 + +{% endmacro %} + +""" + +sources_tests1_sql = """ + +{% test every_value_is_blue(model, column_name) %} + + select * + from {{ model }} + where {{ column_name }} = 9999 + +{% endtest %} + + +""" diff --git a/tests/functional/partial_parsing/test_partial_parsing.py b/tests/functional/partial_parsing/test_partial_parsing.py new file mode 100644 index 00000000000..f70b2e0f9fa --- /dev/null +++ b/tests/functional/partial_parsing/test_partial_parsing.py @@ -0,0 +1,643 @@ +import pytest + +from dbt.tests.util import run_dbt, get_manifest, write_file, rm_file, run_dbt_and_capture +from dbt.tests.fixtures.project import write_project_files +from tests.functional.partial_parsing.fixtures import ( + model_one_sql, + model_two_sql, + models_schema1_yml, + models_schema2_yml, + models_schema2b_yml, + model_three_sql, + model_three_modified_sql, + model_four1_sql, + model_four2_sql, + models_schema4_yml, + models_schema4b_yml, + models_schema3_yml, + my_macro_sql, + my_macro2_sql, + macros_yml, + empty_schema_yml, + empty_schema_with_version_yml, + model_three_disabled_sql, + model_three_disabled2_sql, + raw_customers_csv, + customers_sql, + sources_tests1_sql, + schema_sources1_yml, + schema_sources2_yml, + schema_sources3_yml, + schema_sources4_yml, + schema_sources5_yml, + customers1_md, + customers2_md, + test_macro_sql, + my_test_sql, + test_macro2_sql, + my_analysis_sql, + sources_tests2_sql, + local_dependency__dbt_project_yml, + local_dependency__models__schema_yml, + local_dependency__models__model_to_import_sql, + local_dependency__macros__dep_macro_sql, + local_dependency__seeds__seed_csv, + schema_models_c_yml, + model_a_sql, + model_b_sql, + macros_schema_yml, + custom_schema_tests1_sql, + custom_schema_tests2_sql, + ref_override_sql, + ref_override2_sql, + gsm_override_sql, + gsm_override2_sql, + orders_sql, + snapshot_sql, + snapshot2_sql, + generic_schema_yml, + generic_test_sql, + generic_test_schema_yml, + generic_test_edited_sql, +) + +from dbt.exceptions import CompilationError +from dbt.contracts.files import ParseFileType +from dbt.contracts.results import TestStatus +import re +import os + +os.environ["DBT_PP_TEST"] = "true" + + +def normalize(path): + return os.path.normcase(os.path.normpath(path)) + + +class TestModels: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + def test_pp_models(self, project): + # initial run + # run_dbt(['clean']) + results = run_dbt(["run"]) + assert len(results) == 1 + + # add a model file + write_file(model_two_sql, project.project_root, "models", "model_two.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + + # add a schema file + write_file(models_schema1_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + assert "model.test.model_one" in manifest.nodes + model_one_node = manifest.nodes["model.test.model_one"] + assert model_one_node.description == "The first model" + assert model_one_node.patch_path == "test://" + normalize("models/schema.yml") + + # add 
a model and a schema file (with a test) at the same time + write_file(models_schema2_yml, project.project_root, "models", "schema.yml") + write_file(model_three_sql, project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "test"], expect_pass=False) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + project_files = [f for f in manifest.files if f.startswith("test://")] + assert len(project_files) == 4 + model_3_file_id = "test://" + normalize("models/model_three.sql") + assert model_3_file_id in manifest.files + model_three_file = manifest.files[model_3_file_id] + assert model_three_file.parse_file_type == ParseFileType.Model + assert type(model_three_file).__name__ == "SourceFile" + model_three_node = manifest.nodes[model_three_file.nodes[0]] + schema_file_id = "test://" + normalize("models/schema.yml") + assert model_three_node.patch_path == schema_file_id + assert model_three_node.description == "The third model" + schema_file = manifest.files[schema_file_id] + assert type(schema_file).__name__ == "SchemaSourceFile" + assert len(schema_file.tests) == 1 + tests = schema_file.get_all_test_ids() + assert tests == ["test.test.unique_model_three_id.6776ac8160"] + unique_test_id = tests[0] + assert unique_test_id in manifest.nodes + + # modify model sql file, ensure description still there + write_file(model_three_modified_sql, project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + model_id = "model.test.model_three" + assert model_id in manifest.nodes + model_three_node = manifest.nodes[model_id] + assert model_three_node.description == "The third model" + + # Change the model 3 test from unique to not_null + write_file(models_schema2b_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "test"], expect_pass=False) + manifest = get_manifest(project.project_root) + schema_file_id = "test://" + normalize("models/schema.yml") + schema_file = manifest.files[schema_file_id] + tests = schema_file.get_all_test_ids() + assert tests == ["test.test.not_null_model_three_id.3162ce0a6f"] + not_null_test_id = tests[0] + assert not_null_test_id in manifest.nodes.keys() + assert unique_test_id not in manifest.nodes.keys() + assert len(results) == 1 + + # go back to previous version of schema file, removing patch, test, and model for model three + write_file(models_schema1_yml, project.project_root, "models", "schema.yml") + rm_file(project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + + # remove schema file, still have 3 models + write_file(model_three_sql, project.project_root, "models", "model_three.sql") + rm_file(project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + manifest = get_manifest(project.project_root) + schema_file_id = "test://" + normalize("models/schema.yml") + assert schema_file_id not in manifest.files + project_files = [f for f in manifest.files if f.startswith("test://")] + assert len(project_files) == 3 + + # Put schema file back and remove a model + # referred to in schema file + write_file(models_schema2_yml, project.project_root, "models", "schema.yml") + rm_file(project.project_root, "models", "model_three.sql") + with pytest.raises(CompilationError): + results = run_dbt(["--partial-parse", "--warn-error", "run"]) + + # Put model back again + 
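+ # (Every step in this test repeats the same cycle: mutate one project file
+ # with write_file/rm_file, rerun with --partial-parse so only changed files
+ # are re-parsed, then assert against the refreshed manifest or run results.)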
write_file(model_three_sql, project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # Add model four refing model three + write_file(model_four1_sql, project.project_root, "models", "model_four.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 4 + + # Remove model_three and change model_four to ref model_one + # and change schema file to remove model_three + rm_file(project.project_root, "models", "model_three.sql") + write_file(model_four2_sql, project.project_root, "models", "model_four.sql") + write_file(models_schema1_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # Remove model four, put back model three, put back schema file + write_file(model_three_sql, project.project_root, "models", "model_three.sql") + write_file(models_schema2_yml, project.project_root, "models", "schema.yml") + rm_file(project.project_root, "models", "model_four.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # disable model three in the schema file + write_file(models_schema4_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + + # update enabled config to be true for model three in the schema file + write_file(models_schema4b_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # disable model three in the schema file again + write_file(models_schema4_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + + # remove disabled config for model three in the schema file to check it gets enabled + write_file(models_schema4b_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # Add a macro + write_file(my_macro_sql, project.project_root, "macros", "my_macro.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + manifest = get_manifest(project.project_root) + macro_id = "macro.test.do_something" + assert macro_id in manifest.macros + + # Modify the macro + write_file(my_macro2_sql, project.project_root, "macros", "my_macro.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # Add a macro patch + write_file(models_schema3_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # Remove the macro + rm_file(project.project_root, "macros", "my_macro.sql") + with pytest.raises(CompilationError): + results = run_dbt(["--partial-parse", "--warn-error", "run"]) + + # put back macro file, got back to schema file with no macro + # add separate macro patch schema file + write_file(models_schema2_yml, project.project_root, "models", "schema.yml") + write_file(my_macro_sql, project.project_root, "macros", "my_macro.sql") + write_file(macros_yml, project.project_root, "macros", "macros.yml") + results = run_dbt(["--partial-parse", "run"]) + + # delete macro and schema file + rm_file(project.project_root, "macros", "my_macro.sql") + rm_file(project.project_root, "macros", "macros.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # Add an empty schema file + write_file(empty_schema_yml, project.project_root, "models", "eschema.yml") + results = 
run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # Add version to empty schema file + write_file(empty_schema_with_version_yml, project.project_root, "models", "eschema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + + # Disable model_three + write_file(model_three_disabled_sql, project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + model_id = "model.test.model_three" + assert model_id in manifest.disabled + assert model_id not in manifest.nodes + + # Edit disabled model three + write_file(model_three_disabled2_sql, project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + model_id = "model.test.model_three" + assert model_id in manifest.disabled + assert model_id not in manifest.nodes + + # Remove disabled from model three + write_file(model_three_sql, project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + manifest = get_manifest(project.project_root) + model_id = "model.test.model_three" + assert model_id in manifest.nodes + assert model_id not in manifest.disabled + + +class TestSources: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + def test_pp_sources(self, project): + # initial run + write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv") + write_file(sources_tests1_sql, project.project_root, "macros", "tests.sql") + results = run_dbt(["run"]) + assert len(results) == 1 + + # Partial parse running 'seed' + run_dbt(["--partial-parse", "seed"]) + manifest = get_manifest(project.project_root) + seed_file_id = "test://" + normalize("seeds/raw_customers.csv") + assert seed_file_id in manifest.files + + # Add another seed file + write_file(raw_customers_csv, project.project_root, "seeds", "more_customers.csv") + run_dbt(["--partial-parse", "run"]) + seed_file_id = "test://" + normalize("seeds/more_customers.csv") + manifest = get_manifest(project.project_root) + assert seed_file_id in manifest.files + seed_id = "seed.test.more_customers" + assert seed_id in manifest.nodes + + # Remove seed file and add a schema files with a source referring to raw_customers + rm_file(project.project_root, "seeds", "more_customers.csv") + write_file(schema_sources1_yml, project.project_root, "models", "sources.yml") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + assert len(manifest.sources) == 1 + file_id = "test://" + normalize("models/sources.yml") + assert file_id in manifest.files + + # add a model referring to raw_customers source + write_file(customers_sql, project.project_root, "models", "customers.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + + # remove sources schema file + rm_file(project.project_root, "models", "sources.yml") + with pytest.raises(CompilationError): + results = run_dbt(["--partial-parse", "run"]) + + # put back sources and add an exposures file + write_file(schema_sources2_yml, project.project_root, "models", "sources.yml") + results = run_dbt(["--partial-parse", "run"]) + + # remove seed referenced in exposures file + rm_file(project.project_root, "seeds", "raw_customers.csv") + with pytest.raises(CompilationError): + results = 
run_dbt(["--partial-parse", "run"]) + + # put back seed and remove depends_on from exposure + write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv") + write_file(schema_sources3_yml, project.project_root, "models", "sources.yml") + results = run_dbt(["--partial-parse", "run"]) + + # Add seed config with test to schema.yml, remove exposure + write_file(schema_sources4_yml, project.project_root, "models", "sources.yml") + results = run_dbt(["--partial-parse", "run"]) + + # Change seed name to wrong name + write_file(schema_sources5_yml, project.project_root, "models", "sources.yml") + with pytest.raises(CompilationError): + results = run_dbt(["--partial-parse", "--warn-error", "run"]) + + # Put back seed name to right name + write_file(schema_sources4_yml, project.project_root, "models", "sources.yml") + results = run_dbt(["--partial-parse", "run"]) + + # Add docs file customers.md + write_file(customers1_md, project.project_root, "models", "customers.md") + results = run_dbt(["--partial-parse", "run"]) + + # Change docs file customers.md + write_file(customers2_md, project.project_root, "models", "customers.md") + results = run_dbt(["--partial-parse", "run"]) + + # Delete docs file + rm_file(project.project_root, "models", "customers.md") + results = run_dbt(["--partial-parse", "run"]) + + # Add a data test + write_file(test_macro_sql, project.project_root, "macros", "test-macro.sql") + write_file(my_test_sql, project.project_root, "tests", "my_test.sql") + results = run_dbt(["--partial-parse", "test"]) + manifest = get_manifest(project.project_root) + assert len(manifest.nodes) == 9 + test_id = "test.test.my_test" + assert test_id in manifest.nodes + + # Change macro that data test depends on + write_file(test_macro2_sql, project.project_root, "macros", "test-macro.sql") + results = run_dbt(["--partial-parse", "test"]) + manifest = get_manifest(project.project_root) + + # Add an analysis + write_file(my_analysis_sql, project.project_root, "analyses", "my_analysis.sql") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + + # Remove data test + rm_file(project.project_root, "tests", "my_test.sql") + results = run_dbt(["--partial-parse", "test"]) + manifest = get_manifest(project.project_root) + assert len(manifest.nodes) == 9 + + # Remove analysis + rm_file(project.project_root, "analyses", "my_analysis.sql") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + assert len(manifest.nodes) == 8 + + # Change source test + write_file(sources_tests2_sql, project.project_root, "macros", "tests.sql") + results = run_dbt(["--partial-parse", "run"]) + + +class TestPartialParsingDependency: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project_root): + local_dependency_files = { + "dbt_project.yml": local_dependency__dbt_project_yml, + "models": { + "schema.yml": local_dependency__models__schema_yml, + "model_to_import.sql": local_dependency__models__model_to_import_sql, + }, + "macros": {"dep_macro.sql": local_dependency__macros__dep_macro_sql}, + "seeds": {"seed.csv": local_dependency__seeds__seed_csv}, + } + write_project_files(project_root, "local_dependency", local_dependency_files) + + @pytest.fixture(scope="class") + def packages(self): + return {"packages": [{"local": "local_dependency"}]} + + def test_parsing_with_dependency(self, project): + 
run_dbt(["clean"]) + run_dbt(["deps"]) + run_dbt(["seed"]) + run_dbt(["run"]) + + # Add a source override + write_file(schema_models_c_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + assert len(manifest.sources) == 1 + source_id = "source.local_dep.seed_source.seed" + assert source_id in manifest.sources + # We have 1 root model, 1 local_dep model, 1 local_dep seed, 1 local_dep source test, 2 root source tests + assert len(manifest.nodes) == 5 + test_id = "test.local_dep.source_unique_seed_source_seed_id.afa94935ed" + assert test_id in manifest.nodes + + # Remove a source override + rm_file(project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + assert len(manifest.sources) == 1 + + +class TestNestedMacros: + @pytest.fixture(scope="class") + def models(self): + return { + "model_a.sql": model_a_sql, + "model_b.sql": model_b_sql, + "schema.yml": macros_schema_yml, + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "custom_schema_tests.sql": custom_schema_tests1_sql, + } + + def test_nested_macros(self, project): + results = run_dbt() + assert len(results) == 2 + manifest = get_manifest(project.project_root) + macro_child_map = manifest.build_macro_child_map() + macro_unique_id = "macro.test.test_type_two" + assert macro_unique_id in macro_child_map + + results = run_dbt(["test"], expect_pass=False) + results = sorted(results, key=lambda r: r.node.name) + assert len(results) == 2 + # type_one_model_a_ + assert results[0].status == TestStatus.Fail + assert re.search(r"union all", results[0].node.compiled_code) + # type_two_model_a_ + assert results[1].status == TestStatus.Warn + assert results[1].node.config.severity == "WARN" + + write_file( + custom_schema_tests2_sql, project.project_root, "macros", "custom_schema_tests.sql" + ) + results = run_dbt(["--partial-parse", "test"], expect_pass=False) + manifest = get_manifest(project.project_root) + test_node_id = "test.test.type_two_model_a_.842bc6c2a7" + assert test_node_id in manifest.nodes + results = sorted(results, key=lambda r: r.node.name) + assert len(results) == 2 + # type_two_model_a_ + assert results[1].status == TestStatus.Fail + assert results[1].node.config.severity == "ERROR" + + +class TestSkipMacros: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + "eschema.yml": empty_schema_yml, + } + + def test_skip_macros(self, project): + # initial run so we have a msgpack file + # includes empty_schema file for bug #4850 + results = run_dbt() + + # add a new ref override macro + write_file(ref_override_sql, project.project_root, "macros", "ref_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." in log_output + + # modify a ref override macro + write_file(ref_override2_sql, project.project_root, "macros", "ref_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." in log_output + + # remove a ref override macro + rm_file(project.project_root, "macros", "ref_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." 
in log_output + + # custom generate_schema_name macro + write_file(gsm_override_sql, project.project_root, "macros", "gsm_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." in log_output + + # change generate_schema_name macro + write_file(gsm_override2_sql, project.project_root, "macros", "gsm_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." in log_output + + +class TestSnapshots: + @pytest.fixture(scope="class") + def models(self): + return { + "orders.sql": orders_sql, + } + + def test_pp_snapshots(self, project): + + # initial run + results = run_dbt() + assert len(results) == 1 + + # add snapshot + write_file(snapshot_sql, project.project_root, "snapshots", "snapshot.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + snapshot_id = "snapshot.test.orders_snapshot" + assert snapshot_id in manifest.nodes + snapshot2_id = "snapshot.test.orders2_snapshot" + assert snapshot2_id in manifest.nodes + + # run snapshot + results = run_dbt(["--partial-parse", "snapshot"]) + assert len(results) == 2 + + # modify snapshot + write_file(snapshot2_sql, project.project_root, "snapshots", "snapshot.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + + # delete snapshot + rm_file(project.project_root, "snapshots", "snapshot.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + + +class TestTests: + @pytest.fixture(scope="class") + def models(self): + return { + "orders.sql": orders_sql, + "schema.yml": generic_schema_yml, + } + + @pytest.fixture(scope="class") + def tests(self): + # Make sure "generic" directory is created + return {"generic": {"readme.md": ""}} + + def test_pp_generic_tests(self, project): + + # initial run + results = run_dbt() + assert len(results) == 1 + manifest = get_manifest(project.project_root) + expected_nodes = ["model.test.orders", "test.test.unique_orders_id.1360ecc70e"] + assert expected_nodes == list(manifest.nodes.keys()) + + # add generic test in test-path + write_file(generic_test_sql, project.project_root, "tests", "generic", "generic_test.sql") + write_file(generic_test_schema_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + test_id = "test.test.is_odd_orders_id.82834fdc5b" + assert test_id in manifest.nodes + expected_nodes = [ + "model.test.orders", + "test.test.unique_orders_id.1360ecc70e", + "test.test.is_odd_orders_id.82834fdc5b", + ] + assert expected_nodes == list(manifest.nodes.keys()) + + # edit generic test in test-path + write_file( + generic_test_edited_sql, project.project_root, "tests", "generic", "generic_test.sql" + ) + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + test_id = "test.test.is_odd_orders_id.82834fdc5b" + assert test_id in manifest.nodes + expected_nodes = [ + "model.test.orders", + "test.test.unique_orders_id.1360ecc70e", + "test.test.is_odd_orders_id.82834fdc5b", + ] + assert expected_nodes == list(manifest.nodes.keys()) diff --git a/tests/functional/partial_parsing/test_pp_metrics.py b/tests/functional/partial_parsing/test_pp_metrics.py new file mode 100644 index 00000000000..575c5ca613e --- /dev/null +++ b/tests/functional/partial_parsing/test_pp_metrics.py @@ -0,0 
+1,73 @@ +import pytest + +from dbt.tests.util import run_dbt, write_file, get_manifest +from tests.functional.partial_parsing.fixtures import ( + people_sql, + people_metrics_yml, + people_metrics2_yml, + metric_model_a_sql, + people_metrics3_yml, +) + +from dbt.exceptions import CompilationError + + +class TestMetrics: + @pytest.fixture(scope="class") + def models(self): + return { + "people.sql": people_sql, + } + + def test_metrics(self, project): + # initial run + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + assert len(manifest.nodes) == 1 + + # Add metrics yaml file + write_file(people_metrics_yml, project.project_root, "models", "people_metrics.yml") + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + assert len(manifest.metrics) == 2 + metric_people_id = "metric.test.number_of_people" + metric_tenure_id = "metric.test.collective_tenure" + metric_people = manifest.metrics[metric_people_id] + metric_tenure = manifest.metrics[metric_tenure_id] + expected_meta = {"my_meta": "testing"} + assert metric_people.meta == expected_meta + assert metric_people.refs == [["people"]] + assert metric_tenure.refs == [["people"]] + expected_depends_on_nodes = ["model.test.people"] + assert metric_people.depends_on.nodes == expected_depends_on_nodes + + # Change metrics yaml files + write_file(people_metrics2_yml, project.project_root, "models", "people_metrics.yml") + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + metric_people = manifest.metrics[metric_people_id] + expected_meta = {"my_meta": "replaced"} + assert metric_people.meta == expected_meta + expected_depends_on_nodes = ["model.test.people"] + assert metric_people.depends_on.nodes == expected_depends_on_nodes + + # Add model referring to metric + write_file(metric_model_a_sql, project.project_root, "models", "metric_model_a.sql") + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + model_a = manifest.nodes["model.test.metric_model_a"] + expected_depends_on_nodes = [ + "metric.test.number_of_people", + "metric.test.collective_tenure", + ] + assert model_a.depends_on.nodes == expected_depends_on_nodes + + # Then delete a metric + write_file(people_metrics3_yml, project.project_root, "models", "people_metrics.yml") + with pytest.raises(CompilationError): + # We use "parse" here and not "run" because we're checking that the CompilationError + # occurs at parse time, not compilation + results = run_dbt(["parse"]) diff --git a/tests/functional/partial_parsing/test_pp_vars.py b/tests/functional/partial_parsing/test_pp_vars.py new file mode 100644 index 00000000000..19b3c7db849 --- /dev/null +++ b/tests/functional/partial_parsing/test_pp_vars.py @@ -0,0 +1,386 @@ +import pytest + +from dbt.tests.util import run_dbt, write_file, run_dbt_and_capture, get_manifest + +from tests.functional.partial_parsing.fixtures import ( + model_color_sql, + env_var_model_sql, + env_var_schema_yml, + env_var_model_one_sql, + raw_customers_csv, + env_var_sources_yml, + test_color_sql, + env_var_schema2_yml, + env_var_schema3_yml, + env_var_macro_sql, + env_var_macros_yml, + env_var_model_test_yml, + people_sql, + env_var_metrics_yml, + model_one_sql, +) + + +from dbt.exceptions import ParsingError +from dbt.constants import SECRET_ENV_PREFIX +import os + + +os.environ["DBT_PP_TEST"] = "true" + + +class TestEnvVars: + @pytest.fixture(scope="class") + def models(self): + return 
{ + "model_color.sql": model_color_sql, + } + + def test_env_vars_models(self, project): + + # initial run + results = run_dbt(["run"]) + assert len(results) == 1 + + # copy a file with an env_var call without an env_var + write_file(env_var_model_sql, project.project_root, "models", "env_var_model.sql") + with pytest.raises(ParsingError): + results = run_dbt(["--partial-parse", "run"]) + + # set the env var + os.environ["ENV_VAR_TEST"] = "TestingEnvVars" + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + expected_env_vars = {"ENV_VAR_TEST": "TestingEnvVars"} + assert expected_env_vars == manifest.env_vars + model_id = "model.test.env_var_model" + model = manifest.nodes[model_id] + model_created_at = model.created_at + + # change the env var + os.environ["ENV_VAR_TEST"] = "second" + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + expected_env_vars = {"ENV_VAR_TEST": "second"} + assert expected_env_vars == manifest.env_vars + assert model_created_at != manifest.nodes[model_id].created_at + + # set an env_var in a schema file + write_file(env_var_schema_yml, project.project_root, "models", "schema.yml") + write_file(env_var_model_one_sql, project.project_root, "models", "model_one.sql") + with pytest.raises(ParsingError): + results = run_dbt(["--partial-parse", "run"]) + + # actually set the env_var + os.environ["TEST_SCHEMA_VAR"] = "view" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view"} + assert expected_env_vars == manifest.env_vars + + # env vars in a source + os.environ["ENV_VAR_DATABASE"] = "dbt" + os.environ["ENV_VAR_SEVERITY"] = "warn" + write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv") + write_file(env_var_sources_yml, project.project_root, "models", "sources.yml") + run_dbt(["--partial-parse", "seed"]) + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "dbt", + "ENV_VAR_SEVERITY": "warn", + } + assert expected_env_vars == manifest.env_vars + assert len(manifest.sources) == 1 + source_id = "source.test.seed_sources.raw_customers" + source = manifest.sources[source_id] + assert source.database == "dbt" + schema_file = manifest.files[source.file_id] + test_id = "test.test.source_not_null_seed_sources_raw_customers_id.e39ee7bf0d" + test_node = manifest.nodes[test_id] + assert test_node.config.severity == "WARN" + + # Change severity env var + os.environ["ENV_VAR_SEVERITY"] = "error" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "dbt", + "ENV_VAR_SEVERITY": "error", + } + assert expected_env_vars == manifest.env_vars + source_id = "source.test.seed_sources.raw_customers" + source = manifest.sources[source_id] + schema_file = manifest.files[source.file_id] + expected_schema_file_env_vars = { + "sources": {"seed_sources": ["ENV_VAR_DATABASE", "ENV_VAR_SEVERITY"]} + } + assert expected_schema_file_env_vars == schema_file.env_vars + test_node = manifest.nodes[test_id] + assert test_node.config.severity == "ERROR" + + # Change database env var + 
os.environ["ENV_VAR_DATABASE"] = "test_dbt" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "test_dbt", + "ENV_VAR_SEVERITY": "error", + } + assert expected_env_vars == manifest.env_vars + source = manifest.sources[source_id] + assert source.database == "test_dbt" + + # Delete database env var + del os.environ["ENV_VAR_DATABASE"] + with pytest.raises(ParsingError): + results = run_dbt(["--partial-parse", "run"]) + os.environ["ENV_VAR_DATABASE"] = "test_dbt" + + # Add generic test with test kwarg that's rendered late (no curly brackets) + os.environ["ENV_VAR_DATABASE"] = "dbt" + write_file(test_color_sql, project.project_root, "macros", "test_color.sql") + results = run_dbt(["--partial-parse", "run"]) + # Add source test using test_color and an env_var for color + write_file(env_var_schema2_yml, project.project_root, "models/schema.yml") + with pytest.raises(ParsingError): + results = run_dbt(["--partial-parse", "run"]) + os.environ["ENV_VAR_COLOR"] = "green" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + test_color_id = "test.test.check_color_model_one_env_var_ENV_VAR_COLOR___fun.89638de387" + test_node = manifest.nodes[test_color_id] + # kwarg was rendered but not changed (it will be rendered again when compiled) + assert test_node.test_metadata.kwargs["color"] == "env_var('ENV_VAR_COLOR')" + results = run_dbt(["--partial-parse", "test"]) + + # Add an exposure with an env_var + os.environ["ENV_VAR_OWNER"] = "John Doe" + write_file(env_var_schema3_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "dbt", + "ENV_VAR_SEVERITY": "error", + "ENV_VAR_COLOR": "green", + "ENV_VAR_OWNER": "John Doe", + } + assert expected_env_vars == manifest.env_vars + exposure = list(manifest.exposures.values())[0] + schema_file = manifest.files[exposure.file_id] + expected_sf_env_vars = { + "models": {"model_one": ["TEST_SCHEMA_VAR", "ENV_VAR_COLOR"]}, + "exposures": {"proxy_for_dashboard": ["ENV_VAR_OWNER"]}, + } + assert expected_sf_env_vars == schema_file.env_vars + + # add a macro and a macro schema file + os.environ["ENV_VAR_SOME_KEY"] = "toodles" + write_file(env_var_macro_sql, project.project_root, "macros", "env_var_macro.sql") + write_file(env_var_macros_yml, project.project_root, "macros", "env_var_macros.yml") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "dbt", + "ENV_VAR_SEVERITY": "error", + "ENV_VAR_COLOR": "green", + "ENV_VAR_OWNER": "John Doe", + "ENV_VAR_SOME_KEY": "toodles", + } + assert expected_env_vars == manifest.env_vars + macro_id = "macro.test.do_something" + macro = manifest.macros[macro_id] + assert macro.meta == {"some_key": "toodles"} + # change the env var + os.environ["ENV_VAR_SOME_KEY"] = "dumdedum" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + macro = manifest.macros[macro_id] + assert macro.meta == {"some_key": "dumdedum"} + + # Add a schema file with a test on model_color and env_var in test enabled config + write_file(env_var_model_test_yml, project.project_root, "models", 
"schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + manifest = get_manifest(project.project_root) + model_color = manifest.nodes["model.test.model_color"] + schema_file = manifest.files[model_color.patch_path] + expected_env_vars = { + "models": { + "model_one": ["TEST_SCHEMA_VAR", "ENV_VAR_COLOR"], + "model_color": ["ENV_VAR_ENABLED"], + }, + "exposures": {"proxy_for_dashboard": ["ENV_VAR_OWNER"]}, + } + assert expected_env_vars == schema_file.env_vars + + # Add a metrics file with env_vars + os.environ["ENV_VAR_METRICS"] = "TeStInG" + write_file(people_sql, project.project_root, "models", "people.sql") + write_file(env_var_metrics_yml, project.project_root, "models", "metrics.yml") + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + assert "ENV_VAR_METRICS" in manifest.env_vars + assert manifest.env_vars["ENV_VAR_METRICS"] == "TeStInG" + metric_node = manifest.metrics["metric.test.number_of_people"] + assert metric_node.meta == {"my_meta": "TeStInG"} + + # Change metrics env var + os.environ["ENV_VAR_METRICS"] = "Changed!" + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + metric_node = manifest.metrics["metric.test.number_of_people"] + assert metric_node.meta == {"my_meta": "Changed!"} + + # delete the env vars to cleanup + del os.environ["ENV_VAR_TEST"] + del os.environ["ENV_VAR_SEVERITY"] + del os.environ["ENV_VAR_DATABASE"] + del os.environ["TEST_SCHEMA_VAR"] + del os.environ["ENV_VAR_COLOR"] + del os.environ["ENV_VAR_SOME_KEY"] + del os.environ["ENV_VAR_OWNER"] + del os.environ["ENV_VAR_METRICS"] + + +class TestProjectEnvVars: + @pytest.fixture(scope="class") + def project_config_update(self): + # Need to set the environment variable here initially because + # the project fixture loads the config. + os.environ["ENV_VAR_NAME"] = "Jane Smith" + return {"models": {"+meta": {"meta_name": "{{ env_var('ENV_VAR_NAME') }}"}}} + + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + def test_project_env_vars(self, project): + # Initial run + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + state_check = manifest.state_check + model_id = "model.test.model_one" + model = manifest.nodes[model_id] + assert model.config.meta["meta_name"] == "Jane Smith" + env_vars_hash_checksum = state_check.project_env_vars_hash.checksum + + # Change the environment variable + os.environ["ENV_VAR_NAME"] = "Jane Doe" + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + model = manifest.nodes[model_id] + assert model.config.meta["meta_name"] == "Jane Doe" + assert env_vars_hash_checksum != manifest.state_check.project_env_vars_hash.checksum + + # cleanup + del os.environ["ENV_VAR_NAME"] + + +class TestProfileEnvVars: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + @pytest.fixture(scope="class") + def dbt_profile_target(self): + # Need to set these here because the base integration test class + # calls 'load_config' before the tests are run. + # Note: only the specified profile is rendered, so there's no + # point it setting env_vars in non-used profiles. 
+ os.environ["ENV_VAR_USER"] = "root" + os.environ["ENV_VAR_PASS"] = "password" + return { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": 5432, + "user": "{{ env_var('ENV_VAR_USER') }}", + "pass": "{{ env_var('ENV_VAR_PASS') }}", + "dbname": "dbt", + } + + def test_profile_env_vars(self, project): + + # Initial run + os.environ["ENV_VAR_USER"] = "root" + os.environ["ENV_VAR_PASS"] = "password" + + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum + + # Change env_vars, the user doesn't exist, this should fail + os.environ["ENV_VAR_USER"] = "fake_user" + (results, log_output) = run_dbt_and_capture(["run"], expect_pass=False) + assert "env vars used in profiles.yml have changed" in log_output + manifest = get_manifest(project.project_root) + assert env_vars_checksum != manifest.state_check.profile_env_vars_hash.checksum + + +class TestProfileSecretEnvVars: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + @property + def dbt_profile_target(self): + # Need to set these here because the base integration test class + # calls 'load_config' before the tests are run. + # Note: only the specified profile is rendered, so there's no + # point in setting env_vars in non-used profiles. + + # user is secret and password is not. postgres on macos doesn't care if the password + # changes so we have to change the user. related: https://github.com/dbt-labs/dbt-core/pull/4250 + os.environ[SECRET_ENV_PREFIX + "USER"] = "root" + os.environ["ENV_VAR_PASS"] = "password" + return { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": 5432, + "user": "{{ env_var('DBT_ENV_SECRET_USER') }}", + "pass": "{{ env_var('ENV_VAR_PASS') }}", + "dbname": "dbt", + } + + def test_profile_secret_env_vars(self, project): + + # Initial run + os.environ[SECRET_ENV_PREFIX + "USER"] = "root" + os.environ["ENV_VAR_PASS"] = "password" + + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum + + # Change a secret var, it shouldn't register because we shouldn't save secrets. + os.environ[SECRET_ENV_PREFIX + "USER"] = "fake_user" + # we just want to see if the manifest has included + # the secret in the hash of environment variables. 
+ (results, log_output) = run_dbt_and_capture(["run"], expect_pass=True) + # I020 is the event code for "env vars used in profiles.yml have changed" + assert not ("I020" in log_output) + manifest = get_manifest(project.project_root) + assert env_vars_checksum == manifest.state_check.profile_env_vars_hash.checksum diff --git a/test/integration/060_persist_docs_tests/models/schema.yml b/tests/functional/persist_docs_tests/fixtures.py similarity index 68% rename from test/integration/060_persist_docs_tests/models/schema.yml rename to tests/functional/persist_docs_tests/fixtures.py index 5a909162456..c596f5219cf 100644 --- a/test/integration/060_persist_docs_tests/models/schema.yml +++ b/tests/functional/persist_docs_tests/fixtures.py @@ -1,3 +1,4 @@ +_PROPERTIES__SCHEMA_YML = """ version: 2 models: @@ -68,3 +69,53 @@ description: | Some stuff here and then a call to {{ doc('my_fun_doc')}} +""" + +_MODELS__VIEW = """ +{{ config(materialized='view') }} +select 2 as id, 'Bob' as name +""" + +_MODELS__NO_DOCS_MODEL = """ +select 1 as id, 'Alice' as name +""" + +_DOCS__MY_FUN_DOCS = """ +{% docs my_fun_doc %} +name Column description "with double quotes" +and with 'single quotes' as welll as other; +'''abc123''' +reserved -- characters +-- +/* comment */ +Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting + +{% enddocs %} +""" + +_MODELS__TABLE = """ +{{ config(materialized='table') }} +select 1 as id, 'Joe' as name +""" + + +_MODELS__MISSING_COLUMN = """ +{{ config(materialized='table') }} +select 1 as id, 'Ed' as name +""" + +_PROPERITES__SCHEMA_MISSING_COL = """ +version: 2 +models: + - name: missing_column + columns: + - name: id + description: "test id column description" + - name: column_that_does_not_exist + description: "comment that cannot be created" +""" + +_SEEDS__SEED = """id,name +1,Alice +2,Bob +""" diff --git a/tests/functional/persist_docs_tests/test_persist_docs.py b/tests/functional/persist_docs_tests/test_persist_docs.py new file mode 100644 index 00000000000..8c3822b497a --- /dev/null +++ b/tests/functional/persist_docs_tests/test_persist_docs.py @@ -0,0 +1,150 @@ +import json +import os +import pytest + +from dbt.tests.util import ( + run_dbt, +) + +from tests.functional.persist_docs_tests.fixtures import ( + _DOCS__MY_FUN_DOCS, + _MODELS__MISSING_COLUMN, + _MODELS__NO_DOCS_MODEL, + _MODELS__TABLE, + _MODELS__VIEW, + _PROPERITES__SCHEMA_MISSING_COL, + _PROPERTIES__SCHEMA_YML, + _SEEDS__SEED, +) + + +class BasePersistDocsTest: + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project): + run_dbt(["seed"]) + run_dbt() + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": _SEEDS__SEED} + + @pytest.fixture(scope="class") + def models(self): + return { + "no_docs_model.sql": _MODELS__NO_DOCS_MODEL, + "table_model.sql": _MODELS__TABLE, + "view_model.sql": _MODELS__VIEW, + } + + @pytest.fixture(scope="class") + def properties(self): + return { + "my_fun_docs.md": _DOCS__MY_FUN_DOCS, + "schema.yml": _PROPERTIES__SCHEMA_YML, + } + + def _assert_common_comments(self, *comments): + for comment in comments: + assert '"with double quotes"' in comment + assert """'''abc123'''""" in comment + assert "\n" in comment + assert "Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting" in comment + assert "/* comment */" in comment + if os.name == "nt": + assert "--\r\n" in comment or "--\n" in comment + else: + assert "--\n" in comment + + def _assert_has_table_comments(self, table_node): + table_comment = table_node["metadata"]["comment"] + 
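+ # catalog.json groups every relation under "nodes" keyed by unique_id;
+ # persist_docs lands the model description at metadata.comment and each
+ # column description at columns.<name>.comment, which is what the
+ # startswith checks below verify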
assert table_comment.startswith("Table model description") + + table_id_comment = table_node["columns"]["id"]["comment"] + assert table_id_comment.startswith("id Column description") + + table_name_comment = table_node["columns"]["name"]["comment"] + assert table_name_comment.startswith("Some stuff here and then a call to") + + self._assert_common_comments(table_comment, table_id_comment, table_name_comment) + + def _assert_has_view_comments( + self, view_node, has_node_comments=True, has_column_comments=True + ): + view_comment = view_node["metadata"]["comment"] + if has_node_comments: + assert view_comment.startswith("View model description") + self._assert_common_comments(view_comment) + else: + assert view_comment is None + + view_id_comment = view_node["columns"]["id"]["comment"] + if has_column_comments: + assert view_id_comment.startswith("id Column description") + self._assert_common_comments(view_id_comment) + else: + assert view_id_comment is None + + view_name_comment = view_node["columns"]["name"]["comment"] + assert view_name_comment is None + + +class TestPersistDocs(BasePersistDocsTest): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "test": { + "+persist_docs": { + "relation": True, + "columns": True, + }, + } + } + } + + def test_has_comments_pglike(self, project): + run_dbt(["docs", "generate"]) + with open("target/catalog.json") as fp: + catalog_data = json.load(fp) + assert "nodes" in catalog_data + assert len(catalog_data["nodes"]) == 4 + table_node = catalog_data["nodes"]["model.test.table_model"] + view_node = self._assert_has_table_comments(table_node) + + view_node = catalog_data["nodes"]["model.test.view_model"] + self._assert_has_view_comments(view_node) + + no_docs_node = catalog_data["nodes"]["model.test.no_docs_model"] + self._assert_has_view_comments(no_docs_node, False, False) + + +class TestPersistDocsColumnMissing(BasePersistDocsTest): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "test": { + "+persist_docs": { + "columns": True, + }, + } + } + } + + @pytest.fixture(scope="class") + def models(self): + return {"missing_column.sql": _MODELS__MISSING_COLUMN} + + @pytest.fixture(scope="class") + def properties(self): + return {"schema.yml": _PROPERITES__SCHEMA_MISSING_COL} + + def test_postgres_missing_column(self, project): + run_dbt(["docs", "generate"]) + with open("target/catalog.json") as fp: + catalog_data = json.load(fp) + assert "nodes" in catalog_data + + table_node = catalog_data["nodes"]["model.test.missing_column"] + table_id_comment = table_node["columns"]["id"]["comment"] + assert table_id_comment.startswith("test id column description") diff --git a/tests/functional/postgres/test_postgres_indexes.py b/tests/functional/postgres/test_postgres_indexes.py index 64d61d2df87..143a0888755 100644 --- a/tests/functional/postgres/test_postgres_indexes.py +++ b/tests/functional/postgres/test_postgres_indexes.py @@ -70,7 +70,7 @@ def test_incremental(self, project, unique_schema): results = run_dbt(["run", "--models", "incremental"] + additional_argument) assert len(results) == 1 - indexes = self.get_indexes('incremental', project, unique_schema) + indexes = self.get_indexes("incremental", project, unique_schema) expected = [ {"columns": "column_a", "unique": False, "type": "hash"}, {"columns": "column_a, column_b", "unique": True, "type": "btree"}, @@ -78,11 +78,11 @@ def test_incremental(self, project, unique_schema): assert len(indexes) == len(expected) def 
test_seed(self, project, unique_schema): - for additional_argument in [[], [], ['--full-refresh']]: + for additional_argument in [[], [], ["--full-refresh"]]: results = run_dbt(["seed"] + additional_argument) assert len(results) == 1 - indexes = self.get_indexes('seed', project, unique_schema) + indexes = self.get_indexes("seed", project, unique_schema) expected = [ {"columns": "country_code", "unique": False, "type": "hash"}, {"columns": "country_code, country_name", "unique": True, "type": "btree"}, @@ -94,7 +94,7 @@ def test_snapshot(self, project, unique_schema): results = run_dbt(["snapshot", "--vars", f"version: {version}"]) assert len(results) == 1 - indexes = self.get_indexes('colors', project, unique_schema) + indexes = self.get_indexes("colors", project, unique_schema) expected = [ {"columns": "id", "unique": False, "type": "hash"}, {"columns": "id, color", "unique": True, "type": "btree"}, @@ -130,7 +130,7 @@ def assertCountEqual(self, a, b): assert len(a) == len(b) -class TestPostgresInvalidIndex(): +class TestPostgresInvalidIndex: @pytest.fixture(scope="class") def models(self): return { diff --git a/tests/functional/ref_override/test_ref_override.py b/tests/functional/ref_override/test_ref_override.py new file mode 100644 index 00000000000..9a6b1def435 --- /dev/null +++ b/tests/functional/ref_override/test_ref_override.py @@ -0,0 +1,79 @@ +import pytest + +from dbt.tests.util import run_dbt, check_relations_equal +from dbt.tests.fixtures.project import write_project_files + + +models__ref_override_sql = """ +select + * +from {{ ref('seed_1') }} +""" + +macros__ref_override_macro_sql = """ +-- Macro to override ref and always return the same result +{% macro ref(modelname) %} +{% do return(builtins.ref(modelname).replace_path(identifier='seed_2')) %} +{% endmacro %} +""" + +seeds__seed_2_csv = """a,b +6,2 +12,4 +18,6""" + +seeds__seed_1_csv = """a,b +1,2 +2,4 +3,6""" + + +@pytest.fixture(scope="class") +def models(): + return {"ref_override.sql": models__ref_override_sql} + + +@pytest.fixture(scope="class") +def macros(): + return {"ref_override_macro.sql": macros__ref_override_macro_sql} + + +@pytest.fixture(scope="class") +def seeds(): + return {"seed_2.csv": seeds__seed_2_csv, "seed_1.csv": seeds__seed_1_csv} + + +@pytest.fixture(scope="class") +def project_files( + project_root, + models, + macros, + seeds, +): + write_project_files(project_root, "models", models) + write_project_files(project_root, "macros", macros) + write_project_files(project_root, "seeds", seeds) + + +class TestRefOverride: + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "seed-paths": ["seeds"], + "macro-paths": ["macros"], + "seeds": { + "quote_columns": False, + }, + } + + def test_ref_override( + self, + project, + ): + run_dbt(["seed"]) + run_dbt(["run"]) + + # We want it to equal seed_2 and not seed_1. If it's + # still pointing at seed_1 then the override hasn't worked. 
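+ # (A project macro named `ref` shadows the builtin at resolution time; the
+ # override above still calls builtins.ref() to look up the real relation,
+ # then rewrites its identifier to seed_2, redirecting every ref() in the
+ # project without touching the model SQL.)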
+ check_relations_equal(project.adapter, ["ref_override", "seed_2"]) diff --git a/tests/functional/relation_names/test_relation_name.py b/tests/functional/relation_names/test_relation_name.py index 5d941d96da5..f0c241c9302 100644 --- a/tests/functional/relation_names/test_relation_name.py +++ b/tests/functional/relation_names/test_relation_name.py @@ -40,9 +40,13 @@ class TestGeneratedDDLNameRules: def setup_class(self): self.incremental_filename = "my_name_is_51_characters_incremental_abcdefghijklmn" # length is 63 - self.max_length_filename = "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789" + self.max_length_filename = ( + "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789" + ) # length is 64 - self.over_max_length_filename = "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1" + self.over_max_length_filename = ( + "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1" + ) self.filename_for_backup_file = "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0" @@ -57,14 +61,10 @@ def seeds(self): @pytest.fixture(scope="class") def models(self): return { - f"{self.incremental_filename}.sql": - models__basic_incremental, - f"{self.filename_for_backup_file}.sql": - models__basic_table, - f"{self.max_length_filename}.sql": - models__basic_table, - f"{self.over_max_length_filename}.sql": - models__basic_table, + f"{self.incremental_filename}.sql": models__basic_incremental, + f"{self.filename_for_backup_file}.sql": models__basic_table, + f"{self.max_length_filename}.sql": models__basic_table, + f"{self.over_max_length_filename}.sql": models__basic_table, } @pytest.fixture(scope="class") @@ -110,15 +110,17 @@ def test_long_name_passes_when_temp_tables_are_generated(self): # 63 characters is the character limit for a table name in a postgres database # (assuming compiled without changes from source) def test_name_longer_than_63_does_not_build(self): - err_msg = "Relation name 'my_name_is_one_over_max"\ + err_msg = ( + "Relation name 'my_name_is_one_over_max" "_length_chats_abcdefghijklmnopqrstuvwxyz1' is longer than 63 characters" + ) res = run_dbt( [ "run", "-s", self.over_max_length_filename, ], - expect_pass=False + expect_pass=False, ) assert res[0].status == RunStatus.Error assert err_msg in res[0].message diff --git a/tests/functional/run_operations/test_run_operations.py b/tests/functional/run_operations/test_run_operations.py index f91ef2d8359..68e9fb8c6e0 100644 --- a/tests/functional/run_operations/test_run_operations.py +++ b/tests/functional/run_operations/test_run_operations.py @@ -2,15 +2,8 @@ import pytest import yaml -from dbt.tests.util import ( - check_table_does_exist, - run_dbt -) -from tests.functional.run_operations.fixtures import ( - happy_macros_sql, - sad_macros_sql, - model_sql -) +from dbt.tests.util import check_table_does_exist, run_dbt +from tests.functional.run_operations.fixtures import happy_macros_sql, sad_macros_sql, model_sql class TestOperations: @@ -20,10 +13,7 @@ def models(self): @pytest.fixture(scope="class") def macros(self): - return { - "happy_macros.sql": happy_macros_sql, - "sad_macros.sql": sad_macros_sql - } + return {"happy_macros.sql": happy_macros_sql, "sad_macros.sql": sad_macros_sql} @pytest.fixture(scope="class") def dbt_profile_data(self, unique_schema): @@ -46,59 +36,57 @@ def dbt_profile_data(self, unique_schema): "threads": 4, "host": "localhost", "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), - "user": 'noaccess', - "pass": 'password', + "user": "noaccess", + "pass": 
"password", "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), - 'schema': unique_schema - } + "schema": unique_schema, + }, }, "target": "default", }, } def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs): - args = ['run-operation', macro] + args = ["run-operation", macro] if kwargs: - args.extend(('--args', yaml.safe_dump(kwargs))) + args.extend(("--args", yaml.safe_dump(kwargs))) if extra_args: args.extend(extra_args) return run_dbt(args, expect_pass=expect_pass) def test_macro_noargs(self, project): - self.run_operation('no_args') - check_table_does_exist(project.adapter, 'no_args') + self.run_operation("no_args") + check_table_does_exist(project.adapter, "no_args") def test_macro_args(self, project): - self.run_operation('table_name_args', table_name='my_fancy_table') - check_table_does_exist(project.adapter, 'my_fancy_table') + self.run_operation("table_name_args", table_name="my_fancy_table") + check_table_does_exist(project.adapter, "my_fancy_table") def test_macro_exception(self, project): - self.run_operation('syntax_error', False) + self.run_operation("syntax_error", False) def test_macro_missing(self, project): - self.run_operation('this_macro_does_not_exist', False) + self.run_operation("this_macro_does_not_exist", False) def test_cannot_connect(self, project): - self.run_operation('no_args', - extra_args=['--target', 'noaccess'], - expect_pass=False) + self.run_operation("no_args", extra_args=["--target", "noaccess"], expect_pass=False) def test_vacuum(self, project): - run_dbt(['run']) + run_dbt(["run"]) # this should succeed - self.run_operation('vacuum', table_name='model') + self.run_operation("vacuum", table_name="model") def test_vacuum_ref(self, project): - run_dbt(['run']) + run_dbt(["run"]) # this should succeed - self.run_operation('vacuum_ref', ref_target='model') + self.run_operation("vacuum_ref", ref_target="model") def test_select(self, project): - self.run_operation('select_something', name='world') + self.run_operation("select_something", name="world") def test_access_graph(self, project): - self.run_operation('log_graph') + self.run_operation("log_graph") def test_print(self, project): # Tests that calling the `print()` macro does not cause an exception - self.run_operation('print_something') + self.run_operation("print_something") diff --git a/test/integration/057_run_query_tests/macros/test_pg_array_queries.sql b/tests/functional/run_query/test_types.py similarity index 52% rename from test/integration/057_run_query_tests/macros/test_pg_array_queries.sql rename to tests/functional/run_query/test_types.py index f672d777f6f..825d3793895 100644 --- a/test/integration/057_run_query_tests/macros/test_pg_array_queries.sql +++ b/tests/functional/run_query/test_types.py @@ -1,4 +1,8 @@ +import pytest +from dbt.tests.util import run_dbt + +macros_sql = """ {% macro test_array_results() %} {% set sql %} @@ -14,3 +18,16 @@ {% endif %} {% endmacro %} +""" + + +class TestTypes: + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + } + + def test_nested_types(self, project): + result = run_dbt(["run-operation", "test_array_results"]) + assert result.success diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py index 44a6696931b..7b80c5d3eb4 100644 --- a/tests/functional/schema_tests/test_schema_v2_tests.py +++ b/tests/functional/schema_tests/test_schema_v2_tests.py @@ -95,7 +95,7 @@ alt_local_utils__macros__type_timestamp_sql, 
all_quotes_schema__schema_yml, ) -from dbt.exceptions import ParsingException, CompilationException, DuplicateResourceName +from dbt.exceptions import ParsingError, CompilationError, DuplicateResourceNameError from dbt.contracts.results import TestStatus @@ -410,7 +410,7 @@ def test_malformed_schema_will_break_run( self, project, ): - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt() @@ -904,7 +904,7 @@ def test_generic_test_collision( project, ): """These tests collide, since only the configs differ""" - with pytest.raises(DuplicateResourceName) as exc: + with pytest.raises(DuplicateResourceNameError) as exc: run_dbt() assert "dbt found two tests with the name" in str(exc.value) @@ -922,7 +922,7 @@ def test_generic_test_config_custom_macros( project, ): """This test has a reference to a custom macro its configs""" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "Invalid generic test configuration" in str(exc) @@ -987,7 +987,7 @@ def test_invalid_schema_file( self, project, ): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt() assert re.search(r"'models' is not a list", str(exc)) @@ -1003,11 +1003,13 @@ def models(self): def test_quoted_schema_file(self, project): try: # A schema file consisting entirely of quotes should not be a problem - run_dbt(['parse']) + run_dbt(["parse"]) except TypeError: - assert False, '`dbt parse` failed with a yaml file that is all comments with the same exception as 3568' + assert ( + False + ), "`dbt parse` failed with a yaml file that is all comments with the same exception as 3568" except Exception: - assert False, '`dbt parse` failed with a yaml file that is all comments' + assert False, "`dbt parse` failed with a yaml file that is all comments" class TestWrongSpecificationBlock: diff --git a/tests/functional/severity/test_severity.py b/tests/functional/severity/test_severity.py index 050ccd22325..8a76ef6ac24 100644 --- a/tests/functional/severity/test_severity.py +++ b/tests/functional/severity/test_severity.py @@ -60,10 +60,7 @@ @pytest.fixture(scope="class") def models(): - return { - "sample_model.sql": models__sample_model_sql, - "schema.yml": models__schema_yml - } + return {"sample_model.sql": models__sample_model_sql, "schema.yml": models__schema_yml} @pytest.fixture(scope="class") @@ -79,9 +76,9 @@ def tests(): @pytest.fixture(scope="class") def project_config_update(): return { - 'config-version': 2, - 'seed-paths': ['seeds'], - 'test-paths': ['tests'], + "config-version": 2, + "seed-paths": ["seeds"], + "test-paths": ["tests"], "seeds": { "quote_columns": False, }, @@ -95,25 +92,31 @@ def seed_and_run(self, project): run_dbt(["run"]) def test_generic_default(self, project): - results = run_dbt(['test', '--select', 'test_type:generic']) + results = run_dbt(["test", "--select", "test_type:generic"]) assert len(results) == 2 - assert all([r.status == 'warn' for r in results]) + assert all([r.status == "warn" for r in results]) assert all([r.failures == 2 for r in results]) def test_generic_strict(self, project): - results = run_dbt(['test', '--select', 'test_type:generic', "--vars", '{"strict": True}'], expect_pass=False) + results = run_dbt( + ["test", "--select", "test_type:generic", "--vars", '{"strict": True}'], + expect_pass=False, + ) assert len(results) == 2 - assert all([r.status == 'fail' for r in results]) + assert all([r.status == "fail" for r in results]) assert all([r.failures == 2 for r 
in results]) def test_singular_default(self, project): - results = run_dbt(['test', '--select', 'test_type:singular']) + results = run_dbt(["test", "--select", "test_type:singular"]) assert len(results) == 1 - assert all([r.status == 'warn' for r in results]) + assert all([r.status == "warn" for r in results]) assert all([r.failures == 2 for r in results]) def test_singular_strict(self, project): - results = run_dbt(['test', '--select', 'test_type:singular', "--vars", '{"strict": True}'], expect_pass=False) + results = run_dbt( + ["test", "--select", "test_type:singular", "--vars", '{"strict": True}'], + expect_pass=False, + ) assert len(results) == 1 - assert all([r.status == 'fail' for r in results]) + assert all([r.status == "fail" for r in results]) assert all([r.failures == 2 for r in results]) diff --git a/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py b/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py index 33e6b61aebc..dfb51f7992e 100644 --- a/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py +++ b/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py @@ -1,6 +1,6 @@ import pytest from dbt.tests.util import run_dbt -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from tests.functional.simple_snapshot.fixtures import ( models__schema_yml, models__ref_snapshot_sql, @@ -43,7 +43,7 @@ def macros(): def test_missing_strategy(project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["compile"], expect_pass=False) assert "Snapshots must be configured with a 'strategy'" in str(exc.value) diff --git a/tests/functional/source_overrides/test_source_overrides_duplicate_model.py b/tests/functional/source_overrides/test_source_overrides_duplicate_model.py index cd35fd6f7c2..e3cdebe4794 100644 --- a/tests/functional/source_overrides/test_source_overrides_duplicate_model.py +++ b/tests/functional/source_overrides/test_source_overrides_duplicate_model.py @@ -1,5 +1,5 @@ import os -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError import pytest from dbt.tests.util import run_dbt @@ -56,7 +56,7 @@ def project_config_update(self): def test_source_duplicate_overrides(self, project): run_dbt(["deps"]) - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert "dbt found two schema.yml entries for the same source named" in str(exc.value) diff --git a/tests/functional/sources/test_simple_source.py b/tests/functional/sources/test_simple_source.py index 0c69f859b6b..cd08647f367 100644 --- a/tests/functional/sources/test_simple_source.py +++ b/tests/functional/sources/test_simple_source.py @@ -1,7 +1,7 @@ import os import pytest import yaml -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.tests.util import ( run_dbt, @@ -164,7 +164,7 @@ def models(self): } def test_malformed_schema_will_break_run(self, project): - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): self.run_dbt_with_vars(project, ["seed"]) diff --git a/tests/functional/sources/test_source_fresher_state.py b/tests/functional/sources/test_source_fresher_state.py index 362f9a816c0..a97694a9c5a 100644 --- a/tests/functional/sources/test_source_fresher_state.py +++ b/tests/functional/sources/test_source_fresher_state.py @@ -4,7 +4,7 @@ import pytest from datetime import datetime, timedelta -from dbt.exceptions import 
InternalException +from dbt.exceptions import DbtInternalError from dbt.tests.util import AnyStringWith, AnyFloat @@ -619,7 +619,7 @@ class TestSourceFresherNoPreviousState(SuccessfulSourceFreshnessTest): def test_intentional_failure_no_previous_state(self, project): self.run_dbt_with_vars(project, ["run"]) # TODO add the current and previous but with previous as null - with pytest.raises(InternalException) as excinfo: + with pytest.raises(DbtInternalError) as excinfo: self.run_dbt_with_vars( project, ["run", "-s", "source_status:fresher", "--defer", "--state", "previous_state"], @@ -641,7 +641,7 @@ def test_intentional_failure_no_previous_state(self, project): copy_to_previous_state() assert previous_state_results[0].max_loaded_at is not None - with pytest.raises(InternalException) as excinfo: + with pytest.raises(DbtInternalError) as excinfo: self.run_dbt_with_vars( project, ["run", "-s", "source_status:fresher", "--defer", "--state", "previous_state"], diff --git a/tests/functional/statements/test_statements.py b/tests/functional/statements/test_statements.py index 4b8640b8066..b3d615a2b69 100644 --- a/tests/functional/statements/test_statements.py +++ b/tests/functional/statements/test_statements.py @@ -1,11 +1,7 @@ import pathlib import pytest -from dbt.tests.util import ( - run_dbt, - check_relations_equal, - write_file -) +from dbt.tests.util import run_dbt, check_relations_equal, write_file from tests.functional.statements.fixtures import ( models__statement_actual, seeds__statement_actual, @@ -19,7 +15,9 @@ def setUp(self, project): # put seeds in 'seed' not 'seeds' directory (pathlib.Path(project.project_root) / "seed").mkdir(parents=True, exist_ok=True) write_file(seeds__statement_actual, project.project_root, "seed", "seed.csv") - write_file(seeds__statement_expected, project.project_root, "seed", "statement_expected.csv") + write_file( + seeds__statement_expected, project.project_root, "seed", "statement_expected.csv" + ) @pytest.fixture(scope="class") def models(self): diff --git a/tests/functional/store_test_failures_tests/test_store_test_failures.py b/tests/functional/store_test_failures_tests/test_store_test_failures.py index ff26d7d97d3..15527c86bd3 100644 --- a/tests/functional/store_test_failures_tests/test_store_test_failures.py +++ b/tests/functional/store_test_failures_tests/test_store_test_failures.py @@ -38,10 +38,8 @@ def seeds(self): "people.csv": seeds__people, "expected_accepted_values.csv": seeds__expected_accepted_values, "expected_failing_test.csv": seeds__expected_failing_test, - "expected_not_null_problematic_model_id.csv": - seeds__expected_not_null_problematic_model_id, - "expected_unique_problematic_model_id.csv": - seeds__expected_unique_problematic_model_id, + "expected_not_null_problematic_model_id.csv": seeds__expected_not_null_problematic_model_id, + "expected_unique_problematic_model_id.csv": seeds__expected_unique_problematic_model_id, } @pytest.fixture(scope="class") @@ -59,8 +57,7 @@ def properties(self): def models(self): return { "fine_model.sql": models__fine_model, - "fine_model_but_with_a_no_good_very_long_name.sql": - models__file_model_but_with_a_no_good_very_long_name, + "fine_model_but_with_a_no_good_very_long_name.sql": models__file_model_but_with_a_no_good_very_long_name, "problematic_model.sql": models__problematic_model, } @@ -71,9 +68,7 @@ def project_config_update(self): "quote_columns": False, "test": self.column_type_overrides(), }, - "tests": { - "+schema": TEST_AUDIT_SCHEMA_SUFFIX - } + "tests": {"+schema": 
TEST_AUDIT_SCHEMA_SUFFIX}, } def column_type_overrides(self): @@ -87,8 +82,8 @@ def run_tests_store_one_failure(self, project): project.adapter, [ f"{self.test_audit_schema}.unique_problematic_model_id", - "expected_unique_problematic_model_id" - ] + "expected_unique_problematic_model_id", + ], ) def run_tests_store_failures_and_assert(self, project): @@ -98,39 +93,59 @@ def run_tests_store_failures_and_assert(self, project): # compare test results actual = [(r.status, r.failures) for r in results] - expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0), - ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)] + expected = [ + ("pass", 0), + ("pass", 0), + ("pass", 0), + ("pass", 0), + ("fail", 2), + ("fail", 2), + ("fail", 2), + ("fail", 10), + ] assert sorted(actual) == sorted(expected) # compare test results stored in database - check_relations_equal(project.adapter, [ - f"{self.test_audit_schema}.failing_test", - "expected_failing_test" - ]) - check_relations_equal(project.adapter, [ - f"{self.test_audit_schema}.not_null_problematic_model_id", - "expected_not_null_problematic_model_id" - ]) - check_relations_equal(project.adapter, [ - f"{self.test_audit_schema}.unique_problematic_model_id", - "expected_unique_problematic_model_id" - ]) - check_relations_equal(project.adapter, [ - f"{self.test_audit_schema}.accepted_values_problemat" - "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628", - "expected_accepted_values" - ]) + check_relations_equal( + project.adapter, [f"{self.test_audit_schema}.failing_test", "expected_failing_test"] + ) + check_relations_equal( + project.adapter, + [ + f"{self.test_audit_schema}.not_null_problematic_model_id", + "expected_not_null_problematic_model_id", + ], + ) + check_relations_equal( + project.adapter, + [ + f"{self.test_audit_schema}.unique_problematic_model_id", + "expected_unique_problematic_model_id", + ], + ) + check_relations_equal( + project.adapter, + [ + f"{self.test_audit_schema}.accepted_values_problemat" + "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628", + "expected_accepted_values", + ], + ) class TestStoreTestFailures(StoreTestFailuresBase): @pytest.fixture(scope="function") def clean_up(self, project): yield - with project.adapter.connection_named('__test'): - relation = project.adapter.Relation.create(database=project.database, schema=self.test_audit_schema) + with project.adapter.connection_named("__test"): + relation = project.adapter.Relation.create( + database=project.database, schema=self.test_audit_schema + ) project.adapter.drop_schema(relation) - relation = project.adapter.Relation.create(database=project.database, schema=project.test_schema) + relation = project.adapter.Relation.create( + database=project.database, schema=project.test_schema + ) project.adapter.drop_schema(relation) def column_type_overrides(self): diff --git a/tests/functional/test_selection/fixtures.py b/tests/functional/test_selection/fixtures.py index ae798edd3fd..48c3f40c62d 100644 --- a/tests/functional/test_selection/fixtures.py +++ b/tests/functional/test_selection/fixtures.py @@ -64,7 +64,7 @@ tags = ['a_or_b'] ) }} -select 1 as fun +select * FROM {{ref('model_b')}} """ diff --git a/tests/functional/test_selection/test_selection_expansion.py b/tests/functional/test_selection/test_selection_expansion.py index b563398e89f..290b8f066ff 100644 --- a/tests/functional/test_selection/test_selection_expansion.py +++ b/tests/functional/test_selection/test_selection_expansion.py @@ -184,6 +184,24 @@ def test_model_a_exclude_specific_test_cautious( 
self.list_tests_and_assert(select, exclude, expected, indirect_selection) self.run_tests_and_assert(select, exclude, expected, indirect_selection) + def test_model_a_exclude_specific_test_buildable( + self, + project, + ): + select = "model_a" + exclude = "unique_model_a_fun" + expected = [ + "just_a", + "cf_a_b", + "cf_a_src", + "relationships_model_a_fun__fun__ref_model_b_", + "relationships_model_a_fun__fun__source_my_src_my_tbl_", + ] + indirect_selection = "buildable" + + self.list_tests_and_assert(select, exclude, expected, indirect_selection) + self.run_tests_and_assert(select, exclude, expected, indirect_selection) + def test_only_generic( self, project, @@ -374,6 +392,40 @@ def test_model_a_indirect_selection_eager( self.list_tests_and_assert(select, exclude, expected, indirect_selection) self.run_tests_and_assert(select, exclude, expected, indirect_selection) + def test_model_a_indirect_selection_cautious( + self, + project, + ): + select = "model_a" + exclude = None + expected = [ + "just_a", + "unique_model_a_fun", + ] + indirect_selection = "cautious" + + self.list_tests_and_assert(select, exclude, expected, indirect_selection) + self.run_tests_and_assert(select, exclude, expected, indirect_selection) + + def test_model_a_indirect_selection_buildable( + self, + project, + ): + select = "model_a" + exclude = None + expected = [ + "cf_a_b", + "cf_a_src", + "just_a", + "relationships_model_a_fun__fun__ref_model_b_", + "relationships_model_a_fun__fun__source_my_src_my_tbl_", + "unique_model_a_fun", + ] + indirect_selection = "buildable" + + self.list_tests_and_assert(select, exclude, expected, indirect_selection) + self.run_tests_and_assert(select, exclude, expected, indirect_selection) + def test_model_a_indirect_selection_exclude_unique_tests( self, project, @@ -402,16 +454,21 @@ def selectors(self): definition: method: fqn value: model_a - - name: model_a_no_indirect_selection + - name: model_a_cautious_indirect_selection definition: method: fqn value: model_a indirect_selection: "cautious" - - name: model_a_yes_indirect_selection + - name: model_a_eager_indirect_selection definition: method: fqn value: model_a indirect_selection: "eager" + - name: model_a_buildable_indirect_selection + definition: + method: fqn + value: model_a + indirect_selection: "buildable" """ def test_selector_model_a_unset_indirect_selection( @@ -440,7 +497,7 @@ def test_selector_model_a_unset_indirect_selection( selector_name="model_a_unset_indirect_selection", ) - def test_selector_model_a_no_indirect_selection( + def test_selector_model_a_cautious_indirect_selection( self, project, ): @@ -450,16 +507,42 @@ def test_selector_model_a_no_indirect_selection( include=None, exclude=None, expected_tests=expected, - selector_name="model_a_no_indirect_selection", + selector_name="model_a_cautious_indirect_selection", + ) + self.run_tests_and_assert( + include=None, + exclude=None, + expected_tests=expected, + selector_name="model_a_cautious_indirect_selection", + ) + + def test_selector_model_a_eager_indirect_selection( + self, + project, + ): + expected = [ + "cf_a_b", + "cf_a_src", + "just_a", + "relationships_model_a_fun__fun__ref_model_b_", + "relationships_model_a_fun__fun__source_my_src_my_tbl_", + "unique_model_a_fun", + ] + + self.list_tests_and_assert( + include=None, + exclude=None, + expected_tests=expected, + selector_name="model_a_eager_indirect_selection", ) self.run_tests_and_assert( include=None, exclude=None, expected_tests=expected, - selector_name="model_a_no_indirect_selection", + 
selector_name="model_a_eager_indirect_selection", ) - def test_selector_model_a_yes_indirect_selection( + def test_selector_model_a_buildable_indirect_selection( self, project, ): @@ -476,11 +559,11 @@ def test_selector_model_a_yes_indirect_selection( include=None, exclude=None, expected_tests=expected, - selector_name="model_a_yes_indirect_selection", + selector_name="model_a_buildable_indirect_selection", ) self.run_tests_and_assert( include=None, exclude=None, expected_tests=expected, - selector_name="model_a_yes_indirect_selection", + selector_name="model_a_buildable_indirect_selection", ) diff --git a/tests/functional/timezones/test_timezones.py b/tests/functional/timezones/test_timezones.py new file mode 100644 index 00000000000..7b0135442c8 --- /dev/null +++ b/tests/functional/timezones/test_timezones.py @@ -0,0 +1,67 @@ +import os +import pytest +from freezegun import freeze_time + +from dbt.tests.util import run_dbt + + +model_sql = """ +{{ + config( + materialized='table' + ) +}} + +select + '{{ run_started_at.astimezone(modules.pytz.timezone("America/New_York")) }}' as run_started_at_est, + '{{ run_started_at }}' as run_started_at_utc +""" + + +class TestTimezones: + @pytest.fixture(scope="class") + def models(self): + return {"timezones.sql": model_sql} + + @pytest.fixture(scope="class") + def dbt_profile_data(self, unique_schema): + return { + "test": { + "outputs": { + "dev": { + "type": "postgres", + "threads": 1, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": os.getenv("POSTGRES_TEST_USER", "root"), + "pass": os.getenv("POSTGRES_TEST_PASS", "password"), + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + "schema": unique_schema, + }, + }, + "target": "dev", + } + } + + @pytest.fixture(scope="class") + def query(self, project): + return """ + select + run_started_at_est, + run_started_at_utc + from {schema}.timezones + """.format( + schema=project.test_schema + ) + + @freeze_time("2022-01-01 03:00:00", tz_offset=0) + def test_run_started_at(self, project, query): + results = run_dbt(["run"]) + + assert len(results) == 1 + + result = project.run_sql(query, fetch="all")[0] + est, utc = result + + assert utc == "2022-01-01 03:00:00+00:00" + assert est == "2021-12-31 22:00:00-05:00" diff --git a/tests/unit/test_connection_retries.py b/tests/unit/test_connection_retries.py index 8b031ce5ab4..9076adb7ef9 100644 --- a/tests/unit/test_connection_retries.py +++ b/tests/unit/test_connection_retries.py @@ -1,7 +1,7 @@ import functools import pytest from requests.exceptions import RequestException -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionError from dbt.utils import _connection_exception_retry @@ -28,7 +28,7 @@ class TestMaxRetries: def test_no_retry(self): fn_to_retry = functools.partial(no_success_fn) - with pytest.raises(ConnectionException): + with pytest.raises(ConnectionError): _connection_exception_retry(fn_to_retry, 3) diff --git a/tests/unit/test_deprecations.py b/tests/unit/test_deprecations.py new file mode 100644 index 00000000000..ce80ba3d040 --- /dev/null +++ b/tests/unit/test_deprecations.py @@ -0,0 +1,602 @@ +import argparse +import pytest + +from dbt.internal_deprecations import deprecated +import dbt.exceptions +from dbt.node_types import NodeType + + +@deprecated(reason="just because", version="1.23.0", suggested_action="Make some updates") +def to_be_decorated(): + return 5 + + +# simpletest that the return value is not modified +def test_deprecated_func(): + assert 
hasattr(to_be_decorated, "__wrapped__") + assert to_be_decorated() == 5 + + +class TestDeprecatedFunctions: + def is_deprecated(self, func): + assert hasattr(func, "__wrapped__") + # TODO: add in log check + + def test_warn(self): + self.is_deprecated(dbt.exceptions.warn) + + +class TestDeprecatedExceptionFunctions: + def runFunc(self, func, *args): + return func(*args) + + def is_deprecated(self, func): + assert hasattr(func, "__wrapped__") + # TODO: add in log check + + def test_missing_config(self): + func = dbt.exceptions.missing_config + exception = dbt.exceptions.MissingConfigError + model = argparse.Namespace() + model.unique_id = "" + name = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(model, name) + + def test_missing_materialization(self): + func = dbt.exceptions.missing_materialization + exception = dbt.exceptions.MissingMaterializationError + model = argparse.Namespace() + model.config = argparse.Namespace() + model.config.materialized = "" + adapter_type = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(model, adapter_type) + + def test_missing_relation(self): + func = dbt.exceptions.missing_relation + exception = dbt.exceptions.MissingRelationError + relation = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(relation) + + def test_raise_ambiguous_alias(self): + func = dbt.exceptions.raise_ambiguous_alias + exception = dbt.exceptions.AmbiguousAliasError + node_1 = argparse.Namespace() + node_1.unique_id = "" + node_1.original_file_path = "" + node_2 = argparse.Namespace() + node_2.unique_id = "" + node_2.original_file_path = "" + duped_name = "string" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(node_1, node_2, duped_name) + + def test_raise_ambiguous_catalog_match(self): + func = dbt.exceptions.raise_ambiguous_catalog_match + exception = dbt.exceptions.AmbiguousCatalogMatchError + unique_id = "" + match_1 = {"metadata": {"schema": ""}} + match_2 = {"metadata": {"schema": ""}} + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(unique_id, match_1, match_2) + + def test_raise_cache_inconsistent(self): + func = dbt.exceptions.raise_cache_inconsistent + exception = dbt.exceptions.CacheInconsistencyError + msg = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(msg) + + def test_raise_dataclass_not_dict(self): + func = dbt.exceptions.raise_dataclass_not_dict + exception = dbt.exceptions.DataclassNotDictError + obj = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(obj) + + def test_raise_compiler_error(self): + func = dbt.exceptions.raise_compiler_error + exception = dbt.exceptions.CompilationError + msg = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(msg) + + def test_raise_database_error(self): + func = dbt.exceptions.raise_database_error + exception = dbt.exceptions.DbtDatabaseError + msg = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(msg) + + def test_raise_dep_not_found(self): + func = dbt.exceptions.raise_dep_not_found + exception = dbt.exceptions.DependencyNotFoundError + node = "" + node_description = "" + 
required_pkg = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(node, node_description, required_pkg) + + def test_raise_dependency_error(self): + func = dbt.exceptions.raise_dependency_error + exception = dbt.exceptions.DependencyError + msg = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(msg) + + def test_raise_duplicate_patch_name(self): + func = dbt.exceptions.raise_duplicate_patch_name + exception = dbt.exceptions.DuplicatePatchPathError + patch_1 = argparse.Namespace() + patch_1.name = "" + patch_1.original_file_path = "" + existing_patch_path = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(patch_1, existing_patch_path) + + def test_raise_duplicate_resource_name(self): + func = dbt.exceptions.raise_duplicate_resource_name + exception = dbt.exceptions.DuplicateResourceNameError + node_1 = argparse.Namespace() + node_1.name = "" + node_1.resource_type = NodeType("model") + node_1.column_name = "" + node_1.unique_id = "" + node_1.original_file_path = "" + node_2 = argparse.Namespace() + node_2.name = "" + node_2.resource_type = "" + node_2.unique_id = "" + node_2.original_file_path = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(node_1, node_2) + + def test_raise_invalid_property_yml_version(self): + func = dbt.exceptions.raise_invalid_property_yml_version + exception = dbt.exceptions.PropertyYMLError + path = "" + issue = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(path, issue) + + def test_raise_not_implemented(self): + func = dbt.exceptions.raise_not_implemented + exception = dbt.exceptions.NotImplementedError + msg = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(msg) + + def test_relation_wrong_type(self): + func = dbt.exceptions.relation_wrong_type + exception = dbt.exceptions.RelationWrongTypeError + + relation = argparse.Namespace() + relation.type = "" + expected_type = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(relation, expected_type) + + def test_raise_duplicate_alias(self): + func = dbt.exceptions.raise_duplicate_alias + exception = dbt.exceptions.DuplicateAliasError + kwargs = {"": ""} + aliases = {"": ""} + canonical_key = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(kwargs, aliases, canonical_key) + + def test_raise_duplicate_source_patch_name(self): + func = dbt.exceptions.raise_duplicate_source_patch_name + exception = dbt.exceptions.DuplicateSourcePatchNameError + patch_1 = argparse.Namespace() + patch_1.name = "" + patch_1.path = "" + patch_1.overrides = "" + patch_2 = argparse.Namespace() + patch_2.path = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(patch_1, patch_2) + + def test_raise_duplicate_macro_patch_name(self): + func = dbt.exceptions.raise_duplicate_macro_patch_name + exception = dbt.exceptions.DuplicateMacroPatchNameError + patch_1 = argparse.Namespace() + patch_1.package_name = "" + patch_1.name = "" + patch_1.original_file_path = "" + existing_patch_path = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + 
func(patch_1, existing_patch_path) + + def test_raise_duplicate_macro_name(self): + func = dbt.exceptions.raise_duplicate_macro_name + exception = dbt.exceptions.DuplicateMacroNameError + node_1 = argparse.Namespace() + node_1.name = "" + node_1.package_name = "" + node_1.original_file_path = "" + node_1.unique_id = "" + node_2 = argparse.Namespace() + node_2.package_name = "" + node_2.unique_id = "" + node_2.original_file_path = "" + namespace = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(node_1, node_2, namespace) + + def test_approximate_relation_match(self): + func = dbt.exceptions.approximate_relation_match + exception = dbt.exceptions.ApproximateMatchError + target = "" + relation = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(target, relation) + + def test_get_relation_returned_multiple_results(self): + func = dbt.exceptions.get_relation_returned_multiple_results + exception = dbt.exceptions.RelationReturnedMultipleResultsError + kwargs = {} + matches = [] + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(kwargs, matches) + + def test_system_error(self): + func = dbt.exceptions.system_error + exception = dbt.exceptions.OperationError + operation_name = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(operation_name) + + def test_invalid_materialization_argument(self): + func = dbt.exceptions.invalid_materialization_argument + exception = dbt.exceptions.MaterializationArgError + name = "" + argument = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(name, argument) + + def test_bad_package_spec(self): + func = dbt.exceptions.bad_package_spec + exception = dbt.exceptions.BadSpecError + repo = "" + spec = "" + error = argparse.Namespace() + error.stderr = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(repo, spec, error) + + # def test_raise_git_cloning_error(self): + # func = dbt.exceptions.raise_git_cloning_error + # exception = dbt.exceptions.CommandResultError + + # error = dbt.exceptions.CommandResultError + # error.cwd = "" + # error.cmd = [""] + # error.returncode = 1 + # error.stdout = "" + # error.stderr = "" + + # self.is_deprecated(func) + + # assert(hasattr(func, '__wrapped__')) + # with pytest.raises(exception): + # func(error) + + def test_raise_git_cloning_problem(self): + func = dbt.exceptions.raise_git_cloning_problem + exception = dbt.exceptions.UnknownGitCloningProblemError + repo = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(repo) + + def test_macro_invalid_dispatch_arg(self): + func = dbt.exceptions.macro_invalid_dispatch_arg + exception = dbt.exceptions.MacroDispatchArgError + macro_name = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(macro_name) + + def test_dependency_not_found(self): + func = dbt.exceptions.dependency_not_found + exception = dbt.exceptions.GraphDependencyNotFoundError + node = argparse.Namespace() + node.unique_id = "" + dependency = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(node, dependency) + + def test_target_not_found(self): + func = 
dbt.exceptions.target_not_found + exception = dbt.exceptions.TargetNotFoundError + node = argparse.Namespace() + node.unique_id = "" + node.original_file_path = "" + node.resource_type = "" + target_name = "" + target_kind = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(node, target_name, target_kind) + + def test_doc_target_not_found(self): + func = dbt.exceptions.doc_target_not_found + exception = dbt.exceptions.DocTargetNotFoundError + model = argparse.Namespace() + model.unique_id = "" + target_doc_name = "" + target_doc_package = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(model, target_doc_name, target_doc_package) + + def test_ref_bad_context(self): + func = dbt.exceptions.ref_bad_context + exception = dbt.exceptions.RefBadContextError + model = argparse.Namespace() + model.name = "" + args = [] + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(model, args) + + def test_metric_invalid_args(self): + func = dbt.exceptions.metric_invalid_args + exception = dbt.exceptions.MetricArgsError + model = argparse.Namespace() + model.unique_id = "" + args = [] + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(model, args) + + def test_ref_invalid_args(self): + func = dbt.exceptions.ref_invalid_args + exception = dbt.exceptions.RefArgsError + model = argparse.Namespace() + model.unique_id = "" + args = [] + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(model, args) + + def test_invalid_bool_error(self): + func = dbt.exceptions.invalid_bool_error + exception = dbt.exceptions.BooleanError + return_value = "" + macro_name = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(return_value, macro_name) + + def test_invalid_type_error(self): + func = dbt.exceptions.invalid_type_error + exception = dbt.exceptions.MacroArgTypeError + method_name = "" + arg_name = "" + got_value = "" + expected_type = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(method_name, arg_name, got_value, expected_type) + + def test_disallow_secret_env_var(self): + func = dbt.exceptions.disallow_secret_env_var + exception = dbt.exceptions.SecretEnvVarLocationError + env_var_name = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(env_var_name) + + def test_raise_parsing_error(self): + func = dbt.exceptions.raise_parsing_error + exception = dbt.exceptions.ParsingError + msg = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(msg) + + def test_raise_unrecognized_credentials_type(self): + func = dbt.exceptions.raise_unrecognized_credentials_type + exception = dbt.exceptions.UnrecognizedCredentialTypeError + typename = "" + supported_types = [] + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(typename, supported_types) + + def test_raise_patch_targets_not_found(self): + func = dbt.exceptions.raise_patch_targets_not_found + exception = dbt.exceptions.PatchTargetNotFoundError + node = argparse.Namespace() + node.name = "" + node.original_file_path = "" + patches = {"patch": node} + + self.is_deprecated(func) + + assert 
hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(patches) + + def test_multiple_matching_relations(self): + func = dbt.exceptions.multiple_matching_relations + exception = dbt.exceptions.RelationReturnedMultipleResultsError + kwargs = {} + matches = [] + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(kwargs, matches) + + def test_materialization_not_available(self): + func = dbt.exceptions.materialization_not_available + exception = dbt.exceptions.MaterializationNotAvailableError + model = argparse.Namespace() + model.config = argparse.Namespace() + model.config.materialized = "" + adapter_type = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(model, adapter_type) + + def test_macro_not_found(self): + func = dbt.exceptions.macro_not_found + exception = dbt.exceptions.MacroNotFoundError + model = argparse.Namespace() + model.unique_id = "" + target_macro_id = "" + + self.is_deprecated(func) + + assert hasattr(func, "__wrapped__") + with pytest.raises(exception): + func(model, target_macro_id) diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 3cdaba76f55..cc23b016762 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -1,9 +1,11 @@ # flake8: noqa from dbt.events.test_types import UnitTestInfo from dbt.events import AdapterLogger -from dbt.events.functions import event_to_json, LOG_VERSION, event_to_dict +from dbt.events.functions import msg_to_json, LOG_VERSION, msg_to_dict +from dbt.events.base_types import msg_from_base_event from dbt.events.types import * from dbt.events.test_types import * +from dbt.contracts.results import TimingInfo from dbt.events.base_types import ( BaseEvent, @@ -76,10 +78,10 @@ def test_formatting(self): # ensure AdapterLogger and subclasses makes all base_msg members # of type string; when someone writes logger.debug(a) where a is # any non-string object - event = AdapterEventDebug(name="dbt_tests", base_msg=[1,2,3], args=(3,)) + event = AdapterEventDebug(name="dbt_tests", base_msg=[1, 2, 3], args=(3,)) assert isinstance(event.base_msg, str) - event = JinjaLogDebug(msg=[1,2,3]) + event = JinjaLogDebug(msg=[1, 2, 3]) assert isinstance(event.msg, str) @@ -102,43 +104,6 @@ def test_event_codes(self): all_codes.add(code) -def MockNode(): - return ModelNode( - alias="model_one", - name="model_one", - database="dbt", - schema="analytics", - resource_type=NodeType.Model, - unique_id="model.root.model_one", - fqn=["root", "model_one"], - package_name="root", - original_file_path="model_one.sql", - root_path="/usr/src/app", - refs=[], - sources=[], - depends_on=DependsOn(), - config=NodeConfig.from_dict( - { - "enabled": True, - "materialized": "view", - "persist_docs": {}, - "post-hook": [], - "pre-hook": [], - "vars": {}, - "quoting": {}, - "column_types": {}, - "tags": [], - } - ), - tags=[], - path="model_one.sql", - raw_code="", - description="", - columns={}, - checksum=FileHash.from_contents(""), - ) - - sample_values = [ # A - pre-project loading MainReportVersion(version=""), @@ -146,15 +111,9 @@ def MockNode(): MainTrackingUserState(user_state=""), MergedFromState(num_merged=0, sample=[]), MissingProfileTarget(profile_name="", target_name=""), - InvalidVarsYAML(), - DbtProjectError(), - DbtProjectErrorException(exc=""), - DbtProfileError(), - DbtProfileErrorException(exc=""), - ProfileListTitle(), - ListSingleProfile(profile=""), - NoDefinedProfiles(), - ProfileHelpMessage(), + 
InvalidOptionYAML(option_name="vars"), + LogDbtProjectError(), + LogDbtProfileError(), StarterProjectPath(dir=""), ConfigFolderDirectory(dir=""), NoSampleProfileFound(adapter=""), @@ -165,7 +124,6 @@ def MockNode(): InvalidProfileTemplateYAML(), ProjectNameAlreadyExists(name=""), ProjectCreated(project_name=""), - # D - Deprecations ====================== PackageRedirectDeprecation(old_name="", new_name=""), PackageInstallPathDeprecation(), @@ -174,7 +132,7 @@ def MockNode(): AdapterDeprecationWarning(old_name="", new_name=""), MetricAttributesRenamed(metric_name=""), ExposureNameDeprecation(exposure=""), - + InternalDeprecation(name="", reason="", suggested_action="", version=""), # E - DB Adapter ====================== AdapterEventDebug(), AdapterEventInfo(), @@ -199,35 +157,12 @@ def MockNode(): ), SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")), - UncachedRelation( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ), - AddLink( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), + CacheAction( + action="adding_relation", ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), + ref_key_2=ReferenceKeyMsg(database="", schema="", identifier=""), ), - AddRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), - DropMissingRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), - DropCascade( - dropped=ReferenceKeyMsg(database="", schema="", identifier=""), - consequences=[ReferenceKeyMsg(database="", schema="", identifier="")], - ), - DropRelation(dropped=ReferenceKeyMsg()), - UpdateReference( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - cached_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ), - TemporaryRelation(key=ReferenceKeyMsg(database="", schema="", identifier="")), - RenameSchema( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ), - DumpBeforeAddGraph(dump=dict()), - DumpAfterAddGraph(dump=dict()), - DumpBeforeRenameSchema(dump=dict()), - DumpAfterRenameSchema(dump=dict()), + CacheDumpGraph(before_after="before", action="rename", dump=dict()), AdapterImportError(exc=""), PluginLoadError(exc_info=""), NewConnectionOpening(connection_state=""), @@ -240,24 +175,14 @@ def MockNode(): BuildingCatalog(), DatabaseErrorRunningHook(hook_type=""), HooksRunning(num_hooks=0, hook_type=""), - HookFinished(stat_line="", execution="", execution_time=0), - + FinishedRunningStats(stat_line="", execution="", execution_time=0), # I - Project parsing ====================== - ParseCmdStart(), - ParseCmdCompiling(), - ParseCmdWritingManifest(), - ParseCmdDone(), - ManifestDependenciesLoaded(), - ManifestLoaderCreated(), - ManifestLoaded(), - ManifestChecked(), - ManifestFlatGraphBuilt(), - ParseCmdPerfInfoPath(path=""), + ParseCmdOut(msg="testing"), GenericTestFileParse(path=""), MacroFileParse(path=""), - PartialParsingExceptionProcessingFile(file=""), + PartialParsingErrorProcessingFile(file=""), PartialParsingFile(file_id=""), - PartialParsingException(exc_info={}), + PartialParsingError(exc_info={}), PartialParsingSkipParsing(), UnableToPartialParse(reason="something went wrong"), PartialParsingNotEnabled(), @@ -287,9 +212,11 @@ def MockNode(): 
SeedExceedsLimitAndPathChanged(package_name="", name=""), SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name=""), UnusedTables(unused_tables=[]), - WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type=""), + WrongResourceSchemaFile( + patch_name="", resource_type="", file_path="", plural_resource_type="" + ), NoNodeForYamlKey(patch_name="", yaml_key="", file_path=""), - MacroPatchNotFound(patch_name=""), + MacroNotFoundForPatch(patch_name=""), NodeNotFoundOrDisabled( original_file_path="", unique_id="", @@ -300,9 +227,7 @@ def MockNode(): disabled="", ), JinjaLogWarning(), - # M - Deps generation ====================== - GitSparseCheckoutSubdirectory(subdir=""), GitProgressCheckoutRevision(revision=""), GitProgressUpdatingExistingDependency(dir=""), @@ -331,9 +256,7 @@ def MockNode(): RegistryResponseMissingNestedKeys(response=""), RegistryResponseExtraNestedKeys(response=""), DepsSetDownloadDirectory(path=""), - # Q - Node execution ====================== - RunningOperationCaughtError(exc=""), CompileComplete(), FreshnessCheckComplete(), @@ -412,25 +335,21 @@ def MockNode(): NoNodesSelected(), DepsUnpinned(revision="", git=""), NoNodesForSelectionCriteria(spec_raw=""), - # W - Node testing ====================== - CatchableExceptionOnRun(exc=""), - InternalExceptionOnRun(build_path="", exc=""), + InternalErrorOnRun(build_path="", exc=""), GenericExceptionOnRun(build_path="", unique_id="", exc=""), NodeConnectionReleaseError(node_name="", exc=""), FoundStats(stat_line=""), - # Z - misc ====================== - MainKeyboardInterrupt(), MainEncounteredError(exc=""), MainStackTrace(stack_trace=""), SystemErrorRetrievingModTime(path=""), SystemCouldNotWrite(path="", reason="", exc=""), SystemExecutingCmd(cmd=[""]), - SystemStdOutMsg(bmsg=b""), - SystemStdErrMsg(bmsg=b""), + SystemStdOut(bmsg=b""), + SystemStdErr(bmsg=b""), SystemReportReturnCode(returncode=0), TimingInfoCollected(), LogDebugStackTrace(), @@ -445,7 +364,7 @@ def MockNode(): ServingDocsExitInfo(), RunResultWarning(resource_type="", node_name="", path=""), RunResultFailure(resource_type="", node_name="", path=""), - StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0,"total": 0}), + StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0, "total": 0}), RunResultError(msg=""), RunResultErrorNoMessage(status=""), SQLCompiledPath(path=""), @@ -464,21 +383,21 @@ def MockNode(): FlushEventsFailure(), TrackingInitializeFailure(), RunResultWarningMessage(), + # TODO fix these DebugEnvironmentDetails(msg=""), - DebugDependenciesDetails(msg=""), - DebugDependenciesSuccess(msg=""), - DebugDependenciesFailure(msg=""), - DebugConfigurationDetails(msg=""), - DebugConfigurationSuccess(msg=""), - DebugConfigurationFailure(msg=""), - DebugConnectionDetails(msg=""), - DebugConnectionSuccess(msg=""), - DebugConnectionFailure(msg=""), - DebugRunSuccess(msg=""), - DebugRunFailure(msg=""), - DebugMiscMessages(msg=""), - ListRunDetails(msg=""), - + # DebugDependenciesDetails(msg=""), + # DebugDependenciesSuccess(msg=""), + # DebugDependenciesFailure(msg=""), + # DebugConfigurationDetails(msg=""), + # DebugConfigurationSuccess(msg=""), + # DebugConfigurationFailure(msg=""), + # DebugConnectionDetails(msg=""), + # DebugConnectionSuccess(msg=""), + # DebugConnectionFailure(msg=""), + # DebugRunSuccess(msg=""), + # DebugRunFailure(msg=""), + # DebugMiscMessages(msg=""), + # ListRunDetails(msg=""), # T - tests ====================== IntegrationTestInfo(), IntegrationTestDebug(), @@ 
-486,12 +405,9 @@ def MockNode(): IntegrationTestError(), IntegrationTestException(), UnitTestInfo(), - ] - - class TestEventJSONSerialization: # attempts to test that every event is serializable to json. @@ -513,11 +429,21 @@ def test_all_serializable(self): # if we have everything we need to test, try to serialize everything for event in sample_values: - event_dict = event_to_dict(event) + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) try: - event_json = event_to_json(event) + msg_json = msg_to_json(msg) except Exception as e: raise Exception(f"{event} is not serializable to json. Originating exception: {e}") T = TypeVar("T") + + +def test_date_serialization(): + ti = TimingInfo("test") + ti.begin() + ti.end() + ti_dict = ti.to_dict() + assert ti_dict["started_at"].endswith("Z") + assert ti_dict["completed_at"].endswith("Z") diff --git a/tests/unit/test_functions.py b/tests/unit/test_functions.py new file mode 100644 index 00000000000..a43361a7e94 --- /dev/null +++ b/tests/unit/test_functions.py @@ -0,0 +1,45 @@ +from argparse import Namespace +import pytest + +import dbt.flags as flags +from dbt.events.functions import warn_or_error +from dbt.events.types import NoNodesForSelectionCriteria +from dbt.exceptions import EventCompilationError + + +@pytest.mark.parametrize( + "warn_error_options,expect_compilation_exception", + [ + ('{"include": "all"}', True), + ('{"include": [NoNodesForSelectionCriteria]}', True), + ('{"include": []}', False), + ("{}", False), + ('{"include": [MainTrackingUserState]}', False), + ('{"include": "all", "exclude": [NoNodesForSelectionCriteria]}', False), + ], +) +def test_warn_or_error_warn_error_options(warn_error_options, expect_compilation_exception): + args = Namespace(warn_error_options=warn_error_options) + flags.set_from_args(args, {}) + if expect_compilation_exception: + with pytest.raises(EventCompilationError): + warn_or_error(NoNodesForSelectionCriteria()) + else: + warn_or_error(NoNodesForSelectionCriteria()) + + +@pytest.mark.parametrize( + "warn_error,expect_compilation_exception", + [ + (True, True), + (False, False), + ], +) +def test_warn_or_error_warn_error(warn_error, expect_compilation_exception): + args = Namespace(warn_error=warn_error) + flags.set_from_args(args, {}) + if expect_compilation_exception: + with pytest.raises(EventCompilationError): + warn_or_error(NoNodesForSelectionCriteria()) + else: + warn_or_error(NoNodesForSelectionCriteria()) diff --git a/tests/unit/test_helper_types.py b/tests/unit/test_helper_types.py new file mode 100644 index 00000000000..f0aa077b46e --- /dev/null +++ b/tests/unit/test_helper_types.py @@ -0,0 +1,45 @@ +import pytest + +from dbt.helper_types import IncludeExclude, WarnErrorOptions +from dbt.dataclass_schema import ValidationError + + +class TestIncludeExclude: + def test_init_invalid(self): + with pytest.raises(ValidationError): + IncludeExclude(include="invalid") + + with pytest.raises(ValidationError): + IncludeExclude(include=["ItemA"], exclude=["ItemB"]) + + @pytest.mark.parametrize( + "include,exclude,expected_includes", + [ + ("all", [], True), + ("*", [], True), + ("*", ["ItemA"], False), + (["ItemA"], [], True), + (["ItemA", "ItemB"], [], True), + ], + ) + def test_includes(self, include, exclude, expected_includes): + include_exclude = IncludeExclude(include=include, exclude=exclude) + + assert include_exclude.includes("ItemA") == expected_includes + + +class TestWarnErrorOptions: + def test_init(self): + with pytest.raises(ValidationError): + 
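+            # "InvalidError" does not name a real dbt event type, so
+            # WarnErrorOptions should reject it at validation time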
WarnErrorOptions(include=["InvalidError"]) + + with pytest.raises(ValidationError): + WarnErrorOptions(include="*", exclude=["InvalidError"]) + + warn_error_options = WarnErrorOptions(include=["NoNodesForSelectionCriteria"]) + assert warn_error_options.include == ["NoNodesForSelectionCriteria"] + assert warn_error_options.exclude == [] + + warn_error_options = WarnErrorOptions(include="*", exclude=["NoNodesForSelectionCriteria"]) + assert warn_error_options.include == "*" + assert warn_error_options.exclude == ["NoNodesForSelectionCriteria"] diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py index d5b070c41e2..2b03cac453a 100644 --- a/tests/unit/test_proto_events.py +++ b/tests/unit/test_proto_events.py @@ -1,4 +1,3 @@ -import sys from dbt.events.types import ( MainReportVersion, MainReportArgs, @@ -8,78 +7,104 @@ LogStartLine, LogTestResult, ) -from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars, info -from dbt.events import proto_types as pl +from dbt.events.functions import msg_to_dict, LOG_VERSION, reset_metadata_vars +from dbt.events import proto_types as pt +from dbt.events.base_types import msg_from_base_event, EventLevel from dbt.version import installed -info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", "ts", "extra", "category"} +info_keys = { + "name", + "code", + "msg", + "level", + "invocation_id", + "pid", + "thread", + "ts", + "extra", + "category", +} def test_events(): # A001 event event = MainReportVersion(version=str(installed), log_version=LOG_VERSION) - event_dict = event_to_dict(event) - event_json = event.to_json() - serialized = bytes(event) + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() + serialized = bytes(msg) assert "Running with dbt=" in str(serialized) - assert set(event_dict.keys()) == {"version", "info", "log_version"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "A001" + assert set(msg_dict.keys()) == {"info", "data"} + assert set(msg_dict["data"].keys()) == {"version", "log_version"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "A001" # Extract EventInfo from serialized message - generic_event = pl.GenericMessage().parse(serialized) + generic_event = pt.GenericMessage().parse(serialized) assert generic_event.info.code == "A001" # get the message class for the real message from the generic message - message_class = getattr(sys.modules["dbt.events.proto_types"], generic_event.info.name) - new_event = message_class().parse(serialized) - assert new_event.info.code == event.info.code - assert new_event.version == event.version + message_class = getattr(pt, f"{generic_event.info.name}Msg") + new_msg = message_class().parse(serialized) + assert new_msg.info.code == msg.info.code + assert new_msg.data.version == msg.data.version # A002 event event = MainReportArgs(args={"one": "1", "two": "2"}) - event_dict = event_to_dict(event) - event_json = event.to_json() + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() - assert set(event_dict.keys()) == {"info", "args"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "A002" + assert set(msg_dict.keys()) == {"info", "data"} + assert set(msg_dict["data"].keys()) == {"args"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "A002" def 
test_exception_events(): event = RollbackFailed(conn_name="test", exc_info="something failed") - event_dict = event_to_dict(event) - event_json = event.to_json() - assert set(event_dict.keys()) == {"info", "conn_name", "exc_info"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "E009" + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() + assert set(msg_dict.keys()) == {"info", "data"} + assert set(msg_dict["data"].keys()) == {"conn_name", "exc_info"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "E009" event = PluginLoadError(exc_info="something failed") - event_dict = event_to_dict(event) - event_json = event.to_json() - assert set(event_dict.keys()) == {"info", "exc_info"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "E036" - # This event has no "msg"/"message" - assert event.info.msg is None + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() + assert set(msg_dict["data"].keys()) == {"exc_info"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "E036" + assert msg.info.msg == "something failed" # Z002 event event = MainEncounteredError(exc="Rollback failed") - event_dict = event_to_dict(event) - event_json = event.to_json() + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() - assert set(event_dict.keys()) == {"info", "exc"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "Z002" + assert set(msg_dict["data"].keys()) == {"exc"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "Z002" def test_node_info_events(): + meta_dict = { + "string-key1": ["value1", 2], + "string-key2": {"nested-dict-key": "value2"}, + 1: "value-from-non-string-key", + "string-key3": 1, + "string-key4": ["string1", 1, "string2", 2], + } node_info = { "node_path": "some_path", "node_name": "some_name", @@ -89,15 +114,17 @@ def test_node_info_events(): "node_status": "started", "node_started_at": "some_time", "node_finished_at": "another_time", + "meta": meta_dict, } event = LogStartLine( description="some description", index=123, total=111, - node_info=pl.NodeInfo(**node_info), + node_info=pt.NodeInfo(**node_info), ) assert event assert event.node_info.node_path == "some_path" + assert event.node_info.meta == meta_dict def test_extra_dict_on_event(monkeypatch): @@ -107,31 +134,26 @@ def test_extra_dict_on_event(monkeypatch): reset_metadata_vars() event = MainReportVersion(version=str(installed), log_version=LOG_VERSION) - event_dict = event_to_dict(event) - assert set(event_dict["info"].keys()) == info_keys - assert event.info.extra == {"env_key": "env_value"} - serialized = bytes(event) + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + assert set(msg_dict["info"].keys()) == info_keys + assert msg.info.extra == {"env_key": "env_value"} + serialized = bytes(msg) # Extract EventInfo from serialized message - generic_event = pl.GenericMessage().parse(serialized) + generic_event = pt.GenericMessage().parse(serialized) assert generic_event.info.code == "A001" # get the message class for the real message from the generic message - message_class = getattr(sys.modules["dbt.events.proto_types"], generic_event.info.name) - new_event = message_class().parse(serialized) - assert 
new_event.info.extra == event.info.extra + message_class = getattr(pt, f"{generic_event.info.name}Msg") + new_msg = message_class().parse(serialized) + assert new_msg.info.extra == msg.info.extra # clean up reset_metadata_vars() def test_dynamic_level_events(): - event = LogTestResult( - name="model_name", - info=info(level=LogTestResult.status_to_level("pass")), - status="pass", - index=1, - num_models=3, - num_failures=0 - ) - assert event - assert event.info.level == "info" + event = LogTestResult(name="model_name", status="pass", index=1, num_models=3, num_failures=0) + msg = msg_from_base_event(event, level=EventLevel.INFO) + assert msg + assert msg.info.level == "info"
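
The unit tests above pin down the reworked structured-logging flow: an event is wrapped in a protobuf message before serialization, and message dicts now separate "info" metadata from the "data" payload. A minimal end-to-end sketch, built only from calls that appear in these tests (assuming a dbt-core 1.5 development checkout; illustrative, not canonical):

# Illustrative only: the names below (msg_from_base_event, msg_to_dict,
# msg_to_json, proto_types.GenericMessage) are taken from the tests in this diff.
from dbt.events import proto_types as pt
from dbt.events.base_types import msg_from_base_event
from dbt.events.functions import msg_to_dict, msg_to_json, LOG_VERSION
from dbt.events.types import MainReportVersion
from dbt.version import installed

event = MainReportVersion(version=str(installed), log_version=LOG_VERSION)
msg = msg_from_base_event(event)  # wrap the event in its protobuf Msg

msg_dict = msg_to_dict(msg)
assert set(msg_dict.keys()) == {"info", "data"}  # metadata vs. payload
assert msg.info.code == "A001"
json_line = msg_to_json(msg)  # JSON rendering of the wrapped message

# Round-trip: parse the generic envelope, then the concrete "<Name>Msg" class.
serialized = bytes(msg)
generic = pt.GenericMessage().parse(serialized)
concrete = getattr(pt, f"{generic.info.name}Msg")().parse(serialized)
assert concrete.data.version == msg.data.version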