diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 02ea0717225..3cdca1ad352 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.4.0a1 +current_version = 1.4.0b1 parse = (?P<major>\d+) \.(?P<minor>\d+) \.(?P<patch>\d+) diff --git a/.changes/1.4.0-b1.md b/.changes/1.4.0-b1.md new file mode 100644 index 00000000000..b2a0e96827c --- /dev/null +++ b/.changes/1.4.0-b1.md @@ -0,0 +1,89 @@ +## dbt-core 1.4.0-b1 - December 15, 2022 + +### Features + +- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) +- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521)) +- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. 
([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) +- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) + +### Fixes + +- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992)) +- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030)) +- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. ([#5436](https://github.com/dbt-labs/dbt-core/issues/5436)) +- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625)) +- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) +- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345)) +- Repair a regression which prevented basic logging before the logging subsystem is completely configured. 
([#6434](https://github.com/dbt-labs/dbt-core/issues/6434)) + +### Docs + +- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528)) +- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323)) +- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368)) + +### Under the Hood + +- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946)) +- Added flat_graph attribute to the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809)) +- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983)) +- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023)) +- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229)) +- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- Fixed extra whitespace in strings introduced by black. 
([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171)) +- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173)) +- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) +- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770)) +- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771)) +- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) +- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. ([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) +- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) +- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) +- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) +- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) + +### Dependencies + +- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) +- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) +- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) +- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) +- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in 
/core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) + +### Contributors +- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- [@pgoslatara](https://github.com/pgoslatara) 
([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) diff --git a/.changes/1.4.0/Dependency-20220923-000646.yaml b/.changes/1.4.0/Dependency-20220923-000646.yaml new file mode 100644 index 00000000000..0375eeb125f --- /dev/null +++ b/.changes/1.4.0/Dependency-20220923-000646.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core" +time: 2022-09-23T00:06:46.00000Z +custom: + Author: dependabot[bot] + PR: "5917" diff --git a/.changes/unreleased/Dependency-20221007-000848.yaml b/.changes/1.4.0/Dependency-20221007-000848.yaml similarity index 71% rename from .changes/unreleased/Dependency-20221007-000848.yaml rename to .changes/1.4.0/Dependency-20221007-000848.yaml index 8b2aebdc466..7e36733d14e 100644 --- a/.changes/unreleased/Dependency-20221007-000848.yaml +++ b/.changes/1.4.0/Dependency-20221007-000848.yaml @@ -1,7 +1,6 @@ -kind: "Dependency" +kind: "Dependencies" body: "Bump black from 22.8.0 to 22.10.0" time: 2022-10-07T00:08:48.00000Z custom: Author: dependabot[bot] - Issue: 4904 - PR: 6019 + PR: "6019" diff --git a/.changes/1.4.0/Dependency-20221020-000753.yaml b/.changes/1.4.0/Dependency-20221020-000753.yaml new file mode 100644 index 00000000000..ce0f122826b --- /dev/null +++ b/.changes/1.4.0/Dependency-20221020-000753.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core" +time: 2022-10-20T00:07:53.00000Z +custom: + Author: dependabot[bot] + PR: "6108" diff --git a/.changes/1.4.0/Dependency-20221026-000910.yaml b/.changes/1.4.0/Dependency-20221026-000910.yaml new file mode 100644 index 00000000000..d68fa8a11ef --- 
/dev/null +++ b/.changes/1.4.0/Dependency-20221026-000910.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core" +time: 2022-10-26T00:09:10.00000Z +custom: + Author: dependabot[bot] + PR: "6144" diff --git a/.changes/1.4.0/Dependency-20221205-002118.yaml b/.changes/1.4.0/Dependency-20221205-002118.yaml new file mode 100644 index 00000000000..f4203a5285c --- /dev/null +++ b/.changes/1.4.0/Dependency-20221205-002118.yaml @@ -0,0 +1,7 @@ +kind: "Dependencies" +body: "Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core" +time: 2022-12-05T00:21:18.00000Z +custom: + Author: dependabot[bot] + Issue: 4904 + PR: 6375 diff --git a/.changes/unreleased/Docs-20220908-154157.yaml b/.changes/1.4.0/Docs-20220908-154157.yaml similarity index 90% rename from .changes/unreleased/Docs-20220908-154157.yaml rename to .changes/1.4.0/Docs-20220908-154157.yaml index 2b2d30d41e5..e307f3bd5e0 100644 --- a/.changes/unreleased/Docs-20220908-154157.yaml +++ b/.changes/1.4.0/Docs-20220908-154157.yaml @@ -4,4 +4,3 @@ time: 2022-09-08T15:41:57.689162-04:00 custom: Author: andy-clapson Issue: "5791" - PR: "5684" diff --git a/.changes/unreleased/Docs-20221007-090656.yaml b/.changes/1.4.0/Docs-20221007-090656.yaml similarity index 91% rename from .changes/unreleased/Docs-20221007-090656.yaml rename to .changes/1.4.0/Docs-20221007-090656.yaml index 1159879a249..070ecd48944 100644 --- a/.changes/unreleased/Docs-20221007-090656.yaml +++ b/.changes/1.4.0/Docs-20221007-090656.yaml @@ -4,4 +4,3 @@ time: 2022-10-07T09:06:56.446078-05:00 custom: Author: stu-k Issue: "5528" - PR: "6022" diff --git a/.changes/1.4.0/Docs-20221017-171411.yaml b/.changes/1.4.0/Docs-20221017-171411.yaml new file mode 100644 index 00000000000..487362c1d5c --- /dev/null +++ b/.changes/1.4.0/Docs-20221017-171411.yaml @@ -0,0 +1,5 @@ +kind: Docs +time: 2022-10-17T17:14:11.715348-05:00 +custom: + Author: paulbenschmidt + Issue: "5880" diff --git 
a/.changes/1.4.0/Docs-20221116-155743.yaml b/.changes/1.4.0/Docs-20221116-155743.yaml new file mode 100644 index 00000000000..84d90a67b99 --- /dev/null +++ b/.changes/1.4.0/Docs-20221116-155743.yaml @@ -0,0 +1,6 @@ +kind: Docs +body: Fix rendering of sample code for metrics +time: 2022-11-16T15:57:43.204201+01:00 +custom: + Author: jtcohen6 + Issue: "323" diff --git a/.changes/1.4.0/Docs-20221202-150523.yaml b/.changes/1.4.0/Docs-20221202-150523.yaml new file mode 100644 index 00000000000..b08a32cddf6 --- /dev/null +++ b/.changes/1.4.0/Docs-20221202-150523.yaml @@ -0,0 +1,6 @@ +kind: Docs +body: Alphabetize `core/dbt/README.md` +time: 2022-12-02T15:05:23.695333-07:00 +custom: + Author: dbeatty10 + Issue: "6368" diff --git a/.changes/1.4.0/Features-20220408-165459.yaml b/.changes/1.4.0/Features-20220408-165459.yaml new file mode 100644 index 00000000000..12cdf74c757 --- /dev/null +++ b/.changes/1.4.0/Features-20220408-165459.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Added favor-state flag to optionally favor state nodes even if unselected node + exists +time: 2022-04-08T16:54:59.696564+01:00 +custom: + Author: daniel-murray josephberni + Issue: "2968" diff --git a/.changes/1.4.0/Features-20220817-154857.yaml b/.changes/1.4.0/Features-20220817-154857.yaml new file mode 100644 index 00000000000..ad53df05a3f --- /dev/null +++ b/.changes/1.4.0/Features-20220817-154857.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. 
+time: 2022-08-17T15:48:57.225267-04:00 +custom: + Author: gshank + Issue: "5610" diff --git a/.changes/1.4.0/Features-20220823-085727.yaml b/.changes/1.4.0/Features-20220823-085727.yaml new file mode 100644 index 00000000000..4d8daebbf5e --- /dev/null +++ b/.changes/1.4.0/Features-20220823-085727.yaml @@ -0,0 +1,7 @@ +kind: Features +body: incremental predicates +time: 2022-08-23T08:57:27.640804-05:00 +custom: + Author: dave-connors-3 + Issue: "5680" + PR: "5702" diff --git a/.changes/unreleased/Features-20220912-125935.yaml b/.changes/1.4.0/Features-20220912-125935.yaml similarity index 92% rename from .changes/unreleased/Features-20220912-125935.yaml rename to .changes/1.4.0/Features-20220912-125935.yaml index b0c1dd41a26..d49f35fd0af 100644 --- a/.changes/unreleased/Features-20220912-125935.yaml +++ b/.changes/1.4.0/Features-20220912-125935.yaml @@ -4,4 +4,3 @@ time: 2022-09-12T12:59:35.121188+01:00 custom: Author: jared-rimmer Issue: "5486" - PR: "5812" diff --git a/.changes/unreleased/Features-20220914-095625.yaml b/.changes/1.4.0/Features-20220914-095625.yaml similarity index 93% rename from .changes/unreleased/Features-20220914-095625.yaml rename to .changes/1.4.0/Features-20220914-095625.yaml index 51828084a0d..d46b1bfa8d8 100644 --- a/.changes/unreleased/Features-20220914-095625.yaml +++ b/.changes/1.4.0/Features-20220914-095625.yaml @@ -4,4 +4,3 @@ time: 2022-09-14T09:56:25.97818-07:00 custom: Author: colin-rogers-dbt Issue: "5521" - PR: "5838" diff --git a/.changes/unreleased/Features-20220925-211651.yaml b/.changes/1.4.0/Features-20220925-211651.yaml similarity index 92% rename from .changes/unreleased/Features-20220925-211651.yaml rename to .changes/1.4.0/Features-20220925-211651.yaml index 0f0f6e84213..d2c1911c720 100644 --- a/.changes/unreleased/Features-20220925-211651.yaml +++ b/.changes/1.4.0/Features-20220925-211651.yaml @@ -4,4 +4,3 @@ time: 2022-09-25T21:16:51.051239654+02:00 custom: Author: pgoslatara Issue: "5929" - PR: "5930" diff --git 
a/.changes/unreleased/Features-20221003-110705.yaml b/.changes/1.4.0/Features-20221003-110705.yaml similarity index 92% rename from .changes/unreleased/Features-20221003-110705.yaml rename to .changes/1.4.0/Features-20221003-110705.yaml index f8142666c3b..637d8be58c6 100644 --- a/.changes/unreleased/Features-20221003-110705.yaml +++ b/.changes/1.4.0/Features-20221003-110705.yaml @@ -4,4 +4,3 @@ time: 2022-10-03T11:07:05.381632-05:00 custom: Author: dave-connors-3 Issue: "5990" - PR: "5991" diff --git a/.changes/1.4.0/Features-20221102-150003.yaml b/.changes/1.4.0/Features-20221102-150003.yaml new file mode 100644 index 00000000000..9d8ba192687 --- /dev/null +++ b/.changes/1.4.0/Features-20221102-150003.yaml @@ -0,0 +1,7 @@ +kind: Features +body: This pulls the profile name from args when constructing a RuntimeConfig in lib.py, + enabling the dbt-server to override the value that's in the dbt_project.yml +time: 2022-11-02T15:00:03.000805-05:00 +custom: + Author: racheldaniel + Issue: "6201" diff --git a/.changes/1.4.0/Features-20221107-105018.yaml b/.changes/1.4.0/Features-20221107-105018.yaml new file mode 100644 index 00000000000..db6a0ab753a --- /dev/null +++ b/.changes/1.4.0/Features-20221107-105018.yaml @@ -0,0 +1,8 @@ +kind: Features +body: Adding tarball install method for packages. Allowing package tarball to be specified + via url in the packages.yaml. 
+time: 2022-11-07T10:50:18.464545-05:00 +custom: + Author: timle2 + Issue: "4205" + PR: "4689" diff --git a/.changes/1.4.0/Features-20221114-185207.yaml b/.changes/1.4.0/Features-20221114-185207.yaml new file mode 100644 index 00000000000..459bc8ce234 --- /dev/null +++ b/.changes/1.4.0/Features-20221114-185207.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Added an md5 function to the base context +time: 2022-11-14T18:52:07.788593+02:00 +custom: + Author: haritamar + Issue: "6246" diff --git a/.changes/1.4.0/Features-20221130-112913.yaml b/.changes/1.4.0/Features-20221130-112913.yaml new file mode 100644 index 00000000000..64832de2f68 --- /dev/null +++ b/.changes/1.4.0/Features-20221130-112913.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Exposures support metrics in lineage +time: 2022-11-30T11:29:13.256034-05:00 +custom: + Author: michelleark + Issue: "6057" diff --git a/.changes/1.4.0/Features-20221206-150704.yaml b/.changes/1.4.0/Features-20221206-150704.yaml new file mode 100644 index 00000000000..47939ea5a79 --- /dev/null +++ b/.changes/1.4.0/Features-20221206-150704.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Add support for Python 3.11 +time: 2022-12-06T15:07:04.753127+01:00 +custom: + Author: joshuataylor MichelleArk jtcohen6 + Issue: "6147" + PR: "6326" diff --git a/.changes/unreleased/Fixes-20220916-104854.yaml b/.changes/1.4.0/Fixes-20220916-104854.yaml similarity index 92% rename from .changes/unreleased/Fixes-20220916-104854.yaml rename to .changes/1.4.0/Fixes-20220916-104854.yaml index 64e76c43a3f..bd9af0469a7 100644 --- a/.changes/unreleased/Fixes-20220916-104854.yaml +++ b/.changes/1.4.0/Fixes-20220916-104854.yaml @@ -4,4 +4,3 @@ time: 2022-09-16T10:48:54.162273-05:00 custom: Author: emmyoop Issue: "3992" - PR: "5868" diff --git a/.changes/unreleased/Fixes-20221010-113218.yaml b/.changes/1.4.0/Fixes-20221010-113218.yaml similarity index 92% rename from .changes/unreleased/Fixes-20221010-113218.yaml rename to .changes/1.4.0/Fixes-20221010-113218.yaml 
index 73f128ec5b7..5b73b8d9ccd 100644 --- a/.changes/unreleased/Fixes-20221010-113218.yaml +++ b/.changes/1.4.0/Fixes-20221010-113218.yaml @@ -4,4 +4,3 @@ time: 2022-10-10T11:32:18.752322-05:00 custom: Author: emmyoop Issue: "6030" - PR: "6038" diff --git a/.changes/unreleased/Fixes-20221011-160715.yaml b/.changes/1.4.0/Fixes-20221011-160715.yaml similarity index 92% rename from .changes/unreleased/Fixes-20221011-160715.yaml rename to .changes/1.4.0/Fixes-20221011-160715.yaml index 273e1398bdd..936546a5232 100644 --- a/.changes/unreleased/Fixes-20221011-160715.yaml +++ b/.changes/1.4.0/Fixes-20221011-160715.yaml @@ -4,4 +4,3 @@ time: 2022-10-11T16:07:15.464093-04:00 custom: Author: chamini2 Issue: "6041" - PR: "6042" diff --git a/.changes/1.4.0/Fixes-20221016-173742.yaml b/.changes/1.4.0/Fixes-20221016-173742.yaml new file mode 100644 index 00000000000..c7b00dddba8 --- /dev/null +++ b/.changes/1.4.0/Fixes-20221016-173742.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Add functors to ensure event types with str-type attributes are initialized + to spec, even when provided non-str type params. 
+time: 2022-10-16T17:37:42.846683-07:00 +custom: + Author: versusfacit + Issue: "5436" diff --git a/.changes/1.4.0/Fixes-20221107-095314.yaml b/.changes/1.4.0/Fixes-20221107-095314.yaml new file mode 100644 index 00000000000..99da9c44522 --- /dev/null +++ b/.changes/1.4.0/Fixes-20221107-095314.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Allow hooks to fail without halting execution flow +time: 2022-11-07T09:53:14.340257-06:00 +custom: + Author: ChenyuLInx + Issue: "5625" diff --git a/.changes/1.4.0/Fixes-20221115-081021.yaml b/.changes/1.4.0/Fixes-20221115-081021.yaml new file mode 100644 index 00000000000..40c81fabacb --- /dev/null +++ b/.changes/1.4.0/Fixes-20221115-081021.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Clarify Error Message for how many models are allowed in a Python file +time: 2022-11-15T08:10:21.527884-05:00 +custom: + Author: justbldwn + Issue: "6245" diff --git a/.changes/1.4.0/Fixes-20221124-163419.yaml b/.changes/1.4.0/Fixes-20221124-163419.yaml new file mode 100644 index 00000000000..010a073269a --- /dev/null +++ b/.changes/1.4.0/Fixes-20221124-163419.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: After this, will be possible to use default values for dbt.config.get +time: 2022-11-24T16:34:19.039512764-03:00 +custom: + Author: devmessias + Issue: "6309" + PR: "6317" diff --git a/.changes/1.4.0/Fixes-20221202-164859.yaml b/.changes/1.4.0/Fixes-20221202-164859.yaml new file mode 100644 index 00000000000..6aad4ced192 --- /dev/null +++ b/.changes/1.4.0/Fixes-20221202-164859.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Use full path for writing manifest +time: 2022-12-02T16:48:59.029519-05:00 +custom: + Author: gshank + Issue: "6055" diff --git a/.changes/1.4.0/Fixes-20221213-112620.yaml b/.changes/1.4.0/Fixes-20221213-112620.yaml new file mode 100644 index 00000000000..a2220f9a920 --- /dev/null +++ b/.changes/1.4.0/Fixes-20221213-112620.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: '[CT-1284] Change Python model default materialization to table' +time: 
2022-12-13T11:26:20.550017-08:00 +custom: + Author: aranke + Issue: "6345" diff --git a/.changes/1.4.0/Fixes-20221214-155307.yaml b/.changes/1.4.0/Fixes-20221214-155307.yaml new file mode 100644 index 00000000000..cb37e0a809c --- /dev/null +++ b/.changes/1.4.0/Fixes-20221214-155307.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Repair a regression which prevented basic logging before the logging subsystem + is completely configured. +time: 2022-12-14T15:53:07.396512-05:00 +custom: + Author: peterallenwebb + Issue: "6434" diff --git a/.changes/unreleased/Under the Hood-20220927-194259.yaml b/.changes/1.4.0/Under the Hood-20220927-194259.yaml similarity index 91% rename from .changes/unreleased/Under the Hood-20220927-194259.yaml rename to .changes/1.4.0/Under the Hood-20220927-194259.yaml index dbd85165e2c..b6cb64b0155 100644 --- a/.changes/unreleased/Under the Hood-20220927-194259.yaml +++ b/.changes/1.4.0/Under the Hood-20220927-194259.yaml @@ -4,4 +4,3 @@ time: 2022-09-27T19:42:59.241433-07:00 custom: Author: max-sixty Issue: "5946" - PR: "5947" diff --git a/.changes/unreleased/Under the Hood-20220929-134406.yaml b/.changes/1.4.0/Under the Hood-20220929-134406.yaml similarity index 93% rename from .changes/unreleased/Under the Hood-20220929-134406.yaml rename to .changes/1.4.0/Under the Hood-20220929-134406.yaml index ce69bdf322a..b0175190747 100644 --- a/.changes/unreleased/Under the Hood-20220929-134406.yaml +++ b/.changes/1.4.0/Under the Hood-20220929-134406.yaml @@ -4,4 +4,3 @@ time: 2022-09-29T13:44:06.275941-04:00 custom: Author: peterallenwebb Issue: "5809" - PR: "5975" diff --git a/.changes/unreleased/Under the Hood-20221005-120310.yaml b/.changes/1.4.0/Under the Hood-20221005-120310.yaml similarity index 92% rename from .changes/unreleased/Under the Hood-20221005-120310.yaml rename to .changes/1.4.0/Under the Hood-20221005-120310.yaml index eb87a14fedc..797be31c319 100644 --- a/.changes/unreleased/Under the Hood-20221005-120310.yaml +++ b/.changes/1.4.0/Under 
the Hood-20221005-120310.yaml @@ -4,4 +4,3 @@ time: 2022-10-05T12:03:10.061263-07:00 custom: Author: max-sixty Issue: "5983" - PR: "5983" diff --git a/.changes/unreleased/Under the Hood-20221007-094627.yaml b/.changes/1.4.0/Under the Hood-20221007-094627.yaml similarity index 91% rename from .changes/unreleased/Under the Hood-20221007-094627.yaml rename to .changes/1.4.0/Under the Hood-20221007-094627.yaml index 950c20577ed..d3a5da61566 100644 --- a/.changes/unreleased/Under the Hood-20221007-094627.yaml +++ b/.changes/1.4.0/Under the Hood-20221007-094627.yaml @@ -4,4 +4,3 @@ time: 2022-10-07T09:46:27.682872-05:00 custom: Author: emmyoop Issue: "6023" - PR: "6024" diff --git a/.changes/unreleased/Under the Hood-20221007-140044.yaml b/.changes/1.4.0/Under the Hood-20221007-140044.yaml similarity index 91% rename from .changes/unreleased/Under the Hood-20221007-140044.yaml rename to .changes/1.4.0/Under the Hood-20221007-140044.yaml index b41e3f6eb5a..971d5a40ce8 100644 --- a/.changes/unreleased/Under the Hood-20221007-140044.yaml +++ b/.changes/1.4.0/Under the Hood-20221007-140044.yaml @@ -4,4 +4,3 @@ time: 2022-10-07T14:00:44.227644-07:00 custom: Author: max-sixty Issue: "6028" - PR: "5978" diff --git a/.changes/unreleased/Under the Hood-20221013-181912.yaml b/.changes/1.4.0/Under the Hood-20221013-181912.yaml similarity index 93% rename from .changes/unreleased/Under the Hood-20221013-181912.yaml rename to .changes/1.4.0/Under the Hood-20221013-181912.yaml index 2f03b9b29ff..4f5218891b4 100644 --- a/.changes/unreleased/Under the Hood-20221013-181912.yaml +++ b/.changes/1.4.0/Under the Hood-20221013-181912.yaml @@ -4,4 +4,3 @@ time: 2022-10-13T18:19:12.167548-04:00 custom: Author: peterallenwebb Issue: "5229" - PR: "6025" diff --git a/.changes/1.4.0/Under the Hood-20221017-151511.yaml b/.changes/1.4.0/Under the Hood-20221017-151511.yaml new file mode 100644 index 00000000000..94f4d27d6de --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221017-151511.yaml @@ -0,0 
+1,6 @@ +kind: Under the Hood +body: Fixed extra whitespace in strings introduced by black. +time: 2022-10-17T15:15:11.499246-05:00 +custom: + Author: luke-bassett + Issue: "1350" diff --git a/.changes/1.4.0/Under the Hood-20221017-155844.yaml b/.changes/1.4.0/Under the Hood-20221017-155844.yaml new file mode 100644 index 00000000000..c46ef040410 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221017-155844.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Clean up string formatting +time: 2022-10-17T15:58:44.676549-04:00 +custom: + Author: eve-johns + Issue: "6068" diff --git a/.changes/1.4.0/Under the Hood-20221028-104837.yaml b/.changes/1.4.0/Under the Hood-20221028-104837.yaml new file mode 100644 index 00000000000..446d4898920 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221028-104837.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Remove the 'root_path' field from most nodes +time: 2022-10-28T10:48:37.687886-04:00 +custom: + Author: gshank + Issue: "6171" diff --git a/.changes/1.4.0/Under the Hood-20221028-110344.yaml b/.changes/1.4.0/Under the Hood-20221028-110344.yaml new file mode 100644 index 00000000000..cbe8dacb3d5 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221028-110344.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Combine certain logging events with different levels +time: 2022-10-28T11:03:44.887836-04:00 +custom: + Author: gshank + Issue: "6173" diff --git a/.changes/1.4.0/Under the Hood-20221108-074550.yaml b/.changes/1.4.0/Under the Hood-20221108-074550.yaml new file mode 100644 index 00000000000..a8fbc7e208b --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221108-074550.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Convert threading tests to pytest +time: 2022-11-08T07:45:50.589147-06:00 +custom: + Author: stu-k + Issue: "5942" diff --git a/.changes/1.4.0/Under the Hood-20221108-115633.yaml b/.changes/1.4.0/Under the Hood-20221108-115633.yaml new file mode 100644 index 00000000000..ea073719cda --- /dev/null +++ 
b/.changes/1.4.0/Under the Hood-20221108-115633.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Convert postgres index tests to pytest +time: 2022-11-08T11:56:33.743042-06:00 +custom: + Author: stu-k + Issue: "5770" diff --git a/.changes/1.4.0/Under the Hood-20221108-133104.yaml b/.changes/1.4.0/Under the Hood-20221108-133104.yaml new file mode 100644 index 00000000000..6829dc097eb --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221108-133104.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Convert use color tests to pytest +time: 2022-11-08T13:31:04.788547-06:00 +custom: + Author: stu-k + Issue: "5771" diff --git a/.changes/1.4.0/Under the Hood-20221116-130037.yaml b/.changes/1.4.0/Under the Hood-20221116-130037.yaml new file mode 100644 index 00000000000..ecdedd6bd2d --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221116-130037.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Add github actions workflow to generate high level CLI API docs +time: 2022-11-16T13:00:37.916202-06:00 +custom: + Author: stu-k + Issue: "5942" diff --git a/.changes/1.4.0/Under the Hood-20221118-145717.yaml b/.changes/1.4.0/Under the Hood-20221118-145717.yaml new file mode 100644 index 00000000000..934cd9dd5cb --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221118-145717.yaml @@ -0,0 +1,8 @@ +kind: Under the Hood +body: Functionality-neutral refactor of event logging system to improve encapsulation + and modularity. 
+time: 2022-11-18T14:57:17.792622-05:00 +custom: + Author: peterallenwebb + Issue: "6139" + PR: "6291" diff --git a/.changes/1.4.0/Under the Hood-20221205-164948.yaml b/.changes/1.4.0/Under the Hood-20221205-164948.yaml new file mode 100644 index 00000000000..579f973955b --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221205-164948.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Consolidate ParsedNode and CompiledNode classes +time: 2022-12-05T16:49:48.563583-05:00 +custom: + Author: gshank + Issue: "6383" + PR: "6384" diff --git a/.changes/1.4.0/Under the Hood-20221206-094015.yaml b/.changes/1.4.0/Under the Hood-20221206-094015.yaml new file mode 100644 index 00000000000..ebcb9999430 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221206-094015.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Prevent doc gen workflow from running on forks +time: 2022-12-06T09:40:15.301984-06:00 +custom: + Author: stu-k + Issue: "6386" + PR: "6390" diff --git a/.changes/1.4.0/Under the Hood-20221206-113053.yaml b/.changes/1.4.0/Under the Hood-20221206-113053.yaml new file mode 100644 index 00000000000..a1f94f68f43 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221206-113053.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Fix intermittent database connection failure in Windows CI test +time: 2022-12-06T11:30:53.166009-07:00 +custom: + Author: MichelleArk dbeatty10 + Issue: "6394" + PR: "6395" diff --git a/.changes/1.4.0/Under the Hood-20221211-214240.yaml b/.changes/1.4.0/Under the Hood-20221211-214240.yaml new file mode 100644 index 00000000000..adeaefba257 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221211-214240.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Refactor and clean up manifest nodes +time: 2022-12-11T21:42:40.560074-05:00 +custom: + Author: gshank + Issue: "6426" + PR: "6427" diff --git a/.changes/1.4.0/Under the Hood-20221213-214106.yaml b/.changes/1.4.0/Under the Hood-20221213-214106.yaml new file mode 100644 index 
00000000000..708c84661d6 --- /dev/null +++ b/.changes/1.4.0/Under the Hood-20221213-214106.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Restore important legacy logging behaviors, following refactor which removed + them +time: 2022-12-13T21:41:06.815133-05:00 +custom: + Author: peterallenwebb + Issue: "6437" diff --git a/.changes/unreleased/Breaking Changes-20221205-141937.yaml b/.changes/unreleased/Breaking Changes-20221205-141937.yaml new file mode 100644 index 00000000000..be840b20a99 --- /dev/null +++ b/.changes/unreleased/Breaking Changes-20221205-141937.yaml @@ -0,0 +1,9 @@ +kind: Breaking Changes +body: Cleaned up exceptions to directly raise in code. Removed use of all exception + functions in the code base and marked them all as deprecated to be removed next + minor release. +time: 2022-12-05T14:19:37.863032-06:00 +custom: + Author: emmyoop + Issue: "6339" + PR: "6347" diff --git a/.changes/unreleased/Features-20220817-154857.yaml b/.changes/unreleased/Features-20220817-154857.yaml deleted file mode 100644 index c8c0cd9c036..00000000000 --- a/.changes/unreleased/Features-20220817-154857.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: Proto logging messages -time: 2022-08-17T15:48:57.225267-04:00 -custom: - Author: gshank - Issue: "5610" - PR: "5643" diff --git a/.changes/unreleased/Fixes-20221117-220320.yaml b/.changes/unreleased/Fixes-20221117-220320.yaml new file mode 100644 index 00000000000..2f71fe213fc --- /dev/null +++ b/.changes/unreleased/Fixes-20221117-220320.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Fix typo in util.py +time: 2022-11-17T22:03:20.4836855+09:00 +custom: + Author: eltociear + Issue: "4904" + PR: "6037" diff --git a/.changes/unreleased/Fixes-20221213-113915.yaml b/.changes/unreleased/Fixes-20221213-113915.yaml new file mode 100644 index 00000000000..b92a2d6cbc9 --- /dev/null +++ b/.changes/unreleased/Fixes-20221213-113915.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: '[CT-1591] Don''t parse empty Python files' +time: 
2022-12-13T11:39:15.818464-08:00 +custom: + Author: aranke + Issue: "6345" diff --git a/.changes/unreleased/Under the Hood-20221219-193435.yaml b/.changes/unreleased/Under the Hood-20221219-193435.yaml new file mode 100644 index 00000000000..82388dbb759 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221219-193435.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Treat dense text blobs as binary for `git grep` +time: 2022-12-19T19:34:35.890275-07:00 +custom: + Author: dbeatty10 + Issue: "6294" diff --git a/.changie.yaml b/.changie.yaml index 0744c5bb9c7..e417244506b 100644 --- a/.changie.yaml +++ b/.changie.yaml @@ -6,19 +6,67 @@ changelogPath: CHANGELOG.md versionExt: md versionFormat: '## dbt-core {{.Version}} - {{.Time.Format "January 02, 2006"}}' kindFormat: '### {{.Kind}}' -changeFormat: '- {{.Body}} ([#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), [#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))' +changeFormat: |- + {{- $IssueList := list }} + {{- $changes := splitList " " $.Custom.Issue }} + {{- range $issueNbr := $changes }} + {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }} + {{- $IssueList = append $IssueList $changeLink }} + {{- end -}} + - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}}) kinds: - label: Breaking Changes - label: Features - label: Fixes - label: Docs - changeFormat: '- {{.Body}} ([dbt-docs/#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-docs/issues/{{.Custom.Issue}}), [dbt-docs/#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-docs/pull/{{.Custom.PR}}))' + changeFormat: |- + {{- $IssueList := list }} + {{- $changes := splitList " " $.Custom.Issue }} + {{- range $issueNbr := $changes }} + {{- $changeLink := "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $issueNbr }} + {{- $IssueList = append $IssueList $changeLink }} + {{- 
end -}} + - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}}) - label: Under the Hood - label: Dependencies - changeFormat: '- {{.Body}} ({{if ne .Custom.Issue ""}}[#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), {{end}}[#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))' + changeFormat: |- + {{- $PRList := list }} + {{- $changes := splitList " " $.Custom.PR }} + {{- range $pullrequest := $changes }} + {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }} + {{- $PRList = append $PRList $changeLink }} + {{- end -}} + - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}}) + skipGlobalChoices: true + additionalChoices: + - key: Author + label: GitHub Username(s) (separated by a single space if multiple) + type: string + minLength: 3 + - key: PR + label: GitHub Pull Request Number (separated by a single space if multiple) + type: string + minLength: 1 - label: Security - changeFormat: '- {{.Body}} ({{if ne .Custom.Issue ""}}[#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), {{end}}[#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))' + changeFormat: |- + {{- $PRList := list }} + {{- $changes := splitList " " $.Custom.PR }} + {{- range $pullrequest := $changes }} + {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }} + {{- $PRList = append $PRList $changeLink }} + {{- end -}} + - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}}) + skipGlobalChoices: true + additionalChoices: + - key: Author + label: GitHub Username(s) (separated by a single space if multiple) + type: string + minLength: 3 + - key: PR + label: GitHub Pull Request Number (separated by a single space if multiple) + type: string + minLength: 1 newlines: 
afterChangelogHeader: 1 @@ -33,42 +81,41 @@ custom: type: string minLength: 3 - key: Issue - label: GitHub Issue Number - type: int - minInt: 1 -- key: PR - label: GitHub Pull Request Number - type: int - minInt: 1 + label: GitHub Issue Number (separated by a single space if multiple) + type: string + minLength: 1 footerFormat: | {{- $contributorDict := dict }} {{- /* any names added to this list should be all lowercase for later matching purposes */}} - {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} + {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} {{- range $change := .Changes }} {{- $authorList := splitList " " $change.Custom.Author }} - {{- /* loop through all authors for a PR */}} + {{- /* loop through all authors for a single changelog */}} {{- range $author := $authorList }} {{- $authorLower := lower $author }} {{- /* we only want to include non-core team contributors */}} {{- if not (has $authorLower $core_team)}} - {{- /* Docs kind link back to dbt-docs instead of dbt-core PRs */}} - {{- $prLink := $change.Kind }} - {{- if eq $change.Kind "Docs" }} - {{- $prLink = "[dbt-docs/#pr](https://github.com/dbt-labs/dbt-docs/pull/pr)" | replace "pr" $change.Custom.PR }} - {{- else }} - {{- $prLink = "[#pr](https://github.com/dbt-labs/dbt-core/pull/pr)" | replace "pr" $change.Custom.PR }} - {{- end }} - {{- /* check if this contributor has other PRs associated with them already */}} - {{- if hasKey $contributorDict $author }} - {{- $prList := get $contributorDict $author }} - {{- $prList = append $prList $prLink }} - {{- $contributorDict := set $contributorDict $author $prList }} - {{- else }} - {{- $prList := list 
$prLink }} - {{- $contributorDict := set $contributorDict $author $prList }} - {{- end }} - {{- end}} + {{- $changeList := splitList " " $change.Custom.Author }} + {{- /* Docs kind link back to dbt-docs instead of dbt-core issues */}} + {{- $changeLink := $change.Kind }} + {{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }} + {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $change.Custom.PR }} + {{- else if eq $change.Kind "Docs"}} + {{- $changeLink = "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $change.Custom.Issue }} + {{- else }} + {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $change.Custom.Issue }} + {{- end }} + {{- /* check if this contributor has other changes associated with them already */}} + {{- if hasKey $contributorDict $author }} + {{- $contributionList := get $contributorDict $author }} + {{- $contributionList = append $contributionList $changeLink }} + {{- $contributorDict := set $contributorDict $author $contributionList }} + {{- else }} + {{- $contributionList := list $changeLink }} + {{- $contributorDict := set $contributorDict $author $contributionList }} + {{- end }} + {{- end}} {{- end}} {{- end }} {{- /* no indentation here for formatting so the final markdown doesn't have unneeded indentations */}} diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000000..ff6cbc4608f --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +core/dbt/include/index.html binary +tests/functional/artifacts/data/state/*/manifest.json binary diff --git a/.github/workflows/bot-changelog.yml b/.github/workflows/bot-changelog.yml index 2d06fafe682..c6d2a1507a3 100644 --- a/.github/workflows/bot-changelog.yml +++ b/.github/workflows/bot-changelog.yml @@ -40,7 +40,7 @@ jobs: matrix: include: - label: "dependencies" - changie_kind: "Dependency" + changie_kind: "Dependencies" - label: "snyk" 
changie_kind: "Security" runs-on: ubuntu-latest @@ -58,4 +58,4 @@ jobs: commit_message: "Add automated changelog yaml from template for bot PR" changie_kind: ${{ matrix.changie_kind }} label: ${{ matrix.label }} - custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n Issue: 4904\n PR: ${{ github.event.pull_request.number }}" + custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n PR: ${{ github.event.pull_request.number }}" diff --git a/.github/workflows/generate-cli-api-docs.yml b/.github/workflows/generate-cli-api-docs.yml new file mode 100644 index 00000000000..bc079499b83 --- /dev/null +++ b/.github/workflows/generate-cli-api-docs.yml @@ -0,0 +1,165 @@ +# **what?** +# On push, if anything in core/dbt/docs or core/dbt/cli has been +# created or modified, regenerate the CLI API docs using sphinx. + +# **why?** +# We watch for changes in core/dbt/cli because the CLI API docs rely on click +# and all supporting flags/params to be generated. We watch for changes in +# core/dbt/docs since any changes to sphinx configuration or any of the +# .rst files there could result in a differently build final index.html file. + +# **when?** +# Whenever a change has been pushed to a branch, and only if there is a diff +# between the PR branch and main's core/dbt/cli and or core/dbt/docs dirs. 
+ +# TODO: add bot comment to PR informing contributor that the docs have been committed +# TODO: figure out why github action triggered pushes cause github to fail to report +# the status of jobs + +name: Generate CLI API docs + +on: + pull_request: + +permissions: + contents: write + pull-requests: write + +env: + CLI_DIR: ${{ github.workspace }}/core/dbt/cli + DOCS_DIR: ${{ github.workspace }}/core/dbt/docs + DOCS_BUILD_DIR: ${{ github.workspace }}/core/dbt/docs/build + +jobs: + check_gen: + name: check if generation needed + runs-on: ubuntu-latest + if: ${{ github.event.pull_request.head.repo.fork == false }} + outputs: + cli_dir_changed: ${{ steps.check_cli.outputs.cli_dir_changed }} + docs_dir_changed: ${{ steps.check_docs.outputs.docs_dir_changed }} + + steps: + - name: "[DEBUG] print variables" + run: | + echo "env.CLI_DIR: ${{ env.CLI_DIR }}" + echo "env.DOCS_BUILD_DIR: ${{ env.DOCS_BUILD_DIR }}" + echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}" + + - name: git checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.head_ref }} + + - name: set shas + id: set_shas + run: | + THIS_SHA=$(git rev-parse @) + LAST_SHA=$(git rev-parse @~1) + + echo "this sha: $THIS_SHA" + echo "last sha: $LAST_SHA" + + echo "this_sha=$THIS_SHA" >> $GITHUB_OUTPUT + echo "last_sha=$LAST_SHA" >> $GITHUB_OUTPUT + + - name: check for changes in core/dbt/cli + id: check_cli + run: | + CLI_DIR_CHANGES=$(git diff \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ steps.set_shas.outputs.this_sha }} \ + -- ${{ env.CLI_DIR }}) + + if [ -n "$CLI_DIR_CHANGES" ]; then + echo "changes found" + echo $CLI_DIR_CHANGES + echo "cli_dir_changed=true" >> $GITHUB_OUTPUT + exit 0 + fi + echo "cli_dir_changed=false" >> $GITHUB_OUTPUT + echo "no changes found" + + - name: check for changes in core/dbt/docs + id: check_docs + if: steps.check_cli.outputs.cli_dir_changed == 'false' + run: | + DOCS_DIR_CHANGES=$(git diff --name-only \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ 
steps.set_shas.outputs.this_sha }} \ + -- ${{ env.DOCS_DIR }} ':!${{ env.DOCS_BUILD_DIR }}') + + DOCS_BUILD_DIR_CHANGES=$(git diff --name-only \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ steps.set_shas.outputs.this_sha }} \ + -- ${{ env.DOCS_BUILD_DIR }}) + + if [ -n "$DOCS_DIR_CHANGES" ] && [ -z "$DOCS_BUILD_DIR_CHANGES" ]; then + echo "changes found" + echo $DOCS_DIR_CHANGES + echo "docs_dir_changed=true" >> $GITHUB_OUTPUT + exit 0 + fi + echo "docs_dir_changed=false" >> $GITHUB_OUTPUT + echo "no changes found" + + gen_docs: + name: generate docs + runs-on: ubuntu-latest + needs: [check_gen] + if: | + needs.check_gen.outputs.cli_dir_changed == 'true' + || needs.check_gen.outputs.docs_dir_changed == 'true' + + steps: + - name: "[DEBUG] print variables" + run: | + echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}" + echo "github head_ref: ${{ github.head_ref }}" + + - name: git checkout + uses: actions/checkout@v3 + with: + ref: ${{ github.head_ref }} + + - name: install python + uses: actions/setup-python@v4.3.0 + with: + python-version: 3.8 + + - name: install dev requirements + run: | + python3 -m venv env + source env/bin/activate + python -m pip install --upgrade pip + pip install -r requirements.txt -r dev-requirements.txt + + - name: generate docs + run: | + source env/bin/activate + cd ${{ env.DOCS_DIR }} + + echo "cleaning existing docs" + make clean + + echo "creating docs" + make html + + - name: debug + run: | + echo ">>>>> status" + git status + echo ">>>>> remotes" + git remote -v + echo ">>>>> branch" + git branch -v + echo ">>>>> log" + git log --pretty=oneline | head -5 + + - name: commit docs + run: | + git config user.name 'Github Build Bot' + git config user.email 'buildbot@fishtownanalytics.com' + git commit -am "Add generated CLI API docs" + git push -u origin ${{ github.head_ref }} diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 257935419c8..8138b730d34 100644 --- a/.github/workflows/main.yml +++ 
b/.github/workflows/main.yml @@ -73,7 +73,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] env: TOXENV: "unit" @@ -118,7 +118,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] os: [ubuntu-20.04] include: - python-version: 3.8 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index f09533b8b36..d902340a91b 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,13 +9,4 @@ permissions: jobs: stale: - runs-on: ubuntu-latest - steps: - # pinned at v4 (https://github.com/actions/stale/releases/tag/v4.0.0) - - uses: actions/stale@cdf15f641adb27a71842045a94023bef6945e3aa - with: - stale-issue-message: "This issue has been marked as Stale because it has been open for 180 days with no activity. If you would like the issue to remain open, please remove the stale label or comment on the issue, or it will be closed in 7 days." - stale-pr-message: "This PR has been marked as Stale because it has been open for 180 days with no activity. If you would like the PR to remain open, please remove the stale label or comment on the PR, or it will be closed in 7 days." - close-issue-message: "Although we are closing this issue as stale, it's not gone forever. Issues can be reopened if there is renewed community interest; add a comment to notify the maintainers." 
- # mark issues/PRs stale when they haven't seen activity in 180 days - days-before-stale: 180 + uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main diff --git a/.gitignore b/.gitignore index ac91d49c9c4..dc9996305d3 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ __pycache__/ env*/ dbt_env/ build/ +!core/dbt/docs/build develop-eggs/ dist/ downloads/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6877497ae37..ce9847cf454 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # Eventually the hooks described here will be run as tests before merging each PR. # TODO: remove global exclusion of tests when testing overhaul is complete -exclude: ^test/ +exclude: ^(test/|core/dbt/docs/build/) # Force all unspecified python hooks to run python 3.8 default_language_version: diff --git a/CHANGELOG.md b/CHANGELOG.md index 039de921800..4a91696f68b 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,96 @@ - "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version. - Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry) +## dbt-core 1.4.0-b1 - December 15, 2022 + +### Features + +- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. 
([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) +- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521)) +- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) +- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) + +### Fixes + +- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992)) +- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030)) +- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. 
([#5436](https://github.com/dbt-labs/dbt-core/issues/5436)) +- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625)) +- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) +- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345)) +- Repair a regression which prevented basic logging before the logging subsystem is completely configured. ([#6434](https://github.com/dbt-labs/dbt-core/issues/6434)) + +### Docs + +- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528)) +- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323)) +- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368)) + +### Under the Hood + +- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946)) +- Added flat_graph attribute the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809)) +- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983)) +- Exception message cleanup. 
([#6023](https://github.com/dbt-labs/dbt-core/issues/6023)) +- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229)) +- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- Fixed extra whitespace in strings introduced by black. ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171)) +- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173)) +- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) +- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770)) +- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771)) +- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) +- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. 
([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) +- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) +- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) +- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) +- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) + +### Dependencies + +- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) +- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) +- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) +- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) +- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) + +### Contributors +- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- [@devmessias](https://github.com/devmessias) 
([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) + ## Previous Releases diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index efbb0a726ad..3bbd8d14d5f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -56,7 +56,7 @@ There are some tools that will be helpful to you in developing locally. 
While th These are the tools used in `dbt-core` development and testing: -- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, 3.8, 3.9, and 3.10 +- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, 3.8, 3.9, 3.10 and 3.11 - [`pytest`](https://docs.pytest.org/en/latest/) to define, discover, and run tests - [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting - [`black`](https://github.com/psf/black) for code formatting @@ -160,7 +160,7 @@ suites. #### `tox` -[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and install dependencies in order to run tests. You can also run tests in parallel, for example, you can run unit tests for Python 3.7, Python 3.8, Python 3.9, and Python 3.10 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests in located in `tox.ini`. +[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and install dependencies in order to run tests. You can also run tests in parallel, for example, you can run unit tests for Python 3.7, Python 3.8, Python 3.9, Python 3.10 and Python 3.11 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests in located in `tox.ini`. #### `pytest` @@ -201,13 +201,21 @@ Here are some general rules for adding tests: * Sometimes flake8 complains about lines that are actually fine, in which case you can put a comment on the line such as: # noqa or # noqa: ANNN, where ANNN is the error code that flake8 issues. * To collect output for `CProfile`, run dbt with the `-r` option and the name of an output file, i.e. `dbt -r dbt.cprof run`. 
If you just want to profile parsing, you can do: `dbt -r dbt.cprof parse`. `pip` install `snakeviz` to view the output. Run `snakeviz dbt.cprof` and output will be rendered in a browser window. -## Adding a CHANGELOG Entry +## Adding or modifying a CHANGELOG Entry We use [changie](https://changie.dev) to generate `CHANGELOG` entries. **Note:** Do not edit the `CHANGELOG.md` directly. Your modifications will be lost. Follow the steps to [install `changie`](https://changie.dev/guide/installation/) for your system. -Once changie is installed and your PR is created, simply run `changie new` and changie will walk you through the process of creating a changelog entry. Commit the file that's created and your changelog entry is complete! +Once changie is installed and your PR is created for a new feature, simply run the following command and changie will walk you through the process of creating a changelog entry: + +```shell +changie new +``` + +Commit the file that's created and your changelog entry is complete! + +If you are contributing to a feature already in progress, you will modify the changie yaml file in dbt/.changes/unreleased/ related to your change. If you need help finding this file, please ask within the discussion for the pull request! You don't need to worry about which `dbt-core` version your change will go into. Just create the changelog entry with `changie`, and open your PR against the `main` branch. All merged changes will be included in the next minor version of `dbt-core`. The Core maintainers _may_ choose to "backport" specific changes in order to patch older minor versions. In that case, a maintainer will take care of that backport after merging your PR, before releasing the new version of `dbt-core`. 
diff --git a/Dockerfile.test b/Dockerfile.test index eb6ba824bcb..b5a373270dd 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -49,6 +49,9 @@ RUN apt-get update \ python3.10 \ python3.10-dev \ python3.10-venv \ + python3.11 \ + python3.11-dev \ + python3.11-venv \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* diff --git a/core/dbt/README.md b/core/dbt/README.md index 5886bf37525..79123a95f47 100644 --- a/core/dbt/README.md +++ b/core/dbt/README.md @@ -2,50 +2,59 @@ ## The following are individual files in this directory. -### deprecations.py - -### flags.py +### compilation.py -### main.py +### constants.py -### tracking.py +### dataclass_schema.py -### version.py +### deprecations.py -### lib.py +### exceptions.py -### node_types.py +### flags.py ### helper_types.py +### hooks.py + +### lib.py + ### links.py -### semver.py +### logger.py -### ui.py +### main.py -### compilation.py +### node_types.py -### dataclass_schema.py +### profiler.py -### exceptions.py +### selected_resources.py -### hooks.py +### semver.py -### logger.py +### tracking.py -### profiler.py +### ui.py ### utils.py +### version.py + ## The subdirectories will be documented in a README in the subdirectory -* config -* include * adapters +* cli +* clients +* config * context +* contracts * deps +* docs +* events * graph +* include +* parser * task -* clients -* events +* tests diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py index 5fd3769aa74..577cdf6d9a6 100644 --- a/core/dbt/adapters/base/connections.py +++ b/core/dbt/adapters/base/connections.py @@ -41,13 +41,14 @@ from dbt.events.types import ( NewConnection, ConnectionReused, + ConnectionLeftOpenInCleanup, ConnectionLeftOpen, - ConnectionLeftOpen2, + ConnectionClosedInCleanup, ConnectionClosed, - ConnectionClosed2, Rollback, RollbackFailed, ) +from dbt.events.contextvars import get_node_info from dbt import flags from dbt.utils import cast_to_str @@ -169,7 +170,9 @@ def 
set_connection_name(self, name: Optional[str] = None) -> Connection: if conn.name == conn_name and conn.state == "open": return conn - fire_event(NewConnection(conn_name=conn_name, conn_type=self.TYPE)) + fire_event( + NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info()) + ) if conn.state == "open": fire_event(ConnectionReused(conn_name=conn_name)) @@ -306,9 +309,9 @@ def cleanup_all(self) -> None: with self.lock: for connection in self.thread_connections.values(): if connection.state not in {"closed", "init"}: - fire_event(ConnectionLeftOpen(conn_name=cast_to_str(connection.name))) + fire_event(ConnectionLeftOpenInCleanup(conn_name=cast_to_str(connection.name))) else: - fire_event(ConnectionClosed(conn_name=cast_to_str(connection.name))) + fire_event(ConnectionClosedInCleanup(conn_name=cast_to_str(connection.name))) self.close(connection) # garbage collect these connections @@ -336,7 +339,9 @@ def _rollback_handle(cls, connection: Connection) -> None: except Exception: fire_event( RollbackFailed( - conn_name=cast_to_str(connection.name), exc_info=traceback.format_exc() + conn_name=cast_to_str(connection.name), + exc_info=traceback.format_exc(), + node_info=get_node_info(), ) ) @@ -345,10 +350,16 @@ def _close_handle(cls, connection: Connection) -> None: """Perform the actual close operation.""" # On windows, sometimes connection handles don't have a close() attr. 
if hasattr(connection.handle, "close"): - fire_event(ConnectionClosed2(conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionClosed(conn_name=cast_to_str(connection.name), node_info=get_node_info()) + ) connection.handle.close() else: - fire_event(ConnectionLeftOpen2(conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionLeftOpen( + conn_name=cast_to_str(connection.name), node_info=get_node_info() + ) + ) @classmethod def _rollback(cls, connection: Connection) -> None: @@ -359,7 +370,7 @@ def _rollback(cls, connection: Connection) -> None: f'"{connection.name}", but it does not have one open!' ) - fire_event(Rollback(conn_name=cast_to_str(connection.name))) + fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info())) cls._rollback_handle(connection) connection.transaction_open = False @@ -371,7 +382,7 @@ def close(cls, connection: Connection) -> Connection: return connection if connection.transaction_open and connection.handle: - fire_event(Rollback(conn_name=cast_to_str(connection.name))) + fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info())) cls._rollback_handle(connection) connection.transaction_open = False diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 3c301c2e7f4..64ebbeac5dd 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -15,7 +15,6 @@ List, Mapping, Iterator, - Union, Set, ) @@ -23,13 +22,20 @@ import pytz from dbt.exceptions import ( - raise_database_error, - raise_compiler_error, - invalid_type_error, - get_relation_returned_multiple_results, InternalException, + InvalidMacroArgType, + InvalidMacroResult, + InvalidQuoteConfigType, NotImplementedException, + NullRelationCacheAttempted, + NullRelationDropAttempted, + RelationReturnedMultipleResults, + RenameToNoneAttempted, RuntimeException, + SnapshotTargetIncomplete, + SnapshotTargetNotSnapshotTable, + UnexpectedNull, + UnexpectedNonTimestamp, ) 
from dbt.adapters.protocol import ( @@ -38,16 +44,15 @@ ) from dbt.clients.agate_helper import empty_table, merge_tables, table_from_rows from dbt.clients.jinja import MacroGenerator -from dbt.contracts.graph.compiled import CompileResultNode, CompiledSeedNode from dbt.contracts.graph.manifest import Manifest, MacroManifest -from dbt.contracts.graph.parsed import ParsedSeedNode -from dbt.exceptions import warn_or_error -from dbt.events.functions import fire_event +from dbt.contracts.graph.nodes import ResultNode +from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( CacheMiss, ListRelations, CodeExecution, CodeExecutionStatus, + CatalogGenerationError, ) from dbt.utils import filter_null_values, executor, cast_to_str @@ -64,9 +69,6 @@ from dbt.adapters.cache import RelationsCache, _make_ref_key_msg -SeedModel = Union[ParsedSeedNode, CompiledSeedNode] - - GET_CATALOG_MACRO_NAME = "get_catalog" FRESHNESS_MACRO_NAME = "collect_freshness" @@ -102,18 +104,10 @@ def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datet assume the datetime is already for UTC and add the timezone. 
""" if dt is None: - raise raise_database_error( - "Expected a non-null value when querying field '{}' of table " - " {} but received value 'null' instead".format(field_name, source) - ) + raise UnexpectedNull(field_name, source) elif not hasattr(dt, "tzinfo"): - raise raise_database_error( - "Expected a timestamp value when querying field '{}' of table " - "{} but received value of type '{}' instead".format( - field_name, source, type(dt).__name__ - ) - ) + raise UnexpectedNonTimestamp(field_name, source, dt) elif dt.tzinfo: return dt.astimezone(pytz.UTC) @@ -243,9 +237,7 @@ def nice_connection_name(self) -> str: return conn.name @contextmanager - def connection_named( - self, name: str, node: Optional[CompileResultNode] = None - ) -> Iterator[None]: + def connection_named(self, name: str, node: Optional[ResultNode] = None) -> Iterator[None]: try: if self.connections.query_header is not None: self.connections.query_header.set(name, node) @@ -257,7 +249,7 @@ def connection_named( self.connections.query_header.reset() @contextmanager - def connection_for(self, node: CompileResultNode) -> Iterator[None]: + def connection_for(self, node: ResultNode) -> Iterator[None]: with self.connection_named(node.unique_id, node): yield @@ -372,7 +364,7 @@ def _get_catalog_schemas(self, manifest: Manifest) -> SchemaSearchMap: lowercase strings. """ info_schema_name_map = SchemaSearchMap() - nodes: Iterator[CompileResultNode] = chain( + nodes: Iterator[ResultNode] = chain( [ node for node in manifest.nodes.values() @@ -441,7 +433,7 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str: """Cache a new relation in dbt. 
It will show up in `list relations`.""" if relation is None: name = self.nice_connection_name() - raise_compiler_error("Attempted to cache a null relation for {}".format(name)) + raise NullRelationCacheAttempted(name) self.cache.add(relation) # so jinja doesn't render things return "" @@ -453,7 +445,7 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str: """ if relation is None: name = self.nice_connection_name() - raise_compiler_error("Attempted to drop a null relation for {}".format(name)) + raise NullRelationDropAttempted(name) self.cache.drop(relation) return "" @@ -470,9 +462,7 @@ def cache_renamed( name = self.nice_connection_name() src_name = _relation_name(from_relation) dst_name = _relation_name(to_relation) - raise_compiler_error( - "Attempted to rename {} to {} for {}".format(src_name, dst_name, name) - ) + raise RenameToNoneAttempted(src_name, dst_name, name) self.cache.rename(from_relation, to_relation) return "" @@ -622,7 +612,7 @@ def get_missing_columns( to_relation. """ if not isinstance(from_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="get_missing_columns", arg_name="from_relation", got_value=from_relation, @@ -630,7 +620,7 @@ def get_missing_columns( ) if not isinstance(to_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="get_missing_columns", arg_name="to_relation", got_value=to_relation, @@ -655,7 +645,7 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: incorrect. 
""" if not isinstance(relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="valid_snapshot_target", arg_name="relation", got_value=relation, @@ -676,24 +666,16 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: if missing: if extra: - msg = ( - 'Snapshot target has ("{}") but not ("{}") - is it an ' - "unmigrated previous version archive?".format( - '", "'.join(extra), '", "'.join(missing) - ) - ) + raise SnapshotTargetIncomplete(extra, missing) else: - msg = 'Snapshot target is not a snapshot table (missing "{}")'.format( - '", "'.join(missing) - ) - raise_compiler_error(msg) + raise SnapshotTargetNotSnapshotTable(missing) @available.parse_none def expand_target_column_types( self, from_relation: BaseRelation, to_relation: BaseRelation ) -> None: if not isinstance(from_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="expand_target_column_types", arg_name="from_relation", got_value=from_relation, @@ -701,7 +683,7 @@ def expand_target_column_types( ) if not isinstance(to_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="expand_target_column_types", arg_name="to_relation", got_value=to_relation, @@ -783,7 +765,7 @@ def get_relation(self, database: str, schema: str, identifier: str) -> Optional[ "schema": schema, "database": database, } - get_relation_returned_multiple_results(kwargs, matches) + raise RelationReturnedMultipleResults(kwargs, matches) elif matches: return matches[0] @@ -847,10 +829,7 @@ def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str: elif quote_config is None: pass else: - raise_compiler_error( - f'The seed configuration value of "quote_columns" has an ' - f"invalid type {type(quote_config)}" - ) + raise InvalidQuoteConfigType(quote_config) if quote_columns: return self.quote(column) @@ -1100,11 +1079,7 @@ def calculate_freshness( # now we have a 1-row table of the maximum `loaded_at_field` 
value and # the current time according to the db. if len(table) != 1 or len(table[0]) != 2: - raise_compiler_error( - 'Got an invalid result from "{}" macro: {}'.format( - FRESHNESS_MACRO_NAME, [tuple(r) for r in table] - ) - ) + raise InvalidMacroResult(FRESHNESS_MACRO_NAME, table) if table[0][0] is None: # no records in the table, so really the max_loaded_at was # infinitely long ago. Just call it 0:00 January 1 year UTC @@ -1327,7 +1302,7 @@ def catch_as_completed( elif isinstance(exc, KeyboardInterrupt) or not isinstance(exc, Exception): raise exc else: - warn_or_error(f"Encountered an error while generating catalog: {str(exc)}") + warn_or_error(CatalogGenerationError(exc=str(exc))) # exc is not None, derives from Exception, and isn't ctrl+c exceptions.append(exc) return merge_tables(tables), exceptions diff --git a/core/dbt/adapters/base/query_headers.py b/core/dbt/adapters/base/query_headers.py index 26f34be9c93..dd88fdb2d41 100644 --- a/core/dbt/adapters/base/query_headers.py +++ b/core/dbt/adapters/base/query_headers.py @@ -5,7 +5,7 @@ from dbt.context.manifest import generate_query_header_context from dbt.contracts.connection import AdapterRequiredConfig, QueryComment -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest from dbt.exceptions import RuntimeException @@ -90,7 +90,7 @@ def add(self, sql: str) -> str: def reset(self): self.set("master", None) - def set(self, name: str, node: Optional[CompileResultNode]): + def set(self, name: str, node: Optional[ResultNode]): wrapped: Optional[NodeWrapper] = None if node is not None: wrapped = NodeWrapper(node) diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index 3124384975a..5bc0c56b264 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -1,9 +1,8 @@ from collections.abc import Hashable -from dataclasses import dataclass -from 
typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set +from dataclasses import dataclass, field +from typing import Optional, TypeVar, Any, Type, Dict, Iterator, Tuple, Set -from dbt.contracts.graph.compiled import CompiledNode -from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode +from dbt.contracts.graph.nodes import SourceDefinition, ManifestNode, ResultNode, ParsedNode from dbt.contracts.relation import ( RelationType, ComponentName, @@ -12,7 +11,7 @@ Policy, Path, ) -from dbt.exceptions import InternalException +from dbt.exceptions import ApproximateMatch, InternalException, MultipleDatabasesNotAllowed from dbt.node_types import NodeType from dbt.utils import filter_null_values, deep_merge, classproperty @@ -27,8 +26,10 @@ class BaseRelation(FakeAPIObject, Hashable): path: Path type: Optional[RelationType] = None quote_character: str = '"' - include_policy: Policy = Policy() - quote_policy: Policy = Policy() + # Python 3.11 requires that these use default_factory instead of simple default + # ValueError: mutable default for field include_policy is not allowed: use default_factory + include_policy: Policy = field(default_factory=lambda: Policy()) + quote_policy: Policy = field(default_factory=lambda: Policy()) dbt_created: bool = False def _is_exactish_match(self, field: ComponentName, value: str) -> bool: @@ -39,9 +40,9 @@ def _is_exactish_match(self, field: ComponentName, value: str) -> bool: @classmethod def _get_field_named(cls, field_name): - for field, _ in cls._get_fields(): - if field.name == field_name: - return field + for f, _ in cls._get_fields(): + if f.name == field_name: + return f # this should be unreachable raise ValueError(f"BaseRelation has no {field_name} field!") @@ -52,11 +53,11 @@ def __eq__(self, other): @classmethod def get_default_quote_policy(cls) -> Policy: - return cls._get_field_named("quote_policy").default + return cls._get_field_named("quote_policy").default_factory() @classmethod 
def get_default_include_policy(cls) -> Policy: - return cls._get_field_named("include_policy").default + return cls._get_field_named("include_policy").default_factory() def get(self, key, default=None): """Override `.get` to return a metadata object so we don't break @@ -99,7 +100,7 @@ def matches( if approximate_match and not exact_match: target = self.create(database=database, schema=schema, identifier=identifier) - dbt.exceptions.approximate_relation_match(target, self) + raise ApproximateMatch(target, self) return exact_match @@ -184,7 +185,7 @@ def quoted(self, identifier): ) @classmethod - def create_from_source(cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any) -> Self: + def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self: source_quoting = source.quoting.to_dict(omit_none=True) source_quoting.pop("column", None) quote_policy = deep_merge( @@ -209,7 +210,7 @@ def add_ephemeral_prefix(name: str): def create_ephemeral_from_node( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, CompiledNode], + node: ManifestNode, ) -> Self: # Note that ephemeral models are based on the name. 
identifier = cls.add_ephemeral_prefix(node.name) @@ -222,7 +223,7 @@ def create_ephemeral_from_node( def create_from_node( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, CompiledNode], + node: ManifestNode, quote_policy: Optional[Dict[str, bool]] = None, **kwargs: Any, ) -> Self: @@ -243,20 +244,20 @@ def create_from_node( def create_from( cls: Type[Self], config: HasQuoting, - node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition], + node: ResultNode, **kwargs: Any, ) -> Self: if node.resource_type == NodeType.Source: - if not isinstance(node, ParsedSourceDefinition): + if not isinstance(node, SourceDefinition): raise InternalException( - "type mismatch, expected ParsedSourceDefinition but got {}".format(type(node)) + "type mismatch, expected SourceDefinition but got {}".format(type(node)) ) return cls.create_from_source(node, **kwargs) else: - if not isinstance(node, (ParsedNode, CompiledNode)): + # Can't use ManifestNode here because of parameterized generics + if not isinstance(node, (ParsedNode)): raise InternalException( - "type mismatch, expected ParsedNode or CompiledNode but " - "got {}".format(type(node)) + f"type mismatch, expected ManifestNode but got {type(node)}" ) return cls.create_from_node(config, node, **kwargs) @@ -437,7 +438,7 @@ def flatten(self, allow_multiple_databases: bool = False): if not allow_multiple_databases: seen = {r.database.lower() for r in self if r.database} if len(seen) > 1: - dbt.exceptions.raise_compiler_error(str(seen)) + raise MultipleDatabasesNotAllowed(seen) for information_schema_name, schema in self.search(): path = {"database": information_schema_name.database, "schema": schema} diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py index 6c60039f262..90c4cab27fb 100644 --- a/core/dbt/adapters/cache.py +++ b/core/dbt/adapters/cache.py @@ -1,4 +1,3 @@ -import re import threading from copy import deepcopy from typing import Any, Dict, Iterable, List, Optional, Set, Tuple @@ -9,7 +8,13 
@@ _make_msg_from_ref_key, _ReferenceKey, ) -import dbt.exceptions +from dbt.exceptions import ( + DependentLinkNotCached, + NewNameAlreadyInCache, + NoneRelationFound, + ReferencedLinkNotCached, + TruncatedModelNameCausedCollision, +) from dbt.events.functions import fire_event, fire_event_if from dbt.events.types import ( AddLink, @@ -150,11 +155,7 @@ def rename_key(self, old_key, new_key): :raises InternalError: If the new key already exists. """ if new_key in self.referenced_by: - dbt.exceptions.raise_cache_inconsistent( - 'in rename of "{}" -> "{}", new name is in the cache already'.format( - old_key, new_key - ) - ) + raise NewNameAlreadyInCache(old_key, new_key) if old_key not in self.referenced_by: return @@ -270,15 +271,11 @@ def _add_link(self, referenced_key, dependent_key): if referenced is None: return if referenced is None: - dbt.exceptions.raise_cache_inconsistent( - "in add_link, referenced link key {} not in cache!".format(referenced_key) - ) + raise ReferencedLinkNotCached(referenced_key) dependent = self.relations.get(dependent_key) if dependent is None: - dbt.exceptions.raise_cache_inconsistent( - "in add_link, dependent link key {} not in cache!".format(dependent_key) - ) + raise DependentLinkNotCached(dependent_key) assert dependent is not None # we just raised! @@ -430,24 +427,7 @@ def _check_rename_constraints(self, old_key, new_key): if new_key in self.relations: # Tell user when collision caused by model names truncated during # materialization. - match = re.search("__dbt_backup|__dbt_tmp$", new_key.identifier) - if match: - truncated_model_name_prefix = new_key.identifier[: match.start()] - message_addendum = ( - "\n\nName collisions can occur when the length of two " - "models' names approach your database's builtin limit. " - "Try restructuring your project such that no two models " - "share the prefix '{}'.".format(truncated_model_name_prefix) - + " Then, clean your warehouse of any removed models." 
- ) - else: - message_addendum = "" - - dbt.exceptions.raise_cache_inconsistent( - "in rename, new key {} already in cache: {}{}".format( - new_key, list(self.relations.keys()), message_addendum - ) - ) + raise TruncatedModelNameCausedCollision(new_key, self.relations) if old_key not in self.relations: fire_event(TemporaryRelation(key=_make_msg_from_ref_key(old_key))) @@ -505,9 +485,7 @@ def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[ ] if None in results: - dbt.exceptions.raise_cache_inconsistent( - "in get_relations, a None relation was found in the cache!" - ) + raise NoneRelationFound() return results def clear(self): diff --git a/core/dbt/adapters/protocol.py b/core/dbt/adapters/protocol.py index f17c2bd6f45..13b9bd79968 100644 --- a/core/dbt/adapters/protocol.py +++ b/core/dbt/adapters/protocol.py @@ -8,7 +8,6 @@ Generic, TypeVar, Tuple, - Union, Dict, Any, ) @@ -17,8 +16,7 @@ import agate from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse -from dbt.contracts.graph.compiled import CompiledNode, ManifestNode, NonSourceCompiledNode -from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition +from dbt.contracts.graph.nodes import ResultNode, ManifestNode from dbt.contracts.graph.model_config import BaseConfig from dbt.contracts.graph.manifest import Manifest from dbt.contracts.relation import Policy, HasQuoting @@ -48,11 +46,7 @@ def get_default_quote_policy(cls) -> Policy: ... @classmethod - def create_from( - cls: Type[Self], - config: HasQuoting, - node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition], - ) -> Self: + def create_from(cls: Type[Self], config: HasQuoting, node: ResultNode) -> Self: ... @@ -65,7 +59,7 @@ def compile_node( node: ManifestNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, - ) -> NonSourceCompiledNode: + ) -> ManifestNode: ... 
diff --git a/core/dbt/adapters/sql/connections.py b/core/dbt/adapters/sql/connections.py index f8928a37651..bc1a562ad86 100644 --- a/core/dbt/adapters/sql/connections.py +++ b/core/dbt/adapters/sql/connections.py @@ -10,6 +10,7 @@ from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse from dbt.events.functions import fire_event from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus +from dbt.events.contextvars import get_node_info from dbt.utils import cast_to_str @@ -56,7 +57,13 @@ def add_query( connection = self.get_thread_connection() if auto_begin and connection.transaction_open is False: self.begin() - fire_event(ConnectionUsed(conn_type=self.TYPE, conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionUsed( + conn_type=self.TYPE, + conn_name=cast_to_str(connection.name), + node_info=get_node_info(), + ) + ) with self.exception_handler(sql): if abridge_sql_log: @@ -64,7 +71,11 @@ def add_query( else: log_sql = sql - fire_event(SQLQuery(conn_name=cast_to_str(connection.name), sql=log_sql)) + fire_event( + SQLQuery( + conn_name=cast_to_str(connection.name), sql=log_sql, node_info=get_node_info() + ) + ) pre = time.time() cursor = connection.handle.cursor() @@ -72,7 +83,9 @@ def add_query( fire_event( SQLQueryStatus( - status=str(self.get_response(cursor)), elapsed=round((time.time() - pre), 2) + status=str(self.get_response(cursor)), + elapsed=round((time.time() - pre)), + node_info=get_node_info(), ) ) @@ -156,7 +169,7 @@ def commit(self): "it does not have one open!".format(connection.name) ) - fire_event(SQLCommit(conn_name=connection.name)) + fire_event(SQLCommit(conn_name=connection.name, node_info=get_node_info())) self.add_commit_query() connection.transaction_open = False diff --git a/core/dbt/adapters/sql/impl.py b/core/dbt/adapters/sql/impl.py index 20241d9e53d..4606b046f54 100644 --- a/core/dbt/adapters/sql/impl.py +++ b/core/dbt/adapters/sql/impl.py @@ -1,9 +1,8 @@ import agate from 
typing import Any, Optional, Tuple, Type, List -import dbt.clients.agate_helper from dbt.contracts.connection import Connection -import dbt.exceptions +from dbt.exceptions import RelationTypeNull from dbt.adapters.base import BaseAdapter, available from dbt.adapters.cache import _make_ref_key_msg from dbt.adapters.sql import SQLConnectionManager @@ -132,9 +131,7 @@ def alter_column_type(self, relation, column_name, new_column_type) -> None: def drop_relation(self, relation): if relation.type is None: - dbt.exceptions.raise_compiler_error( - "Tried to drop relation {}, but its type is null.".format(relation) - ) + raise RelationTypeNull(relation) self.cache_dropped(relation) self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation}) diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index ce160fb8011..5292f795665 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -62,7 +62,6 @@ def invoke(self, args: List[str]) -> Tuple[Optional[List], bool]: @p.cache_selected_only @p.debug @p.enable_legacy_logger -@p.event_buffer_size @p.fail_fast @p.log_cache_events @p.log_format diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index a4119426895..7795fb9d218 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -80,14 +80,6 @@ hidden=True, ) -event_buffer_size = click.option( - "--event-buffer-size", - envvar="DBT_EVENT_BUFFER_SIZE", - help="Sets the max number of events to buffer in EVENT_HISTORY.", - default=100000, - type=click.INT, -) - exclude = click.option("--exclude", envvar=None, help="Specify the nodes to exclude.") fail_fast = click.option( diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py index c1ef31acf44..fa74a317649 100644 --- a/core/dbt/clients/_jinja_blocks.py +++ b/core/dbt/clients/_jinja_blocks.py @@ -1,7 +1,15 @@ import re from collections import namedtuple -import dbt.exceptions +from dbt.exceptions import ( + BlockDefinitionNotAtTop, + InternalException, + 
MissingCloseTag, + MissingControlFlowStartTag, + NestedTags, + UnexpectedControlFlowEndTag, + UnexpectedMacroEOF, +) def regex(pat): @@ -139,10 +147,7 @@ def _first_match(self, *patterns, **kwargs): def _expect_match(self, expected_name, *patterns, **kwargs): match = self._first_match(*patterns, **kwargs) if match is None: - msg = 'unexpected EOF, expected {}, got "{}"'.format( - expected_name, self.data[self.pos :] - ) - dbt.exceptions.raise_compiler_error(msg) + raise UnexpectedMacroEOF(expected_name, self.data[self.pos :]) return match def handle_expr(self, match): @@ -256,7 +261,7 @@ def find_tags(self): elif block_type_name is not None: yield self.handle_tag(match) else: - raise dbt.exceptions.InternalException( + raise InternalException( "Invalid regex match in next_block, expected block start, " "expr start, or comment start" ) @@ -265,13 +270,6 @@ def __iter__(self): return self.find_tags() -duplicate_tags = ( - "Got nested tags: {outer.block_type_name} (started at {outer.start}) did " - "not have a matching {{% end{outer.block_type_name} %}} before a " - "subsequent {inner.block_type_name} was found (started at {inner.start})" -) - - _CONTROL_FLOW_TAGS = { "if": "endif", "for": "endfor", @@ -319,33 +317,16 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): found = self.stack.pop() else: expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name] - dbt.exceptions.raise_compiler_error( - ( - "Got an unexpected control flow end tag, got {} but " - "never saw a preceeding {} (@ {})" - ).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start)) - ) + raise UnexpectedControlFlowEndTag(tag, expected, self.tag_parser) expected = _CONTROL_FLOW_TAGS[found] if expected != tag.block_type_name: - dbt.exceptions.raise_compiler_error( - ( - "Got an unexpected control flow end tag, got {} but " - "expected {} next (@ {})" - ).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start)) - ) + raise MissingControlFlowStartTag(tag, 
expected, self.tag_parser) if tag.block_type_name in allowed_blocks: if self.stack: - dbt.exceptions.raise_compiler_error( - ( - "Got a block definition inside control flow at {}. " - "All dbt block definitions must be at the top level" - ).format(self.tag_parser.linepos(tag.start)) - ) + raise BlockDefinitionNotAtTop(self.tag_parser, tag.start) if self.current is not None: - dbt.exceptions.raise_compiler_error( - duplicate_tags.format(outer=self.current, inner=tag) - ) + raise NestedTags(outer=self.current, inner=tag) if collect_raw_data: raw_data = self.data[self.last_position : tag.start] self.last_position = tag.start @@ -366,11 +347,7 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): if self.current: linecount = self.data[: self.current.end].count("\n") + 1 - dbt.exceptions.raise_compiler_error( - ("Reached EOF without finding a close tag for {} (searched from line {})").format( - self.current.block_type_name, linecount - ) - ) + raise MissingCloseTag(self.current.block_type_name, linecount) if collect_raw_data: raw_data = self.data[self.last_position :] diff --git a/core/dbt/clients/git.py b/core/dbt/clients/git.py index 9eaa93203e0..4ddbb1969ee 100644 --- a/core/dbt/clients/git.py +++ b/core/dbt/clients/git.py @@ -14,10 +14,10 @@ ) from dbt.exceptions import ( CommandResultError, + GitCheckoutError, + GitCloningError, + GitCloningProblem, RuntimeException, - bad_package_spec, - raise_git_cloning_error, - raise_git_cloning_problem, ) from packaging import version @@ -27,16 +27,6 @@ def _is_commit(revision: str) -> bool: return bool(re.match(r"\b[0-9a-f]{40}\b", revision)) -def _raise_git_cloning_error(repo, revision, error): - stderr = error.stderr.strip() - if "usage: git" in stderr: - stderr = stderr.split("\nusage: git")[0] - if re.match("fatal: destination path '(.+)' already exists", stderr): - raise_git_cloning_error(error) - - bad_package_spec(repo, revision, stderr) - - def clone(repo, cwd, dirname=None, remove_git_dir=False, 
revision=None, subdirectory=None): has_revision = revision is not None is_commit = _is_commit(revision or "") @@ -64,7 +54,7 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec try: result = run_cmd(cwd, clone_cmd, env={"LC_ALL": "C"}) except CommandResultError as exc: - _raise_git_cloning_error(repo, revision, exc) + raise GitCloningError(repo, revision, exc) if subdirectory: cwd_subdir = os.path.join(cwd, dirname or "") @@ -72,7 +62,7 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec try: run_cmd(cwd_subdir, clone_cmd_subdir) except CommandResultError as exc: - _raise_git_cloning_error(repo, revision, exc) + raise GitCloningError(repo, revision, exc) if remove_git_dir: rmdir(os.path.join(dirname, ".git")) @@ -115,8 +105,7 @@ def checkout(cwd, repo, revision=None): try: return _checkout(cwd, repo, revision) except CommandResultError as exc: - stderr = exc.stderr.strip() - bad_package_spec(repo, revision, stderr) + raise GitCheckoutError(repo=repo, revision=revision, error=exc) def get_current_sha(cwd): @@ -145,7 +134,7 @@ def clone_and_checkout( err = exc.stderr exists = re.match("fatal: destination path '(.+)' already exists", err) if not exists: - raise_git_cloning_problem(repo) + raise GitCloningProblem(repo) directory = None start_sha = None diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py index 5e9835952a8..c1b8865e33e 100644 --- a/core/dbt/clients/jinja.py +++ b/core/dbt/clients/jinja.py @@ -25,16 +25,19 @@ ) from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag -from dbt.contracts.graph.compiled import CompiledGenericTestNode -from dbt.contracts.graph.parsed import ParsedGenericTestNode +from dbt.contracts.graph.nodes import GenericTestNode from dbt.exceptions import ( - InternalException, - raise_compiler_error, + CaughtMacroException, + CaughtMacroExceptionWithNode, CompilationException, - invalid_materialization_argument, - MacroReturn, + 
InternalException, + InvalidMaterializationArg, JinjaRenderingException, + MacroReturn, + MaterializtionMacroNotUsed, + NoSupportedLanguagesFound, + UndefinedCompilation, UndefinedMacroException, ) from dbt import flags @@ -238,7 +241,7 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise_compiler_error(str(e)) + raise CaughtMacroException(e) def call_macro(self, *args, **kwargs): # called from __call__ methods @@ -297,7 +300,7 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise_compiler_error(str(e), self.macro) + raise CaughtMacroExceptionWithNode(exc=e, node=self.macro) except CompilationException as e: e.stack.append(self.macro) raise e @@ -377,7 +380,7 @@ def parse(self, parser): node.defaults.append(languages) else: - invalid_materialization_argument(materialization_name, target.name) + raise InvalidMaterializationArg(materialization_name, target.name) if SUPPORTED_LANG_ARG not in node.args: node.args.append(SUPPORTED_LANG_ARG) @@ -452,7 +455,7 @@ def __call__(self, *args, **kwargs): return self def __reduce__(self): - raise_compiler_error(f"{self.name} is undefined", node=node) + raise UndefinedCompilation(name=self.name, node=node) return Undefined @@ -620,7 +623,7 @@ def extract_toplevel_blocks( def add_rendered_test_kwargs( context: Dict[str, Any], - node: Union[ParsedGenericTestNode, CompiledGenericTestNode], + node: GenericTestNode, capture_macros: bool = False, ) -> None: """Render each of the test kwargs in the given context using the native @@ -652,13 +655,13 @@ def _convert_function(value: Any, keypath: Tuple[Union[str, int], ...]) -> Any: def get_supported_languages(node: jinja2.nodes.Macro) -> List[ModelLanguage]: if "materialization" not in node.name: - raise_compiler_error("Only materialization macros can be used with this function") + raise MaterializtionMacroNotUsed(node=node) 
no_kwargs = not node.defaults no_langs_found = SUPPORTED_LANG_ARG not in node.args if no_kwargs or no_langs_found: - raise_compiler_error(f"No supported_languages found in materialization macro {node.name}") + raise NoSupportedLanguagesFound(node=node) lang_idx = node.args.index(SUPPORTED_LANG_ARG) # indexing defaults from the end diff --git a/core/dbt/clients/jinja_static.py b/core/dbt/clients/jinja_static.py index 337a25eadda..d71211cea6e 100644 --- a/core/dbt/clients/jinja_static.py +++ b/core/dbt/clients/jinja_static.py @@ -1,6 +1,6 @@ import jinja2 from dbt.clients.jinja import get_environment -from dbt.exceptions import raise_compiler_error +from dbt.exceptions import MacroNamespaceNotString, MacroNameNotString def statically_extract_macro_calls(string, ctx, db_wrapper=None): @@ -117,20 +117,14 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper): func_name = kwarg.value.value possible_macro_calls.append(func_name) else: - raise_compiler_error( - f"The macro_name parameter ({kwarg.value.value}) " - "to adapter.dispatch was not a string" - ) + raise MacroNameNotString(kwarg_value=kwarg.value.value) elif kwarg.key == "macro_namespace": # This will remain to enable static resolution kwarg_type = type(kwarg.value).__name__ if kwarg_type == "Const": macro_namespace = kwarg.value.value else: - raise_compiler_error( - "The macro_namespace parameter to adapter.dispatch " - f"is a {kwarg_type}, not a string" - ) + raise MacroNamespaceNotString(kwarg_type) # positional arguments if packages_arg: diff --git a/core/dbt/clients/system.py b/core/dbt/clients/system.py index d1b1c461f50..e5a02b68475 100644 --- a/core/dbt/clients/system.py +++ b/core/dbt/clients/system.py @@ -157,7 +157,8 @@ def make_symlink(source: str, link_path: str) -> None: Create a symlink at `link_path` referring to `source`. """ if not supports_symlinks(): - dbt.exceptions.system_error("create a symbolic link") + # TODO: why not import these at top? 
+ raise dbt.exceptions.SymbolicLinkError() os.symlink(source, link_path) diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 7163b669001..4ae78fd3485 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -1,6 +1,6 @@ import os from collections import defaultdict -from typing import List, Dict, Any, Tuple, cast, Optional +from typing import List, Dict, Any, Tuple, Optional import networkx as nx # type: ignore import pickle @@ -12,23 +12,23 @@ from dbt.clients.system import make_directory from dbt.context.providers import generate_runtime_model_context from dbt.contracts.graph.manifest import Manifest, UniqueID -from dbt.contracts.graph.compiled import ( - COMPILED_TYPES, - CompiledGenericTestNode, +from dbt.contracts.graph.nodes import ( + ManifestNode, + ManifestSQLNode, + GenericTestNode, GraphMemberNode, InjectedCTE, - ManifestNode, - NonSourceCompiledNode, + SeedNode, ) -from dbt.contracts.graph.parsed import ParsedNode from dbt.exceptions import ( - dependency_not_found, + GraphDependencyNotFound, InternalException, RuntimeException, ) from dbt.graph import Graph from dbt.events.functions import fire_event -from dbt.events.types import FoundStats, CompilingNode, WritingInjectedSQLForNode +from dbt.events.types import FoundStats, WritingInjectedSQLForNode +from dbt.events.contextvars import get_node_info from dbt.node_types import NodeType, ModelLanguage from dbt.events.format import pluralize import dbt.tracking @@ -36,14 +36,6 @@ graph_file_name = "graph.gpickle" -def _compiled_type_for(model: ParsedNode): - if type(model) not in COMPILED_TYPES: - raise InternalException( - f"Asked to compile {type(model)} node, but it has no compiled form" - ) - return COMPILED_TYPES[type(model)] - - def print_compile_stats(stats): names = { NodeType.Model: "model", @@ -176,7 +168,7 @@ def initialize(self): # a dict for jinja rendering of SQL def _create_node_context( self, - node: NonSourceCompiledNode, + node: ManifestSQLNode, manifest: 
Manifest, extra_context: Dict[str, Any], ) -> Dict[str, Any]: @@ -184,7 +176,7 @@ def _create_node_context( context = generate_runtime_model_context(node, self.config, manifest) context.update(extra_context) - if isinstance(node, CompiledGenericTestNode): + if isinstance(node, GenericTestNode): # for test nodes, add a special keyword args value to the context jinja.add_rendered_test_kwargs(context, node) @@ -195,14 +187,6 @@ def add_ephemeral_prefix(self, name: str): relation_cls = adapter.Relation return relation_cls.add_ephemeral_prefix(name) - def _get_relation_name(self, node: ParsedNode): - relation_name = None - if node.is_relational and not node.is_ephemeral_model: - adapter = get_adapter(self.config) - relation_cls = adapter.Relation - relation_name = str(relation_cls.create_from(self.config, node)) - return relation_name - def _inject_ctes_into_sql(self, sql: str, ctes: List[InjectedCTE]) -> str: """ `ctes` is a list of InjectedCTEs like: @@ -261,10 +245,10 @@ def _inject_ctes_into_sql(self, sql: str, ctes: List[InjectedCTE]) -> str: def _recursively_prepend_ctes( self, - model: NonSourceCompiledNode, + model: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]], - ) -> Tuple[NonSourceCompiledNode, List[InjectedCTE]]: + ) -> Tuple[ManifestSQLNode, List[InjectedCTE]]: """This method is called by the 'compile_node' method. Starting from the node that it is passed in, it will recursively call itself using the 'extra_ctes'. 
The 'ephemeral' models do @@ -279,7 +263,8 @@ def _recursively_prepend_ctes( # Just to make it plain that nothing is actually injected for this case if not model.extra_ctes: - model.extra_ctes_injected = True + if not isinstance(model, SeedNode): + model.extra_ctes_injected = True manifest.update_node(model) return (model, model.extra_ctes) @@ -298,6 +283,7 @@ def _recursively_prepend_ctes( f"could not be resolved: {cte.id}" ) cte_model = manifest.nodes[cte.id] + assert not isinstance(cte_model, SeedNode) if not cte_model.is_ephemeral_model: raise InternalException(f"{cte.id} is not ephemeral") @@ -305,8 +291,6 @@ def _recursively_prepend_ctes( # This model has already been compiled, so it's been # through here before if getattr(cte_model, "compiled", False): - assert isinstance(cte_model, tuple(COMPILED_TYPES.values())) - cte_model = cast(NonSourceCompiledNode, cte_model) new_prepended_ctes = cte_model.extra_ctes # if the cte_model isn't compiled, i.e. first time here @@ -343,21 +327,19 @@ def _recursively_prepend_ctes( return model, prepended_ctes - # creates a compiled_node from the ManifestNode passed in, + # Sets compiled fields in the ManifestSQLNode passed in, # creates a "context" dictionary for jinja rendering, # and then renders the "compiled_code" using the node, the # raw_code and the context. def _compile_node( self, - node: ManifestNode, + node: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, - ) -> NonSourceCompiledNode: + ) -> ManifestSQLNode: if extra_context is None: extra_context = {} - fire_event(CompilingNode(unique_id=node.unique_id)) - data = node.to_dict(omit_none=True) data.update( { @@ -367,9 +349,8 @@ def _compile_node( "extra_ctes": [], } ) - compiled_node = _compiled_type_for(node).from_dict(data) - if compiled_node.language == ModelLanguage.python: + if node.language == ModelLanguage.python: # TODO could we also 'minify' this code at all? 
just aesthetic, not functional # quoating seems like something very specific to sql so far @@ -377,7 +358,7 @@ def _compile_node( # TODO try to find better way to do this, given that original_quoting = self.config.quoting self.config.quoting = {key: False for key in original_quoting.keys()} - context = self._create_node_context(compiled_node, manifest, extra_context) + context = self._create_node_context(node, manifest, extra_context) postfix = jinja.get_rendered( "{{ py_script_postfix(model) }}", @@ -385,23 +366,21 @@ def _compile_node( node, ) # we should NOT jinja render the python model's 'raw code' - compiled_node.compiled_code = f"{node.raw_code}\n\n{postfix}" + node.compiled_code = f"{node.raw_code}\n\n{postfix}" # restore quoting settings in the end since context is lazy evaluated self.config.quoting = original_quoting else: - context = self._create_node_context(compiled_node, manifest, extra_context) - compiled_node.compiled_code = jinja.get_rendered( + context = self._create_node_context(node, manifest, extra_context) + node.compiled_code = jinja.get_rendered( node.raw_code, context, node, ) - compiled_node.relation_name = self._get_relation_name(node) + node.compiled = True - compiled_node.compiled = True - - return compiled_node + return node def write_graph_file(self, linker: Linker, manifest: Manifest): filename = graph_file_name @@ -420,7 +399,7 @@ def link_node(self, linker: Linker, node: GraphMemberNode, manifest: Manifest): elif dependency in manifest.metrics: linker.dependency(node.unique_id, (manifest.metrics[dependency].unique_id)) else: - dependency_not_found(node, dependency) + raise GraphDependencyNotFound(node, dependency) def link_graph(self, linker: Linker, manifest: Manifest, add_test_edges: bool = False): for source in manifest.sources.values(): @@ -508,10 +487,13 @@ def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph return Graph(linker.graph) # writes the "compiled_code" into the target/compiled directory 
- def _write_node(self, node: NonSourceCompiledNode) -> ManifestNode: - if not node.extra_ctes_injected or node.resource_type == NodeType.Snapshot: + def _write_node(self, node: ManifestSQLNode) -> ManifestSQLNode: + if not node.extra_ctes_injected or node.resource_type in ( + NodeType.Snapshot, + NodeType.Seed, + ): return node - fire_event(WritingInjectedSQLForNode(unique_id=node.unique_id)) + fire_event(WritingInjectedSQLForNode(node_info=get_node_info())) if node.compiled_code: node.compiled_path = node.write_node( @@ -521,11 +503,11 @@ def _write_node(self, node: NonSourceCompiledNode) -> ManifestNode: def compile_node( self, - node: ManifestNode, + node: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, write: bool = True, - ) -> NonSourceCompiledNode: + ) -> ManifestSQLNode: """This is the main entry point into this code. It's called by CompileRunner.compile, GenericRPCRunner.compile, and RunTask.get_hook_sql. It calls '_compile_node' to convert diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py index 542062a2f6f..36eddfe33e0 100644 --- a/core/dbt/config/profile.py +++ b/core/dbt/config/profile.py @@ -9,12 +9,14 @@ from dbt.clients.yaml_helper import load_yaml_text from dbt.contracts.connection import Credentials, HasCredentials from dbt.contracts.project import ProfileConfig, UserConfig -from dbt.exceptions import CompilationException -from dbt.exceptions import DbtProfileError -from dbt.exceptions import DbtProjectError -from dbt.exceptions import ValidationException -from dbt.exceptions import RuntimeException -from dbt.exceptions import validator_error_message +from dbt.exceptions import ( + CompilationException, + DbtProfileError, + DbtProjectError, + ValidationException, + RuntimeException, + ProfileConfigInvalid, +) from dbt.events.types import MissingProfileTarget from dbt.events.functions import fire_event from dbt.utils import coerce_dict_str @@ -156,7 +158,7 @@ def validate(self): dct = 
self.to_profile_info(serialize_credentials=True) ProfileConfig.validate(dct) except ValidationError as exc: - raise DbtProfileError(validator_error_message(exc)) from exc + raise ProfileConfigInvalid(exc) from exc @staticmethod def _credentials_from_profile( diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 7a0eb4c8e9d..ebbe2684d22 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -16,19 +16,19 @@ import os from dbt import flags, deprecations -from dbt.clients.system import resolve_path_from_base -from dbt.clients.system import path_exists -from dbt.clients.system import load_file_contents +from dbt.clients.system import path_exists, resolve_path_from_base, load_file_contents from dbt.clients.yaml_helper import load_yaml_text from dbt.contracts.connection import QueryComment -from dbt.exceptions import DbtProjectError -from dbt.exceptions import SemverException -from dbt.exceptions import validator_error_message -from dbt.exceptions import RuntimeException +from dbt.exceptions import ( + DbtProjectError, + SemverException, + ProjectContractBroken, + ProjectContractInvalid, + RuntimeException, +) from dbt.graph import SelectionSpec from dbt.helper_types import NoValue -from dbt.semver import VersionSpecifier -from dbt.semver import versions_compatible +from dbt.semver import VersionSpecifier, versions_compatible from dbt.version import get_installed_version from dbt.utils import MultiDict from dbt.node_types import NodeType @@ -293,7 +293,7 @@ def render_package_metadata(self, renderer: PackageRenderer) -> ProjectPackageMe packages_data = renderer.render_data(self.packages_dict) packages_config = package_config_from_data(packages_data) if not self.project_name: - raise DbtProjectError(DbtProjectError("Package dbt_project.yml must have a name!")) + raise DbtProjectError("Package dbt_project.yml must have a name!") return ProjectPackageMetadata(self.project_name, packages_config.packages) def check_config_path(self, 
project_dict, deprecated_path, exp_path): @@ -332,7 +332,7 @@ def create_project(self, rendered: RenderComponents) -> "Project": ProjectContract.validate(rendered.project_dict) cfg = ProjectContract.from_dict(rendered.project_dict) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ProjectContractInvalid(e) from e # name/version are required in the Project definition, so we can assume # they are present name = cfg.name @@ -649,7 +649,7 @@ def validate(self): try: ProjectContract.validate(self.to_project_config()) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ProjectContractBroken(e) from e @classmethod def from_project_root( diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index ccf95c65f7c..46f03226b57 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -3,31 +3,41 @@ from copy import deepcopy from dataclasses import dataclass, field from pathlib import Path -from typing import Dict, Any, Optional, Mapping, Iterator, Iterable, Tuple, List, MutableSet, Type +from typing import ( + Any, + Dict, + Iterable, + Iterator, + Mapping, + MutableSet, + Optional, + Tuple, + Type, +) -from .profile import Profile -from .project import Project -from .renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt import flags -from dbt.adapters.factory import get_relation_class_by_name, get_include_paths -from dbt.helper_types import FQNPath, PathSet, DictDefaultEmptyStr +from dbt.adapters.factory import get_include_paths, get_relation_class_by_name from dbt.config.profile import read_user_config from dbt.config.project import load_raw_project from dbt.contracts.connection import AdapterRequiredConfig, Credentials, HasCredentials from dbt.contracts.graph.manifest import ManifestMetadata -from dbt.contracts.relation import ComponentName -from dbt.ui import warning_tag - from dbt.contracts.project import Configuration, UserConfig +from 
dbt.contracts.relation import ComponentName +from dbt.dataclass_schema import ValidationError from dbt.exceptions import ( - RuntimeException, + ConfigContractBroken, DbtProjectError, - validator_error_message, - warn_or_error, - raise_compiler_error, + NonUniquePackageName, + RuntimeException, + UninstalledPackagesFound, ) +from dbt.events.functions import warn_or_error +from dbt.events.types import UnusedResourceConfigPath +from dbt.helper_types import DictDefaultEmptyStr, FQNPath, PathSet -from dbt.dataclass_schema import ValidationError +from .profile import Profile +from .project import Project +from .renderer import DbtProjectYamlRenderer, ProfileRenderer def load_project( @@ -227,7 +237,7 @@ def validate(self): try: Configuration.validate(self.serialize()) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ConfigContractBroken(e) from e @classmethod def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]: @@ -240,7 +250,7 @@ def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profi args, ) project = load_project(project_root, bool(flags.VERSION_CHECK), profile, cli_vars) - return (project, profile) + return project, profile # Called in main.py, lib.py, task/base.py @classmethod @@ -309,11 +319,11 @@ def get_resource_config_paths(self) -> Dict[str, PathSet]: "exposures": self._get_config_paths(self.exposures), } - def get_unused_resource_config_paths( + def warn_for_unused_resource_config_paths( self, resource_fqns: Mapping[str, PathSet], disabled: PathSet, - ) -> List[FQNPath]: + ) -> None: """Return a list of lists of strings, where each inner list of strings represents a type + FQN path of a resource configuration that is not used. 
@@ -327,23 +337,13 @@ def get_unused_resource_config_paths( for config_path in config_paths: if not _is_config_used(config_path, fqns): - unused_resource_config_paths.append((resource_type,) + config_path) - return unused_resource_config_paths + resource_path = ".".join(i for i in ((resource_type,) + config_path)) + unused_resource_config_paths.append(resource_path) - def warn_for_unused_resource_config_paths( - self, - resource_fqns: Mapping[str, PathSet], - disabled: PathSet, - ) -> None: - unused = self.get_unused_resource_config_paths(resource_fqns, disabled) - if len(unused) == 0: + if len(unused_resource_config_paths) == 0: return - msg = UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE.format( - len(unused), "\n".join("- {}".format(".".join(u)) for u in unused) - ) - - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error(UnusedResourceConfigPath(unused_config_paths=unused_resource_config_paths)) def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: if self.dependencies is None: @@ -357,22 +357,15 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: count_packages_specified = len(self.packages.packages) # type: ignore count_packages_installed = len(tuple(self._get_project_directories())) if count_packages_specified > count_packages_installed: - raise_compiler_error( - f"dbt found {count_packages_specified} package(s) " - f"specified in packages.yml, but only " - f"{count_packages_installed} package(s) installed " - f'in {self.packages_install_path}. Run "dbt deps" to ' - f"install package dependencies." 
+ raise UninstalledPackagesFound( + count_packages_specified, + count_packages_installed, + self.packages_install_path, ) project_paths = itertools.chain(internal_packages, self._get_project_directories()) for project_name, project in self.load_projects(project_paths): if project_name in all_projects: - raise_compiler_error( - f"dbt found more than one package with the name " - f'"{project_name}" included in this project. Package ' - f"names must be unique in a project. Please rename " - f"one of these packages." - ) + raise NonUniquePackageName(project_name) all_projects[project_name] = project self.dependencies = all_projects return self.dependencies @@ -627,14 +620,6 @@ def from_args(cls: Type[RuntimeConfig], args: Any) -> "RuntimeConfig": return cls.from_parts(project=project, profile=profile, args=args) -UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE = """\ -Configuration paths exist in your dbt_project.yml file which do not \ -apply to any resources. -There are {} unused configuration paths: -{} -""" - - def _is_config_used(path, fqns): if fqns: for fqn in fqns: diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py index b3be5d5501b..76fd8f6b466 100644 --- a/core/dbt/config/utils.py +++ b/core/dbt/config/utils.py @@ -9,7 +9,7 @@ from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.events.functions import fire_event from dbt.events.types import InvalidVarsYAML -from dbt.exceptions import ValidationException, raise_compiler_error +from dbt.exceptions import ValidationException, VarsArgNotYamlDict def parse_cli_vars(var: str) -> Dict[str, Any]: @@ -19,11 +19,7 @@ def parse_cli_vars(var: str) -> Dict[str, Any]: if var_type is dict: return cli_vars else: - type_name = var_type.__name__ - raise_compiler_error( - "The --vars argument must be a YAML dictionary, but was " - "of type '{}'".format(type_name) - ) + raise VarsArgNotYamlDict(var_type) except ValidationException: fire_event(InvalidVarsYAML()) raise diff --git 
a/core/dbt/constants.py b/core/dbt/constants.py index 1599df3e335..63213476e54 100644 --- a/core/dbt/constants.py +++ b/core/dbt/constants.py @@ -1,3 +1,10 @@ SECRET_ENV_PREFIX = "DBT_ENV_SECRET_" DEFAULT_ENV_PLACEHOLDER = "DBT_DEFAULT_PLACEHOLDER" METADATA_ENV_PREFIX = "DBT_ENV_CUSTOM_ENV_" + +MAXIMUM_SEED_SIZE = 1 * 1024 * 1024 +MAXIMUM_SEED_SIZE_NAME = "1MB" + +PIN_PACKAGE_URL = ( + "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" +) diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index 68b5edb98c1..59984cb96ab 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -4,19 +4,22 @@ from dbt import flags from dbt import tracking +from dbt import utils from dbt.clients.jinja import get_rendered from dbt.clients.yaml_helper import yaml, safe_load, SafeLoader, Loader, Dumper # noqa: F401 from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER -from dbt.contracts.graph.compiled import CompiledResource +from dbt.contracts.graph.nodes import Resource from dbt.exceptions import ( - CompilationException, + DisallowSecretEnvVar, + EnvVarMissing, MacroReturn, - raise_compiler_error, - raise_parsing_error, - disallow_secret_env_var, + RequiredVarNotFound, + SetStrictWrongType, + ZipStrictWrongType, ) from dbt.events.functions import fire_event, get_invocation_id -from dbt.events.types import MacroEventInfo, MacroEventDebug +from dbt.events.types import JinjaLogInfo, JinjaLogDebug +from dbt.events.contextvars import get_node_info from dbt.version import __version__ as dbt_version # These modules are added to the context. 
Consider alternative @@ -126,18 +129,17 @@ def __new__(mcls, name, bases, dct): class Var: - UndefinedVarError = "Required var '{}' not found in config:\nVars supplied to {} = {}" _VAR_NOTSET = object() def __init__( self, context: Mapping[str, Any], cli_vars: Mapping[str, Any], - node: Optional[CompiledResource] = None, + node: Optional[Resource] = None, ) -> None: self._context: Mapping[str, Any] = context self._cli_vars: Mapping[str, Any] = cli_vars - self._node: Optional[CompiledResource] = node + self._node: Optional[Resource] = node self._merged: Mapping[str, Any] = self._generate_merged() def _generate_merged(self) -> Mapping[str, Any]: @@ -151,10 +153,7 @@ def node_name(self): return "" def get_missing_var(self, var_name): - dct = {k: self._merged[k] for k in self._merged} - pretty_vars = json.dumps(dct, sort_keys=True, indent=4) - msg = self.UndefinedVarError.format(var_name, self.node_name, pretty_vars) - raise_compiler_error(msg, self._node) + raise RequiredVarNotFound(var_name, self._merged, self._node) def has_var(self, var_name: str): return var_name in self._merged @@ -298,7 +297,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -313,8 +312,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) if os.environ.get("DBT_MACRO_DEBUGGING"): @@ -495,7 +493,7 @@ def set_strict(value: Iterable[Any]) -> Set[Any]: try: return set(value) except TypeError as e: - raise CompilationException(e) + raise SetStrictWrongType(e) @contextmember("zip") @staticmethod @@ -539,7 +537,7 @@ def zip_strict(*args: Iterable[Any]) -> Iterable[Any]: try: return zip(*args) except TypeError as e: - raise 
CompilationException(e) + raise ZipStrictWrongType(e) @contextmember @staticmethod @@ -557,9 +555,9 @@ def log(msg: str, info: bool = False) -> str: {% endmacro %}" """ if info: - fire_event(MacroEventInfo(msg=msg)) + fire_event(JinjaLogInfo(msg=msg, node_info=get_node_info())) else: - fire_event(MacroEventDebug(msg=msg)) + fire_event(JinjaLogDebug(msg=msg, node_info=get_node_info())) return "" @contextproperty @@ -687,6 +685,19 @@ def diff_of_two_dicts( dict_diff.update({k: dict_a[k]}) return dict_diff + @contextmember + @staticmethod + def local_md5(value: str) -> str: + """Calculates an MD5 hash of the given string. + It's called "local_md5" to emphasize that it runs locally in dbt (in jinja context) and not an MD5 SQL command. + + :param value: The value to hash + + Usage: + {% set value_hash = local_md5("hello world") %} + """ + return utils.md5(value) + def generate_base_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]: ctx = BaseContext(cli_vars) diff --git a/core/dbt/context/configured.py b/core/dbt/context/configured.py index 64fdcd935b3..7339bdb1152 100644 --- a/core/dbt/context/configured.py +++ b/core/dbt/context/configured.py @@ -8,7 +8,7 @@ from dbt.context.base import contextproperty, contextmember, Var from dbt.context.target import TargetContext -from dbt.exceptions import raise_parsing_error, disallow_secret_env_var +from dbt.exceptions import EnvVarMissing, DisallowSecretEnvVar class ConfiguredContext(TargetContext): @@ -87,7 +87,7 @@ def var(self) -> ConfiguredVar: def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -105,8 +105,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise 
EnvVarMissing(var) class MacroResolvingContext(ConfiguredContext): diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py index 26096caa108..89a652736dd 100644 --- a/core/dbt/context/docs.py +++ b/core/dbt/context/docs.py @@ -1,13 +1,12 @@ from typing import Any, Dict, Union from dbt.exceptions import ( - doc_invalid_args, - doc_target_not_found, + DocTargetNotFound, + InvalidDocArgs, ) from dbt.config.runtime import RuntimeConfig -from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro, ResultNode from dbt.context.base import contextmember from dbt.context.configured import SchemaYamlContext @@ -17,7 +16,7 @@ class DocsRuntimeContext(SchemaYamlContext): def __init__( self, config: RuntimeConfig, - node: Union[ParsedMacro, CompileResultNode], + node: Union[Macro, ResultNode], manifest: Manifest, current_project: str, ) -> None: @@ -53,9 +52,9 @@ def doc(self, *args: str) -> str: elif len(args) == 2: doc_package_name, doc_name = args else: - doc_invalid_args(self.node, args) + raise InvalidDocArgs(self.node, args) - # ParsedDocumentation + # Documentation target_doc = self.manifest.resolve_doc( doc_name, doc_package_name, @@ -69,7 +68,9 @@ def doc(self, *args: str) -> str: # TODO CT-211 source_file.add_node(self.node.unique_id) # type: ignore[union-attr] else: - doc_target_not_found(self.node, doc_name, doc_package_name) + raise DocTargetNotFound( + node=self.node, target_doc_name=doc_name, target_doc_package=doc_package_name + ) return target_doc.block_contents diff --git a/core/dbt/context/exceptions_jinja.py b/core/dbt/context/exceptions_jinja.py new file mode 100644 index 00000000000..5663b4701e0 --- /dev/null +++ b/core/dbt/context/exceptions_jinja.py @@ -0,0 +1,142 @@ +import functools +from typing import NoReturn + +from dbt.events.functions import warn_or_error +from dbt.events.helpers import 
env_secrets, scrub_secrets +from dbt.events.types import JinjaLogWarning + +from dbt.exceptions import ( + RuntimeException, + MissingConfig, + MissingMaterialization, + MissingRelation, + AmbiguousAlias, + AmbiguousCatalogMatch, + CacheInconsistency, + DataclassNotDict, + CompilationException, + DatabaseException, + DependencyNotFound, + DependencyException, + DuplicatePatchPath, + DuplicateResourceName, + InvalidPropertyYML, + NotImplementedException, + RelationWrongType, +) + + +def warn(msg, node=None): + warn_or_error(JinjaLogWarning(msg=msg), node=node) + return "" + + +def missing_config(model, name) -> NoReturn: + raise MissingConfig(unique_id=model.unique_id, name=name) + + +def missing_materialization(model, adapter_type) -> NoReturn: + raise MissingMaterialization(model=model, adapter_type=adapter_type) + + +def missing_relation(relation, model=None) -> NoReturn: + raise MissingRelation(relation, model) + + +def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: + raise AmbiguousAlias(node_1, node_2, duped_name) + + +def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: + raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + + +def raise_cache_inconsistent(message) -> NoReturn: + raise CacheInconsistency(message) + + +def raise_dataclass_not_dict(obj) -> NoReturn: + raise DataclassNotDict(obj) + + +def raise_compiler_error(msg, node=None) -> NoReturn: + raise CompilationException(msg, node) + + +def raise_database_error(msg, node=None) -> NoReturn: + raise DatabaseException(msg, node) + + +def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: + raise DependencyNotFound(node, node_description, required_pkg) + + +def raise_dependency_error(msg) -> NoReturn: + raise DependencyException(scrub_secrets(msg, env_secrets())) + + +def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: + raise DuplicatePatchPath(patch_1, existing_patch_path) + + +def 
raise_duplicate_resource_name(node_1, node_2) -> NoReturn: + raise DuplicateResourceName(node_1, node_2) + + +def raise_invalid_property_yml_version(path, issue) -> NoReturn: + raise InvalidPropertyYML(path, issue) + + +def raise_not_implemented(msg) -> NoReturn: + raise NotImplementedException(msg) + + +def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: + raise RelationWrongType(relation, expected_type, model) + + +# Update this when a new function should be added to the +# dbt context's `exceptions` key! +CONTEXT_EXPORTS = { + fn.__name__: fn + for fn in [ + warn, + missing_config, + missing_materialization, + missing_relation, + raise_ambiguous_alias, + raise_ambiguous_catalog_match, + raise_cache_inconsistent, + raise_dataclass_not_dict, + raise_compiler_error, + raise_database_error, + raise_dep_not_found, + raise_dependency_error, + raise_duplicate_patch_name, + raise_duplicate_resource_name, + raise_invalid_property_yml_version, + raise_not_implemented, + relation_wrong_type, + ] +} + + +# wraps context based exceptions in node info +def wrapper(model): + def wrap(func): + @functools.wraps(func) + def inner(*args, **kwargs): + try: + return func(*args, **kwargs) + except RuntimeException as exc: + exc.add_node(model) + raise exc + + return inner + + return wrap + + +def wrapped_exports(model): + wrap = wrapper(model) + return {name: wrap(export) for name, export in CONTEXT_EXPORTS.items()} diff --git a/core/dbt/context/macro_resolver.py b/core/dbt/context/macro_resolver.py index 2766dc4130c..6e70bafd05e 100644 --- a/core/dbt/context/macro_resolver.py +++ b/core/dbt/context/macro_resolver.py @@ -1,10 +1,10 @@ from typing import Dict, MutableMapping, Optional -from dbt.contracts.graph.parsed import ParsedMacro -from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error +from dbt.contracts.graph.nodes import Macro +from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro from dbt.include.global_project 
import PROJECT_NAME as GLOBAL_PROJECT_NAME from dbt.clients.jinja import MacroGenerator -MacroNamespace = Dict[str, ParsedMacro] +MacroNamespace = Dict[str, Macro] # This class builds the MacroResolver by adding macros @@ -21,7 +21,7 @@ class MacroResolver: def __init__( self, - macros: MutableMapping[str, ParsedMacro], + macros: MutableMapping[str, Macro], root_project_name: str, internal_package_names, ) -> None: @@ -77,7 +77,7 @@ def _build_macros_by_name(self): def _add_macro_to( self, package_namespaces: Dict[str, MacroNamespace], - macro: ParsedMacro, + macro: Macro, ): if macro.package_name in package_namespaces: namespace = package_namespaces[macro.package_name] @@ -86,10 +86,10 @@ def _add_macro_to( package_namespaces[macro.package_name] = namespace if macro.name in namespace: - raise_duplicate_macro_name(macro, macro, macro.package_name) + raise DuplicateMacroName(macro, macro, macro.package_name) package_namespaces[macro.package_name][macro.name] = macro - def add_macro(self, macro: ParsedMacro): + def add_macro(self, macro: Macro): macro_name: str = macro.name # internal macros (from plugins) will be processed separately from @@ -187,7 +187,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.macro_resolver.packages: macro = self.macro_resolver.packages[package_name].get(name) else: - raise_compiler_error(f"Could not find package '{package_name}'") + raise PackageNotFoundForMacro(package_name) if not macro: return None macro_func = MacroGenerator(macro, self.ctx, self.node, self.thread_ctx) diff --git a/core/dbt/context/macros.py b/core/dbt/context/macros.py index dccd376b876..921480ec05a 100644 --- a/core/dbt/context/macros.py +++ b/core/dbt/context/macros.py @@ -1,9 +1,9 @@ from typing import Any, Dict, Iterable, Union, Optional, List, Iterator, Mapping, Set from dbt.clients.jinja import MacroGenerator, MacroStack -from dbt.contracts.graph.parsed import ParsedMacro +from 
dbt.contracts.graph.nodes import Macro from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME -from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error +from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro FlatNamespace = Dict[str, MacroGenerator] @@ -75,7 +75,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.packages: return self.packages[package_name].get(name) else: - raise_compiler_error(f"Could not find package '{package_name}'") + raise PackageNotFoundForMacro(package_name) # This class builds the MacroNamespace by adding macros to @@ -112,7 +112,7 @@ def __init__( def _add_macro_to( self, hierarchy: Dict[str, FlatNamespace], - macro: ParsedMacro, + macro: Macro, macro_func: MacroGenerator, ): if macro.package_name in hierarchy: @@ -122,10 +122,10 @@ def _add_macro_to( hierarchy[macro.package_name] = namespace if macro.name in namespace: - raise_duplicate_macro_name(macro_func.macro, macro, macro.package_name) + raise DuplicateMacroName(macro_func.macro, macro, macro.package_name) hierarchy[macro.package_name][macro.name] = macro_func - def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]): + def add_macro(self, macro: Macro, ctx: Dict[str, Any]): macro_name: str = macro.name # MacroGenerator is in clients/jinja.py @@ -147,13 +147,11 @@ def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]): elif macro.package_name == self.root_package: self.globals[macro_name] = macro_func - def add_macros(self, macros: Iterable[ParsedMacro], ctx: Dict[str, Any]): + def add_macros(self, macros: Iterable[Macro], ctx: Dict[str, Any]): for macro in macros: self.add_macro(macro, ctx) - def build_namespace( - self, macros: Iterable[ParsedMacro], ctx: Dict[str, Any] - ) -> MacroNamespace: + def build_namespace(self, macros: Iterable[Macro], ctx: Dict[str, Any]) -> MacroNamespace: self.add_macros(macros, ctx) # Iterate in reverse-order and overwrite: 
the packages that are first diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 597b526e384..2e7af0a79f2 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -19,46 +19,50 @@ from dbt.clients import agate_helper from dbt.clients.jinja import get_rendered, MacroGenerator, MacroStack from dbt.config import RuntimeConfig, Project -from .base import contextmember, contextproperty, Var -from .configured import FQNLookup -from .context_config import ContextConfig from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER +from dbt.context.base import contextmember, contextproperty, Var +from dbt.context.configured import FQNLookup +from dbt.context.context_config import ContextConfig +from dbt.context.exceptions_jinja import wrapped_exports from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace -from .macros import MacroNamespaceBuilder, MacroNamespace -from .manifest import ManifestContext +from dbt.context.macros import MacroNamespaceBuilder, MacroNamespace +from dbt.context.manifest import ManifestContext from dbt.contracts.connection import AdapterResponse from dbt.contracts.graph.manifest import Manifest, Disabled -from dbt.contracts.graph.compiled import ( - CompiledResource, - CompiledSeedNode, +from dbt.contracts.graph.nodes import ( + Macro, + Exposure, + Metric, + SeedNode, + SourceDefinition, + Resource, ManifestNode, ) -from dbt.contracts.graph.parsed import ( - ParsedMacro, - ParsedExposure, - ParsedMetric, - ParsedSeedNode, - ParsedSourceDefinition, -) from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference from dbt.events.functions import get_metadata_vars from dbt.exceptions import ( CompilationException, - ParsingException, + ConflictingConfigKeys, + DisallowSecretEnvVar, + EnvVarMissing, InternalException, - ValidationException, + InvalidInlineModelConfig, + InvalidNumberSourceArgs, + InvalidPersistDocsValueType, + LoadAgateTableNotSeed, + 
LoadAgateTableValueError, + MacroInvalidDispatchArg, + MacrosSourcesUnWriteable, + MetricInvalidArgs, + MissingConfig, + OperationsCannotRefEphemeralNodes, + PackageNotInDeps, + ParsingException, + RefBadContext, + RefInvalidArgs, RuntimeException, - macro_invalid_dispatch_arg, - missing_config, - raise_compiler_error, - ref_invalid_args, - metric_invalid_args, - ref_target_not_found, - target_not_found, - ref_bad_context, - wrapped_exports, - raise_parsing_error, - disallow_secret_env_var, + TargetNotFound, + ValidationException, ) from dbt.config import IsFQNResource from dbt.node_types import NodeType, ModelLanguage @@ -143,7 +147,7 @@ def dispatch( raise CompilationException(msg) if packages is not None: - raise macro_invalid_dispatch_arg(macro_name) + raise MacroInvalidDispatchArg(macro_name) namespace = macro_namespace @@ -237,7 +241,7 @@ def __call__(self, *args: str) -> RelationProxy: elif len(args) == 2: package, name = args else: - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -261,9 +265,7 @@ def validate_args(self, source_name: str, table_name: str): def __call__(self, *args: str) -> RelationProxy: if len(args) != 2: - raise_compiler_error( - f"source() takes exactly two arguments ({len(args)} given)", self.model - ) + raise InvalidNumberSourceArgs(args, node=self.model) self.validate_args(args[0], args[1]) return self.resolve(args[0], args[1]) @@ -298,7 +300,7 @@ def __call__(self, *args: str) -> MetricReference: elif len(args) == 2: package, name = args else: - metric_invalid_args(self.model, args) + raise MetricInvalidArgs(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -319,12 +321,7 @@ def _transform_config(self, config): if oldkey in config: newkey = oldkey.replace("_", "-") if newkey in config: - raise_compiler_error( - 'Invalid config, has conflicting keys "{}" and "{}"'.format( - 
oldkey, newkey - ), - self.model, - ) + raise ConflictingConfigKeys(oldkey, newkey, node=self.model) config[newkey] = config.pop(oldkey) return config @@ -334,7 +331,7 @@ def __call__(self, *args, **kwargs): elif len(args) == 0 and len(kwargs) > 0: opts = kwargs else: - raise_compiler_error("Invalid inline model config", self.model) + raise InvalidInlineModelConfig(node=self.model) opts = self._transform_config(opts) @@ -382,7 +379,7 @@ def _lookup(self, name, default=_MISSING): else: result = self.model.config.get(name, default) if result is _MISSING: - missing_config(self.model, name) + raise MissingConfig(unique_id=self.model.unique_id, name=name) return result def require(self, name, validator=None): @@ -404,20 +401,14 @@ def get(self, name, default=None, validator=None): def persist_relation_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise_compiler_error( - f"Invalid value provided for 'persist_docs'. Expected dict " - f"but received {type(persist_docs)}" - ) + raise InvalidPersistDocsValueType(persist_docs) return persist_docs.get("relation", False) def persist_column_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise_compiler_error( - f"Invalid value provided for 'persist_docs'. 
Expected dict " - f"but received {type(persist_docs)}" - ) + raise InvalidPersistDocsValueType(persist_docs) return persist_docs.get("columns", False) @@ -476,10 +467,11 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Rel ) if target_model is None or isinstance(target_model, Disabled): - ref_target_not_found( - self.model, - target_name, - target_package, + raise TargetNotFound( + node=self.model, + target_name=target_name, + target_kind="node", + target_package=target_package, disabled=isinstance(target_model, Disabled), ) self.validate(target_model, target_name, target_package) @@ -497,7 +489,7 @@ def validate( ) -> None: if resolved.unique_id not in self.model.depends_on.nodes: args = self._repack_args(target_name, target_package) - ref_bad_context(self.model, args) + raise RefBadContext(node=self.model, args=args) class OperationRefResolver(RuntimeRefResolver): @@ -512,13 +504,8 @@ def validate( def create_relation(self, target_model: ManifestNode, name: str) -> RelationProxy: if target_model.is_ephemeral_model: # In operations, we can't ref() ephemeral nodes, because - # ParsedMacros do not support set_cte - raise_compiler_error( - "Operations can not ref() ephemeral nodes, but {} is ephemeral".format( - target_model.name - ), - self.model, - ) + # Macros do not support set_cte + raise OperationsCannotRefEphemeralNodes(target_model.name, node=self.model) else: return super().create_relation(target_model, name) @@ -541,7 +528,7 @@ def resolve(self, source_name: str, table_name: str): ) if target_source is None or isinstance(target_source, Disabled): - target_not_found( + raise TargetNotFound( node=self.model, target_name=f"{source_name}.{table_name}", target_kind="source", @@ -568,7 +555,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Met ) if target_metric is None or isinstance(target_metric, Disabled): - target_not_found( + raise TargetNotFound( node=self.model, target_name=target_name, 
target_kind="metric", @@ -584,9 +571,9 @@ def __init__( self, context: Dict[str, Any], config: RuntimeConfig, - node: CompiledResource, + node: Resource, ) -> None: - self._node: CompiledResource + self._node: Resource self._config: RuntimeConfig = config super().__init__(context, config.cli_vars, node=node) @@ -597,7 +584,7 @@ def packages_for_node(self) -> Iterable[Project]: if package_name != self._config.project_name: if package_name not in dependencies: # I don't think this is actually reachable - raise_compiler_error(f"Node package named {package_name} not found!", self._node) + raise PackageNotInDeps(package_name, node=self._node) yield dependencies[package_name] yield self._config @@ -690,7 +677,7 @@ def __init__( raise InternalException(f"Invalid provider given to context: {provider}") # mypy appeasement - we know it'll be a RuntimeConfig self.config: RuntimeConfig - self.model: Union[ParsedMacro, ManifestNode] = model + self.model: Union[Macro, ManifestNode] = model super().__init__(config, manifest, model.package_name) self.sql_results: Dict[str, AttrDict] = {} self.context_config: Optional[ContextConfig] = context_config @@ -779,8 +766,8 @@ def inner(value: T) -> None: @contextmember def write(self, payload: str) -> str: # macros/source defs aren't 'writeable'. 
- if isinstance(self.model, (ParsedMacro, ParsedSourceDefinition)): - raise_compiler_error('cannot "write" macros or sources') + if isinstance(self.model, (Macro, SourceDefinition)): + raise MacrosSourcesUnWriteable(node=self.model) self.model.build_path = self.model.write_node(self.config.target_path, "run", payload) return "" @@ -795,20 +782,19 @@ def try_or_compiler_error( try: return func(*args, **kwargs) except Exception: - raise_compiler_error(message_if_exception, self.model) + raise CompilationException(message_if_exception, self.model) @contextmember def load_agate_table(self) -> agate.Table: - if not isinstance(self.model, (ParsedSeedNode, CompiledSeedNode)): - raise_compiler_error( - "can only load_agate_table for seeds (got a {})".format(self.model.resource_type) - ) + if not isinstance(self.model, SeedNode): + raise LoadAgateTableNotSeed(self.model.resource_type, node=self.model) + assert self.model.root_path path = os.path.join(self.model.root_path, self.model.original_file_path) column_types = self.model.config.column_types try: table = agate_helper.from_csv(path, text_columns=column_types) except ValueError as e: - raise_compiler_error(str(e)) + raise LoadAgateTableValueError(e, node=self.model) table.original_abspath = os.path.abspath(path) return table @@ -1210,7 +1196,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1219,7 +1205,13 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: if return_value is not None: # Save the env_var value in the manifest and the var name in the source_file. # If this is compiling, do not save because it's irrelevant to parsing. 
- if self.model and not hasattr(self.model, "compiled"): + compiling = ( + True + if hasattr(self.model, "compiled") + and getattr(self.model, "compiled", False) is True + else False + ) + if self.model and not compiling: # If the environment variable is set from a default, store a string indicating # that so we can skip partial parsing. Otherwise the file will be scheduled for # reparsing. If the default changes, the file will have been updated and therefore @@ -1237,8 +1229,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.env_vars.append(var) # type: ignore[union-attr] return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) @contextproperty def selected_resources(self) -> List[str]: @@ -1274,7 +1265,7 @@ class MacroContext(ProviderContext): def __init__( self, - model: ParsedMacro, + model: Macro, config: RuntimeConfig, manifest: Manifest, provider: Provider, @@ -1389,7 +1380,7 @@ def generate_parser_model_context( def generate_generate_name_macro_context( - macro: ParsedMacro, + macro: Macro, config: RuntimeConfig, manifest: Manifest, ) -> Dict[str, Any]: @@ -1407,7 +1398,7 @@ def generate_runtime_model_context( def generate_runtime_macro_context( - macro: ParsedMacro, + macro: Macro, config: RuntimeConfig, manifest: Manifest, package_name: Optional[str], @@ -1419,7 +1410,7 @@ def generate_runtime_macro_context( class ExposureRefResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) not in (1, 2): - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.model.refs.append(list(args)) return "" @@ -1427,15 +1418,21 @@ def __call__(self, *args) -> str: class ExposureSourceResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) != 2: - raise_compiler_error( - f"source() takes exactly two arguments ({len(args)} given)", self.model - ) + raise InvalidNumberSourceArgs(args, node=self.model) 
self.model.sources.append(list(args)) return "" +class ExposureMetricResolver(BaseResolver): + def __call__(self, *args) -> str: + if len(args) not in (1, 2): + raise MetricInvalidArgs(node=self.model, args=args) + self.model.metrics.append(list(args)) + return "" + + def generate_parse_exposure( - exposure: ParsedExposure, + exposure: Exposure, config: RuntimeConfig, manifest: Manifest, package_name: str, @@ -1454,6 +1451,12 @@ def generate_parse_exposure( project, manifest, ), + "metric": ExposureMetricResolver( + None, + exposure, + project, + manifest, + ), } @@ -1465,7 +1468,7 @@ def __call__(self, *args) -> str: elif len(args) == 2: package, name = args else: - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.validate_args(name, package) self.model.refs.append(list(args)) return "" @@ -1479,7 +1482,7 @@ def validate_args(self, name, package): def generate_parse_metrics( - metric: ParsedMetric, + metric: Metric, config: RuntimeConfig, manifest: Manifest, package_name: str, @@ -1555,7 +1558,7 @@ def _build_test_namespace(self): def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1581,8 +1584,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.add_env_var(var, yaml_key, name) # type: ignore[union-attr] return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) def generate_test_context( diff --git a/core/dbt/context/secret.py b/core/dbt/context/secret.py index 11a6dc54f07..da13509ef50 100644 --- a/core/dbt/context/secret.py +++ b/core/dbt/context/secret.py @@ -4,7 +4,7 @@ from .base import BaseContext, contextmember from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER -from 
dbt.exceptions import raise_parsing_error +from dbt.exceptions import EnvVarMissing SECRET_PLACEHOLDER = "$$$DBT_SECRET_START$$${}$$$DBT_SECRET_END$$$" @@ -50,8 +50,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: self.env_vars[var] = return_value if var in os.environ else DEFAULT_ENV_PLACEHOLDER return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) def generate_secret_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]: diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py index a32bb443099..fe4ae912229 100644 --- a/core/dbt/contracts/connection.py +++ b/core/dbt/contracts/connection.py @@ -16,6 +16,7 @@ from dbt.utils import translate_aliases from dbt.events.functions import fire_event from dbt.events.types import NewConnectionOpening +from dbt.events.contextvars import get_node_info from typing_extensions import Protocol from dbt.dataclass_schema import ( dbtClassMixin, @@ -112,7 +113,9 @@ def __init__(self, opener: Callable[[Connection], Connection]): self.opener = opener def resolve(self, connection: Connection) -> Connection: - fire_event(NewConnectionOpening(connection_state=connection.state)) + fire_event( + NewConnectionOpening(connection_state=connection.state, node_info=get_node_info()) + ) return self.opener(connection) diff --git a/core/dbt/contracts/files.py b/core/dbt/contracts/files.py index b915a0d1197..93f12a1411e 100644 --- a/core/dbt/contracts/files.py +++ b/core/dbt/contracts/files.py @@ -1,18 +1,16 @@ import hashlib import os from dataclasses import dataclass, field + from mashumaro.types import SerializableType from typing import List, Optional, Union, Dict, Any +from dbt.constants import MAXIMUM_SEED_SIZE from dbt.dataclass_schema import dbtClassMixin, StrEnum from .util import SourceKey -MAXIMUM_SEED_SIZE = 1 * 1024 * 1024 -MAXIMUM_SEED_SIZE_NAME = "1MB" - - class ParseFileType(StrEnum): Macro = "macro" Model = 
"model" diff --git a/core/dbt/contracts/graph/compiled.py b/core/dbt/contracts/graph/compiled.py deleted file mode 100644 index 118d104f537..00000000000 --- a/core/dbt/contracts/graph/compiled.py +++ /dev/null @@ -1,235 +0,0 @@ -from dbt.contracts.graph.parsed import ( - HasTestMetadata, - ParsedNode, - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedExposure, - ParsedMetric, - ParsedResource, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, - ParsedSourceDefinition, - SeedConfig, - TestConfig, - same_seeds, -) -from dbt.node_types import NodeType -from dbt.contracts.util import Replaceable - -from dbt.dataclass_schema import dbtClassMixin -from dataclasses import dataclass, field -from typing import Optional, List, Union, Dict, Type - - -@dataclass -class InjectedCTE(dbtClassMixin, Replaceable): - id: str - sql: str - - -@dataclass -class CompiledNodeMixin(dbtClassMixin): - # this is a special mixin class to provide a required argument. If a node - # is missing a `compiled` flag entirely, it must not be a CompiledNode. 
- compiled: bool - - -@dataclass -class CompiledNode(ParsedNode, CompiledNodeMixin): - compiled_code: Optional[str] = None - extra_ctes_injected: bool = False - extra_ctes: List[InjectedCTE] = field(default_factory=list) - relation_name: Optional[str] = None - _pre_injected_sql: Optional[str] = None - - def set_cte(self, cte_id: str, sql: str): - """This is the equivalent of what self.extra_ctes[cte_id] = sql would - do if extra_ctes were an OrderedDict - """ - for cte in self.extra_ctes: - if cte.id == cte_id: - cte.sql = sql - break - else: - self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) - - def __post_serialize__(self, dct): - dct = super().__post_serialize__(dct) - if "_pre_injected_sql" in dct: - del dct["_pre_injected_sql"] - return dct - - -@dataclass -class CompiledAnalysisNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]}) - - -@dataclass -class CompiledHookNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]}) - index: Optional[int] = None - - -@dataclass -class CompiledModelNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]}) - - -# TODO: rm? -@dataclass -class CompiledRPCNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]}) - - -@dataclass -class CompiledSqlNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]}) - - -@dataclass -class CompiledSeedNode(CompiledNode): - # keep this in sync with ParsedSeedNode! 
- resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) - config: SeedConfig = field(default_factory=SeedConfig) - - @property - def empty(self): - """Seeds are never empty""" - return False - - def same_body(self, other) -> bool: - return same_seeds(self, other) - - -@dataclass -class CompiledSnapshotNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) - - -@dataclass -class CompiledSingularTestNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) - # Was not able to make mypy happy and keep the code working. We need to - # refactor the various configs. - config: TestConfig = field(default_factory=TestConfig) # type:ignore - - -@dataclass -class CompiledGenericTestNode(CompiledNode, HasTestMetadata): - # keep this in sync with ParsedGenericTestNode! - resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) - column_name: Optional[str] = None - file_key_name: Optional[str] = None - # Was not able to make mypy happy and keep the code working. We need to - # refactor the various configs. 
- config: TestConfig = field(default_factory=TestConfig) # type:ignore - - def same_contents(self, other) -> bool: - if other is None: - return False - - return self.same_config(other) and self.same_fqn(other) and True - - -CompiledTestNode = Union[CompiledSingularTestNode, CompiledGenericTestNode] - - -PARSED_TYPES: Dict[Type[CompiledNode], Type[ParsedResource]] = { - CompiledAnalysisNode: ParsedAnalysisNode, - CompiledModelNode: ParsedModelNode, - CompiledHookNode: ParsedHookNode, - CompiledRPCNode: ParsedRPCNode, - CompiledSqlNode: ParsedSqlNode, - CompiledSeedNode: ParsedSeedNode, - CompiledSnapshotNode: ParsedSnapshotNode, - CompiledSingularTestNode: ParsedSingularTestNode, - CompiledGenericTestNode: ParsedGenericTestNode, -} - - -COMPILED_TYPES: Dict[Type[ParsedResource], Type[CompiledNode]] = { - ParsedAnalysisNode: CompiledAnalysisNode, - ParsedModelNode: CompiledModelNode, - ParsedHookNode: CompiledHookNode, - ParsedRPCNode: CompiledRPCNode, - ParsedSqlNode: CompiledSqlNode, - ParsedSeedNode: CompiledSeedNode, - ParsedSnapshotNode: CompiledSnapshotNode, - ParsedSingularTestNode: CompiledSingularTestNode, - ParsedGenericTestNode: CompiledGenericTestNode, -} - - -# for some types, the compiled type is the parsed type, so make this easy -CompiledType = Union[Type[CompiledNode], Type[ParsedResource]] -CompiledResource = Union[ParsedResource, CompiledNode] - - -def compiled_type_for(parsed: ParsedNode) -> CompiledType: - if type(parsed) in COMPILED_TYPES: - return COMPILED_TYPES[type(parsed)] - else: - return type(parsed) - - -def parsed_instance_for(compiled: CompiledNode) -> ParsedResource: - cls = PARSED_TYPES.get(type(compiled)) - if cls is None: - # how??? 
- raise ValueError("invalid resource_type: {}".format(compiled.resource_type)) - - return cls.from_dict(compiled.to_dict(omit_none=True)) - - -NonSourceCompiledNode = Union[ - CompiledAnalysisNode, - CompiledSingularTestNode, - CompiledModelNode, - CompiledHookNode, - CompiledRPCNode, - CompiledSqlNode, - CompiledGenericTestNode, - CompiledSeedNode, - CompiledSnapshotNode, -] - -NonSourceParsedNode = Union[ - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, -] - - -# This is anything that can be in manifest.nodes. -ManifestNode = Union[ - NonSourceCompiledNode, - NonSourceParsedNode, -] - -# We allow either parsed or compiled nodes, or parsed sources, as some -# 'compile()' calls in the runner actually just return the original parsed -# node they were given. -CompileResultNode = Union[ - ManifestNode, - ParsedSourceDefinition, -] - -# anything that participates in the graph: sources, exposures, metrics, -# or manifest nodes -GraphMemberNode = Union[ - CompileResultNode, - ParsedExposure, - ParsedMetric, -] diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index a2d22e6e315..c43012ec521 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -16,29 +16,24 @@ TypeVar, Callable, Generic, - cast, AbstractSet, ClassVar, ) from typing_extensions import Protocol from uuid import UUID -from dbt.contracts.graph.compiled import ( - CompileResultNode, +from dbt.contracts.graph.nodes import ( + Macro, + Documentation, + SourceDefinition, + GenericTestNode, + Exposure, + Metric, + UnpatchedSourceDefinition, ManifestNode, - NonSourceCompiledNode, GraphMemberNode, -) -from dbt.contracts.graph.parsed import ( - ParsedMacro, - ParsedDocumentation, - ParsedSourceDefinition, - ParsedGenericTestNode, - ParsedExposure, - ParsedMetric, - HasUniqueID, - UnpatchedSourceDefinition, 
- ManifestNodes, + ResultNode, + BaseNode, ) from dbt.contracts.graph.unparsed import SourcePatch from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile @@ -46,14 +41,14 @@ from dbt.dataclass_schema import dbtClassMixin from dbt.exceptions import ( CompilationException, - raise_duplicate_resource_name, - raise_compiler_error, + DuplicateResourceName, + DuplicateMacroInPackage, + DuplicateMaterializationName, ) from dbt.helper_types import PathSet from dbt.events.functions import fire_event from dbt.events.types import MergedFromState from dbt.node_types import NodeType -from dbt.ui import line_wrap_message from dbt import flags from dbt import tracking import dbt.utils @@ -96,7 +91,7 @@ def find(self, key, package: Optional[PackageName], manifest: "Manifest"): return self.perform_lookup(unique_id, manifest) return None - def add_doc(self, doc: ParsedDocumentation): + def add_doc(self, doc: Documentation): if doc.name not in self.storage: self.storage[doc.name] = {} self.storage[doc.name][doc.package_name] = doc.unique_id @@ -105,7 +100,7 @@ def populate(self, manifest): for doc in manifest.docs.values(): self.add_doc(doc) - def perform_lookup(self, unique_id: UniqueID, manifest) -> ParsedDocumentation: + def perform_lookup(self, unique_id: UniqueID, manifest) -> Documentation: if unique_id not in manifest.docs: raise dbt.exceptions.InternalException( f"Doc {unique_id} found in cache but not found in manifest" @@ -127,7 +122,7 @@ def find(self, search_name, package: Optional[PackageName], manifest: "Manifest" return self.perform_lookup(unique_id, manifest) return None - def add_source(self, source: ParsedSourceDefinition): + def add_source(self, source: SourceDefinition): if source.search_name not in self.storage: self.storage[source.search_name] = {} @@ -138,7 +133,7 @@ def populate(self, manifest): if hasattr(source, "source_name"): self.add_source(source) - def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> 
ParsedSourceDefinition: + def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> SourceDefinition: if unique_id not in manifest.sources: raise dbt.exceptions.InternalException( f"Source {unique_id} found in cache but not found in manifest" @@ -198,7 +193,7 @@ def find(self, search_name, package: Optional[PackageName], manifest: "Manifest" return self.perform_lookup(unique_id, manifest) return None - def add_metric(self, metric: ParsedMetric): + def add_metric(self, metric: Metric): if metric.search_name not in self.storage: self.storage[metric.search_name] = {} @@ -209,7 +204,7 @@ def populate(self, manifest): if hasattr(metric, "name"): self.add_metric(metric) - def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedMetric: + def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> Metric: if unique_id not in manifest.metrics: raise dbt.exceptions.InternalException( f"Metric {unique_id} found in cache but not found in manifest" @@ -325,7 +320,7 @@ def _sort_values(dct): def build_node_edges(nodes: List[ManifestNode]): - """Build the forward and backward edges on the given list of ParsedNodes + """Build the forward and backward edges on the given list of ManifestNodes and return them as two separate dictionaries, each mapping unique IDs to lists of edges. 
""" @@ -343,10 +338,10 @@ def build_node_edges(nodes: List[ManifestNode]): # Build a map of children of macros and generic tests def build_macro_edges(nodes: List[Any]): forward_edges: Dict[str, List[str]] = { - n.unique_id: [] for n in nodes if n.unique_id.startswith("macro") or n.depends_on.macros + n.unique_id: [] for n in nodes if n.unique_id.startswith("macro") or n.depends_on_macros } for node in nodes: - for unique_id in node.depends_on.macros: + for unique_id in node.depends_on_macros: if unique_id in forward_edges.keys(): forward_edges[unique_id].append(node.unique_id) return _sort_values(forward_edges) @@ -365,7 +360,7 @@ class Locality(enum.IntEnum): @dataclass class MacroCandidate: locality: Locality - macro: ParsedMacro + macro: Macro def __eq__(self, other: object) -> bool: if not isinstance(other, MacroCandidate): @@ -403,12 +398,7 @@ def __eq__(self, other: object) -> bool: return NotImplemented equal = self.specificity == other.specificity and self.locality == other.locality if equal: - raise_compiler_error( - "Found two materializations with the name {} (packages {} and " - "{}). 
dbt cannot resolve this ambiguity".format( - self.macro.name, self.macro.package_name, other.macro.package_name - ) - ) + raise DuplicateMaterializationName(self.macro, other) return equal @@ -430,16 +420,14 @@ def __lt__(self, other: object) -> bool: class CandidateList(List[M]): - def last(self) -> Optional[ParsedMacro]: + def last(self) -> Optional[Macro]: if not self: return None self.sort() return self[-1].macro -def _get_locality( - macro: ParsedMacro, root_project_name: str, internal_packages: Set[str] -) -> Locality: +def _get_locality(macro: Macro, root_project_name: str, internal_packages: Set[str]) -> Locality: if macro.package_name == root_project_name: return Locality.Root elif macro.package_name in internal_packages: @@ -465,16 +453,16 @@ class Disabled(Generic[D]): target: D -MaybeMetricNode = Optional[Union[ParsedMetric, Disabled[ParsedMetric]]] +MaybeMetricNode = Optional[Union[Metric, Disabled[Metric]]] -MaybeDocumentation = Optional[ParsedDocumentation] +MaybeDocumentation = Optional[Documentation] MaybeParsedSource = Optional[ Union[ - ParsedSourceDefinition, - Disabled[ParsedSourceDefinition], + SourceDefinition, + Disabled[SourceDefinition], ] ] @@ -514,7 +502,7 @@ def __init__(self): def find_macro_by_name( self, name: str, root_project_name: str, package: Optional[str] - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: """Find a macro in the graph by its name and package name, or None for any package. The root project name is used to determine priority: - locally defined macros come first @@ -537,7 +525,7 @@ def filter(candidate: MacroCandidate) -> bool: def find_generate_macro_by_name( self, component: str, root_project_name: str - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: """ The `generate_X_name` macros are similar to regular ones, but ignore imported packages. 
@@ -606,11 +594,11 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin): # is added it must all be added in the __reduce_ex__ method in the # args tuple in the right position. nodes: MutableMapping[str, ManifestNode] = field(default_factory=dict) - sources: MutableMapping[str, ParsedSourceDefinition] = field(default_factory=dict) - macros: MutableMapping[str, ParsedMacro] = field(default_factory=dict) - docs: MutableMapping[str, ParsedDocumentation] = field(default_factory=dict) - exposures: MutableMapping[str, ParsedExposure] = field(default_factory=dict) - metrics: MutableMapping[str, ParsedMetric] = field(default_factory=dict) + sources: MutableMapping[str, SourceDefinition] = field(default_factory=dict) + macros: MutableMapping[str, Macro] = field(default_factory=dict) + docs: MutableMapping[str, Documentation] = field(default_factory=dict) + exposures: MutableMapping[str, Exposure] = field(default_factory=dict) + metrics: MutableMapping[str, Metric] = field(default_factory=dict) selectors: MutableMapping[str, Any] = field(default_factory=dict) files: MutableMapping[str, AnySourceFile] = field(default_factory=dict) metadata: ManifestMetadata = field(default_factory=ManifestMetadata) @@ -658,7 +646,7 @@ def __post_deserialize__(cls, obj): obj._lock = flags.MP_CONTEXT.Lock() return obj - def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiledNode: + def sync_update_node(self, new_node: ManifestNode) -> ManifestNode: """update the node with a lock. 
The only time we should want to lock is when compiling an ephemeral ancestor of a node at runtime, because multiple threads could be just-in-time compiling the same ephemeral @@ -671,21 +659,21 @@ def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiled with self._lock: existing = self.nodes[new_node.unique_id] if getattr(existing, "compiled", False): - # already compiled -> must be a NonSourceCompiledNode - return cast(NonSourceCompiledNode, existing) + # already compiled + return existing _update_into(self.nodes, new_node) return new_node - def update_exposure(self, new_exposure: ParsedExposure): + def update_exposure(self, new_exposure: Exposure): _update_into(self.exposures, new_exposure) - def update_metric(self, new_metric: ParsedMetric): + def update_metric(self, new_metric: Metric): _update_into(self.metrics, new_metric) def update_node(self, new_node: ManifestNode): _update_into(self.nodes, new_node) - def update_source(self, new_source: ParsedSourceDefinition): + def update_source(self, new_source: SourceDefinition): _update_into(self.sources, new_source) def build_flat_graph(self): @@ -738,7 +726,7 @@ def _materialization_candidates_for( def find_materialization_macro_by_name( self, project_name: str, materialization_name: str, adapter_type: str - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: candidates: CandidateList = CandidateList( chain.from_iterable( self._materialization_candidates_for( @@ -943,8 +931,8 @@ def resolve_source( search_name = f"{target_source_name}.{target_table_name}" candidates = _search_packages(current_project, node_package) - source: Optional[ParsedSourceDefinition] = None - disabled: Optional[List[ParsedSourceDefinition]] = None + source: Optional[SourceDefinition] = None + disabled: Optional[List[SourceDefinition]] = None for pkg in candidates: source = self.source_lookup.find(search_name, pkg, self) @@ -968,8 +956,8 @@ def resolve_metric( node_package: str, ) -> MaybeMetricNode: - metric: 
Optional[ParsedMetric] = None - disabled: Optional[List[ParsedMetric]] = None + metric: Optional[Metric] = None + disabled: Optional[List[Metric]] = None candidates = _search_packages(current_project, node_package, target_metric_package) for pkg in candidates: @@ -992,7 +980,7 @@ def resolve_doc( package: Optional[str], current_project: str, node_package: str, - ) -> Optional[ParsedDocumentation]: + ) -> Optional[Documentation]: """Resolve the given documentation. This follows the same algorithm as resolve_ref except the is_enabled checks are unnecessary as docs are always enabled. @@ -1011,6 +999,7 @@ def merge_from_artifact( adapter, other: "WritableManifest", selected: AbstractSet[UniqueID], + favor_state: bool = False, ) -> None: """Given the selected unique IDs and a writable manifest, update this manifest by replacing any unselected nodes with their counterpart. @@ -1025,7 +1014,10 @@ def merge_from_artifact( node.resource_type in refables and not node.is_ephemeral and unique_id not in selected - and not adapter.get_relation(current.database, current.schema, current.identifier) + and ( + not adapter.get_relation(current.database, current.schema, current.identifier) + or favor_state + ) ): merged.add(unique_id) self.nodes[unique_id] = node.replace(deferred=True) @@ -1040,29 +1032,10 @@ def merge_from_artifact( # Methods that were formerly in ParseResult - def add_macro(self, source_file: SourceFile, macro: ParsedMacro): + def add_macro(self, source_file: SourceFile, macro: Macro): if macro.unique_id in self.macros: # detect that the macro exists and emit an error - other_path = self.macros[macro.unique_id].original_file_path - # subtract 2 for the "Compilation Error" indent - # note that the line wrap eats newlines, so if you want newlines, - # this is the result :( - msg = line_wrap_message( - f"""\ - dbt found two macros named "{macro.name}" in the project - "{macro.package_name}". 
- - - To fix this error, rename or remove one of the following - macros: - - - {macro.original_file_path} - - - {other_path} - """, - subtract=2, - ) - raise_compiler_error(msg) + raise DuplicateMacroInPackage(macro=macro, macro_mapping=self.macros) self.macros[macro.unique_id] = macro source_file.macros.append(macro.unique_id) @@ -1082,30 +1055,30 @@ def add_source(self, source_file: SchemaSourceFile, source: UnpatchedSourceDefin self.sources[source.unique_id] = source # type: ignore source_file.sources.append(source.unique_id) - def add_node_nofile(self, node: ManifestNodes): + def add_node_nofile(self, node: ManifestNode): # nodes can't be overwritten! _check_duplicates(node, self.nodes) self.nodes[node.unique_id] = node - def add_node(self, source_file: AnySourceFile, node: ManifestNodes, test_from=None): + def add_node(self, source_file: AnySourceFile, node: ManifestNode, test_from=None): self.add_node_nofile(node) if isinstance(source_file, SchemaSourceFile): - if isinstance(node, ParsedGenericTestNode): + if isinstance(node, GenericTestNode): assert test_from source_file.add_test(node.unique_id, test_from) - if isinstance(node, ParsedMetric): + if isinstance(node, Metric): source_file.metrics.append(node.unique_id) - if isinstance(node, ParsedExposure): + if isinstance(node, Exposure): source_file.exposures.append(node.unique_id) else: source_file.nodes.append(node.unique_id) - def add_exposure(self, source_file: SchemaSourceFile, exposure: ParsedExposure): + def add_exposure(self, source_file: SchemaSourceFile, exposure: Exposure): _check_duplicates(exposure, self.exposures) self.exposures[exposure.unique_id] = exposure source_file.exposures.append(exposure.unique_id) - def add_metric(self, source_file: SchemaSourceFile, metric: ParsedMetric): + def add_metric(self, source_file: SchemaSourceFile, metric: Metric): _check_duplicates(metric, self.metrics) self.metrics[metric.unique_id] = metric source_file.metrics.append(metric.unique_id) @@ -1117,20 +1090,20 
@@ def add_disabled_nofile(self, node: GraphMemberNode): else: self.disabled[node.unique_id] = [node] - def add_disabled(self, source_file: AnySourceFile, node: CompileResultNode, test_from=None): + def add_disabled(self, source_file: AnySourceFile, node: ResultNode, test_from=None): self.add_disabled_nofile(node) if isinstance(source_file, SchemaSourceFile): - if isinstance(node, ParsedGenericTestNode): + if isinstance(node, GenericTestNode): assert test_from source_file.add_test(node.unique_id, test_from) - if isinstance(node, ParsedMetric): + if isinstance(node, Metric): source_file.metrics.append(node.unique_id) - if isinstance(node, ParsedExposure): + if isinstance(node, Exposure): source_file.exposures.append(node.unique_id) else: source_file.nodes.append(node.unique_id) - def add_doc(self, source_file: SourceFile, doc: ParsedDocumentation): + def add_doc(self, source_file: SourceFile, doc: Documentation): _check_duplicates(doc, self.docs) self.docs[doc.unique_id] = doc source_file.docs.append(doc.unique_id) @@ -1183,32 +1156,32 @@ def __init__(self, macros): @dataclass -@schema_version("manifest", 7) +@schema_version("manifest", 8) class WritableManifest(ArtifactMixin): nodes: Mapping[UniqueID, ManifestNode] = field( metadata=dict(description=("The nodes defined in the dbt project and its dependencies")) ) - sources: Mapping[UniqueID, ParsedSourceDefinition] = field( + sources: Mapping[UniqueID, SourceDefinition] = field( metadata=dict(description=("The sources defined in the dbt project and its dependencies")) ) - macros: Mapping[UniqueID, ParsedMacro] = field( + macros: Mapping[UniqueID, Macro] = field( metadata=dict(description=("The macros defined in the dbt project and its dependencies")) ) - docs: Mapping[UniqueID, ParsedDocumentation] = field( + docs: Mapping[UniqueID, Documentation] = field( metadata=dict(description=("The docs defined in the dbt project and its dependencies")) ) - exposures: Mapping[UniqueID, ParsedExposure] = field( + exposures: 
Mapping[UniqueID, Exposure] = field( metadata=dict( description=("The exposures defined in the dbt project and its dependencies") ) ) - metrics: Mapping[UniqueID, ParsedMetric] = field( + metrics: Mapping[UniqueID, Metric] = field( metadata=dict(description=("The metrics defined in the dbt project and its dependencies")) ) selectors: Mapping[UniqueID, Any] = field( metadata=dict(description=("The selectors defined in selectors.yml")) ) - disabled: Optional[Mapping[UniqueID, List[CompileResultNode]]] = field( + disabled: Optional[Mapping[UniqueID, List[ResultNode]]] = field( metadata=dict(description="A mapping of the disabled nodes in the target") ) parent_map: Optional[NodeEdgeMap] = field( @@ -1229,7 +1202,7 @@ class WritableManifest(ArtifactMixin): @classmethod def compatible_previous_versions(self): - return [("manifest", 4), ("manifest", 5), ("manifest", 6)] + return [("manifest", 4), ("manifest", 5), ("manifest", 6), ("manifest", 7)] def __post_serialize__(self, dct): for unique_id, node in dct["nodes"].items(): @@ -1238,9 +1211,9 @@ def __post_serialize__(self, dct): return dct -def _check_duplicates(value: HasUniqueID, src: Mapping[str, HasUniqueID]): +def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]): if value.unique_id in src: - raise_duplicate_resource_name(value, src[value.unique_id]) + raise DuplicateResourceName(value, src[value.unique_id]) K_T = TypeVar("K_T") diff --git a/core/dbt/contracts/graph/metrics.py b/core/dbt/contracts/graph/metrics.py index 20222b4a32b..b895aa5e2f5 100644 --- a/core/dbt/contracts/graph/metrics.py +++ b/core/dbt/contracts/graph/metrics.py @@ -12,7 +12,7 @@ def __str__(self): class ResolvedMetricReference(MetricReference): """ - Simple proxy over a ParsedMetric which delegates property + Simple proxy over a Metric which delegates property lookups to the underlying node. Also adds helper functions for working with metrics (ie. 
__str__ and templating functions) """ diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/nodes.py similarity index 65% rename from core/dbt/contracts/graph/parsed.py rename to core/dbt/contracts/graph/nodes.py index 860f3fdf662..033318a34c1 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/nodes.py @@ -2,7 +2,6 @@ import time from dataclasses import dataclass, field from mashumaro.types import SerializableType -from pathlib import Path from typing import ( Optional, Union, @@ -12,19 +11,15 @@ Sequence, Tuple, Iterator, - TypeVar, ) from dbt.dataclass_schema import dbtClassMixin, ExtensibleDbtClassMixin from dbt.clients.system import write_file -from dbt.contracts.files import FileHash, MAXIMUM_SEED_SIZE_NAME +from dbt.contracts.files import FileHash from dbt.contracts.graph.unparsed import ( - UnparsedNode, - UnparsedDocumentation, Quoting, Docs, - UnparsedBaseNode, FreshnessThreshold, ExternalTable, HasYamlMetadata, @@ -41,7 +36,14 @@ ) from dbt.contracts.util import Replaceable, AdditionalPropertiesMixin from dbt.events.proto_types import NodeInfo -from dbt.exceptions import warn_or_error +from dbt.events.functions import warn_or_error +from dbt.events.types import ( + SeedIncreased, + SeedExceedsLimitSamePath, + SeedExceedsLimitAndPathChanged, + SeedExceedsLimitChecksumChanged, +) +from dbt.events.contextvars import set_contextvars from dbt import flags from dbt.node_types import ModelLanguage, NodeType @@ -57,50 +59,97 @@ SnapshotConfig, ) +# ===================================================================== +# This contains the classes for all of the nodes and node-like objects +# in the manifest. In the "nodes" dictionary of the manifest we find +# all of the objects in the ManifestNode union below. In addition the +# manifest contains "macros", "sources", "metrics", "exposures", "docs", +# and "disabled" dictionaries. +# +# The SeedNode is a ManifestNode, but can't be compiled because it has +# no SQL. 
+# +# All objects defined in this file should have BaseNode as a parent +# class. +# +# The two objects which do not show up in the DAG are Macro and +# Documentation. +# ===================================================================== + + +# ================================================== +# Various parent classes and node attribute classes +# ================================================== + @dataclass -class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable): +class BaseNode(dbtClassMixin, Replaceable): + """All nodes or node-like objects in this file should have this as a base class""" + name: str - description: str = "" - meta: Dict[str, Any] = field(default_factory=dict) - data_type: Optional[str] = None - quote: Optional[bool] = None - tags: List[str] = field(default_factory=list) - _extra: Dict[str, Any] = field(default_factory=dict) + resource_type: NodeType + package_name: str + path: str + original_file_path: str + unique_id: str + @property + def search_name(self): + return self.name -@dataclass -class HasFqn(dbtClassMixin, Replaceable): - fqn: List[str] + @property + def file_id(self): + return f"{self.package_name}://{self.original_file_path}" - def same_fqn(self, other: "HasFqn") -> bool: - return self.fqn == other.fqn + @property + def is_refable(self): + return self.resource_type in NodeType.refable() + @property + def should_store_failures(self): + return False -@dataclass -class HasUniqueID(dbtClassMixin, Replaceable): - unique_id: str + # will this node map to an object in the database? 
+ @property + def is_relational(self): + return self.resource_type in NodeType.refable() + + @property + def is_ephemeral(self): + return self.config.materialized == "ephemeral" + + @property + def is_ephemeral_model(self): + return self.is_refable and self.is_ephemeral + + def get_materialization(self): + return self.config.materialized @dataclass -class MacroDependsOn(dbtClassMixin, Replaceable): - macros: List[str] = field(default_factory=list) +class GraphNode(BaseNode): + """Nodes in the DAG. Macro and Documentation don't have fqn.""" - # 'in' on lists is O(n) so this is O(n^2) for # of macros - def add_macro(self, value: str): - if value not in self.macros: - self.macros.append(value) + fqn: List[str] + + def same_fqn(self, other) -> bool: + return self.fqn == other.fqn @dataclass -class DependsOn(MacroDependsOn): - nodes: List[str] = field(default_factory=list) +class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable): + """Used in all ManifestNodes and SourceDefinition""" - def add_node(self, value: str): - if value not in self.nodes: - self.nodes.append(value) + name: str + description: str = "" + meta: Dict[str, Any] = field(default_factory=dict) + data_type: Optional[str] = None + quote: Optional[bool] = None + tags: List[str] = field(default_factory=list) + _extra: Dict[str, Any] = field(default_factory=dict) +# Metrics, exposures, @dataclass class HasRelationMetadata(dbtClassMixin, Replaceable): database: Optional[str] @@ -117,57 +166,29 @@ def __pre_deserialize__(cls, data): return data -class ParsedNodeMixins(dbtClassMixin): - resource_type: NodeType - depends_on: DependsOn - config: NodeConfig - - @property - def is_refable(self): - return self.resource_type in NodeType.refable() - - @property - def should_store_failures(self): - return self.resource_type == NodeType.Test and ( - self.config.store_failures - if self.config.store_failures is not None - else flags.STORE_FAILURES - ) - - # will this node map to an object in the 
database? - @property - def is_relational(self): - return self.resource_type in NodeType.refable() or self.should_store_failures +@dataclass +class MacroDependsOn(dbtClassMixin, Replaceable): + """Used only in the Macro class""" - @property - def is_ephemeral(self): - return self.config.materialized == "ephemeral" + macros: List[str] = field(default_factory=list) - @property - def is_ephemeral_model(self): - return self.is_refable and self.is_ephemeral + # 'in' on lists is O(n) so this is O(n^2) for # of macros + def add_macro(self, value: str): + if value not in self.macros: + self.macros.append(value) - @property - def depends_on_nodes(self): - return self.depends_on.nodes - def patch(self, patch: "ParsedNodePatch"): - """Given a ParsedNodePatch, add the new information to the node.""" - # explicitly pick out the parts to update so we don't inadvertently - # step on the model name or anything - # Note: config should already be updated - self.patch_path: Optional[str] = patch.file_id - # update created_at so process_docs will run in partial parsing - self.created_at = time.time() - self.description = patch.description - self.columns = patch.columns +@dataclass +class DependsOn(MacroDependsOn): + nodes: List[str] = field(default_factory=list) - def get_materialization(self): - return self.config.materialized + def add_node(self, value: str): + if value not in self.nodes: + self.nodes.append(value) @dataclass -class ParsedNodeMandatory(UnparsedNode, HasUniqueID, HasFqn, HasRelationMetadata, Replaceable): +class ParsedNodeMandatory(GraphNode, HasRelationMetadata, Replaceable): alias: str checksum: FileHash config: NodeConfig = field(default_factory=NodeConfig) @@ -177,6 +198,8 @@ def identifier(self): return self.alias +# This needs to be in all ManifestNodes and also in SourceDefinition, +# because of "source freshness" @dataclass class NodeInfoMixin: _event_status: Dict[str, Any] = field(default_factory=dict) @@ -196,25 +219,30 @@ def node_info(self): node_info_msg 
= NodeInfo(**node_info) return node_info_msg + def update_event_status(self, **kwargs): + for k, v in kwargs.items(): + self._event_status[k] = v + set_contextvars(node_info=self.node_info) + + def clear_event_status(self): + self._event_status = dict() + @dataclass -class ParsedNodeDefaults(NodeInfoMixin, ParsedNodeMandatory): +class ParsedNode(NodeInfoMixin, ParsedNodeMandatory, SerializableType): tags: List[str] = field(default_factory=list) - refs: List[List[str]] = field(default_factory=list) - sources: List[List[str]] = field(default_factory=list) - metrics: List[List[str]] = field(default_factory=list) - depends_on: DependsOn = field(default_factory=DependsOn) description: str = field(default="") columns: Dict[str, ColumnInfo] = field(default_factory=dict) meta: Dict[str, Any] = field(default_factory=dict) docs: Docs = field(default_factory=Docs) patch_path: Optional[str] = None - compiled_path: Optional[str] = None build_path: Optional[str] = None deferred: bool = False unrendered_config: Dict[str, Any] = field(default_factory=dict) created_at: float = field(default_factory=lambda: time.time()) config_call_dict: Dict[str, Any] = field(default_factory=dict) + relation_name: Optional[str] = None + raw_code: str = "" def write_node(self, target_path: str, subdirectory: str, payload: str): if os.path.basename(self.path) == os.path.basename(self.original_file_path): @@ -228,12 +256,6 @@ def write_node(self, target_path: str, subdirectory: str, payload: str): write_file(full_path, payload) return full_path - -T = TypeVar("T", bound="ParsedNode") - - -@dataclass -class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType): def _serialize(self): return self.to_dict() @@ -250,26 +272,26 @@ def _deserialize(cls, dct: Dict[str, int]): # between them. 
resource_type = dct["resource_type"] if resource_type == "model": - return ParsedModelNode.from_dict(dct) + return ModelNode.from_dict(dct) elif resource_type == "analysis": - return ParsedAnalysisNode.from_dict(dct) + return AnalysisNode.from_dict(dct) elif resource_type == "seed": - return ParsedSeedNode.from_dict(dct) + return SeedNode.from_dict(dct) elif resource_type == "rpc": - return ParsedRPCNode.from_dict(dct) + return RPCNode.from_dict(dct) elif resource_type == "sql": - return ParsedSqlNode.from_dict(dct) + return SqlNode.from_dict(dct) elif resource_type == "test": if "test_metadata" in dct: - return ParsedGenericTestNode.from_dict(dct) + return GenericTestNode.from_dict(dct) else: - return ParsedSingularTestNode.from_dict(dct) + return SingularTestNode.from_dict(dct) elif resource_type == "operation": - return ParsedHookNode.from_dict(dct) + return HookNode.from_dict(dct) elif resource_type == "seed": - return ParsedSeedNode.from_dict(dct) + return SeedNode.from_dict(dct) elif resource_type == "snapshot": - return ParsedSnapshotNode.from_dict(dct) + return SnapshotNode.from_dict(dct) else: return cls.from_dict(dct) @@ -285,10 +307,7 @@ def _persist_relation_docs(self) -> bool: return bool(self.config.persist_docs.get("relation")) return False - def same_body(self: T, other: T) -> bool: - return self.raw_code == other.raw_code - - def same_persisted_description(self: T, other: T) -> bool: + def same_persisted_description(self, other) -> bool: # the check on configs will handle the case where we have different # persist settings, so we only have to care about the cases where they # are the same.. @@ -305,7 +324,10 @@ def same_persisted_description(self: T, other: T) -> bool: return True - def same_database_representation(self, other: T) -> bool: + def same_body(self, other) -> bool: + return self.raw_code == other.raw_code + + def same_database_representation(self, other) -> bool: # compare the config representation, not the node's config value. 
This # compares the configured value, rather than the ultimate value (so # generate_*_name and unset values derived from the target are @@ -318,13 +340,24 @@ def same_database_representation(self, other: T) -> bool: return False return True - def same_config(self, old: T) -> bool: + def same_config(self, old) -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self: T, old: Optional[T]) -> bool: + def patch(self, patch: "ParsedNodePatch"): + """Given a ParsedNodePatch, add the new information to the node.""" + # explicitly pick out the parts to update so we don't inadvertently + # step on the model name or anything + # Note: config should already be updated + self.patch_path: Optional[str] = patch.file_id + # update created_at so process_docs will run in partial parsing + self.created_at = time.time() + self.description = patch.description + self.columns = patch.columns + + def same_contents(self, old) -> bool: if old is None: return False @@ -339,102 +372,198 @@ def same_contents(self: T, old: Optional[T]) -> bool: @dataclass -class ParsedAnalysisNode(ParsedNode): +class InjectedCTE(dbtClassMixin, Replaceable): + """Used in CompiledNodes as part of ephemeral model processing""" + + id: str + sql: str + + +@dataclass +class CompiledNode(ParsedNode): + """Contains attributes necessary for SQL files and nodes with refs, sources, etc, + so all ManifestNodes except SeedNode.""" + + language: str = "sql" + refs: List[List[str]] = field(default_factory=list) + sources: List[List[str]] = field(default_factory=list) + metrics: List[List[str]] = field(default_factory=list) + depends_on: DependsOn = field(default_factory=DependsOn) + compiled_path: Optional[str] = None + compiled: bool = False + compiled_code: Optional[str] = None + extra_ctes_injected: bool = False + extra_ctes: List[InjectedCTE] = field(default_factory=list) + _pre_injected_sql: Optional[str] = None + + @property + def empty(self): + return not 
self.raw_code.strip() + + def set_cte(self, cte_id: str, sql: str): + """This is the equivalent of what self.extra_ctes[cte_id] = sql would + do if extra_ctes were an OrderedDict + """ + for cte in self.extra_ctes: + if cte.id == cte_id: + cte.sql = sql + break + else: + self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) + + def __post_serialize__(self, dct): + dct = super().__post_serialize__(dct) + if "_pre_injected_sql" in dct: + del dct["_pre_injected_sql"] + # Remove compiled attributes + if "compiled" in dct and dct["compiled"] is False: + del dct["compiled"] + del dct["extra_ctes_injected"] + del dct["extra_ctes"] + # "omit_none" means these might not be in the dictionary + if "compiled_code" in dct: + del dct["compiled_code"] + return dct + + @property + def depends_on_nodes(self): + return self.depends_on.nodes + + @property + def depends_on_macros(self): + return self.depends_on.macros + + +# ==================================== +# CompiledNode subclasses +# ==================================== + + +@dataclass +class AnalysisNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]}) @dataclass -class ParsedHookNode(ParsedNode): +class HookNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]}) index: Optional[int] = None @dataclass -class ParsedModelNode(ParsedNode): +class ModelNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]}) # TODO: rm? @dataclass -class ParsedRPCNode(ParsedNode): +class RPCNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]}) @dataclass -class ParsedSqlNode(ParsedNode): +class SqlNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]}) -def same_seeds(first: ParsedNode, second: ParsedNode) -> bool: - # for seeds, we check the hashes. If the hashes are different types, - # no match. 
If the hashes are both the same 'path', log a warning and - # assume they are the same - # if the current checksum is a path, we want to log a warning. - result = first.checksum == second.checksum - - if first.checksum.name == "path": - msg: str - if second.checksum.name != "path": - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was " - f"<={MAXIMUM_SEED_SIZE_NAME}, so it has changed" - ) - elif result: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size at the same path, dbt " - f"cannot tell if it has changed: assuming they are the same" - ) - elif not result: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was in " - f"a different location, assuming it has changed" - ) - else: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file had a " - f"checksum type of {second.checksum.name}, so it has changed" - ) - warn_or_error(msg, node=first) - - return result +# ==================================== +# Seed node +# ==================================== @dataclass -class ParsedSeedNode(ParsedNode): - # keep this in sync with CompiledSeedNode! +class SeedNode(ParsedNode): # No SQLDefaults! resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) config: SeedConfig = field(default_factory=SeedConfig) + # seeds need the root_path because the contents are not loaded initially + # and we need the root_path to load the seed later + root_path: Optional[str] = None + + def same_seeds(self, other: "SeedNode") -> bool: + # for seeds, we check the hashes. If the hashes are different types, + # no match. If the hashes are both the same 'path', log a warning and + # assume they are the same + # if the current checksum is a path, we want to log a warning. 
+ result = self.checksum == other.checksum + + if self.checksum.name == "path": + msg: str + if other.checksum.name != "path": + warn_or_error( + SeedIncreased(package_name=self.package_name, name=self.name), node=self + ) + elif result: + warn_or_error( + SeedExceedsLimitSamePath(package_name=self.package_name, name=self.name), + node=self, + ) + elif not result: + warn_or_error( + SeedExceedsLimitAndPathChanged(package_name=self.package_name, name=self.name), + node=self, + ) + else: + warn_or_error( + SeedExceedsLimitChecksumChanged( + package_name=self.package_name, + name=self.name, + checksum_name=other.checksum.name, + ), + node=self, + ) + + return result @property def empty(self): """Seeds are never empty""" return False - def same_body(self: T, other: T) -> bool: - return same_seeds(self, other) + def same_body(self, other) -> bool: + return self.same_seeds(other) + @property + def depends_on_nodes(self): + return [] -@dataclass -class TestMetadata(dbtClassMixin, Replaceable): - name: str - # kwargs are the args that are left in the test builder after - # removing configs. They are set from the test builder when - # the test node is created. 
- kwargs: Dict[str, Any] = field(default_factory=dict) - namespace: Optional[str] = None + @property + def depends_on_macros(self): + return [] + @property + def extra_ctes(self): + return [] -@dataclass -class HasTestMetadata(dbtClassMixin): - test_metadata: TestMetadata + @property + def extra_ctes_injected(self): + return False + + @property + def language(self): + return "sql" + + +# ==================================== +# Singular Test node +# ==================================== + + +class TestShouldStoreFailures: + @property + def should_store_failures(self): + if self.config.store_failures: + return self.config.store_failures + return flags.STORE_FAILURES + + @property + def is_relational(self): + if self.should_store_failures: + return True + return False @dataclass -class ParsedSingularTestNode(ParsedNode): +class SingularTestNode(TestShouldStoreFailures, CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) # Was not able to make mypy happy and keep the code working. We need to # refactor the various configs. @@ -445,9 +574,30 @@ def test_node_type(self): return "singular" +# ==================================== +# Generic Test node +# ==================================== + + +@dataclass +class TestMetadata(dbtClassMixin, Replaceable): + name: str + # kwargs are the args that are left in the test builder after + # removing configs. They are set from the test builder when + # the test node is created. + kwargs: Dict[str, Any] = field(default_factory=dict) + namespace: Optional[str] = None + + +# This has to be separated out because it has no default and so +# has to be included as a superclass, not an attribute @dataclass -class ParsedGenericTestNode(ParsedNode, HasTestMetadata): - # keep this in sync with CompiledGenericTestNode! 
+class HasTestMetadata(dbtClassMixin): + test_metadata: TestMetadata + + +@dataclass +class GenericTestNode(TestShouldStoreFailures, CompiledNode, HasTestMetadata): resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) column_name: Optional[str] = None file_key_name: Optional[str] = None @@ -466,54 +616,39 @@ def test_node_type(self): return "generic" +# ==================================== +# Snapshot node +# ==================================== + + @dataclass -class IntermediateSnapshotNode(ParsedNode): +class IntermediateSnapshotNode(CompiledNode): # at an intermediate stage in parsing, where we've built something better # than an unparsed node for rendering in parse mode, it's pretty possible # that we won't have critical snapshot-related information that is only # defined in config blocks. To fix that, we have an intermediate type that # uses a regular node config, which the snapshot parser will then convert - # into a full ParsedSnapshotNode after rendering. + # into a full ParsedSnapshotNode after rendering. Note: it currently does + # not work to set snapshot config in schema files because of the validation. resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) config: EmptySnapshotConfig = field(default_factory=EmptySnapshotConfig) @dataclass -class ParsedSnapshotNode(ParsedNode): +class SnapshotNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) config: SnapshotConfig -@dataclass -class ParsedPatch(HasYamlMetadata, Replaceable): - name: str - description: str - meta: Dict[str, Any] - docs: Docs - config: Dict[str, Any] +# ==================================== +# Macro +# ==================================== -# The parsed node update is only the 'patch', not the test. The test became a -# regular parsed node. Note that description and columns must be present, but -# may be empty. 
@dataclass -class ParsedNodePatch(ParsedPatch): - columns: Dict[str, ColumnInfo] - - -@dataclass -class ParsedMacroPatch(ParsedPatch): - arguments: List[MacroArgument] = field(default_factory=list) - - -@dataclass -class ParsedMacro(UnparsedBaseNode, HasUniqueID): - name: str +class Macro(BaseNode): macro_sql: str resource_type: NodeType = field(metadata={"restrict": [NodeType.Macro]}) - # TODO: can macros even have tags? - tags: List[str] = field(default_factory=list) - # TODO: is this ever populated? depends_on: MacroDependsOn = field(default_factory=MacroDependsOn) description: str = "" meta: Dict[str, Any] = field(default_factory=dict) @@ -523,7 +658,7 @@ class ParsedMacro(UnparsedBaseNode, HasUniqueID): created_at: float = field(default_factory=lambda: time.time()) supported_languages: Optional[List[ModelLanguage]] = None - def patch(self, patch: ParsedMacroPatch): + def patch(self, patch: "ParsedMacroPatch"): self.patch_path: Optional[str] = patch.file_id self.description = patch.description self.created_at = time.time() @@ -531,24 +666,33 @@ def patch(self, patch: ParsedMacroPatch): self.docs = patch.docs self.arguments = patch.arguments - def same_contents(self, other: Optional["ParsedMacro"]) -> bool: + def same_contents(self, other: Optional["Macro"]) -> bool: if other is None: return False # the only thing that makes one macro different from another with the # same name/package is its content return self.macro_sql == other.macro_sql + @property + def depends_on_macros(self): + return self.depends_on.macros + + +# ==================================== +# Documentation node +# ==================================== + @dataclass -class ParsedDocumentation(UnparsedDocumentation, HasUniqueID): - name: str +class Documentation(BaseNode): block_contents: str + resource_type: NodeType = field(metadata={"restrict": [NodeType.Documentation]}) @property def search_name(self): return self.name - def same_contents(self, other: Optional["ParsedDocumentation"]) -> bool: + 
def same_contents(self, other: Optional["Documentation"]) -> bool: if other is None: return False # the only thing that makes one doc different from another with the @@ -556,6 +700,11 @@ def same_contents(self, other: Optional["ParsedDocumentation"]) -> bool: return self.block_contents == other.block_contents +# ==================================== +# Source node +# ==================================== + + def normalize_test(testdef: TestDef) -> Dict[str, Any]: if isinstance(testdef, str): return {testdef: {}} @@ -564,11 +713,12 @@ def normalize_test(testdef: TestDef) -> Dict[str, Any]: @dataclass -class UnpatchedSourceDefinition(UnparsedBaseNode, HasUniqueID, HasFqn): +class UnpatchedSourceDefinition(BaseNode): source: UnparsedSourceDefinition table: UnparsedSourceTableDefinition + fqn: List[str] resource_type: NodeType = field(metadata={"restrict": [NodeType.Source]}) - patch_path: Optional[Path] = None + patch_path: Optional[str] = None def get_full_source_name(self): return f"{self.source.name}_{self.table.name}" @@ -576,10 +726,6 @@ def get_full_source_name(self): def get_source_representation(self): return f'source("{self.source.name}", "{self.table.name}")' - @property - def name(self) -> str: - return self.get_full_source_name() - @property def quote_columns(self) -> Optional[bool]: result = None @@ -611,13 +757,7 @@ def tests(self) -> List[TestDef]: @dataclass -class ParsedSourceMandatory( - UnparsedBaseNode, - HasUniqueID, - HasRelationMetadata, - HasFqn, -): - name: str +class ParsedSourceMandatory(GraphNode, HasRelationMetadata): source_name: str source_description: str loader: str @@ -626,7 +766,7 @@ class ParsedSourceMandatory( @dataclass -class ParsedSourceDefinition(NodeInfoMixin, ParsedSourceMandatory): +class SourceDefinition(NodeInfoMixin, ParsedSourceMandatory): quoting: Quoting = field(default_factory=Quoting) loaded_at_field: Optional[str] = None freshness: Optional[FreshnessThreshold] = None @@ -637,7 +777,7 @@ class 
ParsedSourceDefinition(NodeInfoMixin, ParsedSourceMandatory): source_meta: Dict[str, Any] = field(default_factory=dict) tags: List[str] = field(default_factory=list) config: SourceConfig = field(default_factory=SourceConfig) - patch_path: Optional[Path] = None + patch_path: Optional[str] = None unrendered_config: Dict[str, Any] = field(default_factory=dict) relation_name: Optional[str] = None created_at: float = field(default_factory=lambda: time.time()) @@ -647,7 +787,7 @@ def __post_serialize__(self, dct): del dct["_event_status"] return dct - def same_database_representation(self, other: "ParsedSourceDefinition") -> bool: + def same_database_representation(self, other: "SourceDefinition") -> bool: return ( self.database == other.database and self.schema == other.schema @@ -655,26 +795,26 @@ def same_database_representation(self, other: "ParsedSourceDefinition") -> bool: and True ) - def same_quoting(self, other: "ParsedSourceDefinition") -> bool: + def same_quoting(self, other: "SourceDefinition") -> bool: return self.quoting == other.quoting - def same_freshness(self, other: "ParsedSourceDefinition") -> bool: + def same_freshness(self, other: "SourceDefinition") -> bool: return ( self.freshness == other.freshness and self.loaded_at_field == other.loaded_at_field and True ) - def same_external(self, other: "ParsedSourceDefinition") -> bool: + def same_external(self, other: "SourceDefinition") -> bool: return self.external == other.external - def same_config(self, old: "ParsedSourceDefinition") -> bool: + def same_config(self, old: "SourceDefinition") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedSourceDefinition"]) -> bool: + def same_contents(self, old: Optional["SourceDefinition"]) -> bool: # existing when it didn't before is a change! 
if old is None: return True @@ -740,12 +880,16 @@ def search_name(self): return f"{self.source_name}.{self.name}" +# ==================================== +# Exposure node +# ==================================== + + @dataclass -class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn): - name: str +class Exposure(GraphNode): type: ExposureType owner: ExposureOwner - resource_type: NodeType = NodeType.Exposure + resource_type: NodeType = field(metadata={"restrict": [NodeType.Exposure]}) description: str = "" label: Optional[str] = None maturity: Optional[MaturityType] = None @@ -757,6 +901,7 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn): depends_on: DependsOn = field(default_factory=DependsOn) refs: List[List[str]] = field(default_factory=list) sources: List[List[str]] = field(default_factory=list) + metrics: List[List[str]] = field(default_factory=list) created_at: float = field(default_factory=lambda: time.time()) @property @@ -767,34 +912,34 @@ def depends_on_nodes(self): def search_name(self): return self.name - def same_depends_on(self, old: "ParsedExposure") -> bool: + def same_depends_on(self, old: "Exposure") -> bool: return set(self.depends_on.nodes) == set(old.depends_on.nodes) - def same_description(self, old: "ParsedExposure") -> bool: + def same_description(self, old: "Exposure") -> bool: return self.description == old.description - def same_label(self, old: "ParsedExposure") -> bool: + def same_label(self, old: "Exposure") -> bool: return self.label == old.label - def same_maturity(self, old: "ParsedExposure") -> bool: + def same_maturity(self, old: "Exposure") -> bool: return self.maturity == old.maturity - def same_owner(self, old: "ParsedExposure") -> bool: + def same_owner(self, old: "Exposure") -> bool: return self.owner == old.owner - def same_exposure_type(self, old: "ParsedExposure") -> bool: + def same_exposure_type(self, old: "Exposure") -> bool: return self.type == old.type - def same_url(self, old: "ParsedExposure") -> 
bool: + def same_url(self, old: "Exposure") -> bool: return self.url == old.url - def same_config(self, old: "ParsedExposure") -> bool: + def same_config(self, old: "Exposure") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedExposure"]) -> bool: + def same_contents(self, old: Optional["Exposure"]) -> bool: # existing when it didn't before is a change! # metadata/tags changes are not "changes" if old is None: @@ -814,6 +959,11 @@ def same_contents(self, old: Optional["ParsedExposure"]) -> bool: ) +# ==================================== +# Metric node +# ==================================== + + @dataclass class MetricReference(dbtClassMixin, Replaceable): sql: Optional[Union[str, int]] @@ -821,7 +971,7 @@ class MetricReference(dbtClassMixin, Replaceable): @dataclass -class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn): +class Metric(GraphNode): name: str description: str label: str @@ -831,10 +981,10 @@ class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn): filters: List[MetricFilter] time_grains: List[str] dimensions: List[str] + resource_type: NodeType = field(metadata={"restrict": [NodeType.Metric]}) window: Optional[MetricTime] = None model: Optional[str] = None model_unique_id: Optional[str] = None - resource_type: NodeType = NodeType.Metric meta: Dict[str, Any] = field(default_factory=dict) tags: List[str] = field(default_factory=list) config: MetricConfig = field(default_factory=MetricConfig) @@ -853,43 +1003,43 @@ def depends_on_nodes(self): def search_name(self): return self.name - def same_model(self, old: "ParsedMetric") -> bool: + def same_model(self, old: "Metric") -> bool: return self.model == old.model - def same_window(self, old: "ParsedMetric") -> bool: + def same_window(self, old: "Metric") -> bool: return self.window == old.window - def same_dimensions(self, old: "ParsedMetric") -> bool: + def same_dimensions(self, old: "Metric") -> bool: return 
self.dimensions == old.dimensions - def same_filters(self, old: "ParsedMetric") -> bool: + def same_filters(self, old: "Metric") -> bool: return self.filters == old.filters - def same_description(self, old: "ParsedMetric") -> bool: + def same_description(self, old: "Metric") -> bool: return self.description == old.description - def same_label(self, old: "ParsedMetric") -> bool: + def same_label(self, old: "Metric") -> bool: return self.label == old.label - def same_calculation_method(self, old: "ParsedMetric") -> bool: + def same_calculation_method(self, old: "Metric") -> bool: return self.calculation_method == old.calculation_method - def same_expression(self, old: "ParsedMetric") -> bool: + def same_expression(self, old: "Metric") -> bool: return self.expression == old.expression - def same_timestamp(self, old: "ParsedMetric") -> bool: + def same_timestamp(self, old: "Metric") -> bool: return self.timestamp == old.timestamp - def same_time_grains(self, old: "ParsedMetric") -> bool: + def same_time_grains(self, old: "Metric") -> bool: return self.time_grains == old.time_grains - def same_config(self, old: "ParsedMetric") -> bool: + def same_config(self, old: "Metric") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedMetric"]) -> bool: + def same_contents(self, old: Optional["Metric"]) -> bool: # existing when it didn't before is a change! 
# metadata/tags changes are not "changes" if old is None: @@ -911,24 +1061,77 @@ def same_contents(self, old: Optional["ParsedMetric"]) -> bool: ) -ManifestNodes = Union[ - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, +# ==================================== +# Patches +# ==================================== + + +@dataclass +class ParsedPatch(HasYamlMetadata, Replaceable): + name: str + description: str + meta: Dict[str, Any] + docs: Docs + config: Dict[str, Any] + + +# The parsed node update is only the 'patch', not the test. The test became a +# regular parsed node. Note that description and columns must be present, but +# may be empty. +@dataclass +class ParsedNodePatch(ParsedPatch): + columns: Dict[str, ColumnInfo] + + +@dataclass +class ParsedMacroPatch(ParsedPatch): + arguments: List[MacroArgument] = field(default_factory=list) + + +# ==================================== +# Node unions/categories +# ==================================== + + +# ManifestNode without SeedNode, which doesn't have the +# SQL related attributes +ManifestSQLNode = Union[ + AnalysisNode, + SingularTestNode, + HookNode, + ModelNode, + RPCNode, + SqlNode, + GenericTestNode, + SnapshotNode, ] +# All SQL nodes plus SeedNode (csv files) +ManifestNode = Union[ + ManifestSQLNode, + SeedNode, +] + +ResultNode = Union[ + ManifestNode, + SourceDefinition, +] + +# All nodes that can be in the DAG +GraphMemberNode = Union[ + ResultNode, + Exposure, + Metric, +] + +# All "nodes" (or node-like objects) in this file +Resource = Union[ + GraphMemberNode, + Documentation, + Macro, +] -ParsedResource = Union[ - ParsedDocumentation, - ParsedMacro, - ParsedNode, - ParsedExposure, - ParsedMetric, - ParsedSourceDefinition, +TestNode = Union[ + SingularTestNode, + GenericTestNode, ] diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py 
index 662ec6f01ad..453dc883d7b 100644 --- a/core/dbt/contracts/graph/unparsed.py +++ b/core/dbt/contracts/graph/unparsed.py @@ -24,7 +24,6 @@ @dataclass class UnparsedBaseNode(dbtClassMixin, Replaceable): package_name: str - root_path: str path: str original_file_path: str @@ -364,7 +363,6 @@ def get_table_named(self, name: str) -> Optional[SourceTablePatch]: @dataclass class UnparsedDocumentation(dbtClassMixin, Replaceable): package_name: str - root_path: str path: str original_file_path: str diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index b56aeddaf17..2fd7434bd87 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -12,9 +12,7 @@ from typing import Optional, List, Dict, Union, Any from mashumaro.types import SerializableType -PIN_PACKAGE_URL = ( - "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" # noqa -) + DEFAULT_SEND_ANONYMOUS_USAGE_STATS = True @@ -57,6 +55,12 @@ class LocalPackage(Package): RawVersion = Union[str, float] +@dataclass +class TarballPackage(Package): + tarball: str + name: str + + @dataclass class GitPackage(Package): git: str @@ -84,7 +88,7 @@ def get_versions(self) -> List[str]: return [str(self.version)] -PackageSpec = Union[LocalPackage, GitPackage, RegistryPackage] +PackageSpec = Union[LocalPackage, TarballPackage, GitPackage, RegistryPackage] @dataclass @@ -218,7 +222,7 @@ class Project(HyphenatedDbtClassMixin, Replaceable): ), ) packages: List[PackageSpec] = field(default_factory=list) - query_comment: Optional[Union[QueryComment, NoValue, str]] = NoValue() + query_comment: Optional[Union[QueryComment, NoValue, str]] = field(default_factory=NoValue) @classmethod def validate(cls, data): @@ -253,7 +257,6 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract): static_parser: Optional[bool] = None indirect_selection: Optional[str] = None cache_selected_only: Optional[bool] = None - event_buffer_size: Optional[int] = None 
@dataclass diff --git a/core/dbt/contracts/relation.py b/core/dbt/contracts/relation.py index fbe18146bb4..e8cba2ad155 100644 --- a/core/dbt/contracts/relation.py +++ b/core/dbt/contracts/relation.py @@ -9,7 +9,7 @@ from dbt.dataclass_schema import dbtClassMixin, StrEnum from dbt.contracts.util import Replaceable -from dbt.exceptions import raise_dataclass_not_dict, CompilationException +from dbt.exceptions import CompilationException, DataclassNotDict from dbt.utils import deep_merge @@ -43,10 +43,10 @@ def __getitem__(self, key): raise KeyError(key) from None def __iter__(self): - raise_dataclass_not_dict(self) + raise DataclassNotDict(self) def __len__(self): - raise_dataclass_not_dict(self) + raise DataclassNotDict(self) def incorporate(self, **kwargs): value = self.to_dict(omit_none=True) diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index a3b7ce2b506..97c43396e33 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -1,6 +1,5 @@ -from dbt.contracts.graph.manifest import CompileResultNode from dbt.contracts.graph.unparsed import FreshnessThreshold -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition, ResultNode from dbt.contracts.util import ( BaseArtifactMetadata, ArtifactMixin, @@ -11,11 +10,9 @@ from dbt.exceptions import InternalException from dbt.events.functions import fire_event from dbt.events.types import TimingInfoCollected -from dbt.events.proto_types import RunResultMsg -from dbt.logger import ( - TimingProcessor, - JsonOnly, -) +from dbt.events.proto_types import RunResultMsg, TimingInfoMsg +from dbt.events.contextvars import get_node_info +from dbt.logger import TimingProcessor from dbt.utils import lowercase, cast_to_str, cast_to_int from dbt.dataclass_schema import dbtClassMixin, StrEnum @@ -48,7 +45,14 @@ def begin(self): def end(self): self.completed_at = datetime.utcnow() + def to_msg(self): + timsg = TimingInfoMsg( + 
name=self.name, started_at=self.started_at, completed_at=self.completed_at + ) + return timsg + +# This is a context manager class collect_timing_info: def __init__(self, name: str): self.timing_info = TimingInfo(name=name) @@ -59,8 +63,13 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): self.timing_info.end() - with JsonOnly(), TimingProcessor(self.timing_info): - fire_event(TimingInfoCollected()) + # Note: when legacy logger is removed, we can remove the following line + with TimingProcessor(self.timing_info): + fire_event( + TimingInfoCollected( + timing_info=self.timing_info.to_msg(), node_info=get_node_info() + ) + ) class RunningStatus(StrEnum): @@ -128,13 +137,14 @@ def to_msg(self): msg.thread = self.thread_id msg.execution_time = self.execution_time msg.num_failures = cast_to_int(self.failures) - # timing_info, adapter_response, message + msg.timing_info = [ti.to_msg() for ti in self.timing] + # adapter_response return msg @dataclass class NodeResult(BaseResult): - node: CompileResultNode + node: ResultNode @dataclass @@ -220,7 +230,9 @@ def from_execution_results( generated_at: datetime, args: Dict, ): - processed_results = [process_run_result(result) for result in results] + processed_results = [ + process_run_result(result) for result in results if isinstance(result, RunResult) + ] meta = RunResultsMetadata( dbt_schema_version=str(cls.dbt_schema_version), generated_at=generated_at, @@ -271,7 +283,7 @@ def from_success( @dataclass class SourceFreshnessResult(NodeResult): - node: ParsedSourceDefinition + node: SourceDefinition status: FreshnessStatus max_loaded_at: datetime snapshotted_at: datetime diff --git a/core/dbt/contracts/sql.py b/core/dbt/contracts/sql.py index a3e5b3d58db..b80304d2565 100644 --- a/core/dbt/contracts/sql.py +++ b/core/dbt/contracts/sql.py @@ -5,7 +5,7 @@ from dbt.dataclass_schema import dbtClassMixin -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import 
ResultNode from dbt.contracts.results import ( RunResult, RunResultsArtifact, @@ -32,7 +32,7 @@ class RemoteResult(VersionedSchema): class RemoteCompileResultMixin(RemoteResult): raw_code: str compiled_code: str - node: CompileResultNode + node: ResultNode timing: List[TimingInfo] diff --git a/core/dbt/contracts/util.py b/core/dbt/contracts/util.py index f0975fda10b..99f7a35c66d 100644 --- a/core/dbt/contracts/util.py +++ b/core/dbt/contracts/util.py @@ -237,16 +237,61 @@ def rename_sql_attr(node_content: dict) -> dict: return node_content +def upgrade_node_content(node_content): + rename_sql_attr(node_content) + if node_content["resource_type"] != "seed" and "root_path" in node_content: + del node_content["root_path"] + + +def upgrade_seed_content(node_content): + # Remove compilation related attributes + for attr_name in ( + "language", + "refs", + "sources", + "metrics", + "depends_on", + "compiled_path", + "compiled", + "compiled_code", + "extra_ctes_injected", + "extra_ctes", + "relation_name", + ): + if attr_name in node_content: + del node_content[attr_name] + + def upgrade_manifest_json(manifest: dict) -> dict: for node_content in manifest.get("nodes", {}).values(): - node_content = rename_sql_attr(node_content) + upgrade_node_content(node_content) + if node_content["resource_type"] == "seed": + upgrade_seed_content(node_content) for disabled in manifest.get("disabled", {}).values(): # There can be multiple disabled nodes for the same unique_id # so make sure all the nodes get the attr renamed - disabled = [rename_sql_attr(n) for n in disabled] + for node_content in disabled: + upgrade_node_content(node_content) + if node_content["resource_type"] == "seed": + upgrade_seed_content(node_content) for metric_content in manifest.get("metrics", {}).values(): # handle attr renames + value translation ("expression" -> "derived") metric_content = rename_metric_attr(metric_content) + if "root_path" in metric_content: + del metric_content["root_path"] + for 
exposure_content in manifest.get("exposures", {}).values(): + if "root_path" in exposure_content: + del exposure_content["root_path"] + for source_content in manifest.get("sources", {}).values(): + if "root_path" in source_content: + del source_content["root_path"] + for macro_content in manifest.get("macros", {}).values(): + if "root_path" in macro_content: + del macro_content["root_path"] + for doc_content in manifest.get("docs", {}).values(): + if "root_path" in doc_content: + del doc_content["root_path"] + doc_content["resource_type"] = "doc" return manifest @@ -291,7 +336,7 @@ def read_and_check_versions(cls, path: str): expected=str(cls.dbt_schema_version), found=previous_schema_version, ) - if get_manifest_schema_version(data) <= 6: + if get_manifest_schema_version(data) <= 7: data = upgrade_manifest_json(data) return cls.from_dict(data) # type: ignore diff --git a/core/dbt/deprecations.py b/core/dbt/deprecations.py index 223091dea60..f7cee59df5a 100644 --- a/core/dbt/deprecations.py +++ b/core/dbt/deprecations.py @@ -1,14 +1,14 @@ +import abc from typing import Optional, Set, List, Dict, ClassVar import dbt.exceptions -from dbt import ui import dbt.tracking class DBTDeprecation: _name: ClassVar[Optional[str]] = None - _description: ClassVar[Optional[str]] = None + _event: ClassVar[Optional[str]] = None @property def name(self) -> str: @@ -21,66 +21,50 @@ def track_deprecation_warn(self) -> None: dbt.tracking.track_deprecation_warn({"deprecation_name": self.name}) @property - def description(self) -> str: - if self._description is not None: - return self._description - raise NotImplementedError("description not implemented for {}".format(self)) + def event(self) -> abc.ABCMeta: + if self._event is not None: + module_path = dbt.events.types + class_name = self._event + + try: + return getattr(module_path, class_name) + except AttributeError: + msg = f"Event Class `{class_name}` is not defined in `{module_path}`" + raise NameError(msg) + raise 
NotImplementedError("event not implemented for {}".format(self._event)) def show(self, *args, **kwargs) -> None: if self.name not in active_deprecations: - desc = self.description.format(**kwargs) - msg = ui.line_wrap_message(desc, prefix="Deprecated functionality\n\n") - dbt.exceptions.warn_or_error(msg, log_fmt=ui.warning_tag("{}")) + event = self.event(**kwargs) + dbt.events.functions.warn_or_error(event) self.track_deprecation_warn() active_deprecations.add(self.name) class PackageRedirectDeprecation(DBTDeprecation): _name = "package-redirect" - _description = """\ - The `{old_name}` package is deprecated in favor of `{new_name}`. Please update - your `packages.yml` configuration to use `{new_name}` instead. - """ + _event = "PackageRedirectDeprecation" class PackageInstallPathDeprecation(DBTDeprecation): _name = "install-packages-path" - _description = """\ - The default package install path has changed from `dbt_modules` to `dbt_packages`. - Please update `clean-targets` in `dbt_project.yml` and check `.gitignore` as well. - Or, set `packages-install-path: dbt_modules` if you'd like to keep the current value. - """ + _event = "PackageInstallPathDeprecation" -class ConfigPathDeprecation(DBTDeprecation): - _description = """\ - The `{deprecated_path}` config has been renamed to `{exp_path}`. - Please update your `dbt_project.yml` configuration to reflect this change. - """ - - -class ConfigSourcePathDeprecation(ConfigPathDeprecation): +class ConfigSourcePathDeprecation(DBTDeprecation): _name = "project-config-source-paths" + _event = "ConfigSourcePathDeprecation" -class ConfigDataPathDeprecation(ConfigPathDeprecation): +class ConfigDataPathDeprecation(DBTDeprecation): _name = "project-config-data-paths" - - -_adapter_renamed_description = """\ -The adapter function `adapter.{old_name}` is deprecated and will be removed in -a future release of dbt. Please use `adapter.{new_name}` instead. 
- -Documentation for {new_name} can be found here: - - https://docs.getdbt.com/docs/adapter -""" + _event = "ConfigDataPathDeprecation" def renamed_method(old_name: str, new_name: str): class AdapterDeprecationWarning(DBTDeprecation): _name = "adapter:{}".format(old_name) - _description = _adapter_renamed_description.format(old_name=old_name, new_name=new_name) + _event = "AdapterDeprecationWarning" dep = AdapterDeprecationWarning() deprecations_list.append(dep) @@ -89,26 +73,12 @@ class AdapterDeprecationWarning(DBTDeprecation): class MetricAttributesRenamed(DBTDeprecation): _name = "metric-attr-renamed" - _description = """\ -dbt-core v1.3 renamed attributes for metrics: -\n 'sql' -> 'expression' -\n 'type' -> 'calculation_method' -\n 'type: expression' -> 'calculation_method: derived' -\nThe old metric parameter names will be fully deprecated in v1.4. -\nPlease remove them from the metric definition of metric '{metric_name}' -\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849 -""" + _event = "MetricAttributesRenamed" class ExposureNameDeprecation(DBTDeprecation): _name = "exposure-name" - _description = """\ - Starting in v1.3, the 'name' of an exposure should contain only letters, numbers, and underscores. - Exposures support a new property, 'label', which may contain spaces, capital letters, and special characters. - {exposure} does not follow this pattern. - Please update the 'name', and use the 'label' property for a human-friendly title. - This will raise an error in a future version of dbt-core. 
- """ + _event = "ExposureNameDeprecation" def warn(name, *args, **kwargs): @@ -125,12 +95,12 @@ def warn(name, *args, **kwargs): active_deprecations: Set[str] = set() deprecations_list: List[DBTDeprecation] = [ - ExposureNameDeprecation(), + PackageRedirectDeprecation(), + PackageInstallPathDeprecation(), ConfigSourcePathDeprecation(), ConfigDataPathDeprecation(), - PackageInstallPathDeprecation(), - PackageRedirectDeprecation(), MetricAttributesRenamed(), + ExposureNameDeprecation(), ] deprecations: Dict[str, DBTDeprecation] = {d.name: d for d in deprecations_list} diff --git a/core/dbt/deps/README.md b/core/dbt/deps/README.md index a00802cefbf..99c7fd6fb80 100644 --- a/core/dbt/deps/README.md +++ b/core/dbt/deps/README.md @@ -16,6 +16,8 @@ Defines the base classes of `PinnedPackage` and `UnpinnedPackage`. `downloads_directory` sets the directory packages will be downloaded to. +`_install` has retry logic if the download or untarring process hit exceptions (see `dbt.utils._connection_exception_retry`). + ## `git.py` Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined with git urls. @@ -28,8 +30,10 @@ Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined l Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined on the dbt Hub registry. -`install` has retry logic if the download or untarring process hit exceptions (see `dbt.utils._connection_exception_retry`). ## `resolver.py` Resolves the package definition into package objects to download. + +## `tarball.py` +Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined by a URL to a tarball hosted on an HTTP server. 
diff --git a/core/dbt/deps/base.py b/core/dbt/deps/base.py index 1557b0d7a35..f72878422aa 100644 --- a/core/dbt/deps/base.py +++ b/core/dbt/deps/base.py @@ -1,13 +1,16 @@ import abc import os +import functools import tempfile from contextlib import contextmanager +from pathlib import Path from typing import List, Optional, Generic, TypeVar from dbt.clients import system from dbt.contracts.project import ProjectPackageMetadata from dbt.events.functions import fire_event from dbt.events.types import DepsSetDownloadDirectory +from dbt.utils import _connection_exception_retry as connection_exception_retry DOWNLOADS_PATH = None @@ -74,7 +77,7 @@ def _fetch_metadata(self, project, renderer): raise NotImplementedError @abc.abstractmethod - def install(self, project): + def install(self, project, renderer): raise NotImplementedError @abc.abstractmethod @@ -97,6 +100,34 @@ def get_installation_path(self, project, renderer): def get_subdirectory(self): return None + def _install(self, project, renderer): + metadata = self.fetch_metadata(project, renderer) + + tar_name = f"{self.package}.{self.version}.tar.gz" + tar_path = (Path(get_downloads_path()) / tar_name).resolve(strict=False) + system.make_directory(str(tar_path.parent)) + + download_url = metadata.downloads.tarball + deps_path = project.packages_install_path + package_name = self.get_project_name(project, renderer) + + download_untar_fn = functools.partial( + self.download_and_untar, download_url, str(tar_path), deps_path, package_name + ) + connection_exception_retry(download_untar_fn, 5) + + def download_and_untar(self, download_url, tar_path, deps_path, package_name): + """ + Sometimes the download of the files fails and we want to retry. Sometimes the + download appears successful but the file did not make it through as expected + (generally due to a github incident). Either way we want to retry downloading + and untarring to see if we can get a success. 
Call this within + `_connection_exception_retry` + """ + + system.download(download_url, tar_path) + system.untar_package(tar_path, deps_path, package_name) + SomePinned = TypeVar("SomePinned", bound=PinnedPackage) SomeUnpinned = TypeVar("SomeUnpinned", bound="UnpinnedPackage") diff --git a/core/dbt/deps/git.py b/core/dbt/deps/git.py index 2b08e04632f..683ce2c4dc7 100644 --- a/core/dbt/deps/git.py +++ b/core/dbt/deps/git.py @@ -10,14 +10,9 @@ GitPackage, ) from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path -from dbt.exceptions import ExecutableError, warn_or_error, raise_dependency_error -from dbt.events.functions import fire_event -from dbt.events.types import EnsureGitInstalled -from dbt import ui - -PIN_PACKAGE_URL = ( - "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" # noqa -) +from dbt.exceptions import ExecutableError, MultipleVersionGitDeps +from dbt.events.functions import fire_event, warn_or_error +from dbt.events.types import EnsureGitInstalled, DepsUnpinned def md5sum(s: str): @@ -63,14 +58,6 @@ def nice_version_name(self): else: return "revision {}".format(self.revision) - def unpinned_msg(self): - if self.revision == "HEAD": - return "not pinned, using HEAD (default branch)" - elif self.revision in ("main", "master"): - return f'pinned to the "{self.revision}" branch' - else: - return None - def _checkout(self): """Performs a shallow clone of the repository into the downloads directory. This function can be called repeatedly. 
If the project has @@ -95,14 +82,8 @@ def _fetch_metadata( ) -> ProjectPackageMetadata: path = self._checkout() - if self.unpinned_msg() and self.warn_unpinned: - warn_or_error( - 'The git package "{}" \n\tis {}.\n\tThis can introduce ' - "breaking changes into your project without warning!\n\nSee {}".format( - self.git, self.unpinned_msg(), PIN_PACKAGE_URL - ), - log_fmt=ui.yellow("WARNING: {}"), - ) + if (self.revision == "HEAD" or self.revision in ("main", "master")) and self.warn_unpinned: + warn_or_error(DepsUnpinned(git=self.git)) partial = PartialProject.from_project_root(path) return partial.render_package_metadata(renderer) @@ -165,10 +146,7 @@ def resolved(self) -> GitPinnedPackage: if len(requested) == 0: requested = {"HEAD"} elif len(requested) > 1: - raise_dependency_error( - "git dependencies should contain exactly one version. " - "{} contains: {}".format(self.git, requested) - ) + raise MultipleVersionGitDeps(self.git, requested) return GitPinnedPackage( git=self.git, diff --git a/core/dbt/deps/registry.py b/core/dbt/deps/registry.py index bd8263e4001..f3398f4b16f 100644 --- a/core/dbt/deps/registry.py +++ b/core/dbt/deps/registry.py @@ -1,23 +1,20 @@ -import os -import functools from typing import List from dbt import semver from dbt import flags from dbt.version import get_installed_version -from dbt.clients import registry, system +from dbt.clients import registry from dbt.contracts.project import ( RegistryPackageMetadata, RegistryPackage, ) -from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path +from dbt.deps.base import PinnedPackage, UnpinnedPackage from dbt.exceptions import ( - package_version_not_found, - VersionsNotCompatibleException, DependencyException, - package_not_found, + PackageNotFound, + PackageVersionNotFound, + VersionsNotCompatibleException, ) -from dbt.utils import _connection_exception_retry as connection_exception_retry class RegistryPackageMixin: @@ -60,32 +57,7 @@ def _fetch_metadata(self, project, 
renderer) -> RegistryPackageMetadata: return RegistryPackageMetadata.from_dict(dct) def install(self, project, renderer): - metadata = self.fetch_metadata(project, renderer) - - tar_name = "{}.{}.tar.gz".format(self.package, self.version) - tar_path = os.path.realpath(os.path.join(get_downloads_path(), tar_name)) - system.make_directory(os.path.dirname(tar_path)) - - download_url = metadata.downloads.tarball - deps_path = project.packages_install_path - package_name = self.get_project_name(project, renderer) - - download_untar_fn = functools.partial( - self.download_and_untar, download_url, tar_path, deps_path, package_name - ) - connection_exception_retry(download_untar_fn, 5) - - def download_and_untar(self, download_url, tar_path, deps_path, package_name): - """ - Sometimes the download of the files fails and we want to retry. Sometimes the - download appears successful but the file did not make it through as expected - (generally due to a github incident). Either way we want to retry downloading - and untarring to see if we can get a success. 
Call this within - `_connection_exception_retry` - """ - - system.download(download_url, tar_path) - system.untar_package(tar_path, deps_path, package_name) + self._install(project, renderer) class RegistryUnpinnedPackage(RegistryPackageMixin, UnpinnedPackage[RegistryPinnedPackage]): @@ -99,7 +71,7 @@ def __init__( def _check_in_index(self): index = registry.index_cached() if self.package not in index: - package_not_found(self.package) + raise PackageNotFound(self.package) @classmethod def from_contract(cls, contract: RegistryPackage) -> "RegistryUnpinnedPackage": @@ -146,7 +118,7 @@ def resolved(self) -> RegistryPinnedPackage: target = None if not target: # raise an exception if no installable target version is found - package_version_not_found(self.package, range_, installable, should_version_check) + raise PackageVersionNotFound(self.package, range_, installable, should_version_check) latest_compatible = installable[-1] return RegistryPinnedPackage( package=self.package, version=target, version_latest=latest_compatible diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py index 7313280a3ca..b83a3bdee7d 100644 --- a/core/dbt/deps/resolver.py +++ b/core/dbt/deps/resolver.py @@ -1,22 +1,29 @@ from dataclasses import dataclass, field from typing import Dict, List, NoReturn, Union, Type, Iterator, Set, Any -from dbt.exceptions import raise_dependency_error, InternalException +from dbt.exceptions import ( + DuplicateDependencyToRoot, + DuplicateProjectDependency, + MismatchedDependencyTypes, + InternalException, +) from dbt.config import Project from dbt.config.renderer import PackageRenderer from dbt.deps.base import BasePackage, PinnedPackage, UnpinnedPackage from dbt.deps.local import LocalUnpinnedPackage +from dbt.deps.tarball import TarballUnpinnedPackage from dbt.deps.git import GitUnpinnedPackage from dbt.deps.registry import RegistryUnpinnedPackage from dbt.contracts.project import ( LocalPackage, + TarballPackage, GitPackage, RegistryPackage, ) 
-PackageContract = Union[LocalPackage, GitPackage, RegistryPackage] +PackageContract = Union[LocalPackage, TarballPackage, GitPackage, RegistryPackage] @dataclass @@ -49,10 +56,7 @@ def __setitem__(self, key: BasePackage, value): self.packages[key_str] = value def _mismatched_types(self, old: UnpinnedPackage, new: UnpinnedPackage) -> NoReturn: - raise_dependency_error( - f"Cannot incorporate {new} ({new.__class__.__name__}) in {old} " - f"({old.__class__.__name__}): mismatched types" - ) + raise MismatchedDependencyTypes(new, old) def incorporate(self, package: UnpinnedPackage): key: str = self._pick_key(package) @@ -69,6 +73,8 @@ def update_from(self, src: List[PackageContract]) -> None: for contract in src: if isinstance(contract, LocalPackage): pkg = LocalUnpinnedPackage.from_contract(contract) + elif isinstance(contract, TarballPackage): + pkg = TarballUnpinnedPackage.from_contract(contract) elif isinstance(contract, GitPackage): pkg = GitUnpinnedPackage.from_contract(contract) elif isinstance(contract, RegistryPackage): @@ -101,17 +107,9 @@ def _check_for_duplicate_project_names( for package in final_deps: project_name = package.get_project_name(project, renderer) if project_name in seen: - raise_dependency_error( - f'Found duplicate project "{project_name}". This occurs when ' - "a dependency has the same project name as some other " - "dependency." - ) + raise DuplicateProjectDependency(project_name) elif project_name == project.project_name: - raise_dependency_error( - "Found a dependency with the same name as the root project " - f'"{project_name}". Package names must be unique in a project.' - " Please rename one of these packages." 
- ) + raise DuplicateDependencyToRoot(project_name) seen.add(project_name) diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py new file mode 100644 index 00000000000..16c9cb0a20d --- /dev/null +++ b/core/dbt/deps/tarball.py @@ -0,0 +1,74 @@ +from dbt.contracts.project import RegistryPackageMetadata, TarballPackage +from dbt.deps.base import PinnedPackage, UnpinnedPackage + + +class TarballPackageMixin: + def __init__(self, tarball: str) -> None: + super().__init__() + self.tarball = tarball + + @property + def name(self): + return self.tarball + + def source_type(self) -> str: + return "tarball" + + +class TarballPinnedPackage(TarballPackageMixin, PinnedPackage): + def __init__(self, tarball: str, package: str) -> None: + super().__init__(tarball) + # setup to recycle RegistryPinnedPackage fns + self.package = package + self.version = "tarball" + + @property + def name(self): + return self.package + + def get_version(self): + return self.version + + def nice_version_name(self): + return f"tarball (url: {self.tarball})" + + def _fetch_metadata(self, project, renderer): + """ + recycle RegistryPackageMetadata so that we can use the install and + download_and_untar from RegistryPinnedPackage next. + build RegistryPackageMetadata from info passed via packages.yml since no + 'metadata' service exists in this case. 
+ """ + + dct = { + "name": self.package, + "packages": [], # note: required by RegistryPackageMetadata + "downloads": {"tarball": self.tarball}, + } + + return RegistryPackageMetadata.from_dict(dct) + + def install(self, project, renderer): + self._install(project, renderer) + + +class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]): + def __init__( + self, + tarball: str, + package: str, + ) -> None: + super().__init__(tarball) + # setup to recycle RegistryPinnedPackage fns + self.package = package + self.version = "tarball" + + @classmethod + def from_contract(cls, contract: TarballPackage) -> "TarballUnpinnedPackage": + return cls(tarball=contract.tarball, package=contract.name) + + def incorporate(self, other: "TarballUnpinnedPackage") -> "TarballUnpinnedPackage": + return TarballUnpinnedPackage(tarball=self.tarball, package=self.package) + + def resolved(self) -> TarballPinnedPackage: + return TarballPinnedPackage(tarball=self.tarball, package=self.package) diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle new file mode 100644 index 00000000000..8aaad5e25b0 Binary files /dev/null and b/core/dbt/docs/build/doctrees/environment.pickle differ diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree new file mode 100644 index 00000000000..3acd417b911 Binary files /dev/null and b/core/dbt/docs/build/doctrees/index.doctree differ diff --git a/core/dbt/docs/build/html/.buildinfo b/core/dbt/docs/build/html/.buildinfo new file mode 100644 index 00000000000..39803f13c3e --- /dev/null +++ b/core/dbt/docs/build/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
+config: 1ee31fc16e025fb98598189ba2cb5fcb +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/core/dbt/docs/build/html/_sources/index.rst.txt b/core/dbt/docs/build/html/_sources/index.rst.txt new file mode 100644 index 00000000000..d5e3c6007af --- /dev/null +++ b/core/dbt/docs/build/html/_sources/index.rst.txt @@ -0,0 +1,4 @@ +dbt-core's API documentation +============================ + +.. dbt_click:: dbt.cli.main:cli diff --git a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 00000000000..8549469dc29 --- /dev/null +++ b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,134 @@ +/* + * _sphinx_javascript_frameworks_compat.js + * ~~~~~~~~~~ + * + * Compatability shim for jQuery and underscores.js. + * + * WILL BE REMOVED IN Sphinx 6.0 + * xref RemovedInSphinx60Warning + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. 
+ */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var 
result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/core/dbt/docs/build/html/_static/alabaster.css b/core/dbt/docs/build/html/_static/alabaster.css new file mode 100644 index 00000000000..0eddaeb07d1 --- /dev/null +++ b/core/dbt/docs/build/html/_static/alabaster.css @@ -0,0 +1,701 @@ +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: Georgia, serif; + font-size: 17px; + background-color: #fff; + color: #000; + margin: 0; + padding: 0; +} + + +div.document { + width: 940px; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 220px; +} + +div.sphinxsidebar { + width: 220px; + font-size: 14px; + line-height: 1.5; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #fff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +div.body > .section { + text-align: left; +} + +div.footer { + width: 940px; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +p.caption { + font-family: inherit; + font-size: inherit; +} + + +div.relations { + display: none; 
+} + + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0; + margin: -10px 0 0 0px; + text-align: center; +} + +div.sphinxsidebarwrapper h1.logo { + margin-top: -10px; + text-align: center; + margin-bottom: 5px; + text-align: left; +} + +div.sphinxsidebarwrapper h1.logo-name { + margin-top: 0px; +} + +div.sphinxsidebarwrapper p.blurb { + margin-top: 0; + font-style: normal; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: Georgia, serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar ul li.toctree-l1 > a { + font-size: 120%; +} + +div.sphinxsidebar ul li.toctree-l2 > a { + font-size: 110%; +} + +div.sphinxsidebar input { + border: 1px solid #CCC; + font-family: Georgia, serif; + font-size: 1em; +} + +div.sphinxsidebar hr { + border: none; + height: 1px; + color: #AAA; + background: #AAA; + + text-align: left; + margin-left: 0; + width: 50%; +} + +div.sphinxsidebar .badge { + border-bottom: none; +} + +div.sphinxsidebar .badge:hover { + border-bottom: none; +} + +/* To address an issue with donation coming after search */ +div.sphinxsidebar h3.donation { + margin-top: 10px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, 
+div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: Georgia, serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #DDD; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #EAEAEA; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + margin: 20px 0px; + padding: 10px 30px; + background-color: #EEE; + border: 1px solid #CCC; +} + +div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fafafa; +} + +div.admonition p.admonition-title { + font-family: Georgia, serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: #fff; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.note { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.tip { + background-color: #EEE; + border: 1px solid 
#CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.seealso { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.topic { + background-color: #EEE; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt, code { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +.hll { + background-color: #FFC; + margin: 0 -12px; + padding: 0 12px; + display: block; +} + +img.screenshot { +} + +tt.descname, tt.descclassname, code.descname, code.descclassname { + font-size: 0.95em; +} + +tt.descname, code.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #EEE; + background: #FDFDFD; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.field-list p { + margin-bottom: 0.8em; +} + +/* Cloned from + * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 + */ +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +table.footnote td.label { + width: .1px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + 
+blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #EEE; + padding: 7px 30px; + margin: 15px 0px; + line-height: 1.3em; +} + +div.viewcode-block:target { + background: #ffd; +} + +dl pre, blockquote pre, li pre { + margin-left: 0; + padding-left: 30px; +} + +tt, code { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, code.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fff; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt, a:hover code { + background: #EEE; +} + + +@media screen and (max-width: 870px) { + + div.sphinxsidebar { + display: none; + } + + div.document { + width: 100%; + + } + + div.documentwrapper { + margin-left: 0; + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + } + + div.bodywrapper { + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + margin-left: 0; + } + + ul { + margin-left: 0; + } + + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 30px; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .bodywrapper { + margin: 0; + } + + .footer { + width: auto; + } + + .github { + display: none; + } + + + +} + + + +@media screen and (max-width: 875px) { + + body { + margin: 0; + padding: 20px 30px; + } + + div.documentwrapper { + float: none; + background: #fff; + } + + div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 
50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: #FFF; + } + + div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, + div.sphinxsidebar h3 a { + color: #fff; + } + + div.sphinxsidebar a { + color: #AAA; + } + + div.sphinxsidebar p.logo { + display: none; + } + + div.document { + width: 100%; + margin: 0; + } + + div.footer { + display: none; + } + + div.bodywrapper { + margin: 0; + } + + div.body { + min-height: 0; + padding: 0; + } + + .rtd_doc_footer { + display: none; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .footer { + width: auto; + } + + .github { + display: none; + } +} + + +/* misc. */ + +.revsys-inline { + display: none!important; +} + +/* Make nested-list/multi-paragraph items look better in Releases changelog + * pages. Without this, docutils' magical list fuckery causes inconsistent + * formatting between different release sub-lists. + */ +div#changelog > div.section > ul > li > p:only-child { + margin-bottom: 0; +} + +/* Hide fugly table cell borders in ..bibliography:: directive output */ +table.docutils.citation, table.docutils.citation td, table.docutils.citation th { + border: none; + /* Below needed in some edge cases; if not applied, bottom shadows appear */ + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + + +/* relbar */ + +.related { + line-height: 30px; + width: 100%; + font-size: 0.9rem; +} + +.related.top { + border-bottom: 1px solid #EEE; + margin-bottom: 20px; +} + +.related.bottom { + border-top: 1px solid #EEE; +} + +.related ul { + padding: 0; + margin: 0; + list-style: none; +} + +.related li { + display: inline; +} + +nav#rellinks { + float: right; +} + +nav#rellinks li+li:before { + content: "|"; +} + +nav#breadcrumbs li+li:before { + content: "\00BB"; +} + +/* Hide certain items when printing */ +@media print { + div.related { + display: none; + } +} \ No newline at end of file diff --git a/core/dbt/docs/build/html/_static/basic.css 
b/core/dbt/docs/build/html/_static/basic.css new file mode 100644 index 00000000000..4e9a9f1faca --- /dev/null +++ b/core/dbt/docs/build/html/_static/basic.css @@ -0,0 +1,900 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + 
max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, 
div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + 
margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 
0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > 
:first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + 
margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + 
+div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/core/dbt/docs/build/html/_static/custom.css b/core/dbt/docs/build/html/_static/custom.css new file mode 100644 index 00000000000..2a924f1d6a8 --- /dev/null +++ b/core/dbt/docs/build/html/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/core/dbt/docs/build/html/_static/doctools.js b/core/dbt/docs/build/html/_static/doctools.js new file mode 100644 index 00000000000..527b876ca63 --- /dev/null +++ b/core/dbt/docs/build/html/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + 
event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/core/dbt/docs/build/html/_static/documentation_options.js b/core/dbt/docs/build/html/_static/documentation_options.js new file mode 100644 index 00000000000..b57ae3b8393 --- /dev/null +++ b/core/dbt/docs/build/html/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/core/dbt/docs/build/html/_static/file.png b/core/dbt/docs/build/html/_static/file.png new file mode 100644 index 00000000000..a858a410e4f Binary files /dev/null and b/core/dbt/docs/build/html/_static/file.png differ diff --git a/core/dbt/docs/build/html/_static/jquery-3.6.0.js b/core/dbt/docs/build/html/_static/jquery-3.6.0.js new file mode 100644 index 00000000000..fc6c299b73e --- /dev/null +++ b/core/dbt/docs/build/html/_static/jquery-3.6.0.js @@ -0,0 +1,10881 @@ +/*! 
+ * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ? 
function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. 
+ // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? 
this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new 
Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? 
-1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 
+ indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? 
+ + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? + String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? 
+ + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 
] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = 
arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // 
https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? 
[ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert( function( el ) { + + var input; + + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should 
be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll( "[selected]" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push( "~=" ); + } + + // Support: IE 11+, Edge 15 - 18+ + // IE 11/Edge don't find elements on a `[name='']` query in some cases. + // Adding a temporary attribute to the document before the selection works + // around the issue. + // Interestingly, IE 10 & older don't seem to have the issue. + input = document.createElement( "input" ); + input.setAttribute( "name", "" ); + el.appendChild( input ); + if ( !el.querySelectorAll( "[name='']" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + + whitespace + "*(?:''|\"\")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll( ":checked" ).length ) { + rbuggyQSA.push( ":checked" ); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push( ".#.+[+~]" ); + } + + // Support: Firefox <=3.6 - 5 only + // Old Firefox doesn't throw on a badly-escaped identifier. 
+ el.querySelectorAll( "\\\f" ); + rbuggyQSA.push( "[\\r\\n\\f]" ); + } ); + + assert( function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement( "input" ); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll( "[name=d]" ).length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: Opera 10 - 11 only + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll( "*,:x" ); + rbuggyQSA.push( ",.*:" ); + } ); + } + + if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector ) ) ) ) { + + assert( function( el ) { + + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + } ); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); + + /* Contains 
+ ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? 
+ + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? + val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent 
for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? 
+ function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? 
"nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 
] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." 
+ // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but 
not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 
0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built 
with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length; + while ( i-- ) { + token = tokens[ i ]; + + // Abort if we hit a combinator + if ( Expr.relative[ ( type = token.type ) ] ) { + break; + } + if ( ( find = Expr.find[ type ] ) ) { + + // Search, expanding context for leading sibling combinators + if ( ( seed = find( + token.matches[ 0 ].replace( runescape, funescape ), + rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || + context + ) ) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert( function( el ) { + + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; +} ); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert( function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute( "href" ) === "#"; +} ) ) { + addHandle( "type|href|height|width", function( 
elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + } ); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert( function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +} ) ) { + addHandle( "value", function( elem, _name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + } ); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert( function( el ) { + return el.getAttribute( "disabled" ) == null; +} ) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + ( val = elem.getAttributeNode( name ) ) && val.specified ? + val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + 
+} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? + jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? 
context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? 
this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, _i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, _i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, _i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( elem.contentDocument != null && + + // Support: IE 11+ + // elements with no `data` attribute has an object + // `contentDocument` with a `null` prototype. 
+ getProto( elem.contentDocument ) ) { + + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. 
+ * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? + createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future 
add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? 
+ jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + 
resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } 
else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? [ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = 
undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... ) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? 
+ onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! 
+ return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? 
--jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function 
values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? + value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. 
+ if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. 
A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. 
expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( 
arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? + this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. 
+ if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). 
+ jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && 
isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); + +var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); + + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; + + // Support: IE 
<=9 only + // IE <=9 replaces "; + support.option = !!div.lastChild; +} )(); + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting or other required elements. + thead: [ 1, "", "
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// Support: IE <=9 only +if ( !support.option ) { + wrapMap.optgroup = wrapMap.option = [ 1, "" ]; +} + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? 
[ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 - 11+ +// focus() and blur() are asynchronous, except when they are no-op. 
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. 
+ * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." 
).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( 
elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + 
var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? 
+ jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... 
) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. 
+function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. 
+ return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? + src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + 
key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + // Support: IE <=10 - 11, Edge 12 - 13 only + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? 
content : content.firstChild, "tr" ) ) { + + return jQuery( elem ).children( "tbody" )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { + elem.type = elem.type.slice( 5 ); + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.get( src ); + events = pdataOld.events; + + if ( events ) { + dataPriv.remove( dest, "handle events" ); + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. 
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( 
elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // 
Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = 
function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
+ function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + 
// Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. 
+ trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. 
+ ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? 
+ + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? "border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + 
extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? 
"1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. 
We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. 
+ scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? 
value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. 
+ if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? 
"hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? 
"" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. + // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return 
remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + 
+ prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || 
optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + 
jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || 
nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( 
jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use 
proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + 
+jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. 
+ finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? 
+ "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = 
jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? 
+ new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? 
+ bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. + // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + 
jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). 
+ var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? + jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? 
i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function 
addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? 
target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = 
s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? 
e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? 
+ jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 
0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of 
requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion 
exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? 
success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? 
html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack 
transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + +

Index

+ +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html new file mode 100644 index 00000000000..d4238bb08c3 --- /dev/null +++ b/core/dbt/docs/build/html/index.html @@ -0,0 +1,855 @@ + + + + + + + + + dbt-core’s API documentation — dbt-core documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

dbt-core’s API documentation¶

+
+

Command: build¶

+
+

defer¶

+

Type: boolean

+

If set, defer to the state variable for resolving unselected nodes.

+
+
+

exclude¶

+

Type: string

+

Specify the nodes to exclude.

+
+
+

fail_fast¶

+

Type: boolean

+

Stop execution on first failure.

+
+
+

full_refresh¶

+

Type: boolean

+

If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.

+
+
+

indirect_selection¶

+

Type: choice: [‘eager’, ‘cautious’]

+

Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.

+
+
+

log_path¶

+

Type: path

+

Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

+
+
+

models¶

+

Type: string

+

Specify the nodes to include.

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

selector¶

+

Type: string

+

The selector name to use, as defined in selectors.yml

+
+
+

show¶

+

Type: boolean

+

Show a sample of the loaded data in the terminal

+
+
+

state¶

+

Type: path

+

If set, use the given directory as the source for json files to compare with this project.

+
+
+

store_failures¶

+

Type: boolean

+

Store test results (failing rows) in the database

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

target_path¶

+

Type: path

+

Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

+
+
+

threads¶

+

Type: int

+

Specify number of threads to use while executing models. Overrides settings in profiles.yml.

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+
+

version_check¶

+

Type: boolean

+

Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

+
+

Command: clean¶

+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+

Command: compile¶

+
+

defer¶

+

Type: boolean

+

If set, defer to the state variable for resolving unselected nodes.

+
+
+

exclude¶

+

Type: string

+

Specify the nodes to exclude.

+
+
+

full_refresh¶

+

Type: boolean

+

If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.

+
+
+

log_path¶

+

Type: path

+

Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

+
+
+

models¶

+

Type: string

+

Specify the nodes to include.

+
+
+

parse_only¶

+

Type: boolean

+

TODO: No help text currently available

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

selector¶

+

Type: string

+

The selector name to use, as defined in selectors.yml

+
+
+

state¶

+

Type: path

+

If set, use the given directory as the source for json files to compare with this project.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

target_path¶

+

Type: path

+

Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

+
+
+

threads¶

+

Type: int

+

Specify number of threads to use while executing models. Overrides settings in profiles.yml.

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+
+

version_check¶

+

Type: boolean

+

Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

+
+

Command: debug¶

+
+

config_dir¶

+

Type: string

+

If specified, DBT will show path information for this project

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+
+

version_check¶

+

Type: boolean

+

Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

+
+

Command: deps¶

+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+

Command: docs¶

+

Command: init¶

+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

skip_profile_setup¶

+

Type: boolean

+

Skip interactive profile setup.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+

Command: list¶

+
+

exclude¶

+

Type: string

+

Specify the nodes to exclude.

+
+
+

indirect_selection¶

+

Type: choice: [‘eager’, ‘cautious’]

+

Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.

+
+
+

models¶

+

Type: string

+

Specify the nodes to include.

+
+
+

output¶

+

Type: choice: [‘json’, ‘name’, ‘path’, ‘selector’]

+

TODO: No current help text

+
+
+

output_keys¶

+

Type: string

+

TODO: No current help text

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

resource_type¶

+

Type: choice: [‘metric’, ‘source’, ‘analysis’, ‘model’, ‘test’, ‘exposure’, ‘snapshot’, ‘seed’, ‘default’, ‘all’]

+

TODO: No current help text

+
+
+

selector¶

+

Type: string

+

The selector name to use, as defined in selectors.yml

+
+
+

state¶

+

Type: path

+

If set, use the given directory as the source for json files to compare with this project.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+

Command: parse¶

+
+

compile¶

+

Type: boolean

+

TODO: No help text currently available

+
+
+

log_path¶

+

Type: path

+

Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

target_path¶

+

Type: path

+

Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

+
+
+

threads¶

+

Type: int

+

Specify number of threads to use while executing models. Overrides settings in profiles.yml.

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+
+

version_check¶

+

Type: boolean

+

Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

+
+
+

write_manifest¶

+

Type: boolean

+

TODO: No help text currently available

+
+

Command: run¶

+
+

defer¶

+

Type: boolean

+

If set, defer to the state variable for resolving unselected nodes.

+
+
+

exclude¶

+

Type: string

+

Specify the nodes to exclude.

+
+
+

fail_fast¶

+

Type: boolean

+

Stop execution on first failure.

+
+
+

full_refresh¶

+

Type: boolean

+

If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.

+
+
+

log_path¶

+

Type: path

+

Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

+
+
+

models¶

+

Type: string

+

Specify the nodes to include.

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

selector¶

+

Type: string

+

The selector name to use, as defined in selectors.yml

+
+
+

state¶

+

Type: path

+

If set, use the given directory as the source for json files to compare with this project.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

target_path¶

+

Type: path

+

Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

+
+
+

threads¶

+

Type: int

+

Specify number of threads to use while executing models. Overrides settings in profiles.yml.

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+
+

version_check¶

+

Type: boolean

+

Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

+
+

Command: run_operation¶

+
+

args¶

+

Type: YAML

+

Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+

Command: seed¶

+
+

exclude¶

+

Type: string

+

Specify the nodes to exclude.

+
+
+

full_refresh¶

+

Type: boolean

+

If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.

+
+
+

log_path¶

+

Type: path

+

Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

+
+
+

models¶

+

Type: string

+

Specify the nodes to include.

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

selector¶

+

Type: string

+

The selector name to use, as defined in selectors.yml

+
+
+

show¶

+

Type: boolean

+

Show a sample of the loaded data in the terminal

+
+
+

state¶

+

Type: path

+

If set, use the given directory as the source for json files to compare with this project.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

target_path¶

+

Type: path

+

Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

+
+
+

threads¶

+

Type: int

+

Specify number of threads to use while executing models. Overrides settings in profiles.yml.

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+
+

version_check¶

+

Type: boolean

+

Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

+
+

Command: snapshot¶

+
+

defer¶

+

Type: boolean

+

If set, defer to the state variable for resolving unselected nodes.

+
+
+

exclude¶

+

Type: string

+

Specify the nodes to exclude.

+
+
+

models¶

+

Type: string

+

Specify the nodes to include.

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

selector¶

+

Type: string

+

The selector name to use, as defined in selectors.yml

+
+
+

state¶

+

Type: path

+

If set, use the given directory as the source for json files to compare with this project.

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

threads¶

+

Type: int

+

Specify number of threads to use while executing models. Overrides settings in profiles.yml.

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+

Command: source¶

+

Command: test¶

+
+

defer¶

+

Type: boolean

+

If set, defer to the state variable for resolving unselected nodes.

+
+
+

exclude¶

+

Type: string

+

Specify the nodes to exclude.

+
+
+

fail_fast¶

+

Type: boolean

+

Stop execution on first failure.

+
+
+

indirect_selection¶

+

Type: choice: [‘eager’, ‘cautious’]

+

Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.

+
+
+

log_path¶

+

Type: path

+

Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.

+
+
+

models¶

+

Type: string

+

Specify the nodes to include.

+
+
+

profile¶

+

Type: string

+

Which profile to load. Overrides setting in dbt_project.yml.

+
+
+

profiles_dir¶

+

Type: path

+

Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/

+
+
+

project_dir¶

+

Type: path

+

Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.

+
+
+

selector¶

+

Type: string

+

The selector name to use, as defined in selectors.yml

+
+
+

state¶

+

Type: path

+

If set, use the given directory as the source for json files to compare with this project.

+
+
+

store_failures¶

+

Type: boolean

+

Store test results (failing rows) in the database

+
+
+

target¶

+

Type: string

+

Which target to load for the given profile

+
+
+

target_path¶

+

Type: path

+

Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.

+
+
+

threads¶

+

Type: int

+

Specify number of threads to use while executing models. Overrides settings in profiles.yml.

+
+
+

vars¶

+

Type: YAML

+

Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

+
+
+

version_check¶

+

Type: boolean

+

Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

+
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/core/dbt/docs/build/html/objects.inv b/core/dbt/docs/build/html/objects.inv new file mode 100644 index 00000000000..e46f3932608 Binary files /dev/null and b/core/dbt/docs/build/html/objects.inv differ diff --git a/core/dbt/docs/build/html/search.html b/core/dbt/docs/build/html/search.html new file mode 100644 index 00000000000..f94c6ef0835 --- /dev/null +++ b/core/dbt/docs/build/html/search.html @@ -0,0 +1,121 @@ + + + + + + + + Search — dbt-core documentation + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Search

+ + + + +

+ Searching for multiple words only shows matches that contain + all words. +

+ + +
+ + + +
+ + + +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js new file mode 100644 index 00000000000..25dd9fd3af5 --- /dev/null +++ b/core/dbt/docs/build/html/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "string": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "from": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "select": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "even": 0, "thei": 0, "those": 0, "have": 0, "been": 0, "explicitli": 0, "path": 0, "configur": 0, "log": 0, "onli": 0, "appli": 0, "thi": 0, "current": 0, "overrid": 0, "dbt_log_path": 0, "i": 0, "includ": 0, "which": 0, "load": 0, "dbt_project": 0, "yml": 0, "directori": 0, "look": 0, "file": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "The": 0, "name": 0, "us": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "project": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "databas": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "argument": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "avail": 0, "inform": 0, "skip": 0, "inter": 0, "setup": 0, "metric": 0, "analysi": 0, "exposur": 0, "macro": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "command": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "full_refresh": 0, "indirect_select": 0, "log_path": 0, "model": 0, "profil": 0, "profiles_dir": 0, 
"project_dir": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "resource_typ": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "log_path": [[0, "build|log_path"], [0, "compile|log_path"], [0, "parse|log_path"], [0, "run|log_path"], [0, "seed|log_path"], [0, "test|log_path"]], "models": [[0, "build|models"], [0, "compile|models"], [0, "list|models"], [0, "run|models"], [0, "seed|models"], [0, "snapshot|models"], [0, "test|models"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "parse|profile"], [0, 
"run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], 
[0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"]], "output": [[0, "list|output"]], "output_keys": [[0, "list|output_keys"]], "resource_type": [[0, "list|resource_type"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: run_operation": [[0, "dbt-section"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file diff --git a/core/dbt/docs/source/conf.py b/core/dbt/docs/source/conf.py index 17ff44e41a0..d9962bbfc8b 100644 --- a/core/dbt/docs/source/conf.py +++ b/core/dbt/docs/source/conf.py @@ -7,7 +7,7 @@ # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html -sys.path.insert(0, os.path.abspath("../..")) +sys.path.insert(0, os.path.abspath("../../..")) sys.path.insert(0, os.path.abspath("./_ext")) # -- Project information ----------------------------------------------------- diff --git 
a/core/dbt/events/README.md b/core/dbt/events/README.md index cadc59ab126..52edd7d35d4 100644 --- a/core/dbt/events/README.md +++ b/core/dbt/events/README.md @@ -8,9 +8,10 @@ The event module provides types that represent what is happening in dbt in `even When events are processed via `fire_event`, nearly everything is logged. Whether or not the user has enabled the debug flag, all debug messages are still logged to the file. However, some events are particularly time consuming to construct because they return a huge amount of data. Today, the only messages in this category are cache events and are only logged if the `--log-cache-events` flag is on. This is important because these messages should not be created unless they are going to be logged, because they cause a noticable performance degredation. These events use a "fire_event_if" functions. # Adding a New Event -New events need to have a proto message definition created in core/dbt/events/types.proto. Every message must include EventInfo as the first field, named "info" and numbered 1. To update the proto_types.py file, in the core/dbt/events directory: ```protoc --python_betterproto_out . types.proto``` - -A matching class needs to be created in the core/dbt/events/types.py file, which will have two superclasses, the "Level" mixin and the generated class from proto_types.py. These classes will also generally have two methods, a "code" method that returns the event code, and a "message" method that is used to construct the "msg" from the event fields. In addition the "Level" mixin will provide a "level_tag" method to set the level (which can also be overridden using the "info" convenience function from functions.py) +* Add a new message in types.proto with an EventInfo field first +* run the protoc compiler to update proto_types.py: ```protoc --python_betterproto_out . 
types.proto``` +* Add a wrapping class in core/dbt/event/types.py with a Level superclass and the superclass from proto_types.py, plus code and message methods +* Add the class to tests/unit/test_events.py Note that no attributes can exist in these event classes except for fields defined in the protobuf definitions, because the betterproto metaclass will throw an error. Betterproto provides a to_dict() method to convert the generated classes to a dictionary and from that to json. However some attributes will successfully convert to dictionaries but not to serialized protobufs, so we need to test both output formats. diff --git a/core/dbt/events/adapter_endpoint.py b/core/dbt/events/adapter_endpoint.py index 68a73d8aecb..c26ac376437 100644 --- a/core/dbt/events/adapter_endpoint.py +++ b/core/dbt/events/adapter_endpoint.py @@ -1,6 +1,7 @@ import traceback from dataclasses import dataclass from dbt.events.functions import fire_event +from dbt.events.contextvars import get_node_info from dbt.events.types import ( AdapterEventDebug, AdapterEventInfo, @@ -15,27 +16,39 @@ class AdapterLogger: name: str def debug(self, msg, *args): - event = AdapterEventDebug(name=self.name, base_msg=msg, args=args) + event = AdapterEventDebug( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def info(self, msg, *args): - event = AdapterEventInfo(name=self.name, base_msg=msg, args=args) + event = AdapterEventInfo( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def warning(self, msg, *args): - event = AdapterEventWarning(name=self.name, base_msg=msg, args=args) + event = AdapterEventWarning( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def error(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) # The 
default exc_info=True is what makes this method different def exception(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) event.exc_info = traceback.format_exc() fire_event(event) def critical(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) diff --git a/core/dbt/events/base_types.py b/core/dbt/events/base_types.py index cd3275c02a9..db74016099a 100644 --- a/core/dbt/events/base_types.py +++ b/core/dbt/events/base_types.py @@ -1,4 +1,5 @@ from dataclasses import dataclass +from enum import Enum import os import threading from datetime import datetime @@ -43,13 +44,25 @@ def get_thread_name() -> str: return threading.current_thread().name +# EventLevel is an Enum, but mixing in the 'str' type is suggested in the Python +# documentation, and provides support for json conversion, which fails otherwise. +class EventLevel(str, Enum): + DEBUG = "debug" + TEST = "test" + INFO = "info" + WARN = "warn" + ERROR = "error" + + @dataclass class BaseEvent: """BaseEvent for proto message generated python events""" def __post_init__(self): super().__post_init__() - self.info.level = self.level_tag() + if not self.info.level: + self.info.level = self.level_tag() + assert self.info.level in ["info", "warn", "error", "debug", "test"] if not hasattr(self.info, "msg") or not self.info.msg: self.info.msg = self.message() self.info.invocation_id = get_invocation_id() @@ -60,43 +73,55 @@ def __post_init__(self): self.info.code = self.code() self.info.name = type(self).__name__ - def level_tag(self): - raise Exception("level_tag() not implemented for event") + # This is here because although we know that info should always + # exist, mypy doesn't. 
+ def log_level(self) -> EventLevel: + return self.info.level # type: ignore - def message(self): + def level_tag(self) -> EventLevel: + return EventLevel.DEBUG + + def message(self) -> str: raise Exception("message() not implemented for event") +# DynamicLevel requires that the level be supplied on the +# event construction call using the "info" function from functions.py +@dataclass # type: ignore[misc] +class DynamicLevel(BaseEvent): + pass + + @dataclass class TestLevel(BaseEvent): __test__ = False - def level_tag(self) -> str: - return "test" + def level_tag(self) -> EventLevel: + return EventLevel.TEST @dataclass # type: ignore[misc] class DebugLevel(BaseEvent): - def level_tag(self) -> str: - return "debug" + def level_tag(self) -> EventLevel: + return EventLevel.DEBUG @dataclass # type: ignore[misc] class InfoLevel(BaseEvent): - def level_tag(self) -> str: - return "info" + def level_tag(self) -> EventLevel: + return EventLevel.INFO @dataclass # type: ignore[misc] class WarnLevel(BaseEvent): - def level_tag(self) -> str: - return "warn" + def level_tag(self) -> EventLevel: + return EventLevel.WARN @dataclass # type: ignore[misc] class ErrorLevel(BaseEvent): - def level_tag(self) -> str: - return "error" + def level_tag(self) -> EventLevel: + return EventLevel.ERROR # Included to ensure classes with str-type message members are initialized correctly. 
diff --git a/core/dbt/events/contextvars.py b/core/dbt/events/contextvars.py new file mode 100644 index 00000000000..4aa507eb29b --- /dev/null +++ b/core/dbt/events/contextvars.py @@ -0,0 +1,84 @@ +import contextlib +import contextvars + +from typing import Any, Generator, Mapping, Dict +from dbt.events.proto_types import NodeInfo + + +LOG_PREFIX = "log_" +LOG_PREFIX_LEN = len(LOG_PREFIX) + +_log_context_vars: Dict[str, contextvars.ContextVar] = {} + + +def get_contextvars() -> Dict[str, Any]: + rv = {} + ctx = contextvars.copy_context() + + for k in ctx: + if k.name.startswith(LOG_PREFIX) and ctx[k] is not Ellipsis: + rv[k.name[LOG_PREFIX_LEN:]] = ctx[k] + + return rv + + +def get_node_info(): + cvars = get_contextvars() + if "node_info" in cvars: + return cvars["node_info"] + else: + return NodeInfo() + + +def clear_contextvars() -> None: + ctx = contextvars.copy_context() + for k in ctx: + if k.name.startswith(LOG_PREFIX): + k.set(Ellipsis) + + +# put keys and values into context. Returns the contextvar.Token mapping +# Save and pass to reset_contextvars +def set_contextvars(**kwargs: Any) -> Mapping[str, contextvars.Token]: + cvar_tokens = {} + for k, v in kwargs.items(): + log_key = f"{LOG_PREFIX}{k}" + try: + var = _log_context_vars[log_key] + except KeyError: + var = contextvars.ContextVar(log_key, default=Ellipsis) + _log_context_vars[log_key] = var + + cvar_tokens[k] = var.set(v) + + return cvar_tokens + + +# reset by Tokens +def reset_contextvars(**kwargs: contextvars.Token) -> None: + for k, v in kwargs.items(): + log_key = f"{LOG_PREFIX}{k}" + var = _log_context_vars[log_key] + var.reset(v) + + +# remove from contextvars +def unset_contextvars(*keys: str) -> None: + for k in keys: + if k in _log_context_vars: + log_key = f"{LOG_PREFIX}{k}" + _log_context_vars[log_key].set(Ellipsis) + + +# Context manager or decorator to set and unset the context vars +@contextlib.contextmanager +def log_contextvars(**kwargs: Any) -> Generator[None, None, None]: + 
context = get_contextvars() + saved = {k: context[k] for k in context.keys() & kwargs.keys()} + + set_contextvars(**kwargs) + try: + yield + finally: + unset_contextvars(*kwargs.keys()) + set_contextvars(**saved) diff --git a/core/dbt/events/eventmgr.py b/core/dbt/events/eventmgr.py new file mode 100644 index 00000000000..97a7d5d4360 --- /dev/null +++ b/core/dbt/events/eventmgr.py @@ -0,0 +1,212 @@ +from colorama import Style +from dataclasses import dataclass +from datetime import datetime +from enum import Enum +import json +import logging +from logging.handlers import RotatingFileHandler +import threading +from typing import Any, Callable, List, Optional, TextIO +from uuid import uuid4 + +from dbt.events.base_types import BaseEvent, EventLevel + + +# A Filter is a function which takes a BaseEvent and returns True if the event +# should be logged, False otherwise. +Filter = Callable[[BaseEvent], bool] + + +# Default filter which logs every event +def NoFilter(_: BaseEvent) -> bool: + return True + + +# A Scrubber removes secrets from an input string, returning a sanitized string. +Scrubber = Callable[[str], str] + + +# Provide a pass-through scrubber implementation, also used as a default +def NoScrubber(s: str) -> str: + return s + + +class LineFormat(Enum): + PlainText = 1 + DebugText = 2 + Json = 3 + + +# Map from dbt event levels to python log levels +_log_level_map = { + EventLevel.DEBUG: 10, + EventLevel.TEST: 10, + EventLevel.INFO: 20, + EventLevel.WARN: 30, + EventLevel.ERROR: 40, +} + + +# We should consider fixing the problem, but log_level() can return a string for +# DynamicLevel events, even thought it is supposed to return an EventLevel. This +# function gets a string for the level, no matter what. 
+def _get_level_str(e: BaseEvent) -> str: + return e.log_level().value if isinstance(e.log_level(), EventLevel) else str(e.log_level()) + + +# We need this function for now because the numeric log severity levels in +# Python do not match those for logbook, so we have to explicitly call the +# correct function by name. +def send_to_logger(l, level: str, log_line: str): + if level == "test": + l.debug(log_line) + elif level == "debug": + l.debug(log_line) + elif level == "info": + l.info(log_line) + elif level == "warn": + l.warning(log_line) + elif level == "error": + l.error(log_line) + else: + raise AssertionError( + f"While attempting to log {log_line}, encountered the unhandled level: {level}" + ) + + +@dataclass +class LoggerConfig: + name: str + filter: Filter = NoFilter + scrubber: Scrubber = NoScrubber + line_format: LineFormat = LineFormat.PlainText + level: EventLevel = EventLevel.WARN + use_colors: bool = False + output_stream: Optional[TextIO] = None + output_file_name: Optional[str] = None + logger: Optional[Any] = None + + +class _Logger: + def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: + self.name: str = config.name + self.filter: Filter = config.filter + self.scrubber: Scrubber = config.scrubber + self.level: EventLevel = config.level + self.event_manager: EventManager = event_manager + self._python_logger: Optional[logging.Logger] = config.logger + self._stream: Optional[TextIO] = config.output_stream + + if config.output_file_name: + log = logging.getLogger(config.name) + log.setLevel(_log_level_map[config.level]) + handler = RotatingFileHandler( + filename=str(config.output_file_name), + encoding="utf8", + maxBytes=10 * 1024 * 1024, # 10 mb + backupCount=5, + ) + + handler.setFormatter(logging.Formatter(fmt="%(message)s")) + log.handlers.clear() + log.addHandler(handler) + + self._python_logger = log + + def create_line(self, e: BaseEvent) -> str: + raise NotImplementedError() + + def write_line(self, e: 
BaseEvent): + line = self.create_line(e) + python_level = _log_level_map[e.log_level()] + if self._python_logger is not None: + send_to_logger(self._python_logger, _get_level_str(e), line) + elif self._stream is not None and _log_level_map[self.level] <= python_level: + self._stream.write(line + "\n") + + def flush(self): + if self._python_logger is not None: + for handler in self._python_logger.handlers: + handler.flush() + elif self._stream is not None: + self._stream.flush() + + +class _TextLogger(_Logger): + def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: + super().__init__(event_manager, config) + self.use_colors = config.use_colors + self.use_debug_format = config.line_format == LineFormat.DebugText + + def create_line(self, e: BaseEvent) -> str: + return self.create_debug_line(e) if self.use_debug_format else self.create_info_line(e) + + def create_info_line(self, e: BaseEvent) -> str: + ts: str = datetime.utcnow().strftime("%H:%M:%S") + scrubbed_msg: str = self.scrubber(e.message()) # type: ignore + return f"{self._get_color_tag()}{ts} {scrubbed_msg}" + + def create_debug_line(self, e: BaseEvent) -> str: + log_line: str = "" + # Create a separator if this is the beginning of an invocation + # TODO: This is an ugly hack, get rid of it if we can + if type(e).__name__ == "MainReportVersion": + separator = 30 * "=" + log_line = f"\n\n{separator} {datetime.utcnow()} | {self.event_manager.invocation_id} {separator}\n" + ts: str = datetime.utcnow().strftime("%H:%M:%S.%f") + scrubbed_msg: str = self.scrubber(e.message()) # type: ignore + level = _get_level_str(e) + log_line += ( + f"{self._get_color_tag()}{ts} [{level:<5}]{self._get_thread_name()} {scrubbed_msg}" + ) + return log_line + + def _get_color_tag(self) -> str: + return "" if not self.use_colors else Style.RESET_ALL + + def _get_thread_name(self) -> str: + thread_name = "" + if threading.current_thread().name: + thread_name = threading.current_thread().name + thread_name = 
thread_name[:10] + thread_name = thread_name.ljust(10, " ") + thread_name = f" [{thread_name}]:" + return thread_name + + +class _JsonLogger(_Logger): + def create_line(self, e: BaseEvent) -> str: + from dbt.events.functions import event_to_dict + + event_dict = event_to_dict(e) + raw_log_line = json.dumps(event_dict, sort_keys=True) + line = self.scrubber(raw_log_line) # type: ignore + return line + + +class EventManager: + def __init__(self) -> None: + self.loggers: List[_Logger] = [] + self.callbacks: List[Callable[[BaseEvent], None]] = [] + self.invocation_id: str = str(uuid4()) + + def fire_event(self, e: BaseEvent) -> None: + for logger in self.loggers: + if logger.filter(e): # type: ignore + logger.write_line(e) + + for callback in self.callbacks: + callback(e) + + def add_logger(self, config: LoggerConfig): + logger = ( + _JsonLogger(self, config) + if config.line_format == LineFormat.Json + else _TextLogger(self, config) + ) + logger.event_manager = self + self.loggers.append(logger) + + def flush(self): + for logger in self.loggers: + logger.flush() diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index 7a652a998f6..ff5b267bc5e 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -1,125 +1,151 @@ import betterproto -import io +from dbt.constants import METADATA_ENV_PREFIX +from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut +from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter +from dbt.events.helpers import env_secrets, scrub_secrets +from dbt.events.proto_types import EventInfo +from dbt.events.types import EmptyLine +import dbt.flags as flags +from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing +from functools import partial import json -import logging import os import sys -import threading +from typing import Callable, Dict, Optional, TextIO import uuid -from collections import deque -from datetime import datetime -from io import 
StringIO, TextIOWrapper -from logging import Logger -from logging.handlers import RotatingFileHandler -from typing import Callable, Dict, List, Optional, Union -import dbt.flags as flags -import logbook -from colorama import Style -from dbt.constants import METADATA_ENV_PREFIX, SECRET_ENV_PREFIX -from dbt.events.base_types import BaseEvent, Cache, NoFile, NoStdOut -from dbt.events.types import EmptyLine, EventBufferFull, MainReportVersion -from dbt.logger import make_log_dir_if_missing -# create the module-globals -LOG_VERSION = 2 -EVENT_HISTORY = None +LOG_VERSION = 3 +metadata_vars: Optional[Dict[str, str]] = None -DEFAULT_FILE_LOGGER_NAME = "default_file" -FILE_LOG = logging.getLogger(DEFAULT_FILE_LOGGER_NAME) -DEFAULT_STDOUT_LOGGER_NAME = "default_std_out" -STDOUT_LOG = logging.getLogger(DEFAULT_STDOUT_LOGGER_NAME) +def setup_event_logger(log_path: str, log_format: str, use_colors: bool, debug: bool): + cleanup_event_logger() + make_log_dir_if_missing(log_path) -invocation_id: Optional[str] = None -metadata_vars: Optional[Dict[str, str]] = None + if flags.ENABLE_LEGACY_LOGGER: + EVENT_MANAGER.add_logger(_get_logbook_log_config(debug)) + else: + EVENT_MANAGER.add_logger(_get_stdout_config(log_format, debug, use_colors)) + + if _CAPTURE_STREAM: + # Create second stdout logger to support test which want to know what's + # being sent to stdout. 
+ # debug here is true because we need to capture debug events, and we pass in false in main + capture_config = _get_stdout_config(log_format, True, use_colors) + capture_config.output_stream = _CAPTURE_STREAM + EVENT_MANAGER.add_logger(capture_config) + + # create and add the file logger to the event manager + EVENT_MANAGER.add_logger( + _get_logfile_config(os.path.join(log_path, "dbt.log"), use_colors, log_format) + ) -def setup_event_logger(log_path, log_format, use_colors, debug): - global FILE_LOG - global STDOUT_LOG +def _get_stdout_config(log_format: str, debug: bool, use_colors: bool) -> LoggerConfig: + fmt = LineFormat.PlainText + if log_format == "json": + fmt = LineFormat.Json + elif debug: + fmt = LineFormat.DebugText + level = EventLevel.DEBUG if debug else EventLevel.INFO + + return LoggerConfig( + name="stdout_log", + level=level, + use_colors=use_colors, + line_format=fmt, + scrubber=env_scrubber, + filter=partial( + _stdout_filter, bool(flags.LOG_CACHE_EVENTS), debug, bool(flags.QUIET), log_format + ), + output_stream=sys.stdout, + ) - make_log_dir_if_missing(log_path) - # TODO this default should live somewhere better - log_dest = os.path.join(log_path, "dbt.log") - level = logging.DEBUG if debug else logging.INFO - - # overwrite the STDOUT_LOG logger with the configured one - STDOUT_LOG = logging.getLogger("configured_std_out") - STDOUT_LOG.setLevel(level) - setattr(STDOUT_LOG, "format_json", log_format == "json") - setattr(STDOUT_LOG, "format_color", True if use_colors else False) - - FORMAT = "%(message)s" - stdout_passthrough_formatter = logging.Formatter(fmt=FORMAT) - - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setFormatter(stdout_passthrough_formatter) - stdout_handler.setLevel(level) - # clear existing stdout TextIOWrapper stream handlers - STDOUT_LOG.handlers = [ - h - for h in STDOUT_LOG.handlers - if not (hasattr(h, "stream") and isinstance(h.stream, TextIOWrapper)) # type: ignore - ] - 
STDOUT_LOG.addHandler(stdout_handler) - - # overwrite the FILE_LOG logger with the configured one - FILE_LOG = logging.getLogger("configured_file") - FILE_LOG.setLevel(logging.DEBUG) # always debug regardless of user input - setattr(FILE_LOG, "format_json", log_format == "json") - setattr(FILE_LOG, "format_color", True if use_colors else False) - - file_passthrough_formatter = logging.Formatter(fmt=FORMAT) - - file_handler = RotatingFileHandler( - filename=log_dest, encoding="utf8", maxBytes=10 * 1024 * 1024, backupCount=5 # 10 mb +def _stdout_filter( + log_cache_events: bool, debug_mode: bool, quiet_mode: bool, log_format: str, evt: BaseEvent +) -> bool: + return ( + not isinstance(evt, NoStdOut) + and (not isinstance(evt, Cache) or log_cache_events) + and (evt.log_level() != EventLevel.DEBUG or debug_mode) + and (evt.log_level() == EventLevel.ERROR or not quiet_mode) + and not (log_format == "json" and type(evt) == EmptyLine) ) - file_handler.setFormatter(file_passthrough_formatter) - file_handler.setLevel(logging.DEBUG) # always debug regardless of user input - FILE_LOG.handlers.clear() - FILE_LOG.addHandler(file_handler) -# used for integration tests -def capture_stdout_logs() -> StringIO: - global STDOUT_LOG - capture_buf = io.StringIO() - stdout_capture_handler = logging.StreamHandler(capture_buf) - stdout_capture_handler.setLevel(logging.DEBUG) - STDOUT_LOG.addHandler(stdout_capture_handler) - return capture_buf +def _get_logfile_config(log_path: str, use_colors: bool, log_format: str) -> LoggerConfig: + return LoggerConfig( + name="file_log", + line_format=LineFormat.Json if log_format == "json" else LineFormat.DebugText, + use_colors=use_colors, + level=EventLevel.DEBUG, # File log is *always* debug level + scrubber=env_scrubber, + filter=partial(_logfile_filter, bool(flags.LOG_CACHE_EVENTS), log_format), + output_file_name=log_path, + ) -# used for integration tests -def stop_capture_stdout_logs() -> None: - global STDOUT_LOG - STDOUT_LOG.handlers = [ - h 
- for h in STDOUT_LOG.handlers - if not (hasattr(h, "stream") and isinstance(h.stream, StringIO)) # type: ignore - ] +def _logfile_filter(log_cache_events: bool, log_format: str, evt: BaseEvent) -> bool: + return ( + not isinstance(evt, NoFile) + and not (isinstance(evt, Cache) and not log_cache_events) + and not (log_format == "json" and type(evt) == EmptyLine) + ) + + +def _get_logbook_log_config(debug: bool) -> LoggerConfig: + # use the default one since this code should be removed when we remove logbook + config = _get_stdout_config("", debug, bool(flags.USE_COLORS)) + config.name = "logbook_log" + config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e, Cache) + config.logger = GLOBAL_LOGGER + return config + + +def env_scrubber(msg: str) -> str: + return scrub_secrets(msg, env_secrets()) -def env_secrets() -> List[str]: - return [v for k, v in os.environ.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()] +def cleanup_event_logger(): + # Reset to a no-op manager to release streams associated with logs. This is + # especially important for tests, since pytest replaces the stdout stream + # during test runs, and closes the stream after the test is over. + EVENT_MANAGER.loggers.clear() + EVENT_MANAGER.callbacks.clear() -def scrub_secrets(msg: str, secrets: List[str]) -> str: - scrubbed = msg +# Since dbt-rpc does not do its own log setup, and since some events can +# currently fire before logs can be configured by setup_event_logger(), we +# create a default configuration with default settings and no file output. 
+EVENT_MANAGER: EventManager = EventManager() +EVENT_MANAGER.add_logger( + _get_logbook_log_config(flags.DEBUG) # type: ignore + if flags.ENABLE_LEGACY_LOGGER + else _get_stdout_config(flags.LOG_FORMAT, flags.DEBUG, flags.USE_COLORS) # type: ignore +) - for secret in secrets: - scrubbed = scrubbed.replace(secret, "*****") +# This global, and the following two functions for capturing stdout logs are +# an unpleasant hack we intend to remove as part of API-ification. The GitHub +# issue #6350 was opened for that work. +_CAPTURE_STREAM: Optional[TextIO] = None - return scrubbed + +# used for integration tests +def capture_stdout_logs(stream: TextIO): + global _CAPTURE_STREAM + _CAPTURE_STREAM = stream + + +def stop_capture_stdout_logs(): + global _CAPTURE_STREAM + _CAPTURE_STREAM = None # returns a dictionary representation of the event fields. # the message may contain secrets which must be scrubbed at the usage site. -def event_to_json( - event: BaseEvent, -) -> str: +def event_to_json(event: BaseEvent) -> str: event_dict = event_to_dict(event) raw_log_line = json.dumps(event_dict, sort_keys=True) return raw_log_line @@ -128,108 +154,24 @@ def event_to_json( def event_to_dict(event: BaseEvent) -> dict: event_dict = dict() try: - # We could use to_json here, but it wouldn't sort the keys. - # The 'to_json' method just does json.dumps on the dict anyway. event_dict = event.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True) # type: ignore except AttributeError as exc: event_type = type(event).__name__ raise Exception(f"type {event_type} is not serializable. 
{str(exc)}") + # We don't want an empty NodeInfo in output + if "node_info" in event_dict and event_dict["node_info"]["node_name"] == "": + del event_dict["node_info"] return event_dict -# translates an Event to a completely formatted text-based log line -# type hinting everything as strings so we don't get any unintentional string conversions via str() -def reset_color() -> str: - return Style.RESET_ALL if getattr(STDOUT_LOG, "format_color", False) else "" - - -def create_info_text_log_line(e: BaseEvent) -> str: - color_tag: str = reset_color() - ts: str = get_ts().strftime("%H:%M:%S") # TODO: get this from the event.ts? - scrubbed_msg: str = scrub_secrets(e.message(), env_secrets()) - log_line: str = f"{color_tag}{ts} {scrubbed_msg}" - return log_line - - -def create_debug_text_log_line(e: BaseEvent) -> str: - log_line: str = "" - # Create a separator if this is the beginning of an invocation - if type(e) == MainReportVersion: - separator = 30 * "=" - log_line = f"\n\n{separator} {get_ts()} | {get_invocation_id()} {separator}\n" - color_tag: str = reset_color() - ts: str = get_ts().strftime("%H:%M:%S.%f") - scrubbed_msg: str = scrub_secrets(e.message(), env_secrets()) - # Make the levels all 5 characters so they line up - level: str = f"{e.level_tag():<5}" - thread = "" - if threading.current_thread().name: - thread_name = threading.current_thread().name - thread_name = thread_name[:10] - thread_name = thread_name.ljust(10, " ") - thread = f" [{thread_name}]:" - log_line = log_line + f"{color_tag}{ts} [{level}]{thread} {scrubbed_msg}" - return log_line - - -# translates an Event to a completely formatted json log line -def create_json_log_line(e: BaseEvent) -> Optional[str]: - if type(e) == EmptyLine: - return None # will not be sent to logger - raw_log_line = event_to_json(e) - return scrub_secrets(raw_log_line, env_secrets()) - - -# calls create_stdout_text_log_line() or create_json_log_line() according to logger config -def create_log_line(e: BaseEvent, 
file_output=False) -> Optional[str]: - global FILE_LOG - global STDOUT_LOG - - if FILE_LOG.name == DEFAULT_FILE_LOGGER_NAME and STDOUT_LOG.name == DEFAULT_STDOUT_LOGGER_NAME: - - # TODO: This is only necessary because our test framework doesn't correctly set up logging. - # This code should be moved to the test framework when we do CT-XXX (tix # needed) - null_handler = logging.NullHandler() - FILE_LOG.addHandler(null_handler) - setattr(FILE_LOG, "format_json", False) - setattr(FILE_LOG, "format_color", False) - - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setLevel(logging.INFO) - STDOUT_LOG.setLevel(logging.INFO) - STDOUT_LOG.addHandler(stdout_handler) - setattr(STDOUT_LOG, "format_json", False) - setattr(STDOUT_LOG, "format_color", False) - - logger = FILE_LOG if file_output else STDOUT_LOG - if getattr(logger, "format_json"): - return create_json_log_line(e) # json output, both console and file - elif file_output is True or flags.DEBUG: - return create_debug_text_log_line(e) # default file output - else: - return create_info_text_log_line(e) # console output - - -# allows for reuse of this obnoxious if else tree. 
-# do not use for exceptions, it doesn't pass along exc_info, stack_info, or extra -def send_to_logger(l: Union[Logger, logbook.Logger], level_tag: str, log_line: str): - if not log_line: - return - if level_tag == "test": - # TODO after implmenting #3977 send to new test level - l.debug(log_line) - elif level_tag == "debug": - l.debug(log_line) - elif level_tag == "info": - l.info(log_line) - elif level_tag == "warn": - l.warning(log_line) - elif level_tag == "error": - l.error(log_line) +def warn_or_error(event, node=None): + if flags.WARN_ERROR: + # TODO: resolve this circular import when at top + from dbt.exceptions import EventCompilationException + + raise EventCompilationException(event.info.msg, node) else: - raise AssertionError( - f"While attempting to log {log_line}, encountered the unhandled level: {level_tag}" - ) + fire_event(event) # an alternative to fire_event which only creates and logs the event value @@ -244,30 +186,7 @@ def fire_event_if(conditional: bool, lazy_e: Callable[[], BaseEvent]) -> None: # (i.e. - mutating the event history, printing to stdout, logging # to files, etc.) def fire_event(e: BaseEvent) -> None: - # skip logs when `--log-cache-events` is not passed - if isinstance(e, Cache) and not flags.LOG_CACHE_EVENTS: - return - - add_to_event_history(e) - - # always logs debug level regardless of user input - if not isinstance(e, NoFile): - log_line = create_log_line(e, file_output=True) - # doesn't send exceptions to exception logger - if log_line: - send_to_logger(FILE_LOG, level_tag=e.level_tag(), log_line=log_line) - - if not isinstance(e, NoStdOut): - # explicitly checking the debug flag here so that potentially expensive-to-construct - # log messages are not constructed if debug messages are never shown. 
- if e.level_tag() == "debug" and not flags.DEBUG: - return # eat the message in case it was one of the expensive ones - if e.level_tag() != "error" and flags.QUIET: - return # eat all non-exception messages in quiet mode - - log_line = create_log_line(e) - if log_line: - send_to_logger(STDOUT_LOG, level_tag=e.level_tag(), log_line=log_line) + EVENT_MANAGER.fire_event(e) def get_metadata_vars() -> Dict[str, str]: @@ -287,44 +206,18 @@ def reset_metadata_vars() -> None: def get_invocation_id() -> str: - global invocation_id - if invocation_id is None: - invocation_id = str(uuid.uuid4()) - return invocation_id + return EVENT_MANAGER.invocation_id def set_invocation_id() -> None: # This is primarily for setting the invocation_id for separate # commands in the dbt servers. It shouldn't be necessary for the CLI. - global invocation_id - invocation_id = str(uuid.uuid4()) - - -# exactly one time stamp per concrete event -def get_ts() -> datetime: - ts = datetime.utcnow() - return ts - - -# preformatted time stamp -def get_ts_rfc3339() -> str: - ts = get_ts() - ts_rfc3339 = ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - return ts_rfc3339 - - -def add_to_event_history(event): - if flags.EVENT_BUFFER_SIZE == 0: - return - global EVENT_HISTORY - if EVENT_HISTORY is None: - reset_event_history() - EVENT_HISTORY.append(event) - # We only set the EventBufferFull message for event buffers >= 10,000 - if flags.EVENT_BUFFER_SIZE >= 10000 and len(EVENT_HISTORY) == (flags.EVENT_BUFFER_SIZE - 1): - fire_event(EventBufferFull()) + EVENT_MANAGER.invocation_id = str(uuid.uuid4()) -def reset_event_history(): - global EVENT_HISTORY - EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) +# Currently used to set the level in EventInfo, so logging events can +# provide more than one "level". 
Might be used in the future to set +# more fields in EventInfo, once some of that information is no longer global +def info(level="info"): + info = EventInfo(level=level) + return info diff --git a/core/dbt/events/helpers.py b/core/dbt/events/helpers.py new file mode 100644 index 00000000000..2570c8653c9 --- /dev/null +++ b/core/dbt/events/helpers.py @@ -0,0 +1,16 @@ +import os +from typing import List +from dbt.constants import SECRET_ENV_PREFIX + + +def env_secrets() -> List[str]: + return [v for k, v in os.environ.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()] + + +def scrub_secrets(msg: str, secrets: List[str]) -> str: + scrubbed = msg + + for secret in secrets: + scrubbed = scrubbed.replace(secret, "*****") + + return scrubbed diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index d75713285db..5ee384643d3 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -23,6 +23,7 @@ class EventInfo(betterproto.Message): extra: Dict[str, str] = betterproto.map_field( 9, betterproto.TYPE_STRING, betterproto.TYPE_STRING ) + category: str = betterproto.string_field(10) @dataclass @@ -52,7 +53,6 @@ class NodeInfo(betterproto.Message): class RunResultMsg(betterproto.Message): """RunResult""" - # status: Union[RunStatus, TestStatus, FreshnessStatus] status: str = betterproto.string_field(1) message: str = betterproto.string_field(2) timing_info: List["TimingInfoMsg"] = betterproto.message_field(3) @@ -281,14 +281,74 @@ class ProjectCreated(betterproto.Message): slack_url: str = betterproto.string_field(4) +@dataclass +class PackageRedirectDeprecation(betterproto.Message): + """D001""" + + info: "EventInfo" = betterproto.message_field(1) + old_name: str = betterproto.string_field(2) + new_name: str = betterproto.string_field(3) + + +@dataclass +class PackageInstallPathDeprecation(betterproto.Message): + """D002""" + + info: "EventInfo" = betterproto.message_field(1) + + +@dataclass +class 
ConfigSourcePathDeprecation(betterproto.Message): + """D003""" + + info: "EventInfo" = betterproto.message_field(1) + deprecated_path: str = betterproto.string_field(2) + exp_path: str = betterproto.string_field(3) + + +@dataclass +class ConfigDataPathDeprecation(betterproto.Message): + """D004""" + + info: "EventInfo" = betterproto.message_field(1) + deprecated_path: str = betterproto.string_field(2) + exp_path: str = betterproto.string_field(3) + + +@dataclass +class AdapterDeprecationWarning(betterproto.Message): + """D005""" + + info: "EventInfo" = betterproto.message_field(1) + old_name: str = betterproto.string_field(2) + new_name: str = betterproto.string_field(3) + + +@dataclass +class MetricAttributesRenamed(betterproto.Message): + """D006""" + + info: "EventInfo" = betterproto.message_field(1) + metric_name: str = betterproto.string_field(2) + + +@dataclass +class ExposureNameDeprecation(betterproto.Message): + """D007""" + + info: "EventInfo" = betterproto.message_field(1) + exposure: str = betterproto.string_field(2) + + @dataclass class AdapterEventDebug(betterproto.Message): """E001""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -296,9 +356,10 @@ class AdapterEventInfo(betterproto.Message): """E002""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -306,9 +367,10 @@ class 
AdapterEventWarning(betterproto.Message): """E003""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -316,10 +378,11 @@ class AdapterEventError(betterproto.Message): """E004""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) - exc_info: str = betterproto.string_field(5) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) + exc_info: str = betterproto.string_field(6) @dataclass @@ -327,8 +390,9 @@ class NewConnection(betterproto.Message): """E005""" info: "EventInfo" = betterproto.message_field(1) - conn_type: str = betterproto.string_field(2) - conn_name: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_type: str = betterproto.string_field(3) + conn_name: str = betterproto.string_field(4) @dataclass @@ -340,7 +404,7 @@ class ConnectionReused(betterproto.Message): @dataclass -class ConnectionLeftOpen(betterproto.Message): +class ConnectionLeftOpenInCleanup(betterproto.Message): """E007""" info: "EventInfo" = betterproto.message_field(1) @@ -348,7 +412,7 @@ class ConnectionLeftOpen(betterproto.Message): @dataclass -class ConnectionClosed(betterproto.Message): +class ConnectionClosedInCleanup(betterproto.Message): """E008""" info: "EventInfo" = betterproto.message_field(1) @@ -360,24 +424,27 @@ class RollbackFailed(betterproto.Message): """E009""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = 
betterproto.string_field(2) - exc_info: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) + exc_info: str = betterproto.string_field(4) @dataclass -class ConnectionClosed2(betterproto.Message): +class ConnectionClosed(betterproto.Message): """E010""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass -class ConnectionLeftOpen2(betterproto.Message): +class ConnectionLeftOpen(betterproto.Message): """E011""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -385,7 +452,8 @@ class Rollback(betterproto.Message): """E012""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -413,8 +481,9 @@ class ConnectionUsed(betterproto.Message): """E015""" info: "EventInfo" = betterproto.message_field(1) - conn_type: str = betterproto.string_field(2) - conn_name: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_type: str = betterproto.string_field(3) + conn_name: str = betterproto.string_field(4) @dataclass @@ -422,8 +491,9 @@ class SQLQuery(betterproto.Message): """E016""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) - sql: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) + sql: str = betterproto.string_field(4) @dataclass @@ -431,8 +501,9 @@ class SQLQueryStatus(betterproto.Message): """E017""" info: "EventInfo" = betterproto.message_field(1) 
- status: str = betterproto.string_field(2) - elapsed: float = betterproto.float_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + status: str = betterproto.string_field(3) + elapsed: float = betterproto.float_field(4) @dataclass @@ -440,7 +511,8 @@ class SQLCommit(betterproto.Message): """E018""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -608,7 +680,8 @@ class NewConnectionOpening(betterproto.Message): """E037""" info: "EventInfo" = betterproto.message_field(1) - connection_state: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + connection_state: str = betterproto.string_field(3) @dataclass @@ -629,6 +702,14 @@ class CodeExecutionStatus(betterproto.Message): elapsed: float = betterproto.float_field(3) +@dataclass +class CatalogGenerationError(betterproto.Message): + """E040""" + + info: "EventInfo" = betterproto.message_field(1) + exc: str = betterproto.string_field(2) + + @dataclass class WriteCatalogFailure(betterproto.Message): """E041""" @@ -1066,19 +1147,122 @@ class PartialParsingDeletedExposure(betterproto.Message): @dataclass -class InvalidDisabledSourceInTestNode(betterproto.Message): +class InvalidDisabledTargetInTestNode(betterproto.Message): """I050""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + resource_type_title: str = betterproto.string_field(2) + unique_id: str = betterproto.string_field(3) + original_file_path: str = betterproto.string_field(4) + target_kind: str = betterproto.string_field(5) + target_name: str = betterproto.string_field(6) + target_package: str = betterproto.string_field(7) @dataclass -class InvalidRefInTestNode(betterproto.Message): +class UnusedResourceConfigPath(betterproto.Message): """I051""" info: "EventInfo" = betterproto.message_field(1) - msg: str = 
betterproto.string_field(2) + unused_config_paths: List[str] = betterproto.string_field(2) + + +@dataclass +class SeedIncreased(betterproto.Message): + """I052""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitSamePath(betterproto.Message): + """I053""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitAndPathChanged(betterproto.Message): + """I054""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitChecksumChanged(betterproto.Message): + """I055""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + checksum_name: str = betterproto.string_field(4) + + +@dataclass +class UnusedTables(betterproto.Message): + """I056""" + + info: "EventInfo" = betterproto.message_field(1) + unused_tables: List[str] = betterproto.string_field(2) + + +@dataclass +class WrongResourceSchemaFile(betterproto.Message): + """I057""" + + info: "EventInfo" = betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + resource_type: str = betterproto.string_field(3) + plural_resource_type: str = betterproto.string_field(4) + yaml_key: str = betterproto.string_field(5) + file_path: str = betterproto.string_field(6) + + +@dataclass +class NoNodeForYamlKey(betterproto.Message): + """I058""" + + info: "EventInfo" = betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + yaml_key: str = betterproto.string_field(3) + file_path: str = betterproto.string_field(4) + + +@dataclass +class MacroPatchNotFound(betterproto.Message): + """I059""" + + info: "EventInfo" = 
betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + + +@dataclass +class NodeNotFoundOrDisabled(betterproto.Message): + """I060""" + + info: "EventInfo" = betterproto.message_field(1) + original_file_path: str = betterproto.string_field(2) + unique_id: str = betterproto.string_field(3) + resource_type_title: str = betterproto.string_field(4) + target_name: str = betterproto.string_field(5) + target_kind: str = betterproto.string_field(6) + target_package: str = betterproto.string_field(7) + disabled: str = betterproto.string_field(8) + + +@dataclass +class JinjaLogWarning(betterproto.Message): + """I061""" + + info: "EventInfo" = betterproto.message_field(1) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass @@ -1166,19 +1350,21 @@ class SelectorReportInvalidSelector(betterproto.Message): @dataclass -class MacroEventInfo(betterproto.Message): +class JinjaLogInfo(betterproto.Message): """M011""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass -class MacroEventDebug(betterproto.Message): +class JinjaLogDebug(betterproto.Message): """M012""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass @@ -1309,6 +1495,23 @@ class DepsSetDownloadDirectory(betterproto.Message): path: str = betterproto.string_field(2) +@dataclass +class DepsUnpinned(betterproto.Message): + """M029""" + + info: "EventInfo" = betterproto.message_field(1) + revision: str = betterproto.string_field(2) + git: str = betterproto.string_field(3) + + +@dataclass +class NoNodesForSelectionCriteria(betterproto.Message): + """M030""" + + info: "EventInfo" = betterproto.message_field(1) + spec_raw: str = betterproto.string_field(2) + + 
@dataclass class RunningOperationCaughtError(betterproto.Message): """Q001""" @@ -1357,57 +1560,21 @@ class SQLRunnerException(betterproto.Message): @dataclass -class PrintErrorTestResult(betterproto.Message): +class LogTestResult(betterproto.Message): """Q007""" info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - - -@dataclass -class PrintPassTestResult(betterproto.Message): - """Q008""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - - -@dataclass -class PrintWarnTestResult(betterproto.Message): - """Q009""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - num_failures: int = betterproto.int32_field(7) - - -@dataclass -class PrintFailureTestResult(betterproto.Message): - """Q010""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - num_failures: int = betterproto.int32_field(7) + status: str = betterproto.string_field(4) + index: int = betterproto.int32_field(5) + num_models: int = betterproto.int32_field(6) + execution_time: float = betterproto.float_field(7) + num_failures: int = betterproto.int32_field(8) 
@dataclass -class PrintStartLine(betterproto.Message): +class LogStartLine(betterproto.Message): """Q011""" info: "EventInfo" = betterproto.message_field(1) @@ -1418,7 +1585,7 @@ class PrintStartLine(betterproto.Message): @dataclass -class PrintModelResultLine(betterproto.Message): +class LogModelResult(betterproto.Message): """Q012""" info: "EventInfo" = betterproto.message_field(1) @@ -1427,40 +1594,11 @@ class PrintModelResultLine(betterproto.Message): status: str = betterproto.string_field(4) index: int = betterproto.int32_field(5) total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) - - -@dataclass -class PrintModelErrorResultLine(betterproto.Message): - """Q013""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - description: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) + execution_time: int = betterproto.int32_field(7) @dataclass -class PrintSnapshotErrorResultLine(betterproto.Message): - """Q014""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - description: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) - cfg: Dict[str, str] = betterproto.map_field( - 8, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -@dataclass -class PrintSnapshotResultLine(betterproto.Message): +class LogSnapshotResult(betterproto.Message): """Q015""" info: "EventInfo" = betterproto.message_field(1) @@ -1476,87 +1614,36 @@ class PrintSnapshotResultLine(betterproto.Message): @dataclass -class PrintSeedErrorResultLine(betterproto.Message): +class LogSeedResult(betterproto.Message): 
"""Q016""" info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) status: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - total: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - schema: str = betterproto.string_field(7) - relation: str = betterproto.string_field(8) + result_message: str = betterproto.string_field(4) + index: int = betterproto.int32_field(5) + total: int = betterproto.int32_field(6) + execution_time: float = betterproto.float_field(7) + schema: str = betterproto.string_field(8) + relation: str = betterproto.string_field(9) @dataclass -class PrintSeedResultLine(betterproto.Message): - """Q017""" +class LogFreshnessResult(betterproto.Message): + """Q018""" info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - status: str = betterproto.string_field(3) + status: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(3) index: int = betterproto.int32_field(4) total: int = betterproto.int32_field(5) execution_time: float = betterproto.float_field(6) - schema: str = betterproto.string_field(7) - relation: str = betterproto.string_field(8) - - -@dataclass -class PrintFreshnessErrorLine(betterproto.Message): - """Q018""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) + source_name: str = betterproto.string_field(7) + table_name: str = betterproto.string_field(8) @dataclass -class PrintFreshnessErrorStaleLine(betterproto.Message): - """Q019""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = 
betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintFreshnessWarnLine(betterproto.Message): - """Q020""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintFreshnessPassLine(betterproto.Message): - """Q021""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintCancelLine(betterproto.Message): +class LogCancelLine(betterproto.Message): """Q022""" info: "EventInfo" = betterproto.message_field(1) @@ -1577,7 +1664,6 @@ class NodeStart(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass @@ -1586,7 +1672,6 @@ class NodeFinished(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) run_result: "RunResultMsg" = betterproto.message_field(4) @@ -1605,14 +1690,7 @@ class ConcurrencyLine(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) num_threads: int = betterproto.int32_field(2) target_name: str = betterproto.string_field(3) - - -@dataclass -class CompilingNode(betterproto.Message): - """Q028""" 
- - info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) + node_count: int = betterproto.int32_field(4) @dataclass @@ -1620,7 +1698,7 @@ class WritingInjectedSQLForNode(betterproto.Message): """Q029""" info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) @dataclass @@ -1629,7 +1707,6 @@ class NodeCompiling(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass @@ -1638,11 +1715,10 @@ class NodeExecuting(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass -class PrintHookStartLine(betterproto.Message): +class LogHookStartLine(betterproto.Message): """Q032""" info: "EventInfo" = betterproto.message_field(1) @@ -1653,7 +1729,7 @@ class PrintHookStartLine(betterproto.Message): @dataclass -class PrintHookEndLine(betterproto.Message): +class LogHookEndLine(betterproto.Message): """Q033""" info: "EventInfo" = betterproto.message_field(1) @@ -1678,6 +1754,13 @@ class SkippingDetails(betterproto.Message): total: int = betterproto.int32_field(7) +@dataclass +class NothingToDo(betterproto.Message): + """Q035""" + + info: "EventInfo" = betterproto.message_field(1) + + @dataclass class RunningOperationUncaughtError(betterproto.Message): """Q036""" @@ -1697,13 +1780,21 @@ class EndRunResult(betterproto.Message): success: bool = betterproto.bool_field(5) +@dataclass +class NoNodesSelected(betterproto.Message): + """Q038""" + + info: "EventInfo" = betterproto.message_field(1) + + @dataclass class CatchableExceptionOnRun(betterproto.Message): """W002""" info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) - exc_info: str = betterproto.string_field(3) + 
node_info: "NodeInfo" = betterproto.message_field(2) + exc: str = betterproto.string_field(3) + exc_info: str = betterproto.string_field(4) @dataclass @@ -1821,10 +1912,12 @@ class TimingInfoCollected(betterproto.Message): """Z010""" info: "EventInfo" = betterproto.message_field(1) + node_info: "NodeInfo" = betterproto.message_field(2) + timing_info: "TimingInfoMsg" = betterproto.message_field(3) @dataclass -class PrintDebugStackTrace(betterproto.Message): +class LogDebugStackTrace(betterproto.Message): """Z011""" info: "EventInfo" = betterproto.message_field(1) @@ -1991,7 +2084,7 @@ class EndOfRunSummary(betterproto.Message): @dataclass -class PrintSkipBecauseError(betterproto.Message): +class LogSkipBecauseError(betterproto.Message): """Z034""" info: "EventInfo" = betterproto.message_field(1) @@ -2066,34 +2159,9 @@ class TrackingInitializeFailure(betterproto.Message): exc_info: str = betterproto.string_field(2) -@dataclass -class GeneralWarningMsg(betterproto.Message): - """Z046""" - - info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) - log_fmt: str = betterproto.string_field(3) - - -@dataclass -class GeneralWarningException(betterproto.Message): - """Z047""" - - info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) - log_fmt: str = betterproto.string_field(3) - - -@dataclass -class EventBufferFull(betterproto.Message): - """Z048""" - - info: "EventInfo" = betterproto.message_field(1) - - @dataclass class RunResultWarningMessage(betterproto.Message): - """Z049""" + """Z046""" info: "EventInfo" = betterproto.message_field(1) msg: str = betterproto.string_field(2) diff --git a/core/dbt/events/test_types.py b/core/dbt/events/test_types.py index 5f4a10cd7d7..cf7307125ca 100644 --- a/core/dbt/events/test_types.py +++ b/core/dbt/events/test_types.py @@ -61,18 +61,3 @@ def code(self): def message(self) -> str: return f"Unit Test: {self.msg}" - - -# since mypy doesn't run on every file we need to 
suggest to mypy that every -# class gets instantiated. But we don't actually want to run this code. -# making the conditional `if False` causes mypy to skip it as dead code so -# we need to skirt around that by computing something it doesn't check statically. -# -# TODO remove these lines once we run mypy everywhere. -if 1 == 0: - IntegrationTestInfo(msg="") - IntegrationTestDebug(msg="") - IntegrationTestWarn(msg="") - IntegrationTestError(msg="") - IntegrationTestException(msg="") - UnitTestInfo(msg="") diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index eaa05b4f93d..1c330106d92 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -15,6 +15,7 @@ message EventInfo { string thread = 7; google.protobuf.Timestamp ts = 8; map extra = 9; + string category = 10; } // TimingInfo @@ -38,7 +39,6 @@ message NodeInfo { // RunResult message RunResultMsg { - // status: Union[RunStatus, TestStatus, FreshnessStatus] string status = 1; string message = 2; repeated TimingInfoMsg timing_info = 3; @@ -213,46 +213,98 @@ message ProjectCreated { string slack_url = 4; } +// D - Deprecation + +// D001 +message PackageRedirectDeprecation { + EventInfo info = 1; + string old_name = 2; + string new_name = 3; +} + +// D002 +message PackageInstallPathDeprecation { + EventInfo info = 1; +} + +// D003 +message ConfigSourcePathDeprecation { + EventInfo info = 1; + string deprecated_path = 2; + string exp_path = 3; +} + +// D004 +message ConfigDataPathDeprecation { + EventInfo info = 1; + string deprecated_path = 2; + string exp_path = 3; +} + +//D005 +message AdapterDeprecationWarning { + EventInfo info = 1; + string old_name = 2; + string new_name = 3; +} + +//D006 +message MetricAttributesRenamed { + EventInfo info = 1; + string metric_name = 2; +} + +//D007 +message ExposureNameDeprecation { + EventInfo info = 1; + string exposure = 2; +} + // E - DB Adapter // E001 message AdapterEventDebug { EventInfo info = 1; - string name = 2; - string 
base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E002 message AdapterEventInfo { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E003 message AdapterEventWarning { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E004 message AdapterEventError { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; - string exc_info = 5; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; + string exc_info = 6; } // E005 message NewConnection { EventInfo info = 1; - string conn_type = 2; - string conn_name = 3; + NodeInfo node_info = 2; + string conn_type = 3; + string conn_name = 4; } // E006 @@ -262,13 +314,13 @@ message ConnectionReused { } // E007 -message ConnectionLeftOpen { +message ConnectionLeftOpenInCleanup { EventInfo info = 1; string conn_name = 2; } // E008 -message ConnectionClosed { +message ConnectionClosedInCleanup { EventInfo info = 1; string conn_name = 2; } @@ -276,26 +328,30 @@ message ConnectionClosed { // E009 message RollbackFailed { EventInfo info = 1; - string conn_name = 2; - string exc_info = 3; + NodeInfo node_info = 2; + string conn_name = 3; + string exc_info = 4; } // E010 -message ConnectionClosed2 { +message ConnectionClosed { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E011 -message ConnectionLeftOpen2 { +message ConnectionLeftOpen { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E012 message Rollback { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 
3; } // E013 @@ -317,28 +373,32 @@ message ListRelations { // E015 message ConnectionUsed { EventInfo info = 1; - string conn_type = 2; - string conn_name = 3; + NodeInfo node_info = 2; + string conn_type = 3; + string conn_name = 4; } // E016 message SQLQuery { EventInfo info = 1; - string conn_name = 2; - string sql = 3; + NodeInfo node_info = 2; + string conn_name = 3; + string sql = 4; } // E017 message SQLQueryStatus { EventInfo info = 1; - string status = 2; - float elapsed = 3; + NodeInfo node_info = 2; + string status = 3; + float elapsed = 4; } // E018 message SQLCommit { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E019 @@ -455,13 +515,13 @@ message AdapterImportError { message PluginLoadError { EventInfo info = 1; string exc_info = 2; - } // E037 message NewConnectionOpening { EventInfo info = 1; - string connection_state = 2; + NodeInfo node_info = 2; + string connection_state = 3; } // E038 @@ -478,7 +538,11 @@ message CodeExecutionStatus { float elapsed = 3; } -// Skipped E040 +// E040 +message CatalogGenerationError { + EventInfo info = 1; + string exc = 2; +} // E041 message WriteCatalogFailure { @@ -806,17 +870,99 @@ message PartialParsingDeletedExposure { } // I050 -message InvalidDisabledSourceInTestNode { +message InvalidDisabledTargetInTestNode { EventInfo info = 1; - string msg = 2; + string resource_type_title = 2; + string unique_id = 3; + string original_file_path = 4; + string target_kind = 5; + string target_name = 6; + string target_package = 7; } // I051 -message InvalidRefInTestNode { +message UnusedResourceConfigPath { EventInfo info = 1; - string msg = 2; + repeated string unused_config_paths = 2; +} + +// I052 +message SeedIncreased { + EventInfo info = 1; + string package_name = 2; + string name = 3; +} + +// I053 +message SeedExceedsLimitSamePath { + EventInfo info = 1; + string package_name = 2; + string name = 3; } +// I054 +message SeedExceedsLimitAndPathChanged { + 
EventInfo info = 1; + string package_name = 2; + string name = 3; +} + +// I055 +message SeedExceedsLimitChecksumChanged { + EventInfo info = 1; + string package_name = 2; + string name = 3; + string checksum_name = 4; +} + +// I056 +message UnusedTables { + EventInfo info = 1; + repeated string unused_tables = 2; +} + +// I057 +message WrongResourceSchemaFile { + EventInfo info = 1; + string patch_name = 2; + string resource_type = 3; + string plural_resource_type = 4; + string yaml_key = 5; + string file_path = 6; +} + +// I058 +message NoNodeForYamlKey { + EventInfo info = 1; + string patch_name = 2; + string yaml_key = 3; + string file_path = 4; +} + +// I059 +message MacroPatchNotFound { + EventInfo info = 1; + string patch_name = 2; +} + +// I060 +message NodeNotFoundOrDisabled { + EventInfo info = 1; + string original_file_path = 2; + string unique_id = 3; + string resource_type_title = 4; + string target_name = 5; + string target_kind = 6; + string target_package = 7; + string disabled = 8; +} + +// I061 +message JinjaLogWarning { + EventInfo info = 1; + NodeInfo node_info = 2; + string msg = 3; +} // M - Deps generation @@ -885,15 +1031,17 @@ message SelectorReportInvalidSelector { } // M011 -message MacroEventInfo { +message JinjaLogInfo { EventInfo info = 1; - string msg = 2; + NodeInfo node_info = 2; + string msg = 3; } // M012 -message MacroEventDebug { +message JinjaLogDebug { EventInfo info = 1; - string msg = 2; + NodeInfo node_info = 2; + string msg = 3; } // M013 @@ -992,6 +1140,19 @@ message DepsSetDownloadDirectory { string path = 2; } +// M029 +message DepsUnpinned { + EventInfo info = 1; + string revision = 2; + string git = 3; +} + +// M030 +message NoNodesForSelectionCriteria { + EventInfo info = 1; + string spec_raw = 2; +} + // Q - Node execution // Q001 @@ -1030,49 +1191,23 @@ message SQLRunnerException { } // Q007 -message PrintErrorTestResult { +message LogTestResult { EventInfo info = 1; NodeInfo node_info = 2; string name = 3; - int32 
index = 4; - int32 num_models = 5; - float execution_time = 6; + string status = 4; + int32 index = 5; + int32 num_models = 6; + float execution_time = 7; + int32 num_failures = 8; } -// Q008 -message PrintPassTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; -} -// Q009 -message PrintWarnTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; - int32 num_failures = 7; -} +// Skipped Q008, Q009, Q010 -// Q010 -message PrintFailureTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; - int32 num_failures = 7; -} // Q011 -message PrintStartLine { +message LogStartLine { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; @@ -1081,29 +1216,20 @@ message PrintStartLine { } // Q012 -message PrintModelResultLine { +message LogModelResult { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; string status = 4; int32 index = 5; int32 total = 6; - float execution_time = 7; + int32 execution_time = 7; } -// Q013 -message PrintModelErrorResultLine { - EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; -} +// skipped Q013, Q014 -// Q014 -message PrintSnapshotErrorResultLine { +// Q015 +message LogSnapshotResult { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; @@ -1114,88 +1240,39 @@ message PrintSnapshotErrorResultLine { map cfg = 8; } -// Q015 -message PrintSnapshotResultLine { +// Q016 +message LogSeedResult { EventInfo info = 1; NodeInfo node_info = 2; - string description = 3; - string status = 4; + string status = 3; + string result_message = 4; int32 index = 5; int32 total = 6; float execution_time = 7; - map cfg = 8; + 
string schema = 8; + string relation = 9; } -// Q016 -message PrintSeedErrorResultLine { - EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; - int32 index = 4; - int32 total = 5; - float execution_time = 6; - string schema = 7; - string relation = 8; -} +// Skipped Q017 -// Q017 -message PrintSeedResultLine { +// Q018 +message LogFreshnessResult { EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; + string status = 2; + NodeInfo node_info = 3; int32 index = 4; int32 total = 5; float execution_time = 6; - string schema = 7; - string relation = 8; + string source_name = 7; + string table_name = 8; } -// Q018 -message PrintFreshnessErrorLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} -// Q019 -message PrintFreshnessErrorStaleLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} - -// Q020 -message PrintFreshnessWarnLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} +// Skipped Q019, Q020, Q021 -// Q021 -message PrintFreshnessPassLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} // Q022 -message PrintCancelLine { +message LogCancelLine { EventInfo info = 1; string conn_name = 2; } @@ -1210,14 +1287,12 @@ message DefaultSelector { message NodeStart { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q025 message NodeFinished { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; RunResultMsg run_result = 4; } @@ -1232,36 +1307,31 @@ message ConcurrencyLine { EventInfo info = 1; int32 num_threads = 2; string 
target_name = 3; + int32 node_count = 4; } -// Q028 -message CompilingNode { - EventInfo info = 1; - string unique_id = 2; -} +// Skipped Q028 // Q029 message WritingInjectedSQLForNode { EventInfo info = 1; - string unique_id = 2; + NodeInfo node_info = 2; } // Q030 message NodeCompiling { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q031 message NodeExecuting { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q032 -message PrintHookStartLine { +message LogHookStartLine { EventInfo info = 1; NodeInfo node_info = 2; string statement = 3; @@ -1270,7 +1340,7 @@ message PrintHookStartLine { } // Q033 -message PrintHookEndLine { +message LogHookEndLine { EventInfo info = 1; NodeInfo node_info = 2; string statement = 3; @@ -1291,7 +1361,10 @@ message SkippingDetails { int32 total = 7; } -// Skipped Q035 +// Q035 +message NothingToDo { + EventInfo info = 1; +} // Q036 message RunningOperationUncaughtError { @@ -1308,6 +1381,11 @@ message EndRunResult { bool success = 5; } +// Q038 +message NoNodesSelected { + EventInfo info = 1; +} + // W - Node testing // Skipped W001 @@ -1315,8 +1393,9 @@ message EndRunResult { // W002 message CatchableExceptionOnRun { EventInfo info = 1; - string exc = 2; - string exc_info = 3; + NodeInfo node_info = 2; + string exc = 3; + string exc_info = 4; } // W003 @@ -1408,10 +1487,12 @@ message SystemReportReturnCode { // Z010 message TimingInfoCollected { EventInfo info = 1; + NodeInfo node_info = 2; + TimingInfoMsg timing_info = 3; } // Z011 -message PrintDebugStackTrace { +message LogDebugStackTrace { EventInfo info = 1; string exc_info = 2; } @@ -1538,7 +1619,7 @@ message EndOfRunSummary { // Skipped Z031, Z032, Z033 // Z034 -message PrintSkipBecauseError { +message LogSkipBecauseError { EventInfo info = 1; string schema = 2; string relation = 3; @@ -1596,25 +1677,6 @@ message TrackingInitializeFailure { // Skipped Z045 // Z046 -message GeneralWarningMsg { - EventInfo info = 1; - string msg 
= 2; - string log_fmt = 3; -} - -// Z047 -message GeneralWarningException { - EventInfo info = 1; - string exc = 2; - string log_fmt = 3; -} - -// Z048 -message EventBufferFull { - EventInfo info = 1; -} - -// Z049 message RunResultWarningMessage { EventInfo info = 1; string msg = 2; diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index f6e66f941d2..0a0cd04fe1d 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -1,6 +1,8 @@ from dataclasses import dataclass -from dbt import ui +from dbt.ui import line_wrap_message, warning_tag, red, green, yellow +from dbt.constants import MAXIMUM_SEED_SIZE_NAME, PIN_PACKAGE_URL from dbt.events.base_types import ( + DynamicLevel, NoFile, DebugLevel, InfoLevel, @@ -12,9 +14,9 @@ ) from dbt.events.format import format_fancy_output_line, pluralize -# The generated classes quote the included message classes, requiring the following line +# The generated classes quote the included message classes, requiring the following lines from dbt.events.proto_types import EventInfo, RunResultMsg, ListOfStrings # noqa -from dbt.events.proto_types import NodeInfo, ReferenceKeyMsg +from dbt.events.proto_types import NodeInfo, ReferenceKeyMsg, TimingInfoMsg # noqa from dbt.events import proto_types as pt from dbt.node_types import NodeType @@ -32,10 +34,11 @@ # | Code | Description | # |:----:|:-------------------:| # | A | Pre-project loading | +# | D | Deprecations | # | E | DB adapter | # | I | Project parsing | # | M | Deps generation | -# | Q | Node execution | +# | Q | Node execution | # | W | Node testing | # | Z | Misc | # | T | Test only | @@ -305,6 +308,114 @@ def message(self) -> str: """ +# ======================================================= +# D - Deprecations +# ======================================================= + + +@dataclass +class PackageRedirectDeprecation(WarnLevel, pt.PackageRedirectDeprecation): # noqa + def code(self): + return "D001" + + def message(self): + description = ( + f"The 
`{self.old_name}` package is deprecated in favor of `{self.new_name}`. Please " + f"update your `packages.yml` configuration to use `{self.new_name}` instead." + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class PackageInstallPathDeprecation(WarnLevel, pt.PackageInstallPathDeprecation): # noqa + def code(self): + return "D002" + + def message(self): + description = """\ + The default package install path has changed from `dbt_modules` to `dbt_packages`. + Please update `clean-targets` in `dbt_project.yml` and check `.gitignore` as well. + Or, set `packages-install-path: dbt_modules` if you'd like to keep the current value. + """ + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class ConfigSourcePathDeprecation(WarnLevel, pt.ConfigSourcePathDeprecation): # noqa + def code(self): + return "D003" + + def message(self): + description = ( + f"The `{self.deprecated_path}` config has been renamed to `{self.exp_path}`." + "Please update your `dbt_project.yml` configuration to reflect this change." + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class ConfigDataPathDeprecation(WarnLevel, pt.ConfigDataPathDeprecation): # noqa + def code(self): + return "D004" + + def message(self): + description = ( + f"The `{self.deprecated_path}` config has been renamed to `{self.exp_path}`." + "Please update your `dbt_project.yml` configuration to reflect this change." + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class AdapterDeprecationWarning(WarnLevel, pt.AdapterDeprecationWarning): # noqa + def code(self): + return "D005" + + def message(self): + description = ( + f"The adapter function `adapter.{self.old_name}` is deprecated and will be removed in " + f"a future release of dbt. Please use `adapter.{self.new_name}` instead. 
" + f"\n\nDocumentation for {self.new_name} can be found here:" + f"\n\nhttps://docs.getdbt.com/docs/adapter" + ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + +@dataclass +class MetricAttributesRenamed(WarnLevel, pt.MetricAttributesRenamed): # noqa + def code(self): + return "D006" + + def message(self): + description = ( + "dbt-core v1.3 renamed attributes for metrics:" + "\n 'sql' -> 'expression'" + "\n 'type' -> 'calculation_method'" + "\n 'type: expression' -> 'calculation_method: derived'" + "\nThe old metric parameter names will be fully deprecated in v1.4." + f"\nPlease remove them from the metric definition of metric '{self.metric_name}'" + "\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849" + ) + + return warning_tag(f"Deprecated functionality\n\n{description}") + + +@dataclass +class ExposureNameDeprecation(WarnLevel, pt.ExposureNameDeprecation): # noqa + def code(self): + return "D007" + + def message(self): + description = ( + "Starting in v1.3, the 'name' of an exposure should contain only letters, " + "numbers, and underscores. Exposures support a new property, 'label', which may " + f"contain spaces, capital letters, and special characters. {self.exposure} does not " + "follow this pattern. Please update the 'name', and use the 'label' property for a " + "human-friendly title. This will raise an error in a future version of dbt-core." 
+ ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + # ======================================================= # E - DB Adapter # ======================================================= @@ -365,7 +476,7 @@ def message(self) -> str: @dataclass -class ConnectionLeftOpen(DebugLevel, pt.ConnectionLeftOpen): +class ConnectionLeftOpenInCleanup(DebugLevel, pt.ConnectionLeftOpenInCleanup): def code(self): return "E007" @@ -374,7 +485,7 @@ def message(self) -> str: @dataclass -class ConnectionClosed(DebugLevel, pt.ConnectionClosed): +class ConnectionClosedInCleanup(DebugLevel, pt.ConnectionClosedInCleanup): def code(self): return "E008" @@ -393,7 +504,7 @@ def message(self) -> str: # TODO: can we combine this with ConnectionClosed? @dataclass -class ConnectionClosed2(DebugLevel, pt.ConnectionClosed2): +class ConnectionClosed(DebugLevel, pt.ConnectionClosed): def code(self): return "E010" @@ -403,7 +514,7 @@ def message(self) -> str: # TODO: can we combine this with ConnectionLeftOpen? 
@dataclass -class ConnectionLeftOpen2(DebugLevel, pt.ConnectionLeftOpen2): +class ConnectionLeftOpen(DebugLevel, pt.ConnectionLeftOpen): def code(self): return "E011" @@ -675,7 +786,13 @@ def message(self) -> str: return f"Execution status: {self.status} in {self.elapsed} seconds" -# Skipped E040 +@dataclass +class CatalogGenerationError(WarnLevel, pt.CatalogGenerationError): + def code(self): + return "E040" + + def message(self) -> str: + return f"Encountered an error while generating catalog: {self.exc}" @dataclass @@ -1218,23 +1335,194 @@ def message(self) -> str: return f"Partial parsing: deleted exposure {self.unique_id}" -# TODO: switch to storing structured info and calling get_target_failure_msg @dataclass -class InvalidDisabledSourceInTestNode( - WarnLevel, EventStringFunctor, pt.InvalidDisabledSourceInTestNode -): +class InvalidDisabledTargetInTestNode(WarnLevel, pt.InvalidDisabledTargetInTestNode): def code(self): return "I050" def message(self) -> str: - return ui.warning_tag(self.msg) + + target_package_string = "" + if self.target_package != target_package_string: + target_package_string = "in package '{}' ".format(self.target_package) + + msg = "{} '{}' ({}) depends on a {} named '{}' {}which is disabled".format( + self.resource_type_title, + self.unique_id, + self.original_file_path, + self.target_kind, + self.target_name, + target_package_string, + ) + + return warning_tag(msg) @dataclass -class InvalidRefInTestNode(DebugLevel, EventStringFunctor, pt.InvalidRefInTestNode): +class UnusedResourceConfigPath(WarnLevel, pt.UnusedResourceConfigPath): def code(self): return "I051" + def message(self) -> str: + path_list = "\n".join(f"- {u}" for u in self.unused_config_paths) + msg = ( + "Configuration paths exist in your dbt_project.yml file which do not " + "apply to any resources.\n" + f"There are {len(self.unused_config_paths)} unused configuration paths:\n{path_list}" + ) + return warning_tag(msg) + + +@dataclass +class SeedIncreased(WarnLevel, 
pt.SeedIncreased): + def code(self): + return "I052" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was " + f"<={MAXIMUM_SEED_SIZE_NAME}, so it has changed" + ) + return msg + + +@dataclass +class SeedExceedsLimitSamePath(WarnLevel, pt.SeedExceedsLimitSamePath): + def code(self): + return "I053" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size at the same path, dbt " + f"cannot tell if it has changed: assuming they are the same" + ) + return msg + + +@dataclass +class SeedExceedsLimitAndPathChanged(WarnLevel, pt.SeedExceedsLimitAndPathChanged): + def code(self): + return "I054" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was in " + f"a different location, assuming it has changed" + ) + return msg + + +@dataclass +class SeedExceedsLimitChecksumChanged(WarnLevel, pt.SeedExceedsLimitChecksumChanged): + def code(self): + return "I055" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file had a " + f"checksum type of {self.checksum_name}, so it has changed" + ) + return msg + + +@dataclass +class UnusedTables(WarnLevel, pt.UnusedTables): + def code(self): + return "I056" + + def message(self) -> str: + msg = [ + "During parsing, dbt encountered source overrides that had no target:", + ] + msg += self.unused_tables + msg.append("") + return warning_tag("\n".join(msg)) + + +@dataclass +class WrongResourceSchemaFile(WarnLevel, pt.WrongResourceSchemaFile): + def code(self): + return "I057" + + def message(self) -> str: + msg = line_wrap_message( + f"""\ + '{self.patch_name}' is a {self.resource_type} node, but it is + specified in the {self.yaml_key} section of + {self.file_path}. 
+ To fix this error, place the `{self.patch_name}` + specification under the {self.plural_resource_type} key instead. + """ + ) + return warning_tag(msg) + + +@dataclass +class NoNodeForYamlKey(WarnLevel, pt.NoNodeForYamlKey): + def code(self): + return "I058" + + def message(self) -> str: + msg = ( + f"Did not find matching node for patch with name '{self.patch_name}' " + f"in the '{self.yaml_key}' section of " + f"file '{self.file_path}'" + ) + return warning_tag(msg) + + +@dataclass +class MacroPatchNotFound(WarnLevel, pt.MacroPatchNotFound): + def code(self): + return "I059" + + def message(self) -> str: + msg = f'Found patch for macro "{self.patch_name}" which was not found' + return warning_tag(msg) + + +@dataclass +class NodeNotFoundOrDisabled(WarnLevel, pt.NodeNotFoundOrDisabled): + def code(self): + return "I060" + + def message(self) -> str: + # this is duplicated logic from exceptions.get_not_found_or_disabled_msg + # when we convert exceptions to be stuctured maybe it can be combined? 
+ # convverting the bool to a string since None is also valid + if self.disabled == "None": + reason = "was not found or is disabled" + elif self.disabled == "True": + reason = "is disabled" + else: + reason = "was not found" + + target_package_string = "" + if self.target_package is not None: + target_package_string = "in package '{}' ".format(self.target_package) + + msg = "{} '{}' ({}) depends on a {} named '{}' {}which {}".format( + self.resource_type_title, + self.unique_id, + self.original_file_path, + self.target_kind, + self.target_name, + target_package_string, + reason, + ) + + return warning_tag(msg) + + +@dataclass +class JinjaLogWarning(WarnLevel, pt.JinjaLogWarning): + def code(self): + return "I061" + def message(self) -> str: return self.msg @@ -1338,20 +1626,22 @@ def message(self) -> str: @dataclass -class MacroEventInfo(InfoLevel, EventStringFunctor, pt.MacroEventInfo): +class JinjaLogInfo(InfoLevel, EventStringFunctor, pt.JinjaLogInfo): def code(self): return "M011" def message(self) -> str: + # This is for the log method used in macros so msg cannot be built here return self.msg @dataclass -class MacroEventDebug(DebugLevel, EventStringFunctor, pt.MacroEventDebug): +class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug): def code(self): return "M012" def message(self) -> str: + # This is for the log method used in macros so msg cannot be built here return self.msg @@ -1417,7 +1707,7 @@ def code(self): def message(self) -> str: return "Updates available for packages: {} \ \nUpdate your versions in packages.yml, then run dbt deps".format( - self.packages + self.packages.value ) @@ -1505,6 +1795,35 @@ def message(self) -> str: return f"Set downloads directory='{self.path}'" +@dataclass +class DepsUnpinned(WarnLevel, pt.DepsUnpinned): + def code(self): + return "M029" + + def message(self) -> str: + if self.revision == "HEAD": + unpinned_msg = "not pinned, using HEAD (default branch)" + elif self.revision in ("main", "master"): + 
unpinned_msg = f'pinned to the "{self.revision}" branch' + else: + unpinned_msg = None + + msg = ( + f'The git package "{self.git}" \n\tis {unpinned_msg}.\n\tThis can introduce ' + f"breaking changes into your project without warning!\n\nSee {PIN_PACKAGE_URL}" + ) + return yellow(f"WARNING: {msg}") + + +@dataclass +class NoNodesForSelectionCriteria(WarnLevel, pt.NoNodesForSelectionCriteria): + def code(self): + return "M030" + + def message(self) -> str: + return f"The selection criterion '{self.spec_raw}' does not match any nodes" + + # ======================================================= # Q - Node execution # ======================================================= @@ -1565,76 +1884,54 @@ def message(self) -> str: @dataclass -@dataclass -class PrintErrorTestResult(ErrorLevel, pt.PrintErrorTestResult): +class LogTestResult(DynamicLevel, pt.LogTestResult): def code(self): return "Q007" def message(self) -> str: - info = "ERROR" - msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=ui.red(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintPassTestResult(InfoLevel, pt.PrintPassTestResult): - def code(self): - return "Q008" - - def message(self) -> str: - info = "PASS" + if self.status == "error": + info = "ERROR" + status = red(info) + elif self.status == "pass": + info = "PASS" + status = green(info) + elif self.status == "warn": + info = f"WARN {self.num_failures}" + status = yellow(info) + else: # self.status == "fail": + info = f"FAIL {self.num_failures}" + status = red(info) msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=ui.green(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintWarnTestResult(WarnLevel, pt.PrintWarnTestResult): - def code(self): - return "Q009" - def message(self) -> str: - info = f"WARN {self.num_failures}" - msg = f"{info} 
{self.name}" return format_fancy_output_line( msg=msg, - status=ui.yellow(info), + status=status, index=self.index, total=self.num_models, execution_time=self.execution_time, ) + @classmethod + def status_to_level(cls, status): + # The statuses come from TestStatus + # TODO should this return EventLevel enum instead? + level_lookup = { + "fail": "error", + "pass": "info", + "warn": "warn", + "error": "error", + } + if status in level_lookup: + return level_lookup[status] + else: + return "info" -@dataclass -class PrintFailureTestResult(ErrorLevel, pt.PrintFailureTestResult): - def code(self): - return "Q010" - def message(self) -> str: - info = f"FAIL {self.num_failures}" - msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=ui.red(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) +# Skipped Q008, Q009, Q010 @dataclass -class PrintStartLine(InfoLevel, pt.PrintStartLine): # noqa +class LogStartLine(InfoLevel, pt.LogStartLine): # noqa def code(self): return "Q011" @@ -1644,67 +1941,48 @@ def message(self) -> str: @dataclass -class PrintModelResultLine(InfoLevel, pt.PrintModelResultLine): +class LogModelResult(DynamicLevel, pt.LogModelResult): def code(self): return "Q012" def message(self) -> str: - info = "OK created" - msg = f"{info} {self.description}" - return format_fancy_output_line( - msg=msg, - status=ui.green(self.status), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintModelErrorResultLine(ErrorLevel, pt.PrintModelErrorResultLine): - def code(self): - return "Q013" + if self.status == "error": + info = "ERROR creating" + status = red(self.status.upper()) + else: + info = "OK created" + status = green(self.status) - def message(self) -> str: - info = "ERROR creating" msg = f"{info} {self.description}" return format_fancy_output_line( msg=msg, - status=ui.red(self.status.upper()), + status=status, index=self.index, 
total=self.total, execution_time=self.execution_time, ) -@dataclass -class PrintSnapshotErrorResultLine(ErrorLevel, pt.PrintSnapshotErrorResultLine): - def code(self): - return "Q014" - - def message(self) -> str: - info = "ERROR snapshotting" - msg = "{info} {description}".format(info=info, description=self.description, **self.cfg) - return format_fancy_output_line( - msg=msg, - status=ui.red(self.status.upper()), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q013, Q014 @dataclass -class PrintSnapshotResultLine(InfoLevel, pt.PrintSnapshotResultLine): +class LogSnapshotResult(DynamicLevel, pt.LogSnapshotResult): def code(self): return "Q015" def message(self) -> str: - info = "OK snapshotted" + if self.status == "error": + info = "ERROR snapshotting" + status = red(self.status.upper()) + else: + info = "OK snapshotted" + status = green(self.status) + msg = "{info} {description}".format(info=info, description=self.description, **self.cfg) return format_fancy_output_line( msg=msg, - status=ui.green(self.status), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, @@ -1712,115 +1990,84 @@ def message(self) -> str: @dataclass -class PrintSeedErrorResultLine(ErrorLevel, pt.PrintSeedErrorResultLine): +class LogSeedResult(DynamicLevel, pt.LogSeedResult): def code(self): return "Q016" def message(self) -> str: - info = "ERROR loading" + if self.status == "error": + info = "ERROR loading" + status = red(self.status.upper()) + else: + info = "OK loaded" + status = green(self.result_message) msg = f"{info} seed file {self.schema}.{self.relation}" return format_fancy_output_line( msg=msg, - status=ui.red(self.status.upper()), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, ) -@dataclass -class PrintSeedResultLine(InfoLevel, pt.PrintSeedResultLine): - def code(self): - return "Q017" - - def message(self) -> str: - info = "OK loaded" - msg = f"{info} seed 
file {self.schema}.{self.relation}" - return format_fancy_output_line( - msg=msg, - status=ui.green(self.status), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q017 @dataclass -class PrintFreshnessErrorLine(ErrorLevel, pt.PrintFreshnessErrorLine): +class LogFreshnessResult(DynamicLevel, pt.LogFreshnessResult): def code(self): return "Q018" def message(self) -> str: - info = "ERROR" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - msg=msg, - status=ui.red(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintFreshnessErrorStaleLine(ErrorLevel, pt.PrintFreshnessErrorStaleLine): - def code(self): - return "Q019" - - def message(self) -> str: - info = "ERROR STALE" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - msg=msg, - status=ui.red(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintFreshnessWarnLine(WarnLevel, pt.PrintFreshnessWarnLine): - def code(self): - return "Q020" - - def message(self) -> str: - info = "WARN" + if self.status == "runtime error": + info = "ERROR" + status = red(info) + elif self.status == "error": + info = "ERROR STALE" + status = red(info) + elif self.status == "warn": + info = "WARN" + status = yellow(info) + else: + info = "PASS" + status = green(info) msg = f"{info} freshness of {self.source_name}.{self.table_name}" return format_fancy_output_line( msg=msg, - status=ui.yellow(info), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, ) + @classmethod + def status_to_level(cls, status): + # The statuses come from FreshnessStatus + # TODO should this return EventLevel enum instead? 
+ level_lookup = { + "runtime error": "error", + "pass": "info", + "warn": "warn", + "error": "error", + } + if status in level_lookup: + return level_lookup[status] + else: + return "info" -@dataclass -class PrintFreshnessPassLine(InfoLevel, pt.PrintFreshnessPassLine): - def code(self): - return "Q021" - def message(self) -> str: - info = "PASS" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - msg=msg, - status=ui.green(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q019, Q020, Q021 @dataclass -class PrintCancelLine(ErrorLevel, pt.PrintCancelLine): +class LogCancelLine(ErrorLevel, pt.LogCancelLine): def code(self): return "Q022" def message(self) -> str: msg = "CANCEL query {}".format(self.conn_name) - return format_fancy_output_line(msg=msg, status=ui.red("CANCEL"), index=None, total=None) + return format_fancy_output_line(msg=msg, status=red("CANCEL"), index=None, total=None) @dataclass @@ -1838,7 +2085,7 @@ def code(self): return "Q024" def message(self) -> str: - return f"Began running node {self.unique_id}" + return f"Began running node {self.node_info.unique_id}" @dataclass @@ -1847,7 +2094,7 @@ def code(self): return "Q025" def message(self) -> str: - return f"Finished running node {self.unique_id}" + return f"Finished running node {self.node_info.unique_id}" @dataclass @@ -1861,7 +2108,7 @@ def message(self) -> str: "cancellation. Some queries may still be " "running!" 
) - return ui.yellow(msg) + return yellow(msg) @dataclass @@ -1873,13 +2120,7 @@ def message(self) -> str: return f"Concurrency: {self.num_threads} threads (target='{self.target_name}')" -@dataclass -class CompilingNode(DebugLevel, pt.CompilingNode): - def code(self): - return "Q028" - - def message(self) -> str: - return f"Compiling {self.unique_id}" +# Skipped Q028 @dataclass @@ -1888,7 +2129,7 @@ def code(self): return "Q029" def message(self) -> str: - return f'Writing injected SQL for node "{self.unique_id}"' + return f'Writing injected SQL for node "{self.node_info.unique_id}"' @dataclass @@ -1897,7 +2138,7 @@ def code(self): return "Q030" def message(self) -> str: - return f"Began compiling node {self.unique_id}" + return f"Began compiling node {self.node_info.unique_id}" @dataclass @@ -1906,11 +2147,11 @@ def code(self): return "Q031" def message(self) -> str: - return f"Began executing node {self.unique_id}" + return f"Began executing node {self.node_info.unique_id}" @dataclass -class PrintHookStartLine(InfoLevel, pt.PrintHookStartLine): # noqa +class LogHookStartLine(InfoLevel, pt.LogHookStartLine): # noqa def code(self): return "Q032" @@ -1922,7 +2163,7 @@ def message(self) -> str: @dataclass -class PrintHookEndLine(InfoLevel, pt.PrintHookEndLine): # noqa +class LogHookEndLine(InfoLevel, pt.LogHookEndLine): # noqa def code(self): return "Q033" @@ -1930,7 +2171,7 @@ def message(self) -> str: msg = "OK hook: {}".format(self.statement) return format_fancy_output_line( msg=msg, - status=ui.green(self.status), + status=green(self.status), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1949,11 +2190,17 @@ def message(self) -> str: else: msg = f"SKIP {self.resource_type} {self.node_name}" return format_fancy_output_line( - msg=msg, status=ui.yellow("SKIP"), index=self.index, total=self.total + msg=msg, status=yellow("SKIP"), index=self.index, total=self.total ) -# Skipped Q035 +@dataclass +class NothingToDo(WarnLevel, 
pt.NothingToDo): + def code(self): + return "Q035" + + def message(self) -> str: + return "Nothing to do. Try checking your model configs and model specification args" @dataclass @@ -1974,6 +2221,15 @@ def message(self) -> str: return "Command end result" +@dataclass +class NoNodesSelected(WarnLevel, pt.NoNodesSelected): + def code(self): + return "Q038" + + def message(self) -> str: + return "No nodes selected!" + + # ======================================================= # W - Node testing # ======================================================= @@ -2003,7 +2259,7 @@ def message(self) -> str: """.strip() return "{prefix}\n{error}\n\n{note}".format( - prefix=ui.red(prefix), error=str(self.exc).strip(), note=internal_error_string + prefix=red(prefix), error=str(self.exc).strip(), note=internal_error_string ) @@ -2017,7 +2273,7 @@ def message(self) -> str: if node_description is None: node_description = self.unique_id prefix = "Unhandled error while executing {}".format(node_description) - return "{prefix}\n{error}".format(prefix=ui.red(prefix), error=str(self.exc).strip()) + return "{prefix}\n{error}".format(prefix=red(prefix), error=str(self.exc).strip()) @dataclass @@ -2133,18 +2389,18 @@ def code(self): return "Z010" def message(self) -> str: - return "finished collecting timing info" + return f"Timing info for {self.node_info.unique_id} ({self.timing_info.name}): {self.timing_info.started_at} => {self.timing_info.completed_at}" # This prints the stack trace at the debug level while allowing just the nice exception message # at the error level - or whatever other level chosen. Used in multiple places. 
@dataclass -class PrintDebugStackTrace(DebugLevel, pt.PrintDebugStackTrace): # noqa +class LogDebugStackTrace(DebugLevel, pt.LogDebugStackTrace): # noqa def code(self): return "Z011" def message(self) -> str: - return "" + return f"{self.exc_info}" # We don't write "clean" events to the log, because the clean command @@ -2241,7 +2497,7 @@ def code(self): def message(self) -> str: info = "Warning" - return ui.yellow(f"{info} in {self.resource_type} {self.node_name} ({self.path})") + return yellow(f"{info} in {self.resource_type} {self.node_name} ({self.path})") @dataclass @@ -2251,7 +2507,7 @@ def code(self): def message(self) -> str: info = "Failure" - return ui.red(f"{info} in {self.resource_type} {self.node_name} ({self.path})") + return red(f"{info} in {self.resource_type} {self.node_name} ({self.path})") @dataclass @@ -2270,6 +2526,7 @@ def code(self): return "Z024" def message(self) -> str: + # This is the message on the result object, cannot be built here return f" {self.msg}" @@ -2302,13 +2559,16 @@ def message(self) -> str: return f" See test failures:\n {border}\n {msg}\n {border}" +# FirstRunResultError and AfterFirstRunResultError are just splitting the message from the result +# object into multiple log lines +# TODO: is this reallly needed? 
See printer.py @dataclass class FirstRunResultError(ErrorLevel, EventStringFunctor, pt.FirstRunResultError): def code(self): return "Z028" def message(self) -> str: - return ui.yellow(self.msg) + return yellow(self.msg) @dataclass @@ -2329,13 +2589,13 @@ def message(self) -> str: error_plural = pluralize(self.num_errors, "error") warn_plural = pluralize(self.num_warnings, "warning") if self.keyboard_interrupt: - message = ui.yellow("Exited because of keyboard interrupt.") + message = yellow("Exited because of keyboard interrupt.") elif self.num_errors > 0: - message = ui.red("Completed with {} and {}:".format(error_plural, warn_plural)) + message = red("Completed with {} and {}:".format(error_plural, warn_plural)) elif self.num_warnings > 0: - message = ui.yellow("Completed with {}:".format(warn_plural)) + message = yellow("Completed with {}:".format(warn_plural)) else: - message = ui.green("Completed successfully") + message = green("Completed successfully") return message @@ -2343,14 +2603,14 @@ def message(self) -> str: @dataclass -class PrintSkipBecauseError(ErrorLevel, pt.PrintSkipBecauseError): +class LogSkipBecauseError(ErrorLevel, pt.LogSkipBecauseError): def code(self): return "Z034" def message(self) -> str: msg = f"SKIP relation {self.schema}.{self.relation} due to ephemeral model error" return format_fancy_output_line( - msg=msg, status=ui.red("ERROR SKIP"), index=self.index, total=self.total + msg=msg, status=red("ERROR SKIP"), index=self.index, total=self.total ) @@ -2446,423 +2706,12 @@ def message(self) -> str: return "Got an exception trying to initialize tracking" -# Skipped Z045 - - -@dataclass -class GeneralWarningMsg(WarnLevel, EventStringFunctor, pt.GeneralWarningMsg): - def code(self): - return "Z046" - - def message(self) -> str: - return self.log_fmt.format(self.msg) if self.log_fmt is not None else self.msg - - -@dataclass -class GeneralWarningException(WarnLevel, pt.GeneralWarningException): - def code(self): - return "Z047" - - def 
message(self) -> str: - return self.log_fmt.format(str(self.exc)) if self.log_fmt is not None else str(self.exc) - - -@dataclass -class EventBufferFull(WarnLevel, pt.EventBufferFull): - def code(self): - return "Z048" - - def message(self) -> str: - return ( - "Internal logging/event buffer full." - "Earliest logs/events will be dropped as new ones are fired (FIFO)." - ) - - +# this is the message from the result object @dataclass class RunResultWarningMessage(WarnLevel, EventStringFunctor, pt.RunResultWarningMessage): def code(self): - return "Z049" + return "Z046" def message(self) -> str: + # This is the message on the result object, cannot be formatted in event return self.msg - - -# since mypy doesn't run on every file we need to suggest to mypy that every -# class gets instantiated. But we don't actually want to run this code. -# making the conditional `if False` causes mypy to skip it as dead code so -# we need to skirt around that by computing something it doesn't check statically. -# -# TODO remove these lines once we run mypy everywhere. 
-if 1 == 0: - - # A - pre-project loading - MainReportVersion(version="") - MainReportArgs(args={}) - MainTrackingUserState(user_state="") - MergedFromState(num_merged=0, sample=[]) - MissingProfileTarget(profile_name="", target_name="") - InvalidVarsYAML() - DbtProjectError() - DbtProjectErrorException(exc="") - DbtProfileError() - DbtProfileErrorException(exc="") - ProfileListTitle() - ListSingleProfile(profile="") - NoDefinedProfiles() - ProfileHelpMessage() - StarterProjectPath(dir="") - ConfigFolderDirectory(dir="") - NoSampleProfileFound(adapter="") - ProfileWrittenWithSample(name="", path="") - ProfileWrittenWithTargetTemplateYAML(name="", path="") - ProfileWrittenWithProjectTemplateYAML(name="", path="") - SettingUpProfile() - InvalidProfileTemplateYAML() - ProjectNameAlreadyExists(name="") - ProjectCreated(project_name="") - - # E - DB Adapter ====================== - AdapterEventDebug() - AdapterEventInfo() - AdapterEventWarning() - AdapterEventError() - NewConnection(conn_type="", conn_name="") - ConnectionReused(conn_name="") - ConnectionLeftOpen(conn_name="") - ConnectionClosed(conn_name="") - RollbackFailed(conn_name="") - ConnectionClosed2(conn_name="") - ConnectionLeftOpen2(conn_name="") - Rollback(conn_name="") - CacheMiss(conn_name="", database="", schema="") - ListRelations(database="", schema="") - ConnectionUsed(conn_type="", conn_name="") - SQLQuery(conn_name="", sql="") - SQLQueryStatus(status="", elapsed=0.1) - SQLCommit(conn_name="") - ColTypeChange( - orig_type="", new_type="", table=ReferenceKeyMsg(database="", schema="", identifier="") - ) - SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - UncachedRelation( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - AddLink( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), - 
ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - AddRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - DropMissingRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - DropCascade( - dropped=ReferenceKeyMsg(database="", schema="", identifier=""), - consequences=[ReferenceKeyMsg(database="", schema="", identifier="")], - ) - DropRelation(dropped=ReferenceKeyMsg()) - UpdateReference( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - cached_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - TemporaryRelation(key=ReferenceKeyMsg(database="", schema="", identifier="")) - RenameSchema( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - DumpBeforeAddGraph(dump=dict()) - DumpAfterAddGraph(dump=dict()) - DumpBeforeRenameSchema(dump=dict()) - DumpAfterRenameSchema(dump=dict()) - AdapterImportError(exc="") - PluginLoadError(exc_info="") - NewConnectionOpening(connection_state="") - CodeExecution(conn_name="", code_content="") - CodeExecutionStatus(status="", elapsed=0.1) - WriteCatalogFailure(num_exceptions=0) - CatalogWritten(path="") - CannotGenerateDocs() - BuildingCatalog() - DatabaseErrorRunningHook(hook_type="") - HooksRunning(num_hooks=0, hook_type="") - HookFinished(stat_line="", execution="", execution_time=0) - - # I - Project parsing ====================== - ParseCmdStart() - ParseCmdCompiling() - ParseCmdWritingManifest() - ParseCmdDone() - ManifestDependenciesLoaded() - ManifestLoaderCreated() - ManifestLoaded() - ManifestChecked() - ManifestFlatGraphBuilt() - ParseCmdPerfInfoPath(path="") - GenericTestFileParse(path="") - MacroFileParse(path="") - PartialParsingFullReparseBecauseOfError() - PartialParsingExceptionFile(file="") - PartialParsingFile(file_id="") - PartialParsingException(exc_info={}) - 
PartialParsingSkipParsing() - PartialParsingMacroChangeStartFullParse() - PartialParsingProjectEnvVarsChanged() - PartialParsingProfileEnvVarsChanged() - PartialParsingDeletedMetric(unique_id="") - ManifestWrongMetadataVersion(version="") - PartialParsingVersionMismatch(saved_version="", current_version="") - PartialParsingFailedBecauseConfigChange() - PartialParsingFailedBecauseProfileChange() - PartialParsingFailedBecauseNewProjectDependency() - PartialParsingFailedBecauseHashChanged() - PartialParsingNotEnabled() - ParsedFileLoadFailed(path="", exc="", exc_info="") - PartialParseSaveFileNotFound() - StaticParserCausedJinjaRendering(path="") - UsingExperimentalParser(path="") - SampleFullJinjaRendering(path="") - StaticParserFallbackJinjaRendering(path="") - StaticParsingMacroOverrideDetected(path="") - StaticParserSuccess(path="") - StaticParserFailure(path="") - ExperimentalParserSuccess(path="") - ExperimentalParserFailure(path="") - PartialParsingEnabled(deleted=0, added=0, changed=0) - PartialParsingAddedFile(file_id="") - PartialParsingDeletedFile(file_id="") - PartialParsingUpdatedFile(file_id="") - PartialParsingNodeMissingInSourceFile(file_id="") - PartialParsingMissingNodes(file_id="") - PartialParsingChildMapMissingUniqueID(unique_id="") - PartialParsingUpdateSchemaFile(file_id="") - PartialParsingDeletedSource(unique_id="") - PartialParsingDeletedExposure(unique_id="") - InvalidDisabledSourceInTestNode(msg="") - InvalidRefInTestNode(msg="") - - # M - Deps generation ====================== - - GitSparseCheckoutSubdirectory(subdir="") - GitProgressCheckoutRevision(revision="") - GitProgressUpdatingExistingDependency(dir="") - GitProgressPullingNewDependency(dir="") - GitNothingToDo(sha="") - GitProgressUpdatedCheckoutRange(start_sha="", end_sha="") - GitProgressCheckedOutAt(end_sha="") - RegistryProgressGETRequest(url="") - RegistryProgressGETResponse(url="", resp_code=1234) - SelectorReportInvalidSelector(valid_selectors="", spec_method="", 
raw_spec="") - MacroEventInfo(msg="") - MacroEventDebug(msg="") - DepsNoPackagesFound() - DepsStartPackageInstall(package_name="") - DepsInstallInfo(version_name="") - DepsUpdateAvailable(version_latest="") - DepsUpToDate() - DepsListSubdirectory(subdirectory="") - DepsNotifyUpdatesAvailable(packages=ListOfStrings()) - RetryExternalCall(attempt=0, max=0) - RecordRetryException(exc="") - RegistryIndexProgressGETRequest(url="") - RegistryIndexProgressGETResponse(url="", resp_code=1234) - RegistryResponseUnexpectedType(response=""), - RegistryResponseMissingTopKeys(response=""), - RegistryResponseMissingNestedKeys(response=""), - RegistryResponseExtraNestedKeys(response=""), - DepsSetDownloadDirectory(path="") - - # Q - Node execution ====================== - - RunningOperationCaughtError(exc="") - CompileComplete() - FreshnessCheckComplete() - SeedHeader(header="") - SeedHeaderSeparator(len_header=0) - SQLRunnerException(exc="") - PrintErrorTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - ) - PrintPassTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - ) - PrintWarnTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - num_failures=0, - ) - PrintFailureTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - num_failures=0, - ) - PrintStartLine(description="", index=0, total=0, node_info=NodeInfo()) - PrintModelResultLine( - description="", - status="", - index=0, - total=0, - execution_time=0, - ) - PrintModelErrorResultLine( - description="", - status="", - index=0, - total=0, - execution_time=0, - ) - PrintSnapshotErrorResultLine( - status="", - description="", - cfg={}, - index=0, - total=0, - execution_time=0, - ) - PrintSnapshotResultLine( - status="", - description="", - cfg={}, - index=0, - total=0, - execution_time=0, - ) - PrintSeedErrorResultLine( - status="", - index=0, - total=0, - execution_time=0, - schema="", - relation="", - ) - PrintSeedResultLine( - status="", - index=0, - 
total=0, - execution_time=0, - schema="", - relation="", - ) - PrintFreshnessErrorLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessErrorStaleLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessWarnLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessPassLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintCancelLine(conn_name="") - DefaultSelector(name="") - NodeStart(unique_id="") - NodeFinished(unique_id="") - QueryCancelationUnsupported(type="") - ConcurrencyLine(num_threads=0, target_name="") - CompilingNode(unique_id="") - WritingInjectedSQLForNode(unique_id="") - NodeCompiling(unique_id="") - NodeExecuting(unique_id="") - PrintHookStartLine( - statement="", - index=0, - total=0, - ) - PrintHookEndLine( - statement="", - status="", - index=0, - total=0, - execution_time=0, - ) - SkippingDetails( - resource_type="", - schema="", - node_name="", - index=0, - total=0, - ) - RunningOperationUncaughtError(exc="") - EndRunResult() - - # W - Node testing ====================== - - CatchableExceptionOnRun(exc="") - InternalExceptionOnRun(build_path="", exc="") - GenericExceptionOnRun(build_path="", unique_id="", exc="") - NodeConnectionReleaseError(node_name="", exc="") - FoundStats(stat_line="") - - # Z - misc ====================== - - MainKeyboardInterrupt() - MainEncounteredError(exc="") - MainStackTrace(stack_trace="") - SystemErrorRetrievingModTime(path="") - SystemCouldNotWrite(path="", reason="", exc="") - SystemExecutingCmd(cmd=[""]) - SystemStdOutMsg(bmsg=b"") - SystemStdErrMsg(bmsg=b"") - SystemReportReturnCode(returncode=0) - TimingInfoCollected() - PrintDebugStackTrace() - CheckCleanPath(path="") - ConfirmCleanPath(path="") - ProtectedCleanPath(path="") - FinishedCleanPaths() - OpenCommand(open_cmd="", profiles_dir="") - EmptyLine() - ServingDocsPort(address="", 
port=0) - ServingDocsAccessInfo(port="") - ServingDocsExitInfo() - RunResultWarning(resource_type="", node_name="", path="") - RunResultFailure(resource_type="", node_name="", path="") - StatsLine(stats={}) - RunResultError(msg="") - RunResultErrorNoMessage(status="") - SQLCompiledPath(path="") - CheckNodeTestFailure(relation_name="") - FirstRunResultError(msg="") - AfterFirstRunResultError(msg="") - EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False) - PrintSkipBecauseError(schema="", relation="", index=0, total=0) - EnsureGitInstalled() - DepsCreatingLocalSymlink() - DepsSymlinkNotAvailable() - DisableTracking() - SendingEvent(kwargs="") - SendEventFailure() - FlushEvents() - FlushEventsFailure() - TrackingInitializeFailure() - GeneralWarningMsg(msg="", log_fmt="") - GeneralWarningException(exc="", log_fmt="") - EventBufferFull() diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index db824e19bf1..515ec86054b 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -1,24 +1,29 @@ import builtins -import functools -from typing import NoReturn, Optional, Mapping, Any - -from dbt.events.functions import fire_event, scrub_secrets, env_secrets -from dbt.events.types import GeneralWarningMsg, GeneralWarningException +import json +import re +from typing import Any, Dict, List, Mapping, NoReturn, Optional, Union + +# from dbt.contracts.graph import ManifestNode # or ParsedNode? 
+from dbt.dataclass_schema import ValidationError +from dbt.events.functions import warn_or_error +from dbt.events.helpers import env_secrets, scrub_secrets +from dbt.events.types import JinjaLogWarning +from dbt.events.contextvars import get_node_info from dbt.node_types import NodeType -from dbt import flags -from dbt.ui import line_wrap_message, warning_tag +from dbt.ui import line_wrap_message import dbt.dataclass_schema -def validator_error_message(exc): - """Given a dbt.dataclass_schema.ValidationError (which is basically a - jsonschema.ValidationError), return the relevant parts as a string +class MacroReturn(builtins.BaseException): """ - if not isinstance(exc, dbt.dataclass_schema.ValidationError): - return str(exc) - path = "[%s]" % "][".join(map(repr, exc.relative_path)) - return "at path {}: {}".format(path, exc.message) + Hack of all hacks + This is not actually an exception. + It's how we return a value from a macro. + """ + + def __init__(self, value): + self.value = value class Exception(builtins.Exception): @@ -33,25 +38,53 @@ def data(self): } -class MacroReturn(builtins.BaseException): - """ - Hack of all hacks - """ +class InternalException(Exception): + def __init__(self, msg: str): + self.stack: List = [] + self.msg = scrub_secrets(msg, env_secrets()) - def __init__(self, value): - self.value = value + @property + def type(self): + return "Internal" + def process_stack(self): + lines = [] + stack = self.stack + first = True -class InternalException(Exception): - pass + if len(stack) > 1: + lines.append("") + + for item in stack: + msg = "called by" + + if first: + msg = "in" + first = False + + lines.append(f"> {msg}") + + return lines + + def __str__(self): + if hasattr(self.msg, "split"): + split_msg = self.msg.split("\n") + else: + split_msg = str(self.msg).split("\n") + + lines = ["{}".format(self.type + " Error")] + split_msg + + lines += self.process_stack() + + return lines[0] + "\n" + "\n".join([" " + line for line in lines[1:]]) class 
RuntimeException(RuntimeError, Exception): CODE = 10001 MESSAGE = "Runtime error" - def __init__(self, msg, node=None): - self.stack = [] + def __init__(self, msg: str, node=None): + self.stack: List = [] self.node = node self.msg = scrub_secrets(msg, env_secrets()) @@ -70,14 +103,14 @@ def node_to_string(self, node): return "" if not hasattr(node, "name"): # we probably failed to parse a block, so we can't know the name - return "{} ({})".format(node.resource_type, node.original_file_path) + return f"{node.resource_type} ({node.original_file_path})" if hasattr(node, "contents"): # handle FileBlocks. They aren't really nodes but we want to render # out the path we know at least. This indicates an error during # block parsing. - return "{}".format(node.path.original_file_path) - return "{} {} ({})".format(node.resource_type, node.name, node.original_file_path) + return f"{node.path.original_file_path}" + return f"{node.resource_type} {node.name} ({node.original_file_path})" def process_stack(self): lines = [] @@ -94,15 +127,24 @@ def process_stack(self): msg = "in" first = False - lines.append("> {} {}".format(msg, self.node_to_string(item))) + lines.append(f"> {msg} {self.node_to_string(item)}") return lines - def __str__(self, prefix="! "): + def validator_error_message(self, exc: builtins.Exception): + """Given a dbt.dataclass_schema.ValidationError (which is basically a + jsonschema.ValidationError), return the relevant parts as a string + """ + if not isinstance(exc, dbt.dataclass_schema.ValidationError): + return str(exc) + path = "[%s]" % "][".join(map(repr, exc.relative_path)) + return f"at path {path}: {exc.message}" + + def __str__(self, prefix: str = "! 
"): node_string = "" if self.node is not None: - node_string = " in {}".format(self.node_to_string(self.node)) + node_string = f" in {self.node_to_string(self.node)}" if hasattr(self.msg, "split"): split_msg = self.msg.split("\n") @@ -139,7 +181,7 @@ class RPCTimeoutException(RuntimeException): CODE = 10008 MESSAGE = "RPC timeout error" - def __init__(self, timeout): + def __init__(self, timeout: Optional[float]): super().__init__(self.MESSAGE) self.timeout = timeout @@ -148,7 +190,7 @@ def data(self): result.update( { "timeout": self.timeout, - "message": "RPC timed out after {}s".format(self.timeout), + "message": f"RPC timed out after {self.timeout}s", } ) return result @@ -158,15 +200,15 @@ class RPCKilledException(RuntimeException): CODE = 10009 MESSAGE = "RPC process killed" - def __init__(self, signum): + def __init__(self, signum: int): self.signum = signum - self.message = "RPC process killed by signal {}".format(self.signum) - super().__init__(self.message) + self.msg = f"RPC process killed by signal {self.signum}" + super().__init__(self.msg) def data(self): return { "signum": self.signum, - "message": self.message, + "message": self.msg, } @@ -174,7 +216,7 @@ class RPCCompiling(RuntimeException): CODE = 10010 MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status" - def __init__(self, msg=None, node=None): + def __init__(self, msg: str = None, node=None): if msg is None: msg = "compile in progress" super().__init__(msg, node) @@ -186,13 +228,13 @@ class RPCLoadException(RuntimeException): 'RPC server failed to compile project, call the "status" method for' " compile status" ) - def __init__(self, cause): + def __init__(self, cause: Dict[str, Any]): self.cause = cause - self.message = "{}: {}".format(self.MESSAGE, self.cause["message"]) - super().__init__(self.message) + self.msg = f'{self.MESSAGE}: {self.cause["message"]}' + super().__init__(self.msg) def data(self): - return {"cause": self.cause, "message": 
self.message} + return {"cause": self.cause, "message": self.msg} class DatabaseException(RuntimeException): @@ -203,7 +245,7 @@ def process_stack(self): lines = [] if hasattr(self.node, "build_path") and self.node.build_path: - lines.append("compiled Code at {}".format(self.node.build_path)) + lines.append(f"compiled Code at {self.node.build_path}") return lines + RuntimeException.process_stack(self) @@ -220,6 +262,17 @@ class CompilationException(RuntimeException): def type(self): return "Compilation" + def _fix_dupe_msg(self, path_1: str, path_2: str, name: str, type_name: str) -> str: + if path_1 == path_2: + return ( + f"remove one of the {type_name} entries for {name} in this file:\n - {path_1!s}\n" + ) + else: + return ( + f"remove the {type_name} entry for {name} in one of these files:\n" + f" - {path_1!s}\n{path_2!s}" + ) + class RecursionException(RuntimeException): pass @@ -239,14 +292,13 @@ def type(self): return "Parsing" +# TODO: this isn't raised in the core codebase. Is it raised elsewhere? class JSONValidationException(ValidationException): def __init__(self, typename, errors): self.typename = typename self.errors = errors self.errors_message = ", ".join(errors) - msg = 'Invalid arguments passed to "{}" instance: {}'.format( - self.typename, self.errors_message - ) + msg = f'Invalid arguments passed to "{self.typename}" instance: {self.errors_message}' super().__init__(msg) def __reduce__(self): @@ -260,7 +312,7 @@ def __init__(self, expected: str, found: Optional[str]): self.found = found self.filename = "input file" - super().__init__(self.get_message()) + super().__init__(msg=self.get_message()) def add_filename(self, filename: str): self.filename = filename @@ -287,7 +339,7 @@ class JinjaRenderingException(CompilationException): class UndefinedMacroException(CompilationException): - def __str__(self, prefix="! ") -> str: + def __str__(self, prefix: str = "! ") -> str: msg = super().__str__(prefix) return ( f"{msg}. 
This can happen when calling a macro that does " @@ -304,7 +356,7 @@ def __init__(self, task_id): self.task_id = task_id def __str__(self): - return "{}: {}".format(self.MESSAGE, self.task_id) + return f"{self.MESSAGE}: {self.task_id}" class AliasException(ValidationException): @@ -321,9 +373,9 @@ class DbtConfigError(RuntimeException): CODE = 10007 MESSAGE = "DBT Configuration Error" - def __init__(self, message, project=None, result_type="invalid_project", path=None): + def __init__(self, msg: str, project=None, result_type="invalid_project", path=None): self.project = project - super().__init__(message) + super().__init__(msg) self.result_type = result_type self.path = path @@ -339,8 +391,8 @@ class FailFastException(RuntimeException): CODE = 10013 MESSAGE = "FailFast Error" - def __init__(self, message, result=None, node=None): - super().__init__(msg=message, node=node) + def __init__(self, msg: str, result=None, node=None): + super().__init__(msg=msg, node=node) self.result = result @property @@ -361,7 +413,7 @@ class DbtProfileError(DbtConfigError): class SemverException(Exception): - def __init__(self, msg=None): + def __init__(self, msg: str = None): self.msg = msg if msg is not None: super().__init__(msg) @@ -374,7 +426,10 @@ class VersionsNotCompatibleException(SemverException): class NotImplementedException(Exception): - pass + def __init__(self, msg: str): + self.msg = msg + self.formatted_msg = f"ERROR: {self.msg}" + super().__init__(self.formatted_msg) class FailedToConnectException(DatabaseException): @@ -382,52 +437,58 @@ class FailedToConnectException(DatabaseException): class CommandError(RuntimeException): - def __init__(self, cwd, cmd, message="Error running command"): + def __init__(self, cwd: str, cmd: List[str], msg: str = "Error running command"): cmd_scrubbed = list(scrub_secrets(cmd_txt, env_secrets()) for cmd_txt in cmd) - super().__init__(message) + super().__init__(msg) self.cwd = cwd self.cmd = cmd_scrubbed - self.args = (cwd, 
cmd_scrubbed, message) + self.args = (cwd, cmd_scrubbed, msg) def __str__(self): if len(self.cmd) == 0: - return "{}: No arguments given".format(self.msg) - return '{}: "{}"'.format(self.msg, self.cmd[0]) + return f"{self.msg}: No arguments given" + return f'{self.msg}: "{self.cmd[0]}"' class ExecutableError(CommandError): - def __init__(self, cwd, cmd, message): - super().__init__(cwd, cmd, message) + def __init__(self, cwd: str, cmd: List[str], msg: str): + super().__init__(cwd, cmd, msg) class WorkingDirectoryError(CommandError): - def __init__(self, cwd, cmd, message): - super().__init__(cwd, cmd, message) + def __init__(self, cwd: str, cmd: List[str], msg: str): + super().__init__(cwd, cmd, msg) def __str__(self): - return '{}: "{}"'.format(self.msg, self.cwd) + return f'{self.msg}: "{self.cwd}"' class CommandResultError(CommandError): - def __init__(self, cwd, cmd, returncode, stdout, stderr, message="Got a non-zero returncode"): - super().__init__(cwd, cmd, message) + def __init__( + self, + cwd: str, + cmd: List[str], + returncode: Union[int, Any], + stdout: bytes, + stderr: bytes, + msg: str = "Got a non-zero returncode", + ): + super().__init__(cwd, cmd, msg) self.returncode = returncode self.stdout = scrub_secrets(stdout.decode("utf-8"), env_secrets()) self.stderr = scrub_secrets(stderr.decode("utf-8"), env_secrets()) - self.args = (cwd, self.cmd, returncode, self.stdout, self.stderr, message) + self.args = (cwd, self.cmd, returncode, self.stdout, self.stderr, msg) def __str__(self): - return "{} running: {}".format(self.msg, self.cmd) + return f"{self.msg} running: {self.cmd}" class InvalidConnectionException(RuntimeException): - def __init__(self, thread_id, known, node=None): + def __init__(self, thread_id, known: List): self.thread_id = thread_id self.known = known super().__init__( - msg="connection never acquired for thread {}, have {}".format( - self.thread_id, self.known - ) + msg="connection never acquired for thread {self.thread_id}, have 
{self.known}" ) @@ -441,694 +502,1874 @@ class DuplicateYamlKeyException(CompilationException): pass -def raise_compiler_error(msg, node=None) -> NoReturn: - raise CompilationException(msg, node) +class ConnectionException(Exception): + """ + There was a problem with the connection that returned a bad response, + timed out, or resulted in a file that is corrupt. + """ + pass -def raise_parsing_error(msg, node=None) -> NoReturn: - raise ParsingException(msg, node) +# event level exception +class EventCompilationException(CompilationException): + def __init__(self, msg: str, node): + self.msg = scrub_secrets(msg, env_secrets()) + self.node = node + super().__init__(msg=self.msg) -def raise_database_error(msg, node=None) -> NoReturn: - raise DatabaseException(msg, node) +# compilation level exceptions +class GraphDependencyNotFound(CompilationException): + def __init__(self, node, dependency: str): + self.node = node + self.dependency = dependency + super().__init__(msg=self.get_message()) -def raise_dependency_error(msg) -> NoReturn: - raise DependencyException(scrub_secrets(msg, env_secrets())) + def get_message(self) -> str: + msg = f"'{self.node.unique_id}' depends on '{self.dependency}' which is not in the graph!" 
+ return msg -def raise_git_cloning_error(error: CommandResultError) -> NoReturn: - error.cmd = scrub_secrets(str(error.cmd), env_secrets()) - raise error +# client level exceptions -def raise_git_cloning_problem(repo) -> NoReturn: - repo = scrub_secrets(repo, env_secrets()) - msg = """\ - Something went wrong while cloning {} - Check the debug logs for more information - """ - raise RuntimeException(msg.format(repo)) +class NoSupportedLanguagesFound(CompilationException): + def __init__(self, node): + self.node = node + self.msg = f"No supported_languages found in materialization macro {self.node.name}" + super().__init__(msg=self.msg) -def disallow_secret_env_var(env_var_name) -> NoReturn: - """Raise an error when a secret env var is referenced outside allowed - rendering contexts""" - msg = ( - "Secret env vars are allowed only in profiles.yml or packages.yml. " - "Found '{env_var_name}' referenced elsewhere." - ) - raise_parsing_error(msg.format(env_var_name=env_var_name)) +class MaterializtionMacroNotUsed(CompilationException): + def __init__(self, node): + self.node = node + self.msg = "Only materialization macros can be used with this function" + super().__init__(msg=self.msg) -def invalid_type_error( - method_name, arg_name, got_value, expected_type, version="0.13.0" -) -> NoReturn: - """Raise a CompilationException when an adapter method available to macros - has changed. 
- """ - got_type = type(got_value) - msg = ( - "As of {version}, 'adapter.{method_name}' expects argument " - "'{arg_name}' to be of type '{expected_type}', instead got " - "{got_value} ({got_type})" - ) - raise_compiler_error( - msg.format( - version=version, - method_name=method_name, - arg_name=arg_name, - expected_type=expected_type, - got_value=got_value, - got_type=got_type, - ) - ) +class UndefinedCompilation(CompilationException): + def __init__(self, name: str, node): + self.name = name + self.node = node + self.msg = f"{self.name} is undefined" + super().__init__(msg=self.msg) -def invalid_bool_error(got_value, macro_name) -> NoReturn: - """Raise a CompilationException when a macro expects a boolean but gets some - other value. - """ - msg = ( - "Macro '{macro_name}' returns '{got_value}'. It is not type 'bool' " - "and cannot not be converted reliably to a bool." - ) - raise_compiler_error(msg.format(macro_name=macro_name, got_value=got_value)) +class CaughtMacroExceptionWithNode(CompilationException): + def __init__(self, exc, node): + self.exc = exc + self.node = node + super().__init__(msg=str(exc)) -def ref_invalid_args(model, args) -> NoReturn: - raise_compiler_error("ref() takes at most two arguments ({} given)".format(len(args)), model) +class CaughtMacroException(CompilationException): + def __init__(self, exc): + self.exc = exc + super().__init__(msg=str(exc)) -def metric_invalid_args(model, args) -> NoReturn: - raise_compiler_error( - "metric() takes at most two arguments ({} given)".format(len(args)), model - ) +class MacroNameNotString(CompilationException): + def __init__(self, kwarg_value): + self.kwarg_value = kwarg_value + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + msg = ( + f"The macro_name parameter ({self.kwarg_value}) " + "to adapter.dispatch was not a string" + ) + return msg -def ref_bad_context(model, args) -> NoReturn: - ref_args = ", ".join("'{}'".format(a) for a in args) - ref_string = "{{{{ 
ref({}) }}}}".format(ref_args) - base_error_msg = """dbt was unable to infer all dependencies for the model "{model_name}". -This typically happens when ref() is placed within a conditional block. +class MissingControlFlowStartTag(CompilationException): + def __init__(self, tag, expected_tag: str, tag_parser): + self.tag = tag + self.expected_tag = expected_tag + self.tag_parser = tag_parser + super().__init__(msg=self.get_message()) -To fix this, add the following hint to the top of the model "{model_name}": + def get_message(self) -> str: + linepos = self.tag_parser.linepos(self.tag.start) + msg = ( + f"Got an unexpected control flow end tag, got {self.tag.block_type_name} but " + f"expected {self.expected_tag} next (@ {linepos})" + ) + return msg --- depends_on: {ref_string}""" - # This explicitly references model['name'], instead of model['alias'], for - # better error messages. Ex. If models foo_users and bar_users are aliased - # to 'users', in their respective schemas, then you would want to see - # 'bar_users' in your error messge instead of just 'users'. 
- if isinstance(model, dict): # TODO: remove this path - model_name = model["name"] - model_path = model["path"] - else: - model_name = model.name - model_path = model.path - error_msg = base_error_msg.format( - model_name=model_name, model_path=model_path, ref_string=ref_string - ) - raise_compiler_error(error_msg, model) +class UnexpectedControlFlowEndTag(CompilationException): + def __init__(self, tag, expected_tag: str, tag_parser): + self.tag = tag + self.expected_tag = expected_tag + self.tag_parser = tag_parser + super().__init__(msg=self.get_message()) -def doc_invalid_args(model, args) -> NoReturn: - raise_compiler_error("doc() takes at most two arguments ({} given)".format(len(args)), model) + def get_message(self) -> str: + linepos = self.tag_parser.linepos(self.tag.start) + msg = ( + f"Got an unexpected control flow end tag, got {self.tag.block_type_name} but " + f"never saw a preceeding {self.expected_tag} (@ {linepos})" + ) + return msg -def doc_target_not_found( - model, target_doc_name: str, target_doc_package: Optional[str] -) -> NoReturn: - target_package_string = "" +class UnexpectedMacroEOF(CompilationException): + def __init__(self, expected_name: str, actual_name: str): + self.expected_name = expected_name + self.actual_name = actual_name + super().__init__(msg=self.get_message()) - if target_doc_package is not None: - target_package_string = "in package '{}' ".format(target_doc_package) + def get_message(self) -> str: + msg = f'unexpected EOF, expected {self.expected_name}, got "{self.actual_name}"' + return msg - msg = ("Documentation for '{}' depends on doc '{}' {} which was not found").format( - model.unique_id, target_doc_name, target_package_string - ) - raise_compiler_error(msg, model) +class MacroNamespaceNotString(CompilationException): + def __init__(self, kwarg_type: Any): + self.kwarg_type = kwarg_type + super().__init__(msg=self.get_message()) -def _get_target_failure_msg( - original_file_path, - unique_id, - resource_type_title, 
- target_name: str, - target_model_package: Optional[str], - include_path: bool, - reason: str, - target_kind: str, -) -> str: - target_package_string = "" - if target_model_package is not None: - target_package_string = "in package '{}' ".format(target_model_package) - - source_path_string = "" - if include_path: - source_path_string = " ({})".format(original_file_path) - - return "{} '{}'{} depends on a {} named '{}' {}which {}".format( - resource_type_title, - unique_id, - source_path_string, - target_kind, - target_name, - target_package_string, - reason, - ) + def get_message(self) -> str: + msg = ( + "The macro_namespace parameter to adapter.dispatch " + f"is a {self.kwarg_type}, not a string" + ) + return msg -def get_target_not_found_or_disabled_msg( - node, - target_name: str, - target_package: Optional[str], - disabled: Optional[bool] = None, -) -> str: - if disabled is None: - reason = "was not found or is disabled" - elif disabled is True: - reason = "is disabled" - else: - reason = "was not found" - return _get_target_failure_msg( - node.original_file_path, - node.unique_id, - node.resource_type.title(), - target_name, - target_package, - include_path=True, - reason=reason, - target_kind="node", - ) +class NestedTags(CompilationException): + def __init__(self, outer, inner): + self.outer = outer + self.inner = inner + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + msg = ( + f"Got nested tags: {self.outer.block_type_name} (started at {self.outer.start}) did " + f"not have a matching {{{{% end{self.outer.block_type_name} %}}}} before a " + f"subsequent {self.inner.block_type_name} was found (started at {self.inner.start})" + ) + return msg -def ref_target_not_found( - model, - target_model_name: str, - target_model_package: Optional[str], - disabled: Optional[bool] = None, -) -> NoReturn: - msg = get_target_not_found_or_disabled_msg( - model, target_model_name, target_model_package, disabled - ) - raise_compiler_error(msg, 
model) +class BlockDefinitionNotAtTop(CompilationException): + def __init__(self, tag_parser, tag_start): + self.tag_parser = tag_parser + self.tag_start = tag_start + super().__init__(msg=self.get_message()) -def get_not_found_or_disabled_msg( - node, - target_name: str, - target_kind: str, - target_package: Optional[str] = None, - disabled: Optional[bool] = None, -) -> str: - if disabled is None: - reason = "was not found or is disabled" - elif disabled is True: - reason = "is disabled" - else: - reason = "was not found" - return _get_target_failure_msg( - node.original_file_path, - node.unique_id, - node.resource_type.title(), - target_name, - target_package, - include_path=True, - reason=reason, - target_kind=target_kind, - ) + def get_message(self) -> str: + position = self.tag_parser.linepos(self.tag_start) + msg = ( + f"Got a block definition inside control flow at {position}. " + "All dbt block definitions must be at the top level" + ) + return msg -def target_not_found( - node, - target_name: str, - target_kind: str, - target_package: Optional[str] = None, - disabled: Optional[bool] = None, -) -> NoReturn: - msg = get_not_found_or_disabled_msg( - node=node, - target_name=target_name, - target_kind=target_kind, - target_package=target_package, - disabled=disabled, - ) +class MissingCloseTag(CompilationException): + def __init__(self, block_type_name: str, linecount: int): + self.block_type_name = block_type_name + self.linecount = linecount + super().__init__(msg=self.get_message()) - raise_compiler_error(msg, node) + def get_message(self) -> str: + msg = f"Reached EOF without finding a close tag for {self.block_type_name} (searched from line {self.linecount})" + return msg -def dependency_not_found(model, target_model_name): - raise_compiler_error( - "'{}' depends on '{}' which is not in the graph!".format( - model.unique_id, target_model_name - ), - model, - ) +class GitCloningProblem(RuntimeException): + def __init__(self, repo: str): + self.repo = 
scrub_secrets(repo, env_secrets()) + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"""\ + Something went wrong while cloning {self.repo} + Check the debug logs for more information + """ + return msg -def macro_not_found(model, target_macro_id): - raise_compiler_error( - model, - "'{}' references macro '{}' which is not defined!".format( - model.unique_id, target_macro_id - ), - ) +class GitCloningError(InternalException): + def __init__(self, repo: str, revision: str, error: CommandResultError): + self.repo = repo + self.revision = revision + self.error = error + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + stderr = self.error.stderr.strip() + if "usage: git" in stderr: + stderr = stderr.split("\nusage: git")[0] + if re.match("fatal: destination path '(.+)' already exists", stderr): + self.error.cmd = list(scrub_secrets(str(self.error.cmd), env_secrets())) + raise self.error -def macro_invalid_dispatch_arg(macro_name) -> NoReturn: - msg = """\ - The "packages" argument of adapter.dispatch() has been deprecated. - Use the "macro_namespace" argument instead. 
+ msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{stderr}" + return scrub_secrets(msg, env_secrets()) - Raised during dispatch for: {} - For more information, see: +class GitCheckoutError(InternalException): + def __init__(self, repo: str, revision: str, error: CommandResultError): + self.repo = repo + self.revision = revision + self.stderr = error.stderr.strip() + super().__init__(msg=self.get_message()) - https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch - """ - raise_compiler_error(msg.format(macro_name)) + def get_message(self) -> str: + msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}" + return scrub_secrets(msg, env_secrets()) -def materialization_not_available(model, adapter_type): - materialization = model.get_materialization() +class InvalidMaterializationArg(CompilationException): + def __init__(self, name: str, argument: str): + self.name = name + self.argument = argument + super().__init__(msg=self.get_message()) - raise_compiler_error( - "Materialization '{}' is not available for {}!".format(materialization, adapter_type), - model, - ) + def get_message(self) -> str: + msg = f"materialization '{self.name}' received unknown argument '{self.argument}'." + return msg -def missing_materialization(model, adapter_type): - materialization = model.get_materialization() +class SymbolicLinkError(CompilationException): + def __init__(self): + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + "dbt encountered an error when attempting to create a symbolic link. " + "If this error persists, please create an issue at: \n\n" + "https://github.com/dbt-labs/dbt-core" + ) - valid_types = "'default'" + return msg - if adapter_type != "default": - valid_types = "'default' and '{}'".format(adapter_type) - raise_compiler_error( - "No materialization '{}' was found for adapter {}! 
(searched types {})".format( - materialization, adapter_type, valid_types - ), - model, - ) +# context level exceptions -def bad_package_spec(repo, spec, error_message): - msg = "Error checking out spec='{}' for repo {}\n{}".format(spec, repo, error_message) - raise InternalException(scrub_secrets(msg, env_secrets())) +class ZipStrictWrongType(CompilationException): + def __init__(self, exc): + self.exc = exc + msg = str(self.exc) + super().__init__(msg=msg) -def raise_cache_inconsistent(message): - raise InternalException("Cache inconsistency detected: {}".format(message)) +class SetStrictWrongType(CompilationException): + def __init__(self, exc): + self.exc = exc + msg = str(self.exc) + super().__init__(msg=msg) -def missing_config(model, name): - raise_compiler_error( - "Model '{}' does not define a required config parameter '{}'.".format( - model.unique_id, name - ), - model, - ) +class LoadAgateTableValueError(CompilationException): + def __init__(self, exc: ValueError, node): + self.exc = exc + self.node = node + msg = str(self.exc) + super().__init__(msg=msg) -def missing_relation(relation, model=None): - raise_compiler_error("Relation {} not found!".format(relation), model) +class LoadAgateTableNotSeed(CompilationException): + def __init__(self, resource_type, node): + self.resource_type = resource_type + self.node = node + msg = f"can only load_agate_table for seeds (got a {self.resource_type})" + super().__init__(msg=msg) -def raise_dataclass_not_dict(obj): - msg = ( - 'The object ("{obj}") was used as a dictionary. This ' - "capability has been removed from objects of this type." 
- ) - raise_compiler_error(msg) +class MacrosSourcesUnWriteable(CompilationException): + def __init__(self, node): + self.node = node + msg = 'cannot "write" macros or sources' + super().__init__(msg=msg) -def relation_wrong_type(relation, expected_type, model=None): - raise_compiler_error( - ( - "Trying to create {expected_type} {relation}, " - "but it currently exists as a {current_type}. Either " - "drop {relation} manually, or run dbt with " - "`--full-refresh` and dbt will drop it for you." - ).format(relation=relation, current_type=relation.type, expected_type=expected_type), - model, - ) +class PackageNotInDeps(CompilationException): + def __init__(self, package_name: str, node): + self.package_name = package_name + self.node = node + msg = f"Node package named {self.package_name} not found!" + super().__init__(msg=msg) -def package_not_found(package_name): - raise_dependency_error("Package {} was not found in the package index".format(package_name)) +class OperationsCannotRefEphemeralNodes(CompilationException): + def __init__(self, target_name: str, node): + self.target_name = target_name + self.node = node + msg = f"Operations can not ref() ephemeral nodes, but {target_name} is ephemeral" + super().__init__(msg=msg) -def package_version_not_found( - package_name, version_range, available_versions, should_version_check -): - base_msg = ( - "Could not find a matching compatible version for package {}\n" - " Requested range: {}\n" - " Compatible versions: {}\n" - ) - addendum = ( - ( - "\n" - " Not shown: package versions incompatible with installed version of dbt-core\n" - " To include them, run 'dbt --no-version-check deps'" +class InvalidPersistDocsValueType(CompilationException): + def __init__(self, persist_docs: Any): + self.persist_docs = persist_docs + msg = ( + "Invalid value provided for 'persist_docs'. 
Expected dict " + f"but received {type(self.persist_docs)}" ) - if should_version_check - else "" - ) - msg = base_msg.format(package_name, version_range, available_versions) + addendum - raise_dependency_error(msg) - + super().__init__(msg=msg) -def invalid_materialization_argument(name, argument): - raise_compiler_error( - "materialization '{}' received unknown argument '{}'.".format(name, argument) - ) +class InvalidInlineModelConfig(CompilationException): + def __init__(self, node): + self.node = node + msg = "Invalid inline model config" + super().__init__(msg=msg) -def system_error(operation_name): - raise_compiler_error( - "dbt encountered an error when attempting to {}. " - "If this error persists, please create an issue at: \n\n" - "https://github.com/dbt-labs/dbt-core".format(operation_name) - ) +class ConflictingConfigKeys(CompilationException): + def __init__(self, oldkey: str, newkey: str, node): + self.oldkey = oldkey + self.newkey = newkey + self.node = node + msg = f'Invalid config, has conflicting keys "{self.oldkey}" and "{self.newkey}"' + super().__init__(msg=msg) -class ConnectionException(Exception): - """ - There was a problem with the connection that returned a bad response, - timed out, or resulted in a file that is corrupt. - """ - pass +class InvalidNumberSourceArgs(CompilationException): + def __init__(self, args, node): + self.args = args + self.node = node + msg = f"source() takes exactly two arguments ({len(self.args)} given)" + super().__init__(msg=msg) -def raise_dep_not_found(node, node_description, required_pkg): - raise_compiler_error( - 'Error while parsing {}.\nThe required package "{}" was not found. 
' - "Is the package installed?\nHint: You may need to run " - "`dbt deps`.".format(node_description, required_pkg), - node=node, - ) +class RequiredVarNotFound(CompilationException): + def __init__(self, var_name: str, merged: Dict, node): + self.var_name = var_name + self.merged = merged + self.node = node + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + if self.node is not None: + node_name = self.node.name + else: + node_name = "" -def multiple_matching_relations(kwargs, matches): - raise_compiler_error( - "get_relation returned more than one relation with the given args. " - "Please specify a database or schema to narrow down the result set." - "\n{}\n\n{}".format(kwargs, matches) - ) + dct = {k: self.merged[k] for k in self.merged} + pretty_vars = json.dumps(dct, sort_keys=True, indent=4) + msg = f"Required var '{self.var_name}' not found in config:\nVars supplied to {node_name} = {pretty_vars}" + return msg -def get_relation_returned_multiple_results(kwargs, matches): - multiple_matching_relations(kwargs, matches) +class PackageNotFoundForMacro(CompilationException): + def __init__(self, package_name: str): + self.package_name = package_name + msg = f"Could not find package '{self.package_name}'" + super().__init__(msg=msg) -def approximate_relation_match(target, relation): - raise_compiler_error( - "When searching for a relation, dbt found an approximate match. " - "Instead of guessing \nwhich relation to use, dbt will move on. " - "Please delete {relation}, or rename it to be less ambiguous." 
- "\nSearched for: {target}\nFound: {relation}".format(target=target, relation=relation) - ) +class DisallowSecretEnvVar(ParsingException): + def __init__(self, env_var_name: str): + self.env_var_name = env_var_name + super().__init__(msg=self.get_message()) -def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: - duped_name = node_1.name - if node_1.package_name != node_2.package_name: - extra = ' ("{}" and "{}" are both in the "{}" namespace)'.format( - node_1.package_name, node_2.package_name, namespace - ) - else: - extra = "" - - raise_compiler_error( - 'dbt found two macros with the name "{}" in the namespace "{}"{}. ' - "Since these macros have the same name and exist in the same " - "namespace, dbt will be unable to decide which to call. To fix this, " - "change the name of one of these macros:\n- {} ({})\n- {} ({})".format( - duped_name, - namespace, - extra, - node_1.unique_id, - node_1.original_file_path, - node_2.unique_id, - node_2.original_file_path, + def get_message(self) -> str: + msg = ( + "Secret env vars are allowed only in profiles.yml or packages.yml. " + f"Found '{self.env_var_name}' referenced elsewhere." ) - ) - + return msg -def raise_duplicate_resource_name(node_1, node_2): - duped_name = node_1.name - node_type = NodeType(node_1.resource_type) - pluralized = ( - node_type.pluralize() - if node_1.resource_type == node_2.resource_type - else "resources" # still raise if ref() collision, e.g. 
model + seed - ) - action = "looking for" - # duplicate 'ref' targets - if node_type in NodeType.refable(): - formatted_name = f'ref("{duped_name}")' - # duplicate sources - elif node_type == NodeType.Source: - duped_name = node_1.get_full_source_name() - formatted_name = node_1.get_source_representation() - # duplicate docs blocks - elif node_type == NodeType.Documentation: - formatted_name = f'doc("{duped_name}")' - # duplicate generic tests - elif node_type == NodeType.Test and hasattr(node_1, "test_metadata"): - column_name = f'column "{node_1.column_name}" in ' if node_1.column_name else "" - model_name = node_1.file_key_name - duped_name = f'{node_1.name}" defined on {column_name}"{model_name}' - action = "running" - formatted_name = "tests" - # all other resource types - else: - formatted_name = duped_name - - # should this be raise_parsing_error instead? - raise_compiler_error( - f""" -dbt found two {pluralized} with the name "{duped_name}". +class InvalidMacroArgType(CompilationException): + def __init__(self, method_name: str, arg_name: str, got_value: Any, expected_type): + self.method_name = method_name + self.arg_name = arg_name + self.got_value = got_value + self.expected_type = expected_type + super().__init__(msg=self.get_message()) -Since these resources have the same name, dbt will be unable to find the correct resource + def get_message(self) -> str: + got_type = type(self.got_value) + msg = ( + f"'adapter.{self.method_name}' expects argument " + f"'{self.arg_name}' to be of type '{self.expected_type}', instead got " + f"{self.got_value} ({got_type})" + ) + return msg + + +class InvalidBoolean(CompilationException): + def __init__(self, return_value: Any, macro_name: str): + self.return_value = return_value + self.macro_name = macro_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Macro '{self.macro_name}' returns '{self.return_value}'. 
It is not type 'bool' " + "and cannot not be converted reliably to a bool." + ) + return msg + + +class RefInvalidArgs(CompilationException): + def __init__(self, node, args): + self.node = node + self.args = args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"ref() takes at most two arguments ({len(self.args)} given)" + return msg + + +class MetricInvalidArgs(CompilationException): + def __init__(self, node, args): + self.node = node + self.args = args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"metric() takes at most two arguments ({len(self.args)} given)" + return msg + + +class RefBadContext(CompilationException): + def __init__(self, node, args): + self.node = node + self.args = args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + # This explicitly references model['name'], instead of model['alias'], for + # better error messages. Ex. If models foo_users and bar_users are aliased + # to 'users', in their respective schemas, then you would want to see + # 'bar_users' in your error messge instead of just 'users'. + if isinstance(self.node, dict): + model_name = self.node["name"] + else: + model_name = self.node.name + + ref_args = ", ".join("'{}'".format(a) for a in self.args) + ref_string = f"{{{{ ref({ref_args}) }}}}" + + msg = f"""dbt was unable to infer all dependencies for the model "{model_name}". +This typically happens when ref() is placed within a conditional block. 
+ +To fix this, add the following hint to the top of the model "{model_name}": + +-- depends_on: {ref_string}""" + + return msg + + +class InvalidDocArgs(CompilationException): + def __init__(self, node, args): + self.node = node + self.args = args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"doc() takes at most two arguments ({len(self.args)} given)" + return msg + + +class DocTargetNotFound(CompilationException): + def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str]): + self.node = node + self.target_doc_name = target_doc_name + self.target_doc_package = target_doc_package + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + target_package_string = "" + if self.target_doc_package is not None: + target_package_string = f"in package '{self. target_doc_package}' " + msg = f"Documentation for '{self.node.unique_id}' depends on doc '{self.target_doc_name}' {target_package_string} which was not found" + return msg + + +class MacroInvalidDispatchArg(CompilationException): + def __init__(self, macro_name: str): + self.macro_name = macro_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"""\ + The "packages" argument of adapter.dispatch() has been deprecated. + Use the "macro_namespace" argument instead. 
+ + Raised during dispatch for: {self.macro_name} + + For more information, see: + + https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch + """ + return msg + + +class DuplicateMacroName(CompilationException): + def __init__(self, node_1, node_2, namespace: str): + self.node_1 = node_1 + self.node_2 = node_2 + self.namespace = namespace + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + duped_name = self.node_1.name + if self.node_1.package_name != self.node_2.package_name: + extra = f' ("{self.node_1.package_name}" and "{self.node_2.package_name}" are both in the "{self.namespace}" namespace)' + else: + extra = "" + + msg = ( + f'dbt found two macros with the name "{duped_name}" in the namespace "{self.namespace}"{extra}. ' + "Since these macros have the same name and exist in the same " + "namespace, dbt will be unable to decide which to call. To fix this, " + f"change the name of one of these macros:\n- {self.node_1.unique_id} " + f"({self.node_1.original_file_path})\n- {self.node_2.unique_id} ({self.node_2.original_file_path})" + ) + + return msg + + +# parser level exceptions +class InvalidDictParse(ParsingException): + def __init__(self, exc: ValidationError, node): + self.exc = exc + self.node = node + msg = self.validator_error_message(exc) + super().__init__(msg=msg) + + +class InvalidConfigUpdate(ParsingException): + def __init__(self, exc: ValidationError, node): + self.exc = exc + self.node = node + msg = self.validator_error_message(exc) + super().__init__(msg=msg) + + +class PythonParsingException(ParsingException): + def __init__(self, exc: SyntaxError, node): + self.exc = exc + self.node = node + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + validated_exc = self.validator_error_message(self.exc) + msg = f"{validated_exc}\n{self.exc.text}" + return msg + + +class PythonLiteralEval(ParsingException): + def __init__(self, exc: Exception, node): + self.exc = exc + self.node = node + 
super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{self.exc}\n" + "https://docs.python.org/3/library/ast.html#ast.literal_eval\n" + "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures" + ) + + return msg + + +class InvalidModelConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class YamlParseListFailure(ParsingException): + def __init__( + self, + path: str, + key: str, + yaml_data: List, + cause, + ): + self.path = path + self.key = key + self.yaml_data = yaml_data + self.cause = cause + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + if isinstance(self.cause, str): + reason = self.cause + elif isinstance(self.cause, ValidationError): + reason = self.validator_error_message(self.cause) + else: + reason = self.cause.msg + msg = f"Invalid {self.key} config given in {self.path} @ {self.key}: {self.yaml_data} - {reason}" + return msg + + +class YamlParseDictFailure(ParsingException): + def __init__( + self, + path: str, + key: str, + yaml_data: Dict[str, Any], + cause, + ): + self.path = path + self.key = key + self.yaml_data = yaml_data + self.cause = cause + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + if isinstance(self.cause, str): + reason = self.cause + elif isinstance(self.cause, ValidationError): + reason = self.validator_error_message(self.cause) + else: + reason = self.cause.msg + msg = f"Invalid {self.key} config given in {self.path} @ {self.key}: {self.yaml_data} - {reason}" + return msg + + +class YamlLoadFailure(ParsingException): + def __init__(self, project_name: Optional[str], path: str, exc: ValidationException): + self.project_name = 
project_name + self.path = path + self.exc = exc + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + reason = self.validator_error_message(self.exc) + + msg = f"Error reading {self.project_name}: {self.path} - {reason}" + + return msg + + +class InvalidTestConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class InvalidSchemaConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class InvalidSnapshopConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class SameKeyNested(CompilationException): + def __init__(self): + msg = "Test cannot have the same key at the top-level and in config" + super().__init__(msg=msg) + + +class TestArgIncludesModel(CompilationException): + def __init__(self): + msg = 'Test arguments include "model", which is a reserved argument' + super().__init__(msg=msg) + + +class UnexpectedTestNamePattern(CompilationException): + def __init__(self, test_name: str): + self.test_name = test_name + msg = f"Test name string did not match expected pattern: {self.test_name}" + super().__init__(msg=msg) + + +class CustomMacroPopulatingConfigValues(CompilationException): + def __init__( + self, target_name: str, column_name: Optional[str], name: str, key: str, err_msg: str + ): + self.target_name = target_name + self.column_name = column_name + self.name = name + self.key = key + self.err_msg = err_msg + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + # Generic tests do not include custom macros in the Jinja + # rendering context, so this will almost always fail. 
As it + # currently stands, the error message is inscrutable, which + # has caused issues for some projects migrating from + # pre-0.20.0 to post-0.20.0. + # See https://github.com/dbt-labs/dbt-core/issues/4103 + # and https://github.com/dbt-labs/dbt-core/issues/5294 + + msg = ( + f"The {self.target_name}.{self.column_name} column's " + f'"{self.name}" test references an undefined ' + f"macro in its {self.key} configuration argument. " + f"The macro {self.err_msg}.\n" + "Please note that the generic test configuration parser " + "currently does not support using custom macros to " + "populate configuration values" + ) + return msg + + +class TagsNotListOfStrings(CompilationException): + def __init__(self, tags: Any): + self.tags = tags + msg = f"got {self.tags} ({type(self.tags)}) for tags, expected a list of strings" + super().__init__(msg=msg) + + +class TagNotString(CompilationException): + def __init__(self, tag: Any): + self.tag = tag + msg = f"got {self.tag} ({type(self.tag)}) for tag, expected a str" + super().__init__(msg=msg) + + +class TestNameNotString(ParsingException): + def __init__(self, test_name: Any): + self.test_name = test_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = f"test name must be a str, got {type(self.test_name)} (value {self.test_name})" + return msg + + +class TestArgsNotDict(ParsingException): + def __init__(self, test_args: Any): + self.test_args = test_args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = f"test arguments must be a dict, got {type(self.test_args)} (value {self.test_args})" + return msg + + +class TestDefinitionDictLength(ParsingException): + def __init__(self, test): + self.test = test + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = ( + "test definition dictionary must have exactly one key, got" + f" {self.test} instead ({len(self.test)} keys)" + ) + return msg + + +class 
TestInvalidType(ParsingException): + def __init__(self, test: Any): + self.test = test + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"test must be dict or str, got {type(self.test)} (value {self.test})" + return msg + + +# This is triggered across multiple files +class EnvVarMissing(ParsingException): + def __init__(self, var: str): + self.var = var + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Env var required but not provided: '{self.var}'" + return msg + + +class TargetNotFound(CompilationException): + def __init__( + self, + node, + target_name: str, + target_kind: str, + target_package: Optional[str] = None, + disabled: Optional[bool] = None, + ): + self.node = node + self.target_name = target_name + self.target_kind = target_kind + self.target_package = target_package + self.disabled = disabled + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + original_file_path = self.node.original_file_path + unique_id = self.node.unique_id + resource_type_title = self.node.resource_type.title() + + if self.disabled is None: + reason = "was not found or is disabled" + elif self.disabled is True: + reason = "is disabled" + else: + reason = "was not found" + + target_package_string = "" + if self.target_package is not None: + target_package_string = f"in package '{self.target_package}' " + + msg = ( + f"{resource_type_title} '{unique_id}' ({original_file_path}) depends on a " + f"{self.target_kind} named '{self.target_name}' {target_package_string}which {reason}" + ) + return msg + + +class DuplicateSourcePatchName(CompilationException): + def __init__(self, patch_1, patch_2): + self.patch_1 = patch_1 + self.patch_2 = patch_2 + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + name = f"{self.patch_1.overrides}.{self.patch_1.name}" + fix = self._fix_dupe_msg( + self.patch_1.path, + self.patch_2.path, + name, + "sources", + ) + msg = ( + f"dbt 
found two schema.yml entries for the same source named " + f"{self.patch_1.name} in package {self.patch_1.overrides}. Sources may only be " + f"overridden a single time. To fix this, {fix}" + ) + return msg + + +class DuplicateMacroPatchName(CompilationException): + def __init__(self, patch_1, existing_patch_path): + self.patch_1 = patch_1 + self.existing_patch_path = existing_patch_path + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + package_name = self.patch_1.package_name + name = self.patch_1.name + fix = self._fix_dupe_msg( + self.patch_1.original_file_path, self.existing_patch_path, name, "macros" + ) + msg = ( + f"dbt found two schema.yml entries for the same macro in package " + f"{package_name} named {name}. Macros may only be described a single " + f"time. To fix this, {fix}" + ) + return msg + + +# core level exceptions +class DuplicateAlias(AliasException): + def __init__(self, kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str): + self.kwargs = kwargs + self.aliases = aliases + self.canonical_key = canonical_key + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + # dupe found: go through the dict so we can have a nice-ish error + key_names = ", ".join( + "{}".format(k) for k in self.kwargs if self.aliases.get(k) == self.canonical_key + ) + msg = f'Got duplicate keys: ({key_names}) all map to "{self.canonical_key}"' + return msg + + +# Postgres Exceptions + + +class UnexpectedDbReference(NotImplementedException): + def __init__(self, adapter, database, expected): + self.adapter = adapter + self.database = database + self.expected = expected + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Cross-db references not allowed in {self.adapter} ({self.database} vs {self.expected})" + return msg + + +class CrossDbReferenceProhibited(CompilationException): + def __init__(self, adapter, exc_msg: str): + self.adapter = adapter + self.exc_msg = 
exc_msg + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Cross-db references not allowed in adapter {self.adapter}: Got {self.exc_msg}" + return msg + + +class IndexConfigNotDict(CompilationException): + def __init__(self, raw_index: Any): + self.raw_index = raw_index + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Invalid index config:\n" + f" Got: {self.raw_index}\n" + f' Expected a dictionary with at minimum a "columns" key' + ) + return msg + + +class InvalidIndexConfig(CompilationException): + def __init__(self, exc: TypeError): + self.exc = exc + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + validator_msg = self.validator_error_message(self.exc) + msg = f"Could not parse index config: {validator_msg}" + return msg + + +# adapters exceptions +class InvalidMacroResult(CompilationException): + def __init__(self, freshness_macro_name: str, table): + self.freshness_macro_name = freshness_macro_name + self.table = table + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f'Got an invalid result from "{self.freshness_macro_name}" macro: {[tuple(r) for r in self.table]}' + + return msg + + +class SnapshotTargetNotSnapshotTable(CompilationException): + def __init__(self, missing: List): + self.missing = missing + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = 'Snapshot target is not a snapshot table (missing "{}")'.format( + '", "'.join(self.missing) + ) + return msg + + +class SnapshotTargetIncomplete(CompilationException): + def __init__(self, extra: List, missing: List): + self.extra = extra + self.missing = missing + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + 'Snapshot target has ("{}") but not ("{}") - is it an ' + "unmigrated previous version archive?".format( + '", "'.join(self.extra), '", "'.join(self.missing) + ) + ) + return msg + + 
+class RenameToNoneAttempted(CompilationException): + def __init__(self, src_name: str, dst_name: str, name: str): + self.src_name = src_name + self.dst_name = dst_name + self.name = name + self.msg = f"Attempted to rename {self.src_name} to {self.dst_name} for {self.name}" + super().__init__(msg=self.msg) + + +class NullRelationDropAttempted(CompilationException): + def __init__(self, name: str): + self.name = name + self.msg = f"Attempted to drop a null relation for {self.name}" + super().__init__(msg=self.msg) + + +class NullRelationCacheAttempted(CompilationException): + def __init__(self, name: str): + self.name = name + self.msg = f"Attempted to cache a null relation for {self.name}" + super().__init__(msg=self.msg) + + +class InvalidQuoteConfigType(CompilationException): + def __init__(self, quote_config: Any): + self.quote_config = quote_config + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + 'The seed configuration value of "quote_columns" has an ' + f"invalid type {type(self.quote_config)}" + ) + return msg + + +class MultipleDatabasesNotAllowed(CompilationException): + def __init__(self, databases): + self.databases = databases + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = str(self.databases) + return msg + + +class RelationTypeNull(CompilationException): + def __init__(self, relation): + self.relation = relation + self.msg = f"Tried to drop relation {self.relation}, but its type is null." + super().__init__(msg=self.msg) + + +class MaterializationNotAvailable(CompilationException): + def __init__(self, model, adapter_type: str): + self.model = model + self.adapter_type = adapter_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + materialization = self.model.get_materialization() + msg = f"Materialization '{materialization}' is not available for {self.adapter_type}!" 
+ return msg + + +class RelationReturnedMultipleResults(CompilationException): + def __init__(self, kwargs: Mapping[str, Any], matches: List): + self.kwargs = kwargs + self.matches = matches + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + "get_relation returned more than one relation with the given args. " + "Please specify a database or schema to narrow down the result set." + f"\n{self.kwargs}\n\n{self.matches}" + ) + return msg + + +class ApproximateMatch(CompilationException): + def __init__(self, target, relation): + self.target = target + self.relation = relation + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = ( + "When searching for a relation, dbt found an approximate match. " + "Instead of guessing \nwhich relation to use, dbt will move on. " + f"Please delete {self.relation}, or rename it to be less ambiguous." + f"\nSearched for: {self.target}\nFound: {self.relation}" + ) + + return msg + + +# adapters exceptions +class UnexpectedNull(DatabaseException): + def __init__(self, field_name: str, source): + self.field_name = field_name + self.source = source + msg = ( + f"Expected a non-null value when querying field '{self.field_name}' of table " + f" {self.source} but received value 'null' instead" + ) + super().__init__(msg) + + +class UnexpectedNonTimestamp(DatabaseException): + def __init__(self, field_name: str, source, dt: Any): + self.field_name = field_name + self.source = source + self.type_name = type(dt).__name__ + msg = ( + f"Expected a timestamp value when querying field '{self.field_name}' of table " + f"{self.source} but received value of type '{self.type_name}' instead" + ) + super().__init__(msg) + + +# deps exceptions +class MultipleVersionGitDeps(DependencyException): + def __init__(self, git: str, requested): + self.git = git + self.requested = requested + msg = ( + "git dependencies should contain exactly one version. 
" + f"{self.git} contains: {self.requested}" + ) + super().__init__(msg) + + +class DuplicateProjectDependency(DependencyException): + def __init__(self, project_name: str): + self.project_name = project_name + msg = ( + f'Found duplicate project "{self.project_name}". This occurs when ' + "a dependency has the same project name as some other dependency." + ) + super().__init__(msg) + + +class DuplicateDependencyToRoot(DependencyException): + def __init__(self, project_name: str): + self.project_name = project_name + msg = ( + "Found a dependency with the same name as the root project " + f'"{self.project_name}". Package names must be unique in a project.' + " Please rename one of these packages." + ) + super().__init__(msg) + + +class MismatchedDependencyTypes(DependencyException): + def __init__(self, new, old): + self.new = new + self.old = old + msg = ( + f"Cannot incorporate {self.new} ({self.new.__class__.__name__}) in {self.old} " + f"({self.old.__class__.__name__}): mismatched types" + ) + super().__init__(msg) + + +class PackageVersionNotFound(DependencyException): + def __init__( + self, + package_name: str, + version_range, + available_versions: List[str], + should_version_check: bool, + ): + self.package_name = package_name + self.version_range = version_range + self.available_versions = available_versions + self.should_version_check = should_version_check + super().__init__(self.get_message()) + + def get_message(self) -> str: + base_msg = ( + "Could not find a matching compatible version for package {}\n" + " Requested range: {}\n" + " Compatible versions: {}\n" + ) + addendum = ( + ( + "\n" + " Not shown: package versions incompatible with installed version of dbt-core\n" + " To include them, run 'dbt --no-version-check deps'" + ) + if self.should_version_check + else "" + ) + msg = ( + base_msg.format(self.package_name, self.version_range, self.available_versions) + + addendum + ) + return msg + + +class PackageNotFound(DependencyException): + def 
__init__(self, package_name: str): + self.package_name = package_name + msg = f"Package {self.package_name} was not found in the package index" + super().__init__(msg) + + +# config level exceptions + + +class ProfileConfigInvalid(DbtProfileError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class ProjectContractInvalid(DbtProjectError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class ProjectContractBroken(DbtProjectError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class ConfigContractBroken(DbtProjectError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class NonUniquePackageName(CompilationException): + def __init__(self, project_name: str): + self.project_name = project_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + "dbt found more than one package with the name " + f'"{self.project_name}" included in this project. Package ' + "names must be unique in a project. Please rename " + "one of these packages." 
+ ) + return msg + + +class UninstalledPackagesFound(CompilationException): + def __init__( + self, + count_packages_specified: int, + count_packages_installed: int, + packages_install_path: str, + ): + self.count_packages_specified = count_packages_specified + self.count_packages_installed = count_packages_installed + self.packages_install_path = packages_install_path + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"dbt found {self.count_packages_specified} package(s) " + "specified in packages.yml, but only " + f"{self.count_packages_installed} package(s) installed " + f'in {self.packages_install_path}. Run "dbt deps" to ' + "install package dependencies." + ) + return msg + + +class VarsArgNotYamlDict(CompilationException): + def __init__(self, var_type): + self.var_type = var_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + type_name = self.var_type.__name__ + + msg = f"The --vars argument must be a YAML dictionary, but was of type '{type_name}'" + return msg + + +# contracts level + + +class DuplicateMacroInPackage(CompilationException): + def __init__(self, macro, macro_mapping: Mapping): + self.macro = macro + self.macro_mapping = macro_mapping + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + other_path = self.macro_mapping[self.macro.unique_id].original_file_path + # subtract 2 for the "Compilation Error" indent + # note that the line wrap eats newlines, so if you want newlines, + # this is the result :( + msg = line_wrap_message( + f"""\ + dbt found two macros named "{self.macro.name}" in the project + "{self.macro.package_name}". 
+ + + To fix this error, rename or remove one of the following + macros: + + - {self.macro.original_file_path} + + - {other_path} + """, + subtract=2, + ) + return msg + + +class DuplicateMaterializationName(CompilationException): + def __init__(self, macro, other_macro): + self.macro = macro + self.other_macro = other_macro + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + macro_name = self.macro.name + macro_package_name = self.macro.package_name + other_package_name = self.other_macro.macro.package_name + + msg = ( + f"Found two materializations with the name {macro_name} (packages " + f"{macro_package_name} and {other_package_name}). dbt cannot resolve " + "this ambiguity" + ) + return msg + + +# jinja exceptions +class MissingConfig(CompilationException): + def __init__(self, unique_id: str, name: str): + self.unique_id = unique_id + self.name = name + msg = ( + f"Model '{self.unique_id}' does not define a required config parameter '{self.name}'." + ) + super().__init__(msg=msg) + + +class MissingMaterialization(CompilationException): + def __init__(self, model, adapter_type): + self.model = model + self.adapter_type = adapter_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + materialization = self.model.get_materialization() + + valid_types = "'default'" + + if self.adapter_type != "default": + valid_types = f"'default' and '{self.adapter_type}'" + + msg = f"No materialization '{materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})" + return msg + + +class MissingRelation(CompilationException): + def __init__(self, relation, model=None): + self.relation = relation + self.model = model + msg = f"Relation {self.relation} not found!" 
+ super().__init__(msg=msg) + + +class AmbiguousAlias(CompilationException): + def __init__(self, node_1, node_2, duped_name=None): + self.node_1 = node_1 + self.node_2 = node_2 + if duped_name is None: + self.duped_name = f"{self.node_1.database}.{self.node_1.schema}.{self.node_1.alias}" + else: + self.duped_name = duped_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = ( + f'dbt found two resources with the database representation "{self.duped_name}".\ndbt ' + "cannot create two resources with identical database representations. " + "To fix this,\nchange the configuration of one of these resources:" + f"\n- {self.node_1.unique_id} ({self.node_1.original_file_path})\n- {self.node_2.unique_id} ({self.node_2.original_file_path})" + ) + return msg + + +class AmbiguousCatalogMatch(CompilationException): + def __init__(self, unique_id: str, match_1, match_2): + self.unique_id = unique_id + self.match_1 = match_1 + self.match_2 = match_2 + super().__init__(msg=self.get_message()) + + def get_match_string(self, match): + match_schema = match.get("metadata", {}).get("schema") + match_name = match.get("metadata", {}).get("name") + return f"{match_schema}.{match_name}" + + def get_message(self) -> str: + msg = ( + "dbt found two relations in your warehouse with similar database identifiers. 
" + "dbt\nis unable to determine which of these relations was created by the model " + f'"{self.unique_id}".\nIn order for dbt to correctly generate the catalog, one ' + "of the following relations must be deleted or renamed:\n\n - " + f"{self.get_match_string(self.match_1)}\n - {self.get_match_string(self.match_2)}" + ) + + return msg + + +class CacheInconsistency(InternalException): + def __init__(self, msg: str): + self.msg = msg + formatted_msg = f"Cache inconsistency detected: {self.msg}" + super().__init__(msg=formatted_msg) + + +class NewNameAlreadyInCache(CacheInconsistency): + def __init__(self, old_key: str, new_key: str): + self.old_key = old_key + self.new_key = new_key + msg = ( + f'in rename of "{self.old_key}" -> "{self.new_key}", new name is in the cache already' + ) + super().__init__(msg) + + +class ReferencedLinkNotCached(CacheInconsistency): + def __init__(self, referenced_key: str): + self.referenced_key = referenced_key + msg = f"in add_link, referenced link key {self.referenced_key} not in cache!" + super().__init__(msg) + + +class DependentLinkNotCached(CacheInconsistency): + def __init__(self, dependent_key: str): + self.dependent_key = dependent_key + msg = f"in add_link, dependent link key {self.dependent_key} not in cache!" + super().__init__(msg) + + +class TruncatedModelNameCausedCollision(CacheInconsistency): + def __init__(self, new_key, relations: Dict): + self.new_key = new_key + self.relations = relations + super().__init__(self.get_message()) + + def get_message(self) -> str: + # Tell user when collision caused by model names truncated during + # materialization. + match = re.search("__dbt_backup|__dbt_tmp$", self.new_key.identifier) + if match: + truncated_model_name_prefix = self.new_key.identifier[: match.start()] + message_addendum = ( + "\n\nName collisions can occur when the length of two " + "models' names approach your database's builtin limit. 
" + "Try restructuring your project such that no two models " + f"share the prefix '{truncated_model_name_prefix}'. " + "Then, clean your warehouse of any removed models." + ) + else: + message_addendum = "" + + msg = f"in rename, new key {self.new_key} already in cache: {list(self.relations.keys())}{message_addendum}" + + return msg + + +class NoneRelationFound(CacheInconsistency): + def __init__(self): + msg = "in get_relations, a None relation was found in the cache!" + super().__init__(msg) + + +# this is part of the context and also raised in dbt.contracts.relation.py +class DataclassNotDict(CompilationException): + def __init__(self, obj: Any): + self.obj = obj + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f'The object ("{self.obj}") was used as a dictionary. This ' + "capability has been removed from objects of this type." + ) + + return msg + + +class DependencyNotFound(CompilationException): + def __init__(self, node, node_description, required_pkg): + self.node = node + self.node_description = node_description + self.required_pkg = required_pkg + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Error while parsing {self.node_description}.\nThe required package " + f'"{self.required_pkg}" was not found. Is the package installed?\n' + "Hint: You may need to run `dbt deps`." + ) + + return msg + + +class DuplicatePatchPath(CompilationException): + def __init__(self, patch_1, existing_patch_path): + self.patch_1 = patch_1 + self.existing_patch_path = existing_patch_path + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + name = self.patch_1.name + fix = self._fix_dupe_msg( + self.patch_1.original_file_path, + self.existing_patch_path, + name, + "resource", + ) + msg = ( + f"dbt found two schema.yml entries for the same resource named " + f"{name}. Resources and their associated columns may only be " + f"described a single time. 
To fix this, {fix}" + ) + return msg + + +# should this inherit ParsingException instead? +class DuplicateResourceName(CompilationException): + def __init__(self, node_1, node_2): + self.node_1 = node_1 + self.node_2 = node_2 + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + duped_name = self.node_1.name + node_type = NodeType(self.node_1.resource_type) + pluralized = ( + node_type.pluralize() + if self.node_1.resource_type == self.node_2.resource_type + else "resources" # still raise if ref() collision, e.g. model + seed + ) + + action = "looking for" + # duplicate 'ref' targets + if node_type in NodeType.refable(): + formatted_name = f'ref("{duped_name}")' + # duplicate sources + elif node_type == NodeType.Source: + duped_name = self.node_1.get_full_source_name() + formatted_name = self.node_1.get_source_representation() + # duplicate docs blocks + elif node_type == NodeType.Documentation: + formatted_name = f'doc("{duped_name}")' + # duplicate generic tests + elif node_type == NodeType.Test and hasattr(self.node_1, "test_metadata"): + column_name = ( + f'column "{self.node_1.column_name}" in ' if self.node_1.column_name else "" + ) + model_name = self.node_1.file_key_name + duped_name = f'{self.node_1.name}" defined on {column_name}"{model_name}' + action = "running" + formatted_name = "tests" + # all other resource types + else: + formatted_name = duped_name + + msg = f""" +dbt found two {pluralized} with the name "{duped_name}". + +Since these resources have the same name, dbt will be unable to find the correct resource when {action} {formatted_name}. 
To fix this, change the name of one of these resources: -- {node_1.unique_id} ({node_1.original_file_path}) -- {node_2.unique_id} ({node_2.original_file_path}) +- {self.node_1.unique_id} ({self.node_1.original_file_path}) +- {self.node_2.unique_id} ({self.node_2.original_file_path}) """.strip() - ) + return msg -def raise_ambiguous_alias(node_1, node_2, duped_name=None): - if duped_name is None: - duped_name = f"{node_1.database}.{node_1.schema}.{node_1.alias}" - - raise_compiler_error( - 'dbt found two resources with the database representation "{}".\ndbt ' - "cannot create two resources with identical database representations. " - "To fix this,\nchange the configuration of one of these resources:" - "\n- {} ({})\n- {} ({})".format( - duped_name, - node_1.unique_id, - node_1.original_file_path, - node_2.unique_id, - node_2.original_file_path, +class InvalidPropertyYML(CompilationException): + def __init__(self, path: str, issue: str): + self.path = path + self.issue = issue + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"The yml property file at {self.path} is invalid because {self.issue}. " + "Please consult the documentation for more information on yml property file " + "syntax:\n\nhttps://docs.getdbt.com/reference/configs-and-properties" ) - ) + return msg -def raise_ambiguous_catalog_match(unique_id, match_1, match_2): - def get_match_string(match): - return "{}.{}".format( - match.get("metadata", {}).get("schema"), - match.get("metadata", {}).get("name"), - ) +class PropertyYMLMissingVersion(InvalidPropertyYML): + def __init__(self, path: str): + self.path = path + self.issue = f"the yml property file {self.path} is missing a version tag" + super().__init__(self.path, self.issue) - raise_compiler_error( - "dbt found two relations in your warehouse with similar database " - "identifiers. 
dbt\nis unable to determine which of these relations " - 'was created by the model "{unique_id}".\nIn order for dbt to ' - "correctly generate the catalog, one of the following relations must " - "be deleted or renamed:\n\n - {match_1_s}\n - {match_2_s}".format( - unique_id=unique_id, - match_1_s=get_match_string(match_1), - match_2_s=get_match_string(match_2), + +class PropertyYMLVersionNotInt(InvalidPropertyYML): + def __init__(self, path: str, version: Any): + self.path = path + self.version = version + self.issue = ( + "its 'version:' tag must be an integer (e.g. version: 2)." + f" {self.version} is not an integer" ) - ) + super().__init__(self.path, self.issue) -def raise_patch_targets_not_found(patches): - patch_list = "\n\t".join( - "model {} (referenced in path {})".format(p.name, p.original_file_path) - for p in patches.values() - ) - raise_compiler_error( - "dbt could not find models for the following patches:\n\t{}".format(patch_list) - ) +class PropertyYMLInvalidTag(InvalidPropertyYML): + def __init__(self, path: str, version: int): + self.path = path + self.version = version + self.issue = f"its 'version:' tag is set to {self.version}. Only 2 is supported" + super().__init__(self.path, self.issue) -def _fix_dupe_msg(path_1: str, path_2: str, name: str, type_name: str) -> str: - if path_1 == path_2: - return f"remove one of the {type_name} entries for {name} in this file:\n - {path_1!s}\n" - else: - return ( - f"remove the {type_name} entry for {name} in one of these files:\n" - f" - {path_1!s}\n{path_2!s}" +class RelationWrongType(CompilationException): + def __init__(self, relation, expected_type, model=None): + self.relation = relation + self.expected_type = expected_type + self.model = model + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Trying to create {self.expected_type} {self.relation}, " + f"but it currently exists as a {self.relation.type}. 
Either " + f"drop {self.relation} manually, or run dbt with " + "`--full-refresh` and dbt will drop it for you." ) + return msg + -def raise_duplicate_patch_name(patch_1, existing_patch_path): - name = patch_1.name - fix = _fix_dupe_msg( - patch_1.original_file_path, - existing_patch_path, - name, - "resource", - ) - raise_compiler_error( - f"dbt found two schema.yml entries for the same resource named " - f"{name}. Resources and their associated columns may only be " - f"described a single time. To fix this, {fix}" - ) +# These are copies of what's in dbt/context/exceptions_jinja.py to not immediately break adapters +# utilizing these functions as exceptions. These are direct copies to avoid circular imports. +# They will be removed in 1 (or 2?) versions. Issue to be created to ensure it happens. + +# TODO: add deprecation to functions +def warn(msg, node=None): + warn_or_error(JinjaLogWarning(msg=msg, node_info=get_node_info())) + return "" -def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): - package_name = patch_1.package_name - name = patch_1.name - fix = _fix_dupe_msg(patch_1.original_file_path, existing_patch_path, name, "macros") - raise_compiler_error( - f"dbt found two schema.yml entries for the same macro in package " - f"{package_name} named {name}. Macros may only be described a single " - f"time. 
To fix this, {fix}" - ) +def missing_config(model, name) -> NoReturn: + raise MissingConfig(unique_id=model.unique_id, name=name) + + +def missing_materialization(model, adapter_type) -> NoReturn: + raise MissingMaterialization(model=model, adapter_type=adapter_type) + + +def missing_relation(relation, model=None) -> NoReturn: + raise MissingRelation(relation, model) + + +def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: + raise AmbiguousAlias(node_1, node_2, duped_name) + + +def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: + raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + + +def raise_cache_inconsistent(message) -> NoReturn: + raise CacheInconsistency(message) + + +def raise_dataclass_not_dict(obj) -> NoReturn: + raise DataclassNotDict(obj) + + +# note: this is called all over the code in addition to in jinja +def raise_compiler_error(msg, node=None) -> NoReturn: + raise CompilationException(msg, node) + + +def raise_database_error(msg, node=None) -> NoReturn: + raise DatabaseException(msg, node) + + +def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: + raise DependencyNotFound(node, node_description, required_pkg) + + +def raise_dependency_error(msg) -> NoReturn: + raise DependencyException(scrub_secrets(msg, env_secrets())) + + +def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: + raise DuplicatePatchPath(patch_1, existing_patch_path) + + +def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: + raise DuplicateResourceName(node_1, node_2) + + +def raise_invalid_property_yml_version(path, issue) -> NoReturn: + raise InvalidPropertyYML(path, issue) + + +def raise_not_implemented(msg) -> NoReturn: + raise NotImplementedException(msg) + + +def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: + raise RelationWrongType(relation, expected_type, model) + + +# these were implemented in core so deprecating here by calling the new 
exception directly +def raise_duplicate_alias( + kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str +) -> NoReturn: + raise DuplicateAlias(kwargs, aliases, canonical_key) def raise_duplicate_source_patch_name(patch_1, patch_2): - name = f"{patch_1.overrides}.{patch_1.name}" - fix = _fix_dupe_msg( - patch_1.path, - patch_2.path, - name, - "sources", - ) - raise_compiler_error( - f"dbt found two schema.yml entries for the same source named " - f"{patch_1.name} in package {patch_1.overrides}. Sources may only be " - f"overridden a single time. To fix this, {fix}" - ) + raise DuplicateSourcePatchName(patch_1, patch_2) + + +def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): + raise DuplicateMacroPatchName(patch_1, existing_patch_path) + + +def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: + raise DuplicateMacroName(node_1, node_2, namespace) + + +def approximate_relation_match(target, relation): + raise ApproximateMatch(target, relation) -def raise_invalid_property_yml_version(path, issue): - raise_compiler_error( - "The yml property file at {} is invalid because {}. Please consult the " - "documentation for more information on yml property file syntax:\n\n" - "https://docs.getdbt.com/reference/configs-and-properties".format(path, issue) +def get_relation_returned_multiple_results(kwargs, matches): + raise RelationReturnedMultipleResults(kwargs, matches) + + +def system_error(operation_name): + # Note: This was converted for core to use SymbolicLinkError because it's the only way it was used. Maintaining flexibility here for now. + msg = ( + f"dbt encountered an error when attempting to {operation_name}. 
" + "If this error persists, please create an issue at: \n\n" + "https://github.com/dbt-labs/dbt-core" ) + raise CompilationException(msg) -def raise_unrecognized_credentials_type(typename, supported_types): - raise_compiler_error( - 'Unrecognized credentials type "{}" - supported types are ({})'.format( - typename, ", ".join('"{}"'.format(t) for t in supported_types) - ) +def invalid_materialization_argument(name, argument): + raise InvalidMaterializationArg(name, argument) + + +def bad_package_spec(repo, spec, error_message): + msg = f"Error checking out spec='{spec}' for repo {repo}\n{error_message}" + raise InternalException(scrub_secrets(msg, env_secrets())) + + +def raise_git_cloning_error(error: CommandResultError) -> NoReturn: + error.cmd = list(scrub_secrets(str(error.cmd), env_secrets())) + raise error + + +def raise_git_cloning_problem(repo) -> NoReturn: + raise GitCloningProblem(repo) + + +def macro_invalid_dispatch_arg(macro_name) -> NoReturn: + raise MacroInvalidDispatchArg(macro_name) + + +def dependency_not_found(node, dependency): + raise GraphDependencyNotFound(node, dependency) + + +def target_not_found( + node, + target_name: str, + target_kind: str, + target_package: Optional[str] = None, + disabled: Optional[bool] = None, +) -> NoReturn: + raise TargetNotFound( + node=node, + target_name=target_name, + target_kind=target_kind, + target_package=target_package, + disabled=disabled, ) -def warn_invalid_patch(patch, resource_type): - msg = line_wrap_message( - f"""\ - '{patch.name}' is a {resource_type} node, but it is - specified in the {patch.yaml_key} section of - {patch.original_file_path}. - To fix this error, place the `{patch.name}` - specification under the {resource_type.pluralize()} key instead. 
- """ +def doc_target_not_found( + model, target_doc_name: str, target_doc_package: Optional[str] +) -> NoReturn: + raise DocTargetNotFound( + node=model, target_doc_name=target_doc_name, target_doc_package=target_doc_package ) - warn_or_error(msg, log_fmt=warning_tag("{}")) -def raise_not_implemented(msg): - raise NotImplementedException("ERROR: {}".format(msg)) +def doc_invalid_args(model, args) -> NoReturn: + raise InvalidDocArgs(node=model, args=args) + +def ref_bad_context(model, args) -> NoReturn: + raise RefBadContext(node=model, args=args) + + +def metric_invalid_args(model, args) -> NoReturn: + raise MetricInvalidArgs(node=model, args=args) -def raise_duplicate_alias( - kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str -) -> NoReturn: - # dupe found: go through the dict so we can have a nice-ish error - key_names = ", ".join("{}".format(k) for k in kwargs if aliases.get(k) == canonical_key) - raise AliasException(f'Got duplicate keys: ({key_names}) all map to "{canonical_key}"') +def ref_invalid_args(model, args) -> NoReturn: + raise RefInvalidArgs(node=model, args=args) -def warn_or_error(msg, node=None, log_fmt=None): - if flags.WARN_ERROR: - raise_compiler_error(scrub_secrets(msg, env_secrets()), node) - else: - fire_event(GeneralWarningMsg(msg=msg, log_fmt=log_fmt)) +def invalid_bool_error(got_value, macro_name) -> NoReturn: + raise InvalidBoolean(return_value=got_value, macro_name=macro_name) -def warn_or_raise(exc, log_fmt=None): - if flags.WARN_ERROR: - raise exc - else: - fire_event(GeneralWarningException(exc=str(exc), log_fmt=log_fmt)) +def invalid_type_error(method_name, arg_name, got_value, expected_type) -> NoReturn: + """Raise a CompilationException when an adapter method available to macros + has changed. 
+ """ + raise InvalidMacroArgType(method_name, arg_name, got_value, expected_type) -def warn(msg, node=None): - # there's no reason to expose log_fmt to macros - it's only useful for - # handling colors - warn_or_error(msg, node=node) - return "" +def disallow_secret_env_var(env_var_name) -> NoReturn: + """Raise an error when a secret env var is referenced outside allowed + rendering contexts""" + raise DisallowSecretEnvVar(env_var_name) + + +def raise_parsing_error(msg, node=None) -> NoReturn: + raise ParsingException(msg, node) + + +# These are the exceptions functions that were not called within dbt-core but will remain here but deprecated to give a chance to rework +# TODO: is this valid? Should I create a special exception class for this? +def raise_unrecognized_credentials_type(typename, supported_types): + msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format( + typename, ", ".join('"{}"'.format(t) for t in supported_types) + ) + raise CompilationException(msg) + + +def raise_patch_targets_not_found(patches): + patch_list = "\n\t".join( + f"model {p.name} (referenced in path {p.original_file_path})" for p in patches.values() + ) + msg = f"dbt could not find models for the following patches:\n\t{patch_list}" + raise CompilationException(msg) + + +def multiple_matching_relations(kwargs, matches): + raise RelationReturnedMultipleResults(kwargs, matches) + + +# while this isn't in our code I wouldn't be surpised it's in adapter code +def materialization_not_available(model, adapter_type): + raise MaterializationNotAvailable(model, adapter_type) + + +def macro_not_found(model, target_macro_id): + msg = f"'{model.unique_id}' references macro '{target_macro_id}' which is not defined!" + raise CompilationException(msg=msg, node=model) -# Update this when a new function should be added to the -# dbt context's `exceptions` key! 
-CONTEXT_EXPORTS = { - fn.__name__: fn - for fn in [ - warn, - missing_config, - missing_materialization, - missing_relation, - raise_ambiguous_alias, - raise_ambiguous_catalog_match, - raise_cache_inconsistent, - raise_dataclass_not_dict, - raise_compiler_error, - raise_database_error, - raise_dep_not_found, - raise_dependency_error, - raise_duplicate_patch_name, - raise_duplicate_resource_name, - raise_invalid_property_yml_version, - raise_not_implemented, - relation_wrong_type, - ] -} - - -def wrapper(model): - def wrap(func): - @functools.wraps(func) - def inner(*args, **kwargs): - try: - return func(*args, **kwargs) - except RuntimeException as exc: - exc.add_node(model) - raise exc - - return inner - - return wrap - - -def wrapped_exports(model): - wrap = wrapper(model) - return {name: wrap(export) for name, export in CONTEXT_EXPORTS.items()} +# adapters use this to format messages. it should be deprecated but live on for now +def validator_error_message(exc): + """Given a dbt.dataclass_schema.ValidationError (which is basically a + jsonschema.ValidationError), return the relevant parts as a string + """ + if not isinstance(exc, dbt.dataclass_schema.ValidationError): + return str(exc) + path = "[%s]" % "][".join(map(repr, exc.relative_path)) + return "at path {}: {}".format(path, exc.message) diff --git a/core/dbt/flags.py b/core/dbt/flags.py index bff51c2b343..14e60c834c6 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -52,7 +52,6 @@ "PRINTER_WIDTH", "PROFILES_DIR", "INDIRECT_SELECTION", - "EVENT_BUFFER_SIZE", "TARGET_PATH", "LOG_PATH", ] @@ -73,11 +72,11 @@ "LOG_CACHE_EVENTS": False, "LOG_FORMAT": None, "LOG_PATH": None, + "QUIET": False, "NO_PRINT": False, "PARTIAL_PARSE": True, "PRINTER_WIDTH": 80, "PROFILES_DIR": DEFAULT_PROFILES_DIR, - "QUIET": False, "SEND_ANONYMOUS_USAGE_STATS": True, "STATIC_PARSER": True, "TARGET_PATH": None, @@ -115,6 +114,7 @@ def env_set_path(key: str) -> Optional[Path]: MACRO_DEBUGGING = 
env_set_truthy("DBT_MACRO_DEBUGGING") DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE") +FAVOR_STATE_MODE = env_set_truthy("DBT_FAVOR_STATE_STATE") ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH") ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER") @@ -135,7 +135,7 @@ def set_from_args(args, user_config): global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS, ANONYMOUS_USAGE_STATS - global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, EVENT_BUFFER_SIZE, QUIET, NO_PRINT, CACHE_SELECTED_ONLY + global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, QUIET, NO_PRINT, CACHE_SELECTED_ONLY global TARGET_PATH, LOG_PATH STRICT_MODE = False # backwards compatibility @@ -148,7 +148,6 @@ def set_from_args(args, user_config): ANONYMOUS_USAGE_STATS = get_flag_value("ANONYMOUS_USAGE_STATS", args, user_config) CACHE_SELECTED_ONLY = get_flag_value("CACHE_SELECTED_ONLY", args, user_config) DEBUG = get_flag_value("DEBUG", args, user_config) - EVENT_BUFFER_SIZE = get_flag_value("EVENT_BUFFER_SIZE", args, user_config) FAIL_FAST = get_flag_value("FAIL_FAST", args, user_config) INDIRECT_SELECTION = get_flag_value("INDIRECT_SELECTION", args, user_config) LOG_CACHE_EVENTS = get_flag_value("LOG_CACHE_EVENTS", args, user_config) @@ -186,7 +185,7 @@ def _set_overrides_from_env(): def get_flag_value(flag, args, user_config): flag_value = _load_flag_value(flag, args, user_config) - if flag in ["PRINTER_WIDTH", "EVENT_BUFFER_SIZE"]: # must be ints + if flag == "PRINTER_WIDTH": # must be ints flag_value = int(flag_value) if flag == "PROFILES_DIR": flag_value = os.path.abspath(flag_value) @@ -248,7 +247,6 @@ def get_flag_dict(): "printer_width": PRINTER_WIDTH, "indirect_selection": INDIRECT_SELECTION, "log_cache_events": LOG_CACHE_EVENTS, - "event_buffer_size": EVENT_BUFFER_SIZE, 
"quiet": QUIET, "no_print": NO_PRINT, } diff --git a/core/dbt/graph/queue.py b/core/dbt/graph/queue.py index 56248409754..3c3b9625d27 100644 --- a/core/dbt/graph/queue.py +++ b/core/dbt/graph/queue.py @@ -5,8 +5,12 @@ from typing import Dict, Set, List, Generator, Optional from .graph import UniqueId -from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedExposure, ParsedMetric -from dbt.contracts.graph.compiled import GraphMemberNode +from dbt.contracts.graph.nodes import ( + SourceDefinition, + Exposure, + Metric, + GraphMemberNode, +) from dbt.contracts.graph.manifest import Manifest from dbt.node_types import NodeType @@ -48,7 +52,7 @@ def _include_in_cost(self, node_id: UniqueId) -> bool: if node.resource_type != NodeType.Model: return False # must be a Model - tell mypy this won't be a Source or Exposure or Metric - assert not isinstance(node, (ParsedSourceDefinition, ParsedExposure, ParsedMetric)) + assert not isinstance(node, (SourceDefinition, Exposure, Metric)) if node.is_ephemeral: return False return True diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index 49b73fc71c4..ed91596712b 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -5,15 +5,14 @@ from .selector_methods import MethodManager from .selector_spec import SelectionCriteria, SelectionSpec, IndirectSelection -from dbt.events.functions import fire_event -from dbt.events.types import SelectorReportInvalidSelector +from dbt.events.functions import fire_event, warn_or_error +from dbt.events.types import SelectorReportInvalidSelector, NoNodesForSelectionCriteria from dbt.node_types import NodeType from dbt.exceptions import ( InternalException, InvalidSelectorException, - warn_or_error, ) -from dbt.contracts.graph.compiled import GraphMemberNode +from dbt.contracts.graph.nodes import GraphMemberNode from dbt.contracts.graph.manifest import Manifest from dbt.contracts.state import PreviousState @@ -24,11 +23,6 @@ def 
get_package_names(nodes): return set([node.split(".")[1] for node in nodes]) -def alert_non_existence(raw_spec, nodes): - if len(nodes) == 0: - warn_or_error(f"The selection criterion '{str(raw_spec)}' does not match any nodes") - - def can_select_indirectly(node): """If a node is not selected itself, but its parent(s) are, it may qualify for indirect selection. @@ -142,8 +136,8 @@ def select_nodes_recursively(self, spec: SelectionSpec) -> Tuple[Set[UniqueId], direct_nodes = self.incorporate_indirect_nodes(initial_direct, indirect_nodes) - if spec.expect_exists: - alert_non_existence(spec.raw, direct_nodes) + if spec.expect_exists and len(direct_nodes) == 0: + warn_or_error(NoNodesForSelectionCriteria(spec_raw=str(spec.raw))) return direct_nodes, indirect_nodes @@ -223,7 +217,7 @@ def expand_selection( if can_select_indirectly(node): # should we add it in directly? if indirect_selection == IndirectSelection.Eager or set( - node.depends_on.nodes + node.depends_on_nodes ) <= set(selected): direct_nodes.add(unique_id) # if not: @@ -247,7 +241,7 @@ def incorporate_indirect_nodes( for unique_id in indirect_nodes: if unique_id in self.manifest.nodes: node = self.manifest.nodes[unique_id] - if set(node.depends_on.nodes) <= set(selected): + if set(node.depends_on_nodes) <= set(selected): selected.add(unique_id) return selected diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py index 577cf825512..c77625649bc 100644 --- a/core/dbt/graph/selector_methods.py +++ b/core/dbt/graph/selector_methods.py @@ -7,20 +7,15 @@ from .graph import UniqueId -from dbt.contracts.graph.compiled import ( - CompiledSingularTestNode, - CompiledGenericTestNode, - CompileResultNode, - ManifestNode, -) from dbt.contracts.graph.manifest import Manifest, WritableManifest -from dbt.contracts.graph.parsed import ( - HasTestMetadata, - ParsedSingularTestNode, - ParsedExposure, - ParsedMetric, - ParsedGenericTestNode, - ParsedSourceDefinition, +from 
dbt.contracts.graph.nodes import ( + SingularTestNode, + Exposure, + Metric, + GenericTestNode, + SourceDefinition, + ResultNode, + ManifestNode, ) from dbt.contracts.state import PreviousState from dbt.exceptions import ( @@ -76,7 +71,7 @@ def is_selected_node(fqn: List[str], node_selector: str): return True -SelectorTarget = Union[ParsedSourceDefinition, ManifestNode, ParsedExposure, ParsedMetric] +SelectorTarget = Union[SourceDefinition, ManifestNode, Exposure, Metric] class SelectorMethod(metaclass=abc.ABCMeta): @@ -99,7 +94,7 @@ def parsed_nodes( def source_nodes( self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedSourceDefinition]]: + ) -> Iterator[Tuple[UniqueId, SourceDefinition]]: for key, source in self.manifest.sources.items(): unique_id = UniqueId(key) @@ -107,9 +102,7 @@ def source_nodes( continue yield unique_id, source - def exposure_nodes( - self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedExposure]]: + def exposure_nodes(self, included_nodes: Set[UniqueId]) -> Iterator[Tuple[UniqueId, Exposure]]: for key, exposure in self.manifest.exposures.items(): unique_id = UniqueId(key) @@ -117,9 +110,7 @@ def exposure_nodes( continue yield unique_id, exposure - def metric_nodes( - self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedMetric]]: + def metric_nodes(self, included_nodes: Set[UniqueId]) -> Iterator[Tuple[UniqueId, Metric]]: for key, metric in self.manifest.metrics.items(): unique_id = UniqueId(key) @@ -139,13 +130,13 @@ def all_nodes( def configurable_nodes( self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, CompileResultNode]]: + ) -> Iterator[Tuple[UniqueId, ResultNode]]: yield from chain(self.parsed_nodes(included_nodes), self.source_nodes(included_nodes)) def non_source_nodes( self, included_nodes: Set[UniqueId], - ) -> Iterator[Tuple[UniqueId, Union[ParsedExposure, ManifestNode, ParsedMetric]]]: + ) -> Iterator[Tuple[UniqueId, Union[Exposure, ManifestNode, 
Metric]]]: yield from chain( self.parsed_nodes(included_nodes), self.exposure_nodes(included_nodes), @@ -286,8 +277,6 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu root = Path.cwd() paths = set(p.relative_to(root) for p in root.glob(selector)) for node, real_node in self.all_nodes(included_nodes): - if Path(real_node.root_path) != root: - continue ofp = Path(real_node.original_file_path) if ofp in paths: yield node @@ -387,26 +376,26 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu class TestNameSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: for node, real_node in self.parsed_nodes(included_nodes): - if isinstance(real_node, HasTestMetadata): - if real_node.test_metadata.name == selector: + if real_node.resource_type == NodeType.Test and hasattr(real_node, "test_metadata"): + if real_node.test_metadata.name == selector: # type: ignore[union-attr] yield node class TestTypeSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: - search_types: Tuple[Type, ...] 
+ search_type: Type # continue supporting 'schema' + 'data' for backwards compatibility if selector in ("generic", "schema"): - search_types = (ParsedGenericTestNode, CompiledGenericTestNode) + search_type = GenericTestNode elif selector in ("singular", "data"): - search_types = (ParsedSingularTestNode, CompiledSingularTestNode) + search_type = SingularTestNode else: raise RuntimeException( f'Invalid test type selector {selector}: expected "generic" or ' '"singular"' ) for node, real_node in self.parsed_nodes(included_nodes): - if isinstance(real_node, search_types): + if isinstance(real_node, search_type): yield node @@ -438,6 +427,9 @@ def _macros_modified(self) -> List[str]: return modified def recursively_check_macros_modified(self, node, visited_macros): + if not hasattr(node, "depends_on"): + return False + for macro_uid in node.depends_on.macros: if macro_uid in visited_macros: continue diff --git a/core/dbt/helper_types.py b/core/dbt/helper_types.py index eec26a20c64..a8ff90fa75f 100644 --- a/core/dbt/helper_types.py +++ b/core/dbt/helper_types.py @@ -3,7 +3,7 @@ # necessary for annotating constructors from __future__ import annotations -from dataclasses import dataclass +from dataclasses import dataclass, field from datetime import timedelta from pathlib import Path from typing import Tuple, AbstractSet, Union @@ -85,7 +85,7 @@ def __eq__(self, other): class NoValue(dbtClassMixin): """Sometimes, you want a way to say none that isn't None""" - novalue: NVEnum = NVEnum.novalue + novalue: NVEnum = field(default_factory=lambda: NVEnum.novalue) dbtClassMixin.register_field_encoders( diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql index 602067616d2..e8ff5c1ea4f 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql +++ 
b/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql @@ -50,9 +50,9 @@ {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#} {% set incremental_strategy = config.get('incremental_strategy') or 'default' %} - {% set incremental_predicates = config.get('incremental_predicates', none) %} + {% set incremental_predicates = config.get('predicates', none) or config.get('incremental_predicates', none) %} {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %} - {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %} + {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'incremental_predicates': incremental_predicates }) %} {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %} {% endif %} diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql index 836d768d01a..5033178be49 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql @@ -1,9 +1,9 @@ -{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%} - {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }} +{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} + {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }} {%- endmacro %} -{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%} - 
{%- set predicates = [] if predicates is none else [] + predicates -%} +{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} + {%- set predicates = [] if incremental_predicates is none else [] + incremental_predicates -%} {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%} {%- set merge_update_columns = config.get('merge_update_columns') -%} {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%} @@ -32,7 +32,7 @@ merge into {{ target }} as DBT_INTERNAL_DEST using {{ source }} as DBT_INTERNAL_SOURCE - on {{ predicates | join(' and ') }} + on {{"(" ~ predicates | join(") and (") ~ ")"}} {% if unique_key %} when matched then update set @@ -50,11 +50,11 @@ {% endmacro %} -{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%} - {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }} +{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} + {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }} {%- endmacro %} -{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%} +{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%} @@ -65,8 +65,13 @@ where ( {% for key in unique_key %} {{ source }}.{{ key }} = {{ target }}.{{ key }} - {{ "and " if not loop.last }} + {{ "and " if not loop.last}} {% endfor %} + {% if incremental_predicates %} + {% for predicate in incremental_predicates %} + and {{ predicate }} + {% endfor %} + {% endif %} ); {% else %} delete from {{ target }} @@ -74,7 +79,12 @@ {{ unique_key }}) in ( select ({{ unique_key }}) from {{ source }} - ); + ) + {%- if incremental_predicates %} + {% for 
predicate in incremental_predicates %} + and {{ predicate }} + {% endfor %} + {%- endif -%}; {% endif %} {% endif %} diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql index 5226d01de16..72082ccad32 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql @@ -21,7 +21,7 @@ {% macro default__get_incremental_delete_insert_sql(arg_dict) %} - {% do return(get_delete_insert_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"])) %} + {% do return(get_delete_insert_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %} {% endmacro %} @@ -35,7 +35,7 @@ {% macro default__get_incremental_merge_sql(arg_dict) %} - {% do return(get_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"])) %} + {% do return(get_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %} {% endmacro %} @@ -48,7 +48,7 @@ {% macro default__get_incremental_insert_overwrite_sql(arg_dict) %} - {% do return(get_insert_overwrite_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["dest_columns"], arg_dict["predicates"])) %} + {% do return(get_insert_overwrite_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %} {% endmacro %} diff --git a/core/dbt/include/global_project/macros/python_model/python.sql b/core/dbt/include/global_project/macros/python_model/python.sql index 2155662987e..c56ff7f31c8 100644 --- 
a/core/dbt/include/global_project/macros/python_model/python.sql +++ b/core/dbt/include/global_project/macros/python_model/python.sql @@ -30,12 +30,13 @@ def source(*args, dbt_load_df_function): {% macro build_config_dict(model) %} {%- set config_dict = {} -%} - {%- for key in model.config.config_keys_used -%} + {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %} + {%- for key, default in config_dbt_used -%} {# weird type testing with enum, would be much easier to write this logic in Python! #} {%- if key == 'language' -%} {%- set value = 'python' -%} {%- endif -%} - {%- set value = model.config[key] -%} + {%- set value = model.config.get(key, default) -%} {%- do config_dict.update({key: value}) -%} {%- endfor -%} config_dict = {{ config_dict }} diff --git a/core/dbt/include/index.html b/core/dbt/include/index.html index 182b6b49f99..65749e446d0 100644 --- a/core/dbt/include/index.html +++ b/core/dbt/include/index.html @@ -90,7 +90,7 @@ https://github.com/jquery/jquery/blob/master/src/event.js */var r=function(e,t){this.recycle(e,t)};function i(){return!1}function o(){return!0}r.prototype={instanceString:function(){return"event"},recycle:function(e,t){if(this.isImmediatePropagationStopped=this.isPropagationStopped=this.isDefaultPrevented=i,null!=e&&e.preventDefault?(this.type=e.type,this.isDefaultPrevented=e.defaultPrevented?o:i):null!=e&&e.type?t=e:this.type=e,null!=t&&(this.originalEvent=t.originalEvent,this.type=null!=t.type?t.type:this.type,this.cy=t.cy,this.target=t.target,this.position=t.position,this.renderedPosition=t.renderedPosition,this.namespace=t.namespace,this.layout=t.layout),null!=this.cy&&null!=this.position&&null==this.renderedPosition){var n=this.position,r=this.cy.zoom(),a=this.cy.pan();this.renderedPosition={x:n.x*r+a.x,y:n.y*r+a.y}}this.timeStamp=e&&e.timeStamp||Date.now()},preventDefault:function(){this.isDefaultPrevented=o;var 
e=this.originalEvent;e&&e.preventDefault&&e.preventDefault()},stopPropagation:function(){this.isPropagationStopped=o;var e=this.originalEvent;e&&e.stopPropagation&&e.stopPropagation()},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=o,this.stopPropagation()},isDefaultPrevented:i,isPropagationStopped:i,isImmediatePropagationStopped:i},e.exports=r},function(e,t,n){"use strict";var r=n(1);e.exports=function(e,t){var n=e.cy().hasCompoundNodes();function i(e){var t=e.pstyle("z-compound-depth");return"auto"===t.value?n?e.zDepth():0:"bottom"===t.value?-1:"top"===t.value?r.MAX_INT:0}var o=i(e)-i(t);if(0!==o)return o;function a(e){return"auto"===e.pstyle("z-index-compare").value&&e.isNode()?1:0}var s=a(e)-a(t);if(0!==s)return s;var l=e.pstyle("z-index").value-t.pstyle("z-index").value;return 0!==l?l:e.poolIndex()-t.poolIndex()}},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(6),a=function e(t){if(!(this instanceof e))return new e(t);r.core(t)?(this._private={cy:t,coreStyle:{}},this.length=0,this.resetToDefault()):i.error("A style must have a core reference")},s=a.prototype;s.instanceString=function(){return"style"},s.clear=function(){for(var e=0;e=e.deqFastCost*m)break}else if(o){if(h>=e.deqCost*c||h>=e.deqAvgCost*l)break}else if(g>=e.deqNoDrawCost*(1e3/60))break;var v=e.deq(t,f,d);if(!(v.length>0))break;for(var b=0;b0&&(e.onDeqd(t,u),!o&&e.shouldRedraw(t,u,f,d)&&i())}),o(t))}}}}},function(e,t,n){"use strict";var r=n(0),i=n(12),o=n(94),a=n(136),s=function(e){return void 0===e&&(e={}),r.plainObject(e)?new i(e):r.string(e)?o.apply(o,arguments):void 0};s.use=function(e){var t=Array.prototype.slice.call(arguments,1);return t.unshift(s),e.apply(null,t),this},s.version=n(137),s.stylesheet=s.Stylesheet=a,e.exports=s},function(e,t,n){"use strict";var r=n(0);e.exports={hex2tuple:function(e){if((4===e.length||7===e.length)&&"#"===e[0]){var t=void 0,n=void 0,r=void 0;return 
4===e.length?(t=parseInt(e[1]+e[1],16),n=parseInt(e[2]+e[2],16),r=parseInt(e[3]+e[3],16)):(t=parseInt(e[1]+e[2],16),n=parseInt(e[3]+e[4],16),r=parseInt(e[5]+e[6],16)),[t,n,r]}},hsl2tuple:function(e){var t=void 0,n=void 0,r=void 0,i=void 0,o=void 0,a=void 0,s=void 0,l=void 0;function c(e,t,n){return n<0&&(n+=1),n>1&&(n-=1),n<1/6?e+6*(t-e)*n:n<.5?t:n<2/3?e+(t-e)*(2/3-n)*6:e}var u=new RegExp("^"+this.regex.hsla+"$").exec(e);if(u){if((n=parseInt(u[1]))<0?n=(360- -1*n%360)%360:n>360&&(n%=360),n/=360,(r=parseFloat(u[2]))<0||r>100)return;if(r/=100,(i=parseFloat(u[3]))<0||i>100)return;if(i/=100,void 0!==(o=u[4])&&((o=parseFloat(o))<0||o>1))return;if(0===r)a=s=l=Math.round(255*i);else{var d=i<.5?i*(1+r):i+r-i*r,f=2*i-d;a=Math.round(255*c(f,d,n+1/3)),s=Math.round(255*c(f,d,n)),l=Math.round(255*c(f,d,n-1/3))}t=[a,s,l,o]}return t},rgb2tuple:function(e){var t=void 0,n=new RegExp("^"+this.regex.rgba+"$").exec(e);if(n){t=[];for(var r=[],i=1;i<=3;i++){var o=n[i];if("%"===o[o.length-1]&&(r[i]=!0),o=parseFloat(o),r[i]&&(o=o/100*255),o<0||o>255)return;t.push(Math.floor(o))}var a=r[1]||r[2]||r[3],s=r[1]&&r[2]&&r[3];if(a&&!s)return;var l=n[4];if(void 0!==l){if((l=parseFloat(l))<0||l>1)return;t.push(l)}}return t},colorname2tuple:function(e){return 
this.colors[e.toLowerCase()]},color2tuple:function(e){return(r.array(e)?e:null)||this.colorname2tuple(e)||this.hex2tuple(e)||this.rgb2tuple(e)||this.hsl2tuple(e)},colors:{transparent:[0,0,0,0],aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],grey:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[11
9,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]}}},function(e,t,n){"use strict";var r=n(0);e.exports={mapEmpty:function(e){return null==e||0===Object.keys(e).length},pushMap:function(e){var t=this.getMap(e);null==t?this.setMap(this.extend({},e,{value:[e.value]})):t.push(e.value)},setMap:function(e){for(var t=e.map,n=e.keys,i=n.length,o=0;ot?1:0}e.exports={sort:{ascending:r,descending:function(e,t){return-1*r(e,t)}}}},function(e,t,n){"use strict";function r(){this._obj={}}var 
i=r.prototype;i.set=function(e,t){this._obj[e]=t},i.delete=function(e){this._obj[e]=null},i.has=function(e){return null!=this._obj[e]},i.get=function(e){return this._obj[e]},e.exports=r},function(e,t,n){"use strict";var r=n(1),i={};[n(30),n(31),n(33),n(34),n(35),n(36),n(37),n(38),n(39),n(40),n(41)].forEach((function(e){r.extend(i,e)})),e.exports=i},function(e,t,n){"use strict";var r=n(0),i=function(e){return e={bfs:e.bfs||!e.dfs,dfs:e.dfs||!e.bfs},function(t,n,i){var o;r.plainObject(t)&&!r.elementOrCollection(t)&&(t=(o=t).roots||o.root,n=o.visit,i=o.directed),i=2!==arguments.length||r.fn(n)?i:n,n=r.fn(n)?n:function(){};for(var a,s=this._private.cy,l=t=r.string(t)?this.filter(t):t,c=[],u=[],d={},f={},p={},h=0,g=this.nodes(),m=this.edges(),v=0;v0;){var y=g.pop(),x=p(y),w=y.id();if(u[w]=x,x!==1/0){var k=y.neighborhood().intersect(f);for(m=0;m0)for(n.unshift(t);c[i.id()];){var o=c[i.id()];n.unshift(o.edge),n.unshift(o.node),i=o.node}return a.collection(n)}}}};e.exports=o},function(e,t){e.exports=n},function(e,t,n){"use strict";var r=n(0),i={kruskal:function(e){var t=this.cy();function n(e){for(var t=0;t0;){var y=n(p,v),x=i.getElementById(p[y]),w=x.id();if(b++,w==d){var k=t(u,d,h,[]);return{found:!0,distance:m[w],path:this.spawn(k),steps:b}}f.push(w),p.splice(y,1);for(var A=x._private.edges,E=0;Eb&&(u[m][v]=b,p[m][v]=v,h[m][v]=o[c])}if(!i)for(c=0;cb&&(u[m][v]=b,p[m][v]=v,h[m][v]=o[c]);for(var y=0;yu&&(u=t)},f=function(e){return c[e]},p=0;p0?S.edgesTo(E)[0]:E.edgesTo(S)[0]);E=E.id(),y[E]>y[k]+$&&(y[E]=y[k]+$,x.nodes.indexOf(E)<0?x.push(E):x.updateItem(E),b[E]=0,v[E]=[]),y[E]==y[k]+$&&(b[E]=b[E]+b[k],v[E].push(k))}else for(A=0;A0;)for(E=m.pop(),A=0;A0:void 0}},clearQueue:function(){return function(){var e=void 0!==this.length?this:[this];if(!(this._private.cy||this).styleEnabled())return this;for(var t=0;t0&&this.spawn(n).updateStyle().emit("class"),t},addClass:function(e){return this.toggleClass(e,!0)},hasClass:function(e){var t=this[0];return 
null!=t&&t._private.classes.has(e)},toggleClass:function(e,t){for(var n=e.match(/\S+/g)||[],r=[],i=0,o=this.length;i0&&this.spawn(r).updateStyle().emit("class"),this},removeClass:function(e){return this.toggleClass(e,!1)},flashClass:function(e,t){var n=this;if(null==t)t=250;else if(0===t)return n;return n.addClass(e),setTimeout((function(){n.removeClass(e)}),t),n}};e.exports=i},function(e,t,n){"use strict";n(0);var r=n(6),i={allAre:function(e){var t=new r(e);return this.every((function(e){return t.matches(e)}))},is:function(e){var t=new r(e);return this.some((function(e){return t.matches(e)}))},some:function(e,t){for(var n=0;n\\?\\@\\[\\]\\^\\`\\{\\|\\}\\~]",comparatorOp:"=|\\!=|>|>=|<|<=|\\$=|\\^=|\\*=",boolOp:"\\?|\\!|\\^",string:"\"(?:\\\\\"|[^\"])*\"|'(?:\\\\'|[^'])*'",number:n(1).regex.number,meta:"degree|indegree|outdegree",separator:"\\s*,\\s*",descendant:"\\s+",child:"\\s+>\\s+",subject:"\\$",group:"node|edge|\\*",directedEdge:"\\s+->\\s+",undirectedEdge:"\\s+<->\\s+"};r.variable="(?:[\\w-]|(?:\\\\"+r.metaChar+"))+",r.value=r.string+"|"+r.number,r.className=r.variable,r.id=r.variable,function(){var e=void 0,t=void 0,n=void 0;for(e=r.comparatorOp.split("|"),n=0;n=0||"="!==t&&(r.comparatorOp+="|\\!"+t)}(),e.exports=r},function(e,t,n){"use strict";var r=n(15).stateSelectorMatches,i=n(0),o=function(e,t){for(var n=!0,r=0;r=0&&(d=d.toLowerCase(),f=f.toLowerCase(),a=a.replace("@",""),p=!0);var h=!1;a.indexOf("!")>=0&&(a=a.replace("!",""),h=!0),p&&(s=f.toLowerCase(),u=d.toLowerCase());var g=!1;switch(a){case"*=":c=d.indexOf(f)>=0;break;case"$=":c=d.indexOf(f,d.length-f.length)>=0;break;case"^=":c=0===d.indexOf(f);break;case"=":c=u===s;break;case">":g=!0,c=u>s;break;case">=":g=!0,c=u>=s;break;case"<":g=!0,c=u0;){var u=o.shift();t(u),a.add(u.id()),s&&i(o,a,u)}return e}function a(e,t,n){if(n.isParent())for(var r=n._private.children,i=0;i1&&void 0!==arguments[1])||arguments[1];return o(this,e,t,a)},i.forEachUp=function(e){var t=!(arguments.length>1&&void 
0!==arguments[1])||arguments[1];return o(this,e,t,s)},i.forEachUpAndDown=function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];return o(this,e,t,l)},i.ancestors=i.parents,e.exports=i},function(e,t,n){"use strict";var r,i=n(4),o=void 0;(o=r={data:i.data({field:"data",bindingEvent:"data",allowBinding:!0,allowSetting:!0,settingEvent:"data",settingTriggersEvent:!0,triggerFnName:"trigger",allowGetting:!0,immutableKeys:{id:!0,source:!0,target:!0,parent:!0},updateStyle:!0}),removeData:i.removeData({field:"data",event:"data",triggerFnName:"trigger",triggerEvent:!0,immutableKeys:{id:!0,source:!0,target:!0,parent:!0},updateStyle:!0}),scratch:i.data({field:"scratch",bindingEvent:"scratch",allowBinding:!0,allowSetting:!0,settingEvent:"scratch",settingTriggersEvent:!0,triggerFnName:"trigger",allowGetting:!0,updateStyle:!0}),removeScratch:i.removeData({field:"scratch",event:"scratch",triggerFnName:"trigger",triggerEvent:!0,updateStyle:!0}),rscratch:i.data({field:"rscratch",allowBinding:!1,allowSetting:!0,settingTriggersEvent:!1,allowGetting:!0}),removeRscratch:i.removeData({field:"rscratch",triggerEvent:!1}),id:function(){var e=this[0];if(e)return e._private.data.id}}).attr=o.data,o.removeAttr=o.removeData,e.exports=r},function(e,t,n){"use strict";var r=n(1),i={};function o(e){return function(t){if(void 0===t&&(t=!0),0!==this.length&&this.isNode()&&!this.removed()){for(var n=0,r=this[0],i=r._private.edges,o=0;ot})),minIndegree:a("indegree",(function(e,t){return et})),minOutdegree:a("outdegree",(function(e,t){return et}))}),r.extend(i,{totalDegree:function(e){for(var t=0,n=this.nodes(),r=0;r0,d=u;u&&(c=c[0]);var f=d?c.position():{x:0,y:0};return i={x:l.x-f.x,y:l.y-f.y},void 0===e?i:i[e]}for(var p=0;p0,v=m;m&&(g=g[0]);var b=v?g.position():{x:0,y:0};void 0!==t?h.position(e,t+b[e]):void 0!==i&&h.position({x:i.x+b.x,y:i.y+b.y})}}else if(!a)return;return 
this}}).modelPosition=s.point=s.position,s.modelPositions=s.points=s.positions,s.renderedPoint=s.renderedPosition,s.relativePoint=s.relativePosition,e.exports=r},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(2),a=void 0,s=void 0;a=s={},s.renderedBoundingBox=function(e){var t=this.boundingBox(e),n=this.cy(),r=n.zoom(),i=n.pan(),o=t.x1*r+i.x,a=t.x2*r+i.x,s=t.y1*r+i.y,l=t.y2*r+i.y;return{x1:o,x2:a,y1:s,y2:l,w:a-o,h:l-s}},s.dirtyCompoundBoundsCache=function(){var e=this.cy();return e.styleEnabled()&&e.hasCompoundNodes()?(this.forEachUp((function(e){e._private.compoundBoundsClean=!1,e.isParent()&&e.emit("bounds")})),this):this},s.updateCompoundBounds=function(){var e=this.cy();if(!e.styleEnabled()||!e.hasCompoundNodes())return this;if(e.batching())return this;var t=[];function n(e){if(e.isParent()){var n=e._private,r=e.children(),i="include"===e.pstyle("compound-sizing-wrt-labels").value,o={width:{val:e.pstyle("min-width").pfValue,left:e.pstyle("min-width-bias-left"),right:e.pstyle("min-width-bias-right")},height:{val:e.pstyle("min-height").pfValue,top:e.pstyle("min-height-bias-top"),bottom:e.pstyle("min-height-bias-bottom")}},a=r.boundingBox({includeLabels:i,includeOverlays:!1,useCache:!1}),s=n.position;0!==a.w&&0!==a.h||((a={w:e.pstyle("width").pfValue,h:e.pstyle("height").pfValue}).x1=s.x-a.w/2,a.x2=s.x+a.w/2,a.y1=s.y-a.h/2,a.y2=s.y+a.h/2);var l=o.width.left.value;"px"===o.width.left.units&&o.width.val>0&&(l=100*l/o.width.val);var c=o.width.right.value;"px"===o.width.right.units&&o.width.val>0&&(c=100*c/o.width.val);var u=o.height.top.value;"px"===o.height.top.units&&o.height.val>0&&(u=100*u/o.height.val);var d=o.height.bottom.value;"px"===o.height.bottom.units&&o.height.val>0&&(d=100*d/o.height.val);var f=b(o.width.val-a.w,l,c),p=f.biasDiff,h=f.biasComplementDiff,g=b(o.height.val-a.h,u,d),m=g.biasDiff,v=g.biasComplementDiff;n.autoPadding=function(e,t,n,r){if("%"!==n.units)return"px"===n.units?n.pfValue:0;switch(r){case"width":return 
e>0?n.pfValue*e:0;case"height":return t>0?n.pfValue*t:0;case"average":return e>0&&t>0?n.pfValue*(e+t)/2:0;case"min":return e>0&&t>0?e>t?n.pfValue*t:n.pfValue*e:0;case"max":return e>0&&t>0?e>t?n.pfValue*e:n.pfValue*t:0;default:return 0}}(a.w,a.h,e.pstyle("padding"),e.pstyle("padding-relative-to").value),n.autoWidth=Math.max(a.w,o.width.val),s.x=(-p+a.x1+a.x2+h)/2,n.autoHeight=Math.max(a.h,o.height.val),s.y=(-m+a.y1+a.y2+v)/2,t.push(e)}function b(e,t,n){var r=0,i=0,o=t+n;return e>0&&o>0&&(r=t/o*e,i=n/o*e),{biasDiff:r,biasComplementDiff:i}}}for(var r=0;re.x2?r:e.x2,e.y1=ne.y2?i:e.y2)},u=function(e,t,n){return i.getPrefixedProperty(e,t,n)},d=function(e,t,n){if(!t.cy().headless()){var r=t._private.rstyle,i=r.arrowWidth/2,o=void 0,a=void 0;"none"!==t.pstyle(n+"-arrow-shape").value&&("source"===n?(o=r.srcX,a=r.srcY):"target"===n?(o=r.tgtX,a=r.tgtY):(o=r.midX,a=r.midY),c(e,o-i,a-i,o+i,a+i))}},f=function(e,t,n){if(!t.cy().headless()){var r=void 0;r=n?n+"-":"";var i=t._private,o=i.rstyle;if(t.pstyle(r+"label").strValue){var a=t.pstyle("text-halign"),s=t.pstyle("text-valign"),l=u(o,"labelWidth",n),d=u(o,"labelHeight",n),f=u(o,"labelX",n),p=u(o,"labelY",n),h=t.pstyle(r+"text-margin-x").pfValue,g=t.pstyle(r+"text-margin-y").pfValue,m=t.isEdge(),v=t.pstyle(r+"text-rotation"),b=t.pstyle("text-outline-width").pfValue,y=t.pstyle("text-border-width").pfValue/2,x=t.pstyle("text-background-padding").pfValue,w=d+2*x,k=l+2*x,A=k/2,E=w/2,S=void 0,$=void 0,C=void 0,_=void 0;if(m)S=f-A,$=f+A,C=p-E,_=p+E;else{switch(a.value){case"left":S=f-k,$=f;break;case"center":S=f-A,$=f+A;break;case"right":S=f,$=f+k}switch(s.value){case"top":C=p-w,_=p;break;case"center":C=p-E,_=p+E;break;case"bottom":C=p,_=p+w}}var O=m&&"autorotate"===v.strValue,j=null!=v.pfValue&&0!==v.pfValue;if(O||j){var 
T=O?u(i.rstyle,"labelAngle",n):v.pfValue,P=Math.cos(T),D=Math.sin(T),R=function(e,t){return{x:(e-=f)*P-(t-=p)*D+f,y:e*D+t*P+p}},I=R(S,C),N=R(S,_),M=R($,C),z=R($,_);S=Math.min(I.x,N.x,M.x,z.x),$=Math.max(I.x,N.x,M.x,z.x),C=Math.min(I.y,N.y,M.y,z.y),_=Math.max(I.y,N.y,M.y,z.y)}S+=h-Math.max(b,y),$+=h+Math.max(b,y),C+=g-Math.max(b,y),_+=g+Math.max(b,y),c(e,S,C,$,_)}return e}},p=function(e){return e?"t":"f"},h=function(e){var t="";return t+=p(e.incudeNodes),t+=p(e.includeEdges),t+=p(e.includeLabels),t+=p(e.includeOverlays)},g=function(e,t){var n=e._private,r=void 0,i=e.cy().headless(),a=t===m?v:h(t);return t.useCache&&!i&&n.bbCache&&n.bbCache[a]?r=n.bbCache[a]:(r=function(e,t){var n=e._private.cy,r=n.styleEnabled(),i=n.headless(),a={x1:1/0,y1:1/0,x2:-1/0,y2:-1/0},s=e._private,u=r?e.pstyle("display").value:"element",p=e.isNode(),h=e.isEdge(),g=void 0,m=void 0,v=void 0,b=void 0,y=void 0,x=void 0,w="none"!==u;if(w){var k=0;r&&t.includeOverlays&&0!==e.pstyle("overlay-opacity").value&&(k=e.pstyle("overlay-padding").value);var A=0;if(r&&(A=e.pstyle("width").pfValue/2),p&&t.includeNodes){var E=e.position();y=E.x,x=E.y;var S=e.outerWidth()/2,$=e.outerHeight()/2;c(a,g=y-S-k,v=x-$-k,m=y+S+k,b=x+$+k)}else if(h&&t.includeEdges){var C=s.rstyle||{};if(r&&!i&&(g=Math.min(C.srcX,C.midX,C.tgtX),m=Math.max(C.srcX,C.midX,C.tgtX),v=Math.min(C.srcY,C.midY,C.tgtY),b=Math.max(C.srcY,C.midY,C.tgtY),c(a,g-=A,v-=A,m+=A,b+=A)),r&&!i&&"haystack"===e.pstyle("curve-style").strValue){var _=C.haystackPts||[];if(g=_[0].x,v=_[0].y,g>(m=_[1].x)){var O=g;g=m,m=O}if(v>(b=_[1].y)){var j=v;v=b,b=j}c(a,g-A,v-A,m+A,b+A)}else{for(var T=C.bezierPts||C.linePts||[],P=0;P(m=I.x)){var N=g;g=m,m=N}if((v=R.y)>(b=I.y)){var M=v;v=b,b=M}c(a,g-=A,v-=A,m+=A,b+=A)}}}if(r&&t.includeEdges&&h&&(d(a,e,"mid-source"),d(a,e,"mid-target"),d(a,e,"source"),d(a,e,"target")),r&&"yes"===e.pstyle("ghost").value){var 
z=e.pstyle("ghost-offset-x").pfValue,L=e.pstyle("ghost-offset-y").pfValue;c(a,a.x1+z,a.y1+L,a.x2+z,a.y2+L)}r&&(g=a.x1,m=a.x2,v=a.y1,b=a.y2,c(a,g-k,v-k,m+k,b+k)),r&&t.includeLabels&&(f(a,e,null),h&&(f(a,e,"source"),f(a,e,"target")))}return a.x1=l(a.x1),a.y1=l(a.y1),a.x2=l(a.x2),a.y2=l(a.y2),a.w=l(a.x2-a.x1),a.h=l(a.y2-a.y1),a.w>0&&a.h>0&&w&&o.expandBoundingBox(a,1),a}(e,t),i||(n.bbCache=n.bbCache||{},n.bbCache[a]=r)),r},m={includeNodes:!0,includeEdges:!0,includeLabels:!0,includeOverlays:!0,useCache:!0},v=h(m);function b(e){return{includeNodes:i.default(e.includeNodes,m.includeNodes),includeEdges:i.default(e.includeEdges,m.includeEdges),includeLabels:i.default(e.includeLabels,m.includeLabels),includeOverlays:i.default(e.includeOverlays,m.includeOverlays),useCache:i.default(e.useCache,m.useCache)}}s.boundingBox=function(e){if(1===this.length&&this[0]._private.bbCache&&(void 0===e||void 0===e.useCache||!0===e.useCache))return e=void 0===e?m:b(e),g(this[0],e);var t={x1:1/0,y1:1/0,x2:-1/0,y2:-1/0},n=b(e=e||i.staticEmptyObject()),r=this.cy().styleEnabled();r&&this.recalculateRenderedStyle(n.useCache),this.updateCompoundBounds();for(var o,a,s={},u=0;u1&&!a){var s=this.length-1,l=this[s],c=l._private.data.id;this[s]=void 0,this[o]=l,r.set(c,{ele:l,index:o})}return this.length--,this},unmerge:function(e){var t=this._private.cy;if(!e)return this;if(e&&r.string(e)){var n=e;e=t.mutableElements().filter(n)}for(var i=0;in&&(n=a,r=o)}return{value:n,ele:r}},min:function(e,t){for(var n=1/0,r=void 0,i=0;i=0&&i0&&t.push(u[0]),t.push(s[0])}return this.spawn(t,{unique:!0}).filter(e)}),"neighborhood"),closedNeighborhood:function(e){return this.neighborhood().add(this).filter(e)},openNeighborhood:function(e){return this.neighborhood(e)}}),o.neighbourhood=o.neighborhood,o.closedNeighbourhood=o.closedNeighborhood,o.openNeighbourhood=o.openNeighborhood,r.extend(o,{source:a((function(e){var t=this[0],n=void 0;return 
t&&(n=t._private.source||t.cy().collection()),n&&e?n.filter(e):n}),"source"),target:a((function(e){var t=this[0],n=void 0;return t&&(n=t._private.target||t.cy().collection()),n&&e?n.filter(e):n}),"target"),sources:u({attr:"source"}),targets:u({attr:"target"})}),r.extend(o,{edgesWith:a(d(),"edgesWith"),edgesTo:a(d({thisIsSrc:!0}),"edgesTo")}),r.extend(o,{connectedEdges:a((function(e){for(var t=[],n=0;n0);return i.map((function(e){var t=e.connectedEdges().stdFilter((function(t){return e.anySame(t.source())&&e.anySame(t.target())}));return e.union(t)}))}}),e.exports=o},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(7),a=n(14),s={add:function(e){var t=void 0,n=this;if(r.elementOrCollection(e)){var s=e;if(s._private.cy===n)t=s.restore();else{for(var l=[],c=0;c=0;t--)(0,e[t])();e.splice(0,e.length)},p=s.length-1;p>=0;p--){var h=s[p],g=h._private;g.stopped?(s.splice(p,1),g.hooked=!1,g.playing=!1,g.started=!1,f(g.frames)):(g.playing||g.applying)&&(g.playing&&g.applying&&(g.applying=!1),g.started||i(t,h,e,n),r(t,h,e,n),g.applying&&(g.applying=!1),f(g.frames),h.completed()&&(s.splice(p,1),g.hooked=!1,g.playing=!1,g.started=!1,f(g.completes)),c=!0)}return n||0!==s.length||0!==l.length||o.push(t),c}for(var s=!1,l=0;l0?(n.dirtyCompoundBoundsCache(),t.notify({type:"draw",eles:n})):t.notify({type:"draw"})),n.unmerge(o),t.emit("step")}},function(e,t,n){"use strict";var r=n(73),i=n(76),o=n(0);function a(e,t){return!!(null!=e&&null!=t&&(o.number(e)&&o.number(t)||e&&t))}e.exports=function(e,t,n,s){var l=!s,c=e._private,u=t._private,d=u.easing,f=u.startTime,p=(s?e:e.cy()).style();if(!u.easingImpl)if(null==d)u.easingImpl=r.linear;else{var h=void 0;h=o.string(d)?p.parse("transition-timing-function",d).value:d;var g=void 0,m=void 0;o.string(h)?(g=h,m=[]):(g=h[1],m=h.slice(2).map((function(e){return+e}))),m.length>0?("spring"===g&&m.push(u.duration),u.easingImpl=r[g].apply(null,m)):u.easingImpl=r[g]}var v=u.easingImpl,b=void 
0;if(b=0===u.duration?1:(n-f)/u.duration,u.applying&&(b=u.progress),b<0?b=0:b>1&&(b=1),null==u.delay){var y=u.startPosition,x=u.position;if(x&&l&&!e.locked()){var w=e.position();a(y.x,x.x)&&(w.x=i(y.x,x.x,b,v)),a(y.y,x.y)&&(w.y=i(y.y,x.y,b,v)),e.emit("position")}var k=u.startPan,A=u.pan,E=c.pan,S=null!=A&&s;S&&(a(k.x,A.x)&&(E.x=i(k.x,A.x,b,v)),a(k.y,A.y)&&(E.y=i(k.y,A.y,b,v)),e.emit("pan"));var $=u.startZoom,C=u.zoom,_=null!=C&&s;_&&(a($,C)&&(c.zoom=i($,C,b,v)),e.emit("zoom")),(S||_)&&e.emit("viewport");var O=u.style;if(O&&O.length>0&&l){for(var j=0;j0?i=l:r=l}while(Math.abs(o)>a&&++c=o?b(t,s):0===u?s:x(t,r,r+c)}var k=!1;function A(){k=!0,e===t&&n===r||y()}var E=function(i){return k||A(),e===t&&n===r?i:0===i?0:1===i?1:m(w(i),t,r)};E.getControlPoints=function(){return[{x:e,y:t},{x:n,y:r}]};var S="generateBezier("+[e,t,n,r]+")";return E.toString=function(){return S},E}},function(e,t,n){"use strict"; -/*! Runge-Kutta spring physics function generator. Adapted from Framer.js, copyright Koen Bok. 
MIT License: http://en.wikipedia.org/wiki/MIT_License */var r=function(){function e(e){return-e.tension*e.x-e.friction*e.v}function t(t,n,r){var i={x:t.x+r.dx*n,v:t.v+r.dv*n,tension:t.tension,friction:t.friction};return{dx:i.v,dv:e(i)}}function n(n,r){var i={dx:n.v,dv:e(n)},o=t(n,.5*r,i),a=t(n,.5*r,o),s=t(n,r,a),l=1/6*(i.dx+2*(o.dx+a.dx)+s.dx),c=1/6*(i.dv+2*(o.dv+a.dv)+s.dv);return n.x=n.x+l*r,n.v=n.v+c*r,n}return function e(t,r,i){var o,a={x:-1,v:0,tension:null,friction:null},s=[0],l=0,c=void 0,u=void 0;for(t=parseFloat(t)||500,r=parseFloat(r)||20,i=i||null,a.tension=t,a.friction=r,c=(o=null!==i)?(l=e(t,r))/i*.016:.016;u=n(u||a,c),s.push(1+u.x),l+=16,Math.abs(u.x)>1e-4&&Math.abs(u.v)>1e-4;);return o?function(e){return s[e*(s.length-1)|0]}:l}}();e.exports=r},function(e,t,n){"use strict";var r=n(0);function i(e,t,n,r,i){if(1===r)return n;var o=i(t,n,r);return null==e||((e.roundValue||e.color)&&(o=Math.round(o)),void 0!==e.min&&(o=Math.max(o,e.min)),void 0!==e.max&&(o=Math.min(o,e.max))),o}function o(e,t){return null!=e.pfValue||null!=e.value?null==e.pfValue||null!=t&&"%"===t.type.units?e.value:e.pfValue:e}e.exports=function(e,t,n,a,s){var l=null!=s?s.type:null;n<0?n=0:n>1&&(n=1);var c=o(e,s),u=o(t,s);if(r.number(c)&&r.number(u))return i(l,c,u,n,a);if(r.array(c)&&r.array(u)){for(var d=[],f=0;f0},startBatch:function(){var e=this._private;return null==e.batchCount&&(e.batchCount=0),0===e.batchCount&&(e.batchingStyle=e.batchingNotify=!0,e.batchStyleEles=this.collection(),e.batchNotifyEles=this.collection(),e.batchNotifyTypes=[],e.batchNotifyTypes.ids={}),e.batchCount++,this},endBatch:function(){var e=this._private;return e.batchCount--,0===e.batchCount&&(e.batchingStyle=!1,e.batchStyleEles.updateStyle(),e.batchingNotify=!1,this.notify({type:e.batchNotifyTypes,eles:e.batchNotifyEles})),this},batch:function(e){return this.startBatch(),e(),this.endBatch(),this},batchData:function(e){var t=this;return this.batch((function(){for(var 
n=Object.keys(e),r=0;r0;)e.removeChild(e.childNodes[0]);this._private.renderer=null},onRender:function(e){return this.on("render",e)},offRender:function(e){return this.off("render",e)}};i.invalidateDimensions=i.resize,e.exports=i},function(e,t,n){"use strict";var r=n(0),i=n(7),o={collection:function(e,t){return r.string(e)?this.$(e):r.elementOrCollection(e)?e.collection():r.array(e)?new i(this,e,t):new i(this)},nodes:function(e){var t=this.$((function(e){return e.isNode()}));return e?t.filter(e):t},edges:function(e){var t=this.$((function(e){return e.isEdge()}));return e?t.filter(e):t},$:function(e){var t=this._private.elements;return e?t.filter(e):t.spawnSelf()},mutableElements:function(){return this._private.elements}};o.elements=o.filter=o.$,e.exports=o},function(e,t,n){"use strict";var r=n(0),i=n(18),o={style:function(e){return e&&this.setStyle(e).update(),this._private.style},setStyle:function(e){var t=this._private;return r.stylesheet(e)?t.style=e.generateStyle(this):r.array(e)?t.style=i.fromJson(this,e):r.string(e)?t.style=i.fromString(this,e):t.style=i(this),t.style}};e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(5),a={apply:function(e){var t=this._private,n=t.cy.collection();t.newStyle&&(t.contextStyles={},t.propDiffs={},this.cleanElements(e,!0));for(var r=0;r0;if(c||u){var d=void 0;c&&u||c?d=l.properties:u&&(d=l.mappedProperties);for(var f=0;f0){n=!0;break}t.hasPie=n;var 
i=e.pstyle("text-transform").strValue,o=e.pstyle("label").strValue,a=e.pstyle("source-label").strValue,s=e.pstyle("target-label").strValue,l=e.pstyle("font-style").strValue,c=e.pstyle("font-size").pfValue+"px",u=e.pstyle("font-family").strValue,d=e.pstyle("font-weight").strValue,f=l+"$"+c+"$"+u+"$"+d+"$"+i+"$"+e.pstyle("text-valign").strValue+"$"+e.pstyle("text-valign").strValue+"$"+e.pstyle("text-outline-width").pfValue+"$"+e.pstyle("text-wrap").strValue+"$"+e.pstyle("text-max-width").pfValue;t.labelStyleKey=f,t.sourceLabelKey=f+"$"+a,t.targetLabelKey=f+"$"+s,t.labelKey=f+"$"+o,t.fontKey=l+"$"+d+"$"+c+"$"+u,t.styleKey=Date.now()}},applyParsedProperty:function(e,t){var n=this,o=t,a=e._private.style,s=void 0,l=n.types,c=n.properties[o.name].type,u=o.bypass,d=a[o.name],f=d&&d.bypass,p=e._private,h=function(){n.checkZOrderTrigger(e,o.name,d?d.value:null,o.value)};if("curve-style"===t.name&&"haystack"===t.value&&e.isEdge()&&(e.isLoop()||e.source().isParent()||e.target().isParent())&&(o=t=this.parse(t.name,"bezier",u)),o.delete)return a[o.name]=void 0,h(),!0;if(o.deleteBypassed)return d?!!d.bypass&&(d.bypassed=void 0,h(),!0):(h(),!0);if(o.deleteBypass)return d?!!d.bypass&&(a[o.name]=d.bypassed,h(),!0):(h(),!0);var g=function(){r.error("Do not assign mappings to elements without corresponding data (e.g. 
ele `"+e.id()+"` for property `"+o.name+"` with data field `"+o.field+"`); try a `["+o.field+"]` selector to limit scope to elements with `"+o.field+"` defined")};switch(o.mapped){case l.mapData:for(var m=o.field.split("."),v=p.data,b=0;b1&&(y=1),c.color){var x=o.valueMin[0],w=o.valueMax[0],k=o.valueMin[1],A=o.valueMax[1],E=o.valueMin[2],S=o.valueMax[2],$=null==o.valueMin[3]?1:o.valueMin[3],C=null==o.valueMax[3]?1:o.valueMax[3],_=[Math.round(x+(w-x)*y),Math.round(k+(A-k)*y),Math.round(E+(S-E)*y),Math.round($+(C-$)*y)];s={bypass:o.bypass,name:o.name,value:_,strValue:"rgb("+_[0]+", "+_[1]+", "+_[2]+")"}}else{if(!c.number)return!1;var O=o.valueMin+(o.valueMax-o.valueMin)*y;s=this.parse(o.name,O,o.bypass,"mapping")}s||(s=this.parse(o.name,d.strValue,o.bypass,"mapping")),s||g(),s.mapping=o,o=s;break;case l.data:var j=o.field.split("."),T=p.data;if(T)for(var P=0;P0&&l>0){for(var u={},d=!1,f=0;f0?e.delayAnimation(c).play().promise().then(t):t()})).then((function(){return e.animation({style:u,duration:l,easing:e.pstyle("transition-timing-function").value,queue:!1}).play().promise()})).then((function(){r.removeBypasses(e,s),e.emitAndNotify("style"),a.transitioning=!1}))}else a.transitioning&&(this.removeBypasses(e,s),e.emitAndNotify("style"),a.transitioning=!1)},checkZOrderTrigger:function(e,t,n,r){var i=this.properties[t];null==i.triggersZOrder||null!=n&&!i.triggersZOrder(n,r)||this._private.cy.notify({type:"zorder",eles:e})}};e.exports=a},function(e,t,n){"use strict";var r=n(0),i=n(1),o={applyBypass:function(e,t,n,o){var a=[];if("*"===t||"**"===t){if(void 0!==n)for(var s=0;sn.length?t.substr(n.length):""}function l(){o=o.length>a.length?o.substr(a.length):""}for(t=t.replace(/[/][*](\s|.)+?[*][/]/g,"");!t.match(/^\s*$/);){var c=t.match(/^\s*((?:.|\s)+?)\s*\{((?:.|\s)+?)\}/);if(!c){r.error("Halting stylesheet parsing: String stylesheet contains more to parse but no selector and block found in: "+t);break}n=c[0];var u=c[1];if("core"!==u&&new 
i(u)._private.invalid)r.error("Skipping parsing of block: Invalid selector found in string stylesheet: "+u),s();else{var d=c[2],f=!1;o=d;for(var p=[];!o.match(/^\s*$/);){var h=o.match(/^\s*(.+?)\s*:\s*(.+?)\s*;/);if(!h){r.error("Skipping parsing of block: Invalid formatting of style property and value definitions found in:"+d),f=!0;break}a=h[0];var g=h[1],m=h[2];this.properties[g]?this.parse(g,m)?(p.push({name:g,val:m}),l()):(r.error("Skipping property: Invalid property definition in: "+a),l()):(r.error("Skipping property: Invalid property name in: "+a),l())}if(f){s();break}this.selector(u);for(var v=0;v node").css({shape:"rectangle",padding:10,"background-color":"#eee","border-color":"#ccc","border-width":1}).selector("edge").css({width:3,"curve-style":"haystack"}).selector(":parent <-> node").css({"curve-style":"bezier","source-endpoint":"outside-to-line","target-endpoint":"outside-to-line"}).selector(":selected").css({"background-color":"#0169D9","line-color":"#0169D9","source-arrow-color":"#0169D9","target-arrow-color":"#0169D9","mid-source-arrow-color":"#0169D9","mid-target-arrow-color":"#0169D9"}).selector("node:parent:selected").css({"background-color":"#CCE1F9","border-color":"#aec8e5"}).selector(":active").css({"overlay-color":"black","overlay-padding":10,"overlay-opacity":.25}).selector("core").css({"selection-box-color":"#ddd","selection-box-opacity":.65,"selection-box-border-color":"#aaa","selection-box-border-width":1,"active-bg-color":"black","active-bg-opacity":.15,"active-bg-size":30,"outside-texture-bg-color":"#000","outside-texture-bg-opacity":.125}),this.defaultLength=this.length},e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(2),a={parse:function(e,t,n,o){if(i.fn(t))return this.parseImplWarn(e,t,n,o);var a=[e,t,n,"mapping"===o||!0===o||!1===o||null==o?"dontcare":o].join("$"),s=this.propCache=this.propCache||{},l=void 
0;return(l=s[a])||(l=s[a]=this.parseImplWarn(e,t,n,o)),(n||"mapping"===o)&&(l=r.copy(l))&&(l.value=r.copy(l.value)),l},parseImplWarn:function(e,t,n,i){var o=this.parseImpl(e,t,n,i);return o||null==t||r.error("The style property `%s: %s` is invalid",e,t),o},parseImpl:function(e,t,n,a){e=r.camel2dash(e);var s=this.properties[e],l=t,c=this.types;if(!s)return null;if(void 0===t)return null;s.alias&&(s=s.pointsTo,e=s.name);var u=i.string(t);u&&(t=t.trim());var d=s.type;if(!d)return null;if(n&&(""===t||null===t))return{name:e,value:t,bypass:!0,deleteBypass:!0};if(i.fn(t))return{name:e,value:t,strValue:"fn",mapped:c.fn,bypass:n};var f=void 0,p=void 0;if(!u||a);else{if(f=new RegExp(c.data.regex).exec(t)){if(n)return!1;var h=c.data;return{name:e,value:f,strValue:""+t,mapped:h,field:f[1],bypass:n}}if(p=new RegExp(c.mapData.regex).exec(t)){if(n)return!1;if(d.multiple)return!1;var g=c.mapData;if(!d.color&&!d.number)return!1;var m=this.parse(e,p[4]);if(!m||m.mapped)return!1;var v=this.parse(e,p[5]);if(!v||v.mapped)return!1;if(m.value===v.value)return!1;if(d.color){var b=m.value,y=v.value;if(!(b[0]!==y[0]||b[1]!==y[1]||b[2]!==y[2]||b[3]!==y[3]&&(null!=b[3]&&1!==b[3]||null!=y[3]&&1!==y[3])))return!1}return{name:e,value:p,strValue:""+t,mapped:g,field:p[1],fieldMin:parseFloat(p[2]),fieldMax:parseFloat(p[3]),valueMin:m.value,valueMax:v.value,bypass:n}}}if(d.multiple&&"multiple"!==a){var x=void 0;if(x=u?t.split(/\s+/):i.array(t)?t:[t],d.evenMultiple&&x.length%2!=0)return null;for(var w=[],k=[],A=[],E=!1,S=0;Sd.max||d.strictMax&&t===d.max))return null;var P={name:e,value:t,strValue:""+t+(_||""),units:_,bypass:n};return d.unitless||"px"!==_&&"em"!==_?P.pfValue=t:P.pfValue="px"!==_&&_?this.getEmSizeInPixels()*t:t,"ms"!==_&&"s"!==_||(P.pfValue="ms"===_?t:1e3*t),"deg"!==_&&"rad"!==_||(P.pfValue="rad"===_?t:o.deg2rad(t)),"%"===_&&(P.pfValue=t/100),P}if(d.propList){var D=[],R=""+t;if("none"===R);else{for(var 
I=R.split(","),N=0;N0&&s>0&&!isNaN(n.w)&&!isNaN(n.h)&&n.w>0&&n.h>0)return{zoom:l=(l=(l=Math.min((a-2*t)/n.w,(s-2*t)/n.h))>this._private.maxZoom?this._private.maxZoom:l)t.maxZoom?t.maxZoom:s)t.maxZoom||!t.zoomingEnabled?a=!0:(t.zoom=l,o.push("zoom"))}if(i&&(!a||!e.cancelOnFailedZoom)&&t.panningEnabled){var c=e.pan;r.number(c.x)&&(t.pan.x=c.x,s=!1),r.number(c.y)&&(t.pan.y=c.y,s=!1),s||o.push("pan")}return o.length>0&&(o.push("viewport"),this.emit(o.join(" ")),this.notify({type:"viewport"})),this},center:function(e){var t=this.getCenterPan(e);return t&&(this._private.pan=t,this.emit("pan viewport"),this.notify({type:"viewport"})),this},getCenterPan:function(e,t){if(this._private.panningEnabled){if(r.string(e)){var n=e;e=this.mutableElements().filter(n)}else r.elementOrCollection(e)||(e=this.mutableElements());if(0!==e.length){var i=e.boundingBox(),o=this.width(),a=this.height();return{x:(o-(t=void 0===t?this._private.zoom:t)*(i.x1+i.x2))/2,y:(a-t*(i.y1+i.y2))/2}}}},reset:function(){return this._private.panningEnabled&&this._private.zoomingEnabled?(this.viewport({pan:{x:0,y:0},zoom:1}),this):this},invalidateSize:function(){this._private.sizeCache=null},size:function(){var e,t,n=this._private,r=n.container;return n.sizeCache=n.sizeCache||(r?(e=i.getComputedStyle(r),t=function(t){return parseFloat(e.getPropertyValue(t))},{width:r.clientWidth-t("padding-left")-t("padding-right"),height:r.clientHeight-t("padding-top")-t("padding-bottom")}):{width:1,height:1})},width:function(){return this.size().width},height:function(){return this.size().height},extent:function(){var e=this._private.pan,t=this._private.zoom,n=this.renderedExtent(),r={x1:(n.x1-e.x)/t,x2:(n.x2-e.x)/t,y1:(n.y1-e.y)/t,y2:(n.y2-e.y)/t};return r.w=r.x2-r.x1,r.h=r.y2-r.y1,r},renderedExtent:function(){var e=this.width(),t=this.height();return{x1:0,y1:0,x2:e,y2:t,w:e,h:t}}};a.centre=a.center,a.autolockNodes=a.autolock,a.autoungrabifyNodes=a.autoungrabify,e.exports=a},function(e,t,n){"use strict";var 
r=n(1),i=n(4),o=n(7),a=n(12),s=n(95),l=n(0),c=n(11),u={},d={};function f(e,t,n){var s=n,d=function(n){r.error("Can not register `"+t+"` for `"+e+"` since `"+n+"` already exists in the prototype and can not be overridden")};if("core"===e){if(a.prototype[t])return d(t);a.prototype[t]=n}else if("collection"===e){if(o.prototype[t])return d(t);o.prototype[t]=n}else if("layout"===e){for(var f=function(e){this.options=e,n.call(this,e),l.plainObject(this._private)||(this._private={}),this._private.cy=e.cy,this._private.listeners=[],this.createEmitter()},h=f.prototype=Object.create(n.prototype),g=[],m=0;m0;)m();c=n.collection();for(var v=function(e){var t=h[e],n=t.maxDegree(!1),r=t.filter((function(e){return e.degree(!1)===n}));c=c.add(r)},b=0;by.length-1;)y.push([]);y[J].push(X),Z.depth=J,Z.index=y[J].length-1}N()}var K=0;if(t.avoidOverlap)for(var ee=0;eec||0===t)&&(r+=l/u,i++)}return r/=i=Math.max(1,i),0===i&&(r=void 0),ie[e.id()]=r,r},ae=function(e,t){return oe(e)-oe(t)},se=0;se<3;se++){for(var le=0;le0&&y[0].length<=3?u/2:0),f=2*Math.PI/y[i].length*o;return 0===i&&1===y[0].length&&(d=1),{x:de+d*Math.cos(f),y:fe+d*Math.sin(f)}}return{x:de+(o+1-(a+1)/2)*s,y:(i+1)*c}}var p={x:de+(o+1-(a+1)/2)*s,y:(i+1)*c};return p},he={},ge=y.length-1;ge>=0;ge--)for(var me=y[ge],ve=0;ve1&&t.avoidOverlap){p*=1.75;var b=Math.cos(d)-Math.cos(0),y=Math.sin(d)-Math.sin(0),x=Math.sqrt(p*p/(b*b+y*y));f=Math.max(x,f)}return s.layoutPositions(this,t,(function(e,n){var r=t.startAngle+n*d*(a?1:-1),i=f*Math.cos(r),o=f*Math.sin(r);return{x:c+i,y:u+o}})),this},e.exports=s},function(e,t,n){"use strict";var r=n(1),i=n(2),o={fit:!0,padding:30,startAngle:1.5*Math.PI,sweep:void 0,clockwise:!0,equidistant:!1,minNodeSpacing:10,boundingBox:void 0,avoidOverlap:!0,nodeDimensionsIncludeLabels:!1,height:void 0,width:void 0,spacingFactor:void 0,concentric:function(e){return e.degree()},levelWidth:function(e){return e.maxDegree()/4},animate:!1,animationDuration:500,animationEasing:void 
0,animateFilter:function(e,t){return!0},ready:void 0,stop:void 0,transform:function(e,t){return t}};function a(e){this.options=r.extend({},o,e)}a.prototype.run=function(){for(var e=this.options,t=e,n=void 0!==t.counterclockwise?!t.counterclockwise:t.clockwise,r=e.cy,o=t.eles.nodes().not(":parent"),a=i.makeBoundingBox(t.boundingBox?t.boundingBox:{x1:0,y1:0,w:r.width(),h:r.height()}),s=a.x1+a.w/2,l=a.y1+a.h/2,c=[],u=(t.startAngle,0),d=0;d0&&Math.abs(b[0].value-x.value)>=m&&(b=[],v.push(b)),b.push(x)}var w=u+t.minNodeSpacing;if(!t.avoidOverlap){var k=v.length>0&&v[0].length>1,A=(Math.min(a.w,a.h)/2-w)/(v.length+k?1:0);w=Math.min(w,A)}for(var E=0,S=0;S1&&t.avoidOverlap){var O=Math.cos(_)-Math.cos(0),j=Math.sin(_)-Math.sin(0),T=Math.sqrt(w*w/(O*O+j*j));E=Math.max(T,E)}$.r=E,E+=w}if(t.equidistant){for(var P=0,D=0,R=0;R0)var c=(f=r.nodeOverlap*s)*i/(b=Math.sqrt(i*i+o*o)),d=f*o/b;else{var f,p=u(e,i,o),h=u(t,-1*i,-1*o),g=h.x-p.x,m=h.y-p.y,v=g*g+m*m,b=Math.sqrt(v);c=(f=(e.nodeRepulsion+t.nodeRepulsion)/v)*g/b,d=f*m/b}e.isLocked||(e.offsetX-=c,e.offsetY-=d),t.isLocked||(t.offsetX+=c,t.offsetY+=d)}},l=function(e,t,n,r){if(n>0)var i=e.maxX-t.minX;else i=t.maxX-e.minX;if(r>0)var o=e.maxY-t.minY;else o=t.maxY-e.minY;return i>=0&&o>=0?Math.sqrt(i*i+o*o):0},u=function(e,t,n){var r=e.positionX,i=e.positionY,o=e.height||1,a=e.width||1,s=n/t,l=o/a,c={};return 0===t&&0n?(c.x=r,c.y=i+o/2,c):0t&&-1*l<=s&&s<=l?(c.x=r-a/2,c.y=i-a*n/2/t,c):0=l)?(c.x=r+o*t/2/n,c.y=i+o/2,c):0>n&&(s<=-1*l||s>=l)?(c.x=r-o*t/2/n,c.y=i-o/2,c):c},d=function(e,t){for(var n=0;n1){var h=t.gravity*d/p,g=t.gravity*f/p;u.offsetX+=h,u.offsetY+=g}}}}},p=function(e,t){var n=[],r=0,i=-1;for(n.push.apply(n,e.graphSet[0]),i+=e.graphSet[0].length;r<=i;){var o=n[r++],a=e.idToIndex[o],s=e.layoutNodes[a],l=s.children;if(0n)var i={x:n*e/r,y:n*t/r};else i={x:e,y:t};return i},m=function e(t,n){var r=t.parentId;if(null!=r){var 
i=n.layoutNodes[n.idToIndex[r]],o=!1;return(null==i.maxX||t.maxX+i.padRight>i.maxX)&&(i.maxX=t.maxX+i.padRight,o=!0),(null==i.minX||t.minX-i.padLefti.maxY)&&(i.maxY=t.maxY+i.padBottom,o=!0),(null==i.minY||t.minY-i.padTopg&&(f+=h+t.componentSpacing,d=0,p=0,h=0)}}}(0,i),r})).then((function(e){d.layoutNodes=e.layoutNodes,o.stop(),b()}));var b=function(){!0===e.animate||!1===e.animate?v({force:!0,next:function(){n.one("layoutstop",e.stop),n.emit({type:"layoutstop",layout:n})}}):e.eles.nodes().layoutPositions(n,e,(function(e){var t=d.layoutNodes[d.idToIndex[e.data("id")]];return{x:t.positionX,y:t.positionY}}))};return this},c.prototype.stop=function(){return this.stopped=!0,this.thread&&this.thread.stop(),this.emit("layoutstop"),this},c.prototype.destroy=function(){return this.thread&&this.thread.stop(),this};var u=function(e,t,n){for(var r=n.eles.edges(),i=n.eles.nodes(),s={isCompound:e.hasCompoundNodes(),layoutNodes:[],idToIndex:{},nodeSize:i.size(),graphSet:[],indexToGraph:[],layoutEdges:[],edgeSize:r.size(),temperature:n.initialTemp,clientWidth:e.width(),clientHeight:e.width(),boundingBox:o.makeBoundingBox(n.boundingBox?n.boundingBox:{x1:0,y1:0,w:e.width(),h:e.height()})},l=n.eles.components(),c={},u=0;u0)for(s.graphSet.push(A),u=0;ur.count?0:r.graph},f=function e(t,n,r,i){var o=i.graphSet[r];if(-1a){var h=u(),g=d();(h-1)*g>=a?u(h-1):(g-1)*h>=a&&d(g-1)}else for(;c*l=a?d(v+1):u(m+1)}var b=o.w/c,y=o.h/l;if(t.condense&&(b=0,y=0),t.avoidOverlap)for(var x=0;x=c&&(T=0,j++)},D={},R=0;R(r=i.sqdistToFiniteLine(e,t,w[k],w[k+1],w[k+2],w[k+3])))return b(n,r),!0}else if("bezier"===a.edgeType||"multibezier"===a.edgeType||"self"===a.edgeType||"compound"===a.edgeType)for(w=a.allpts,k=0;k+5(r=i.sqdistToQuadraticBezier(e,t,w[k],w[k+1],w[k+2],w[k+3],w[k+4],w[k+5])))return b(n,r),!0;v=v||o.source,x=x||o.target;var 
A=l.getArrowWidth(s,u),E=[{name:"source",x:a.arrowStartX,y:a.arrowStartY,angle:a.srcArrowAngle},{name:"target",x:a.arrowEndX,y:a.arrowEndY,angle:a.tgtArrowAngle},{name:"mid-source",x:a.midX,y:a.midY,angle:a.midsrcArrowAngle},{name:"mid-target",x:a.midX,y:a.midY,angle:a.midtgtArrowAngle}];for(k=0;k0&&(y(v),y(x))}function w(e,t,n){return o.getPrefixedProperty(e,t,n)}function k(n,r){var o,a=n._private,s=m;o=r?r+"-":"";var l=n.pstyle(o+"label").value;if("yes"===n.pstyle("text-events").strValue&&l){var c=a.rstyle,u=n.pstyle("text-border-width").pfValue,d=n.pstyle("text-background-padding").pfValue,f=w(c,"labelWidth",r)+u+2*s+2*d,p=w(c,"labelHeight",r)+u+2*s+2*d,h=w(c,"labelX",r),g=w(c,"labelY",r),v=w(a.rscratch,"labelAngle",r),y=h-f/2,x=h+f/2,k=g-p/2,A=g+p/2;if(v){var E=Math.cos(v),S=Math.sin(v),$=function(e,t){return{x:(e-=h)*E-(t-=g)*S+h,y:e*S+t*E+g}},C=$(y,k),_=$(y,A),O=$(x,k),j=$(x,A),T=[C.x,C.y,O.x,O.y,j.x,j.y,_.x,_.y];if(i.pointInsidePolygonPoints(e,t,T))return b(n),!0}else{var P={w:f,h:p,x1:y,x2:x,y1:k,y2:A};if(i.inBoundingBox(P,e,t))return b(n),!0}}}n&&(u=u.interactive);for(var A=u.length-1;A>=0;A--){var E=u[A];E.isNode()?y(E)||k(E):x(E)||k(E)||k(E,"source")||k(E,"target")}return d},getAllInBox:function(e,t,n,r){var o=this.getCachedZSortedEles().interactive,a=[],s=Math.min(e,n),l=Math.max(e,n),c=Math.min(t,r),u=Math.max(t,r);e=s,n=l,t=c,r=u;for(var d=i.makeBoundingBox({x1:e,y1:t,x2:n,y2:r}),f=0;fb?b+"$-$"+v:v+"$-$"+b,g&&(t="unbundled$-$"+h.id);var y=u[t];null==y&&(y=u[t]=[],d.push(t)),y.push(Bt),g&&(y.hasUnbundled=!0),m&&(y.hasBezier=!0)}else f.push(Bt)}for(var x=0;xGt.id()){var k=Ht;Ht=Gt,Gt=k}Wt=Ht.position(),Yt=Gt.position(),Xt=Ht.outerWidth(),Qt=Ht.outerHeight(),Zt=Gt.outerWidth(),Jt=Gt.outerHeight(),n=l.nodeShapes[this.getNodeShape(Ht)],o=l.nodeShapes[this.getNodeShape(Gt)],s=!1;var 
A={north:0,west:0,south:0,east:0,northwest:0,southwest:0,northeast:0,southeast:0},E=Wt.x,S=Wt.y,$=Xt,C=Qt,_=Yt.x,O=Yt.y,j=Zt,T=Jt,P=w.length;for(p=0;p=d||w){p={cp:b,segment:x};break}}if(p)break}b=p.cp;var k=(d-g)/(x=p.segment).length,A=x.t1-x.t0,E=u?x.t0+A*k:x.t1-A*k;E=r.bound(0,E,1),t=r.qbezierPtAt(b.p0,b.p1,b.p2,E),c=function(e,t,n,i){var o=r.bound(0,i-.001,1),a=r.bound(0,i+.001,1),s=r.qbezierPtAt(e,t,n,o),l=r.qbezierPtAt(e,t,n,a);return f(s,l)}(b.p0,b.p1,b.p2,E);break;case"straight":case"segments":case"haystack":var S,$,C,_,O=0,j=i.allpts.length;for(v=0;v+3=d));v+=2);E=(d-$)/S,E=r.bound(0,E,1),t=r.lineAt(C,_,E),c=f(C,_)}l("labelX",o,t.x),l("labelY",o,t.y),l("labelAutoAngle",o,c)}};c("source"),c("target"),this.applyLabelDimensions(e)}},applyLabelDimensions:function(e){this.applyPrefixedLabelDimensions(e),e.isEdge()&&(this.applyPrefixedLabelDimensions(e,"source"),this.applyPrefixedLabelDimensions(e,"target"))},applyPrefixedLabelDimensions:function(e,t){var n=e._private,r=this.getLabelText(e,t),i=this.calculateLabelDimensions(e,r);o.setPrefixedProperty(n.rstyle,"labelWidth",t,i.width),o.setPrefixedProperty(n.rscratch,"labelWidth",t,i.width),o.setPrefixedProperty(n.rstyle,"labelHeight",t,i.height),o.setPrefixedProperty(n.rscratch,"labelHeight",t,i.height)},getLabelText:function(e,t){var n=e._private,r=t?t+"-":"",i=e.pstyle(r+"label").strValue,a=e.pstyle("text-transform").value,s=function(e,r){return r?(o.setPrefixedProperty(n.rscratch,e,t,r),r):o.getPrefixedProperty(n.rscratch,e,t)};"none"==a||("uppercase"==a?i=i.toUpperCase():"lowercase"==a&&(i=i.toLowerCase()));var l=e.pstyle("text-wrap").value;if("wrap"===l){var c=s("labelKey");if(c&&s("labelWrapKey")===c)return s("labelWrapCachedText");for(var u=i.split("\n"),d=e.pstyle("text-max-width").pfValue,f=[],p=0;pd){for(var g=h.split(/\s+/),m="",v=0;vd);k++)x+=i[k],k===i.length-1&&(w=!0);return w||(x+="…"),x}return i},calculateLabelDimensions:function(e,t,n){var r=e._private.labelStyleKey+"$@$"+t;n&&(r+="$@$"+n);var 
i=this.labelDimCache||(this.labelDimCache={});if(i[r])return i[r];var o=e.pstyle("font-style").strValue,a=1*e.pstyle("font-size").pfValue+"px",s=e.pstyle("font-family").strValue,l=e.pstyle("font-weight").strValue,c=this.labelCalcDiv;c||(c=this.labelCalcDiv=document.createElement("div"),document.body.appendChild(c));var u=c.style;return u.fontFamily=s,u.fontStyle=o,u.fontSize=a,u.fontWeight=l,u.position="absolute",u.left="-9999px",u.top="-9999px",u.zIndex="-1",u.visibility="hidden",u.pointerEvents="none",u.padding="0",u.lineHeight="1","wrap"===e.pstyle("text-wrap").value?u.whiteSpace="pre":u.whiteSpace="normal",c.textContent=t,i[r]={width:Math.ceil(c.clientWidth/1),height:Math.ceil(c.clientHeight/1)},i[r]},calculateLabelAngles:function(e){var t=e._private.rscratch,n=e.isEdge(),r=e.pstyle("text-rotation"),i=r.strValue;"none"===i?t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle=0:n&&"autorotate"===i?(t.labelAngle=Math.atan(t.midDispY/t.midDispX),t.sourceLabelAngle=t.sourceLabelAutoAngle,t.targetLabelAngle=t.targetLabelAutoAngle):t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle="autorotate"===i?0:r.pfValue}};e.exports=a},function(e,t,n){"use strict";var r={getNodeShape:function(e){var t=e.pstyle("shape").value;if(e.isParent())return"rectangle"===t||"roundrectangle"===t||"cutrectangle"===t||"barrel"===t?t:"rectangle";if("polygon"===t){var n=e.pstyle("shape-polygon-points").value;return this.nodeShapes.makePolygon(n).name}return t}};e.exports=r},function(e,t,n){"use strict";var r={registerCalculationListeners:function(){var e=this.cy,t=e.collection(),n=this,r=function(e,n){var r=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];t.merge(e);for(var i=0;i=e.desktopTapThreshold2}var C=n(i);b&&(e.hoverData.tapholdCancelled=!0),s=!0,t(v,["mousemove","vmousemove","tapdrag"],i,{position:{x:p[0],y:p[1]}});var _=function(){e.data.bgActivePosistion=void 
0,e.hoverData.selecting||l.emit("boxstart"),m[4]=1,e.hoverData.selecting=!0,e.redrawHint("select",!0),e.redraw()};if(3===e.hoverData.which){if(b){var O={originalEvent:i,type:"cxtdrag",position:{x:p[0],y:p[1]}};x?x.emit(O):l.emit(O),e.hoverData.cxtDragged=!0,e.hoverData.cxtOver&&v===e.hoverData.cxtOver||(e.hoverData.cxtOver&&e.hoverData.cxtOver.emit({originalEvent:i,type:"cxtdragout",position:{x:p[0],y:p[1]}}),e.hoverData.cxtOver=v,v&&v.emit({originalEvent:i,type:"cxtdragover",position:{x:p[0],y:p[1]}}))}}else if(e.hoverData.dragging){if(s=!0,l.panningEnabled()&&l.userPanningEnabled()){var T;if(e.hoverData.justStartedPan){var P=e.hoverData.mdownPos;T={x:(p[0]-P[0])*c,y:(p[1]-P[1])*c},e.hoverData.justStartedPan=!1}else T={x:w[0]*c,y:w[1]*c};l.panBy(T),e.hoverData.dragged=!0}p=e.projectIntoViewport(i.clientX,i.clientY)}else if(1!=m[4]||null!=x&&!x.isEdge()){if(x&&x.isEdge()&&x.active()&&x.unactivate(),x&&x.grabbed()||v==y||(y&&t(y,["mouseout","tapdragout"],i,{position:{x:p[0],y:p[1]}}),v&&t(v,["mouseover","tapdragover"],i,{position:{x:p[0],y:p[1]}}),e.hoverData.last=v),x)if(b){if(l.boxSelectionEnabled()&&C)x&&x.grabbed()&&(f(k),x.emit("free")),_();else if(x&&x.grabbed()&&e.nodeIsDraggable(x)){var D=!e.dragData.didDrag;D&&e.redrawHint("eles",!0),e.dragData.didDrag=!0;var R=[];e.hoverData.draggingEles||u(l.collection(k),{inDragLayer:!0});for(var I=0;I0&&e.redrawHint("eles",!0),e.dragData.possibleDragElements=l=[]),t(s,["mouseup","tapend","vmouseup"],r,{position:{x:o[0],y:o[1]}}),e.dragData.didDrag||e.hoverData.dragged||e.hoverData.selecting||e.hoverData.isOverThresholdDrag||t(c,["click","tap","vclick"],r,{position:{x:o[0],y:o[1]}}),s!=c||e.dragData.didDrag||e.hoverData.selecting||null!=s&&s._private.selectable&&(e.hoverData.dragging||("additive"===i.selectionType()||u?s.selected()?s.unselect():s.select():u||(i.$(":selected").unmerge(s).unselect(),s.select())),e.redrawHint("eles",!0)),e.hoverData.selecting){var 
h=i.collection(e.getAllInBox(a[0],a[1],a[2],a[3]));e.redrawHint("select",!0),h.length>0&&e.redrawHint("eles",!0),i.emit("boxend");var g=function(e){return e.selectable()&&!e.selected()};"additive"===i.selectionType()||u||i.$(":selected").unmerge(h).unselect(),h.emit("box").stdFilter(g).select().emit("boxselect"),e.redraw()}if(e.hoverData.dragging&&(e.hoverData.dragging=!1,e.redrawHint("select",!0),e.redrawHint("eles",!0),e.redraw()),!a[4]){e.redrawHint("drag",!0),e.redrawHint("eles",!0);var m=c&&c.grabbed();f(l),m&&c.emit("free")}}a[4]=0,e.hoverData.down=null,e.hoverData.cxtStarted=!1,e.hoverData.draggingEles=!1,e.hoverData.selecting=!1,e.hoverData.isOverThresholdDrag=!1,e.dragData.didDrag=!1,e.hoverData.dragged=!1,e.hoverData.dragDelta=[],e.hoverData.mdownPos=null,e.hoverData.mdownGPos=null}}),!1),e.registerBinding(e.container,"wheel",(function(t){if(!e.scrollingPage){var n,r=e.cy,i=e.projectIntoViewport(t.clientX,t.clientY),o=[i[0]*r.zoom()+r.pan().x,i[1]*r.zoom()+r.pan().y];e.hoverData.draggingEles||e.hoverData.dragging||e.hoverData.cxtStarted||0!==e.selection[4]?t.preventDefault():r.panningEnabled()&&r.userPanningEnabled()&&r.zoomingEnabled()&&r.userZoomingEnabled()&&(t.preventDefault(),e.data.wheelZooming=!0,clearTimeout(e.data.wheelTimeout),e.data.wheelTimeout=setTimeout((function(){e.data.wheelZooming=!1,e.redrawHint("eles",!0),e.redraw()}),150),n=null!=t.deltaY?t.deltaY/-250:null!=t.wheelDeltaY?t.wheelDeltaY/1e3:t.wheelDelta/1e3,n*=e.wheelSensitivity,1===t.deltaMode&&(n*=33),r.zoom({level:r.zoom()*Math.pow(10,n),renderedPosition:{x:o[0],y:o[1]}}))}}),!0),e.registerBinding(window,"scroll",(function(t){e.scrollingPage=!0,clearTimeout(e.scrollingPageTimeout),e.scrollingPageTimeout=setTimeout((function(){e.scrollingPage=!1}),250)}),!0),e.registerBinding(e.container,"mouseout",(function(t){var 
n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseout",position:{x:n[0],y:n[1]}})}),!1),e.registerBinding(e.container,"mouseover",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseover",position:{x:n[0],y:n[1]}})}),!1);var T,P,D,R,I=function(e,t,n,r){return Math.sqrt((n-e)*(n-e)+(r-t)*(r-t))},N=function(e,t,n,r){return(n-e)*(n-e)+(r-t)*(r-t)};if(e.registerBinding(e.container,"touchstart",T=function(n){if(j(n)){e.touchData.capture=!0,e.data.bgActivePosistion=void 0;var r=e.cy,i=e.touchData.now,o=e.touchData.earlier;if(n.touches[0]){var a=e.projectIntoViewport(n.touches[0].clientX,n.touches[0].clientY);i[0]=a[0],i[1]=a[1]}if(n.touches[1]&&(a=e.projectIntoViewport(n.touches[1].clientX,n.touches[1].clientY),i[2]=a[0],i[3]=a[1]),n.touches[2]&&(a=e.projectIntoViewport(n.touches[2].clientX,n.touches[2].clientY),i[4]=a[0],i[5]=a[1]),n.touches[1]){f(e.dragData.touchDragEles);var s=e.findContainerClientCoords();S=s[0],$=s[1],C=s[2],_=s[3],v=n.touches[0].clientX-S,b=n.touches[0].clientY-$,y=n.touches[1].clientX-S,x=n.touches[1].clientY-$,O=0<=v&&v<=C&&0<=y&&y<=C&&0<=b&&b<=_&&0<=x&&x<=_;var c=r.pan(),p=r.zoom();if(w=I(v,b,y,x),k=N(v,b,y,x),E=[((A=[(v+y)/2,(b+x)/2])[0]-c.x)/p,(A[1]-c.y)/p],k<4e4&&!n.touches[2]){var h=e.findNearestElement(i[0],i[1],!0,!0),g=e.findNearestElement(i[2],i[3],!0,!0);return h&&h.isNode()?(h.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=h):g&&g.isNode()?(g.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=g):r.emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxt=!0,e.touchData.cxtDragged=!1,e.data.bgActivePosistion=void 0,void e.redraw()}}if(n.touches[2]);else if(n.touches[1]);else if(n.touches[0]){var 
m=e.findNearestElements(i[0],i[1],!0,!0),T=m[0];if(null!=T&&(T.activate(),e.touchData.start=T,e.touchData.starts=m,e.nodeIsGrabbable(T))){var P=e.dragData.touchDragEles=[],D=null;e.redrawHint("eles",!0),e.redrawHint("drag",!0),T.selected()?(D=r.$((function(t){return t.selected()&&e.nodeIsGrabbable(t)})),u(D,{addToList:P})):d(T,{addToList:P}),l(T);var R=function(e){return{originalEvent:n,type:e,position:{x:i[0],y:i[1]}}};T.emit(R("grabon")),D?D.forEach((function(e){e.emit(R("grab"))})):T.emit(R("grab"))}t(T,["touchstart","tapstart","vmousedown"],n,{position:{x:i[0],y:i[1]}}),null==T&&(e.data.bgActivePosistion={x:a[0],y:a[1]},e.redrawHint("select",!0),e.redraw()),e.touchData.singleTouchMoved=!1,e.touchData.singleTouchStartTime=+new Date,clearTimeout(e.touchData.tapholdTimeout),e.touchData.tapholdTimeout=setTimeout((function(){!1!==e.touchData.singleTouchMoved||e.pinching||e.touchData.selecting||(t(e.touchData.start,["taphold"],n,{position:{x:i[0],y:i[1]}}),e.touchData.start||r.$(":selected").unselect())}),e.tapholdDuration)}if(n.touches.length>=1){for(var M=e.touchData.startPosition=[],z=0;z=e.touchTapThreshold2}if(i&&e.touchData.cxt){n.preventDefault();var D=n.touches[0].clientX-S,R=n.touches[0].clientY-$,M=n.touches[1].clientX-S,z=n.touches[1].clientY-$,L=N(D,R,M,z);if(L/k>=2.25||L>=22500){e.touchData.cxt=!1,e.data.bgActivePosistion=void 0,e.redrawHint("select",!0);var B={originalEvent:n,type:"cxttapend",position:{x:c[0],y:c[1]}};e.touchData.start?(e.touchData.start.unactivate().emit(B),e.touchData.start=null):l.emit(B)}}if(i&&e.touchData.cxt){B={originalEvent:n,type:"cxtdrag",position:{x:c[0],y:c[1]}},e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),e.touchData.start?e.touchData.start.emit(B):l.emit(B),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxtDragged=!0;var 
F=e.findNearestElement(c[0],c[1],!0,!0);e.touchData.cxtOver&&F===e.touchData.cxtOver||(e.touchData.cxtOver&&e.touchData.cxtOver.emit({originalEvent:n,type:"cxtdragout",position:{x:c[0],y:c[1]}}),e.touchData.cxtOver=F,F&&F.emit({originalEvent:n,type:"cxtdragover",position:{x:c[0],y:c[1]}}))}else if(i&&n.touches[2]&&l.boxSelectionEnabled())n.preventDefault(),e.data.bgActivePosistion=void 0,this.lastThreeTouch=+new Date,e.touchData.selecting||l.emit("boxstart"),e.touchData.selecting=!0,e.redrawHint("select",!0),s&&0!==s.length&&void 0!==s[0]?(s[2]=(c[0]+c[2]+c[4])/3,s[3]=(c[1]+c[3]+c[5])/3):(s[0]=(c[0]+c[2]+c[4])/3,s[1]=(c[1]+c[3]+c[5])/3,s[2]=(c[0]+c[2]+c[4])/3+1,s[3]=(c[1]+c[3]+c[5])/3+1),s[4]=1,e.touchData.selecting=!0,e.redraw();else if(i&&n.touches[1]&&l.zoomingEnabled()&&l.panningEnabled()&&l.userZoomingEnabled()&&l.userPanningEnabled()){if(n.preventDefault(),e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),ee=e.dragData.touchDragEles){e.redrawHint("drag",!0);for(var q=0;q0)return h[0]}return null},p=Object.keys(d),h=0;h0?f:r.roundRectangleIntersectLine(o,a,e,t,n,i,s)},checkPoint:function(e,t,n,i,o,a,s){var l=r.getRoundRectangleRadius(i,o),c=2*l;if(r.pointInsidePolygon(e,t,this.points,a,s,i,o-c,[0,-1],n))return!0;if(r.pointInsidePolygon(e,t,this.points,a,s,i-c,o,[0,-1],n))return!0;var u=i/2+2*n,d=o/2+2*n,f=[a-u,s-d,a-u,s,a+u,s,a+u,s-d];return!!r.pointInsidePolygonPoints(e,t,f)||!!r.checkInEllipse(e,t,c,c,a+i/2-l,s+o/2-l,n)||!!r.checkInEllipse(e,t,c,c,a-i/2+l,s+o/2-l,n)}}},registerNodeShapes:function(){var 
e=this.nodeShapes={},t=this;this.generateEllipse(),this.generatePolygon("triangle",r.generateUnitNgonPointsFitToSquare(3,0)),this.generatePolygon("rectangle",r.generateUnitNgonPointsFitToSquare(4,0)),e.square=e.rectangle,this.generateRoundRectangle(),this.generateCutRectangle(),this.generateBarrel(),this.generateBottomRoundrectangle(),this.generatePolygon("diamond",[0,1,1,0,0,-1,-1,0]),this.generatePolygon("pentagon",r.generateUnitNgonPointsFitToSquare(5,0)),this.generatePolygon("hexagon",r.generateUnitNgonPointsFitToSquare(6,0)),this.generatePolygon("heptagon",r.generateUnitNgonPointsFitToSquare(7,0)),this.generatePolygon("octagon",r.generateUnitNgonPointsFitToSquare(8,0));var n=new Array(20),i=r.generateUnitNgonPoints(5,0),o=r.generateUnitNgonPoints(5,Math.PI/5),a=.5*(3-Math.sqrt(5));a*=1.57;for(var s=0;s0&&t.data.lyrTxrCache.invalidateElements(n)}))}l.CANVAS_LAYERS=3,l.SELECT_BOX=0,l.DRAG=1,l.NODE=2,l.BUFFER_COUNT=3,l.TEXTURE_BUFFER=0,l.MOTIONBLUR_BUFFER_NODE=1,l.MOTIONBLUR_BUFFER_DRAG=2,l.redrawHint=function(e,t){var n=this;switch(e){case"eles":n.data.canvasNeedsRedraw[l.NODE]=t;break;case"drag":n.data.canvasNeedsRedraw[l.DRAG]=t;break;case"select":n.data.canvasNeedsRedraw[l.SELECT_BOX]=t}};var u="undefined"!=typeof Path2D;l.path2dEnabled=function(e){if(void 0===e)return this.pathsEnabled;this.pathsEnabled=!!e},l.usePaths=function(){return u&&this.pathsEnabled},[n(126),n(127),n(128),n(129),n(130),n(131),n(132),n(133),n(134),n(135)].forEach((function(e){r.extend(l,e)})),e.exports=s},function(e,t,n){"use strict";var r=n(2),i=n(1),o=n(9),a=n(19),s={dequeue:"dequeue",downscale:"downscale",highQuality:"highQuality"},l=function(e){this.renderer=e,this.onDequeues=[],this.setupDequeueing()},c=l.prototype;c.reasons=s,c.getTextureQueue=function(e){return this.eleImgCaches=this.eleImgCaches||{},this.eleImgCaches[e]=this.eleImgCaches[e]||[]},c.getRetiredTextureQueue=function(e){var t=this.eleImgCaches.retired=this.eleImgCaches.retired||{};return 
t[e]=t[e]||[]},c.getElementQueue=function(){return this.eleCacheQueue=this.eleCacheQueue||new o((function(e,t){return t.reqs-e.reqs}))},c.getElementIdToQueue=function(){return this.eleIdToCacheQueue=this.eleIdToCacheQueue||{}},c.getElement=function(e,t,n,i,o){var a=this,l=this.renderer,c=e._private.rscratch,u=l.cy.zoom();if(0===t.w||0===t.h||!e.visible())return null;if(null==i&&(i=Math.ceil(r.log2(u*n))),i<-4)i=-4;else if(u>=3.99||i>2)return null;var d,f=Math.pow(2,i),p=t.h*f,h=t.w*f,g=c.imgCaches=c.imgCaches||{},m=g[i];if(m)return m;if(d=p<=25?25:p<=50?50:50*Math.ceil(p/50),p>1024||h>1024||e.isEdge()||e.isParent())return null;var v=a.getTextureQueue(d),b=v[v.length-2],y=function(){return a.recycleTexture(d,h)||a.addTexture(d,h)};b||(b=v[v.length-1]),b||(b=y()),b.width-b.usedWidthi;$--)C=a.getElement(e,t,n,$,s.downscale);_()}else{var O;if(!A&&!E&&!S)for($=i-1;$>=-4;$--){var j;if(j=g[$]){O=j;break}}if(k(O))return a.queueElement(e,i),O;b.context.translate(b.usedWidth,0),b.context.scale(f,f),l.drawElement(b.context,e,t,w),b.context.scale(1/f,1/f),b.context.translate(-b.usedWidth,0)}return m=g[i]={ele:e,x:b.usedWidth,texture:b,level:i,scale:f,width:h,height:p,scaledLabelShown:w},b.usedWidth+=Math.ceil(h+8),b.eleCaches.push(m),a.checkTextureFullness(b),m},c.invalidateElement=function(e){var t=e._private.rscratch.imgCaches;if(t)for(var n=-4;n<=2;n++){var r=t[n];if(r){var o=r.texture;o.invalidatedWidth+=r.width,t[n]=null,i.removeFromArray(o.eleCaches,r),this.removeFromQueue(e),this.checkTextureUtility(o)}}},c.checkTextureUtility=function(e){e.invalidatedWidth>=.5*e.width&&this.retireTexture(e)},c.checkTextureFullness=function(e){var t=this.getTextureQueue(e.height);e.usedWidth/e.width>.8&&e.fullnessChecks>=10?i.removeFromArray(t,e):e.fullnessChecks++},c.retireTexture=function(e){var t=e.height,n=this.getTextureQueue(t);i.removeFromArray(n,e),e.retired=!0;for(var r=e.eleCaches,o=0;o=t)return 
a.retired=!1,a.usedWidth=0,a.invalidatedWidth=0,a.fullnessChecks=0,i.clearArray(a.eleCaches),a.context.setTransform(1,0,0,1,0,0),a.context.clearRect(0,0,a.width,a.height),i.removeFromArray(r,a),n.push(a),a}},c.queueElement=function(e,t){var n=this.getElementQueue(),r=this.getElementIdToQueue(),i=e.id(),o=r[i];if(o)o.level=Math.max(o.level,t),o.reqs++,n.updateItem(o);else{var a={ele:e,level:t,reqs:1};n.push(a),r[i]=a}},c.dequeue=function(e){for(var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=[],i=0;i<1&&t.size()>0;i++){var o=t.pop(),a=o.ele;if(null==a._private.rscratch.imgCaches[o.level]){n[a.id()]=null,r.push(o);var l=a.boundingBox();this.getElement(a,l,e,o.level,s.dequeue)}}return r},c.removeFromQueue=function(e){var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=n[e.id()];null!=r&&(r.reqs=i.MAX_INT,t.updateItem(r),t.pop(),n[e.id()]=null)},c.onDequeue=function(e){this.onDequeues.push(e)},c.offDequeue=function(e){i.removeFromArray(this.onDequeues,e)},c.setupDequeueing=a.setupDequeueing({deqRedrawThreshold:100,deqCost:.15,deqAvgCost:.1,deqNoDrawCost:.9,deqFastCost:.9,deq:function(e,t,n){return e.dequeue(t,n)},onDeqd:function(e,t){for(var n=0;n=3.99||n>2)return null;o.validateLayersElesOrdering(n,e);var l,c,u=o.layersByLevel,d=Math.pow(2,n),f=u[n]=u[n]||[];if(o.levelIsComplete(n,e))return f;!function(){var t=function(t){if(o.validateLayersElesOrdering(t,e),o.levelIsComplete(t,e))return c=u[t],!0},i=function(e){if(!c)for(var r=n+e;-4<=r&&r<=2&&!t(r);r+=e);};i(1),i(-1);for(var a=f.length-1;a>=0;a--){var s=f[a];s.invalid&&r.removeFromArray(f,s)}}();var p=function(t){var r=(t=t||{}).after;if(function(){if(!l){l=i.makeBoundingBox();for(var t=0;t16e6)return null;var a=o.makeLayer(l,n);if(null!=r){var s=f.indexOf(r)+1;f.splice(s,0,a)}else(void 0===t.insert||t.insert)&&f.unshift(a);return a};if(o.skipping&&!s)return null;for(var h=null,g=e.length/1,m=!s,v=0;v=g||!i.boundingBoxInBoundingBox(h.bb,b.boundingBox()))&&!(h=p({insert:!0,after:h})))return 
null;c||m?o.queueLayer(h,b):o.drawEleInLayer(h,b,n,t),h.eles.push(b),x[n]=h}}return c||(m?null:f)},c.getEleLevelForLayerLevel=function(e,t){return e},c.drawEleInLayer=function(e,t,n,r){var i=this.renderer,o=e.context,a=t.boundingBox();if(0!==a.w&&0!==a.h&&t.visible()){var s=this.eleTxrCache,l=s.reasons.highQuality;n=this.getEleLevelForLayerLevel(n,r);var c=s.getElement(t,a,null,n,l);c?(f(o,!1),o.drawImage(c.texture.canvas,c.x,0,c.width,c.height,a.x1,a.y1,a.w,a.h),f(o,!0)):i.drawElement(o,t)}},c.levelIsComplete=function(e,t){var n=this.layersByLevel[e];if(!n||0===n.length)return!1;for(var r=0,i=0;i0)return!1;if(o.invalid)return!1;r+=o.eles.length}return r===t.length},c.validateLayersElesOrdering=function(e,t){var n=this.layersByLevel[e];if(n)for(var r=0;r0){e=!0;break}}return e},c.invalidateElements=function(e){var t=this;t.lastInvalidationTime=r.performanceNow(),0!==e.length&&t.haveLayers()&&t.updateElementsInLayers(e,(function(e,n,r){t.invalidateLayer(e)}))},c.invalidateLayer=function(e){if(this.lastInvalidationTime=r.performanceNow(),!e.invalid){var t=e.level,n=e.eles,i=this.layersByLevel[t];r.removeFromArray(i,e),e.elesQueue=[],e.invalid=!0,e.replacement&&(e.replacement.invalid=!0);for(var o=0;o0&&void 0!==arguments[0]?arguments[0]:f;e.lineWidth=h,e.lineCap="butt",i.strokeStyle(e,d[0],d[1],d[2],n),i.drawEdgePath(t,e,o.allpts,p)},m=function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:f;i.drawArrowheads(e,t,n)};if(e.lineJoin="round","yes"===t.pstyle("ghost").value){var v=t.pstyle("ghost-offset-x").pfValue,b=t.pstyle("ghost-offset-y").pfValue,y=t.pstyle("ghost-opacity").value,x=f*y;e.translate(v,b),g(x),m(x),e.translate(-v,-b)}g(),m(),function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c;e.lineWidth=l,"self"!==o.edgeType||a?e.lineCap="round":e.lineCap="butt",i.strokeStyle(e,u[0],u[1],u[2],n),i.drawEdgePath(t,e,o.allpts,"solid")}(),i.drawElementText(e,t,r),n&&e.translate(s.x1,s.y1)}},drawEdgePath:function(e,t,n,r){var 
i=e._private.rscratch,o=t,a=void 0,s=!1,l=this.usePaths();if(l){var c=n.join("$");i.pathCacheKey&&i.pathCacheKey===c?(a=t=i.pathCache,s=!0):(a=t=new Path2D,i.pathCacheKey=c,i.pathCache=a)}if(o.setLineDash)switch(r){case"dotted":o.setLineDash([1,1]);break;case"dashed":o.setLineDash([6,3]);break;case"solid":o.setLineDash([])}if(!s&&!i.badLine)switch(t.beginPath&&t.beginPath(),t.moveTo(n[0],n[1]),i.edgeType){case"bezier":case"self":case"compound":case"multibezier":if(e.hasClass("horizontal")){var u=n[4],d=n[5],f=(n[0]+n[4])/2;t.lineTo(n[0]+10,n[1]),t.bezierCurveTo(f,n[1],f,n[5],n[4]-10,n[5]),t.lineTo(u,d)}else if(e.hasClass("vertical")){var p=n[4],h=n[5],g=(n[1]+n[5])/2;t.bezierCurveTo(n[0],g,n[4],g,n[4],n[5]-10),t.lineTo(p,h)}else for(var m=2;m+30||j>0&&O>0){var P=f-T;switch(k){case"left":P-=m;break;case"center":P-=m/2}var D=p-v-T,R=m+2*T,I=v+2*T;if(_>0){var N=e.fillStyle,M=t.pstyle("text-background-color").value;e.fillStyle="rgba("+M[0]+","+M[1]+","+M[2]+","+_*o+")","roundrectangle"==t.pstyle("text-background-shape").strValue?(s=P,l=D,c=R,u=I,d=(d=2)||5,(a=e).beginPath(),a.moveTo(s+d,l),a.lineTo(s+c-d,l),a.quadraticCurveTo(s+c,l,s+c,l+d),a.lineTo(s+c,l+u-d),a.quadraticCurveTo(s+c,l+u,s+c-d,l+u),a.lineTo(s+d,l+u),a.quadraticCurveTo(s,l+u,s,l+u-d),a.lineTo(s,l+d),a.quadraticCurveTo(s,l,s+d,l),a.closePath(),a.fill()):e.fillRect(P,D,R,I),e.fillStyle=N}if(j>0&&O>0){var z=e.strokeStyle,L=e.lineWidth,B=t.pstyle("text-border-color").value,F=t.pstyle("text-border-style").value;if(e.strokeStyle="rgba("+B[0]+","+B[1]+","+B[2]+","+O*o+")",e.lineWidth=j,e.setLineDash)switch(F){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"double":e.lineWidth=j/4,e.setLineDash([]);break;case"solid":e.setLineDash([])}if(e.strokeRect(P,D,R,I),"double"===F){var q=j/2;e.strokeRect(P+q,D+q,R-2*q,I-2*q)}e.setLineDash&&e.setLineDash([]),e.lineWidth=L,e.strokeStyle=z}}var 
V=2*t.pstyle("text-outline-width").pfValue;if(V>0&&(e.lineWidth=V),"wrap"===t.pstyle("text-wrap").value){var U=r.getPrefixedProperty(i,"labelWrapCachedLines",n),H=v/U.length;switch(A){case"top":p-=(U.length-1)*H;break;case"center":case"bottom":p-=(U.length-1)*H}for(var G=0;G0&&e.strokeText(U[G],f,p),e.fillText(U[G],f,p),p+=H}else V>0&&e.strokeText(h,f,p),e.fillText(h,f,p);0!==E&&(e.rotate(-E),e.translate(-$,-C))}}},e.exports=o},function(e,t,n){"use strict";var r=n(0),i={drawNode:function(e,t,n,i){var o,a,s=this,l=t._private,c=l.rscratch,u=t.position();if(r.number(u.x)&&r.number(u.y)&&t.visible()){var d=t.effectiveOpacity(),f=s.usePaths(),p=void 0,h=!1,g=t.padding();o=t.width()+2*g,a=t.height()+2*g;var m=void 0;n&&(m=n,e.translate(-m.x1,-m.y1));for(var v=t.pstyle("background-image").value,b=new Array(v.length),y=new Array(v.length),x=0,w=0;w0&&void 0!==arguments[0]?arguments[0]:C;s.fillStyle(e,$[0],$[1],$[2],t)},P=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:j;s.strokeStyle(e,_[0],_[1],_[2],t)},D=t.pstyle("shape").strValue,R=t.pstyle("shape-polygon-points").pfValue;if(f){var I=D+"$"+o+"$"+a+("polygon"===D?"$"+R.join("$"):"");e.translate(u.x,u.y),c.pathCacheKey===I?(p=c.pathCache,h=!0):(p=new Path2D,c.pathCacheKey=I,c.pathCache=p)}var N,M,z,L=function(){if(!h){var n=u;f&&(n={x:0,y:0}),s.nodeShapes[s.getNodeShape(t)].draw(p||e,n.x,n.y,o,a)}f?e.fill(p):e.fill()},B=function(){for(var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,r=l.backgrounding,i=0,o=0;o0&&void 0!==arguments[0]&&arguments[0],r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:d;s.hasPie(t)&&(s.drawPie(e,t,r),n&&(f||s.nodeShapes[s.getNodeShape(t)].draw(e,u.x,u.y,o,a)))},q=function(){var t=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:d,n=(E>0?E:-E)*t,r=E>0?0:255;0!==E&&(s.fillStyle(e,r,r,r,n),f?e.fill(p):e.fill())},V=function(){if(S>0){if(e.lineWidth=S,e.lineCap="butt",e.setLineDash)switch(O){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"solid":case"double":e.setLineDash([])}if(f?e.stroke(p):e.stroke(),"double"===O){e.lineWidth=S/3;var t=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",f?e.stroke(p):e.stroke(),e.globalCompositeOperation=t}e.setLineDash&&e.setLineDash([])}};if("yes"===t.pstyle("ghost").value){var U=t.pstyle("ghost-offset-x").pfValue,H=t.pstyle("ghost-offset-y").pfValue,G=t.pstyle("ghost-opacity").value,W=G*d;e.translate(U,H),T(G*C),L(),B(W),F(0!==E||0!==S),q(W),P(G*j),V(),e.translate(-U,-H)}T(),L(),B(),F(0!==E||0!==S),q(),P(),V(),f&&e.translate(-u.x,-u.y),s.drawElementText(e,t,i),N=t.pstyle("overlay-padding").pfValue,M=t.pstyle("overlay-opacity").value,z=t.pstyle("overlay-color").value,M>0&&(s.fillStyle(e,z[0],z[1],z[2],M),s.nodeShapes.roundrectangle.draw(e,u.x,u.y,o+2*N,a+2*N),e.fill()),n&&e.translate(m.x1,m.y1)}},hasPie:function(e){return(e=e[0])._private.hasPie},drawPie:function(e,t,n,r){t=t[0],r=r||t.position();var i=t.cy().style(),o=t.pstyle("pie-size"),a=r.x,s=r.y,l=t.width(),c=t.height(),u=Math.min(l,c)/2,d=0;this.usePaths()&&(a=0,s=0),"%"===o.units?u*=o.pfValue:void 0!==o.pfValue&&(u=o.pfValue/2);for(var f=1;f<=i.pieBackgroundN;f++){var p=t.pstyle("pie-"+f+"-background-size").value,h=t.pstyle("pie-"+f+"-background-color").value,g=t.pstyle("pie-"+f+"-background-opacity").value*n,m=p/100;m+d>1&&(m=1-d);var v=1.5*Math.PI+2*Math.PI*d,b=v+2*Math.PI*m;0===p||d>=1||d+m>1||(e.beginPath(),e.moveTo(a,s),e.arc(a,s,u,v,b),e.closePath(),this.fillStyle(e,h[0],h[1],h[2],g),e.fill(),d+=m)}}};e.exports=i},function(e,t,n){"use strict";var r={},i=n(1);r.getPixelRatio=function(){var e=this.data.contexts[0];if(null!=this.forcedPixelRatio)return this.forcedPixelRatio;var 
t=e.backingStorePixelRatio||e.webkitBackingStorePixelRatio||e.mozBackingStorePixelRatio||e.msBackingStorePixelRatio||e.oBackingStorePixelRatio||e.backingStorePixelRatio||1;return(window.devicePixelRatio||1)/t},r.paintCache=function(e){for(var t,n=this.paintCaches=this.paintCaches||[],r=!0,i=0;is.minMbLowQualFrames&&(s.motionBlurPxRatio=s.mbPxRBlurry)),s.clearingMotionBlur&&(s.motionBlurPxRatio=1),s.textureDrawLastFrame&&!f&&(d[s.NODE]=!0,d[s.SELECT_BOX]=!0);var y=c.style()._private.coreStyle,x=c.zoom(),w=void 0!==o?o:x,k=c.pan(),A={x:k.x,y:k.y},E={zoom:x,pan:{x:k.x,y:k.y}},S=s.prevViewport;void 0===S||E.zoom!==S.zoom||E.pan.x!==S.pan.x||E.pan.y!==S.pan.y||m&&!g||(s.motionBlurPxRatio=1),a&&(A=a),w*=l,A.x*=l,A.y*=l;var $=s.getCachedZSortedEles();function C(e,t,n,r,i){var o=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",s.fillStyle(e,255,255,255,s.motionBlurTransparency),e.fillRect(t,n,r,i),e.globalCompositeOperation=o}function _(e,r){var i,l,c,d;s.clearingMotionBlur||e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]&&e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]?(i=A,l=w,c=s.canvasWidth,d=s.canvasHeight):(i={x:k.x*h,y:k.y*h},l=x*h,c=s.canvasWidth*h,d=s.canvasHeight*h),e.setTransform(1,0,0,1,0,0),"motionBlur"===r?C(e,0,0,c,d):t||void 0!==r&&!r||e.clearRect(0,0,c,d),n||(e.translate(i.x,i.y),e.scale(l,l)),a&&e.translate(a.x,a.y),o&&e.scale(o,o)}if(f||(s.textureDrawLastFrame=!1),f){if(s.textureDrawLastFrame=!0,!s.textureCache){s.textureCache={},s.textureCache.bb=c.mutableElements().boundingBox(),s.textureCache.texture=s.data.bufferCanvases[s.TEXTURE_BUFFER];var 
O=s.data.bufferContexts[s.TEXTURE_BUFFER];O.setTransform(1,0,0,1,0,0),O.clearRect(0,0,s.canvasWidth*s.textureMult,s.canvasHeight*s.textureMult),s.render({forcedContext:O,drawOnlyNodeLayer:!0,forcedPxRatio:l*s.textureMult}),(E=s.textureCache.viewport={zoom:c.zoom(),pan:c.pan(),width:s.canvasWidth,height:s.canvasHeight}).mpan={x:(0-E.pan.x)/E.zoom,y:(0-E.pan.y)/E.zoom}}d[s.DRAG]=!1,d[s.NODE]=!1;var j=u.contexts[s.NODE],T=s.textureCache.texture;E=s.textureCache.viewport,s.textureCache.bb,j.setTransform(1,0,0,1,0,0),p?C(j,0,0,E.width,E.height):j.clearRect(0,0,E.width,E.height);var P=y["outside-texture-bg-color"].value,D=y["outside-texture-bg-opacity"].value;s.fillStyle(j,P[0],P[1],P[2],D),j.fillRect(0,0,E.width,E.height),x=c.zoom(),_(j,!1),j.clearRect(E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l),j.drawImage(T,E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l)}else s.textureOnViewport&&!t&&(s.textureCache=null);var R=c.extent(),I=s.pinching||s.hoverData.dragging||s.swipePanning||s.data.wheelZooming||s.hoverData.draggingEles,N=s.hideEdgesOnViewport&&I,M=[];if(M[s.NODE]=!d[s.NODE]&&p&&!s.clearedForMotionBlur[s.NODE]||s.clearingMotionBlur,M[s.NODE]&&(s.clearedForMotionBlur[s.NODE]=!0),M[s.DRAG]=!d[s.DRAG]&&p&&!s.clearedForMotionBlur[s.DRAG]||s.clearingMotionBlur,M[s.DRAG]&&(s.clearedForMotionBlur[s.DRAG]=!0),d[s.NODE]||n||r||M[s.NODE]){var z=p&&!M[s.NODE]&&1!==h;_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]:u.contexts[s.NODE]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.nondrag,l,R):s.drawLayeredElements(j,$.nondrag,l,R),s.debug&&s.drawDebugPoints(j,$.nondrag),n||p||(d[s.NODE]=!1)}if(!r&&(d[s.DRAG]||n||M[s.DRAG])&&(z=p&&!M[s.DRAG]&&1!==h,_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]:u.contexts[s.DRAG]),p&&!z?"motionBlur":void 
0),N?s.drawCachedNodes(j,$.drag,l,R):s.drawCachedElements(j,$.drag,l,R),s.debug&&s.drawDebugPoints(j,$.drag),n||p||(d[s.DRAG]=!1)),s.showFps||!r&&d[s.SELECT_BOX]&&!n){if(_(j=t||u.contexts[s.SELECT_BOX]),1==s.selection[4]&&(s.hoverData.selecting||s.touchData.selecting)){x=s.cy.zoom();var L=y["selection-box-border-width"].value/x;j.lineWidth=L,j.fillStyle="rgba("+y["selection-box-color"].value[0]+","+y["selection-box-color"].value[1]+","+y["selection-box-color"].value[2]+","+y["selection-box-opacity"].value+")",j.fillRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]),L>0&&(j.strokeStyle="rgba("+y["selection-box-border-color"].value[0]+","+y["selection-box-border-color"].value[1]+","+y["selection-box-border-color"].value[2]+","+y["selection-box-opacity"].value+")",j.strokeRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]))}if(u.bgActivePosistion&&!s.hoverData.selecting){x=s.cy.zoom();var B=u.bgActivePosistion;j.fillStyle="rgba("+y["active-bg-color"].value[0]+","+y["active-bg-color"].value[1]+","+y["active-bg-color"].value[2]+","+y["active-bg-opacity"].value+")",j.beginPath(),j.arc(B.x,B.y,y["active-bg-size"].pfValue/x,0,2*Math.PI),j.fill()}var F=s.lastRedrawTime;if(s.showFps&&F){F=Math.round(F);var q=Math.round(1e3/F);j.setTransform(1,0,0,1,0,0),j.fillStyle="rgba(255, 0, 0, 0.75)",j.strokeStyle="rgba(255, 0, 0, 0.75)",j.lineWidth=1,j.fillText("1 frame = "+F+" ms = "+q+" fps",0,20),j.strokeRect(0,30,250,20),j.fillRect(0,30,250*Math.min(q/60,1),20)}n||(d[s.SELECT_BOX]=!1)}if(p&&1!==h){var V=u.contexts[s.NODE],U=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_NODE],H=u.contexts[s.DRAG],G=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_DRAG],W=function(e,t,n){e.setTransform(1,0,0,1,0,0),n||!b?e.clearRect(0,0,s.canvasWidth,s.canvasHeight):C(e,0,0,s.canvasWidth,s.canvasHeight);var 
r=h;e.drawImage(t,0,0,s.canvasWidth*r,s.canvasHeight*r,0,0,s.canvasWidth,s.canvasHeight)};(d[s.NODE]||M[s.NODE])&&(W(V,U,M[s.NODE]),d[s.NODE]=!1),(d[s.DRAG]||M[s.DRAG])&&(W(H,G,M[s.DRAG]),d[s.DRAG]=!1)}s.prevViewport=E,s.clearingMotionBlur&&(s.clearingMotionBlur=!1,s.motionBlurCleared=!0,s.motionBlur=!0),p&&(s.motionBlurTimeout=setTimeout((function(){s.motionBlurTimeout=null,s.clearedForMotionBlur[s.NODE]=!1,s.clearedForMotionBlur[s.DRAG]=!1,s.motionBlur=!1,s.clearingMotionBlur=!f,s.mbFrames=0,d[s.NODE]=!0,d[s.DRAG]=!0,s.redraw()}),100)),t||c.emit("render")},e.exports=r},function(e,t,n){"use strict";for(var r=n(2),i={drawPolygonPath:function(e,t,n,r,i,o){var a=r/2,s=i/2;e.beginPath&&e.beginPath(),e.moveTo(t+a*o[0],n+s*o[1]);for(var l=1;l0&&a>0){p.clearRect(0,0,o,a),p.globalCompositeOperation="source-over";var h=this.getCachedZSortedEles();if(e.full)p.translate(-n.x1*c,-n.y1*c),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(n.x1*c,n.y1*c);else{var g=t.pan(),m={x:g.x*c,y:g.y*c};c*=t.zoom(),p.translate(m.x,m.y),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(-m.x,-m.y)}e.bg&&(p.globalCompositeOperation="destination-over",p.fillStyle=e.bg,p.rect(0,0,o,a),p.fill())}return f},i.png=function(e){return a(e,this.bufferCanvasImage(e),"image/png")},i.jpg=function(e){return a(e,this.bufferCanvasImage(e),"image/jpeg")},e.exports=i},function(e,t,n){"use strict";var r={nodeShapeImpl:function(e,t,n,r,i,o,a){switch(e){case"ellipse":return this.drawEllipsePath(t,n,r,i,o);case"polygon":return this.drawPolygonPath(t,n,r,i,o,a);case"roundrectangle":return this.drawRoundRectanglePath(t,n,r,i,o);case"cutrectangle":return this.drawCutRectanglePath(t,n,r,i,o);case"bottomroundrectangle":return this.drawBottomRoundRectanglePath(t,n,r,i,o);case"barrel":return this.drawBarrelPath(t,n,r,i,o)}}};e.exports=r},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(18),a=function e(){if(!(this instanceof e))return new 
e;this.length=0},s=a.prototype;s.instanceString=function(){return"stylesheet"},s.selector=function(e){return this[this.length++]={selector:e,properties:[]},this},s.css=function(e,t){var n=this.length-1;if(r.string(e))this[n].properties.push({name:e,value:t});else if(r.plainObject(e))for(var a=e,s=0;s=0&&(e._idleTimeoutId=setTimeout((function(){e._onTimeout&&e._onTimeout()}),t))},n(239),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,n(35))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,i,o,a,s,l=1,c={},u=!1,d=e.document,f=Object.getPrototypeOf&&Object.getPrototypeOf(e);f=f&&f.setTimeout?f:e,"[object process]"==={}.toString.call(e.process)?r=function(e){t.nextTick((function(){h(e)}))}:!function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?e.MessageChannel?((o=new MessageChannel).port1.onmessage=function(e){h(e.data)},r=function(e){o.port2.postMessage(e)}):d&&"onreadystatechange"in d.createElement("script")?(i=d.documentElement,r=function(e){var t=d.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,i.removeChild(t),t=null},i.appendChild(t)}):r=function(e){setTimeout(h,0,e)}:(a="setImmediate$"+Math.random()+"$",s=function(t){t.source===e&&"string"==typeof t.data&&0===t.data.indexOf(a)&&h(+t.data.slice(a.length))},e.addEventListener?e.addEventListener("message",s,!1):e.attachEvent("onmessage",s),r=function(t){e.postMessage(a+t,"*")}),f.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n1)for(var n=1;n=t||n<0||m&&e-c>=o}function w(){var e=p();if(x(e))return k(e);s=setTimeout(w,function(e){var n=t-(e-l);return m?f(n,o-(e-c)):n}(e))}function 
k(e){return s=void 0,v&&r?b(e):(r=i=void 0,a)}function A(){var e=p(),n=x(e);if(r=arguments,i=this,l=e,n){if(void 0===s)return y(l);if(m)return s=setTimeout(w,t),b(l)}return void 0===s&&(s=setTimeout(w,t)),a}return t=g(t)||0,h(n)&&(u=!!n.leading,o=(m="maxWait"in n)?d(g(n.maxWait)||0,t):o,v="trailing"in n?!!n.trailing:v),A.cancel=function(){void 0!==s&&clearTimeout(s),c=0,r=l=i=s=void 0},A.flush=function(){return void 0===s?a:k(p())},A}}).call(this,n(35))},function(e,t,n){e.exports=n(243)},function(e,t,n){var r,i,o;(function(){var n,a,s,l,c,u,d,f,p,h,g,m,v,b,y;s=Math.floor,h=Math.min,a=function(e,t){return et?1:0},p=function(e,t,n,r,i){var o;if(null==n&&(n=0),null==i&&(i=a),n<0)throw new Error("lo must be non-negative");for(null==r&&(r=e.length);nn;0<=n?t++:t--)c.push(t);return c}.apply(this).reverse()).length;rg;0<=g?++u:--u)m.push(c(e,n));return m},b=function(e,t,n,r){var i,o,s;for(null==r&&(r=a),i=e[n];n>t&&r(i,o=e[s=n-1>>1])<0;)e[n]=o,n=s;return e[n]=i},y=function(e,t,n){var r,i,o,s,l;for(null==n&&(n=a),i=e.length,l=t,o=e[t],r=2*t+1;r'+e.content+"":s+=">"+e.content+"";var l=t(s);return l.data("selector",e.selector),l.data("on-click-function",e.onClickFunction),l.data("show",void 0===e.show||e.show),l}function y(){var e;l("active")&&(e=s.children(),t(e).each((function(){x(t(this))})),i.off("tapstart",n),s.remove(),c(s=void 0,void 0),c("active",!1),c("anyVisibleChild",!1))}function x(e){var n="string"==typeof e?t("#"+e):e,r=n.data("cy-context-menus-cxtfcn"),o=n.data("selector"),a=n.data("call-on-click-function"),s=n.data("cy-context-menus-cxtcorefcn");r&&i.off("cxttap",o,r),s&&i.off("cxttap",s),a&&n.off("click",a),n.remove()}"get"!==e&&(c("options",a=function(e,t){var n={};for(var r in e)n[r]=e[r];for(var r in t)n[r]=t[r];return n}(r,e)),l("active")&&y(),c("active",!0),o=u(a.contextMenuClasses),(s=t("
")).addClass("cy-context-menus-cxt-menu"),c("cxtMenu",s),t("body").append(s),s=s,g(a.menuItems),i.on("tapstart",n=function(){f(s),c("cxtMenuPosition",void 0),c("currentCyEvent",void 0)}),t(".cy-context-menus-cxt-menu").contextmenu((function(){return!1})));return function(e){return{isActive:function(){return l("active")},appendMenuItem:function(t){return m(t),e},appendMenuItems:function(t){return g(t),e},removeMenuItem:function(t){return x(t),e},setTrailingDivider:function(n,r){return function(e,n){var r=t("#"+e);n?r.addClass("cy-context-menus-divider"):r.removeClass("cy-context-menus-divider")}(n,r),e},insertBeforeMenuItem:function(t,n){return v(t,n),e},moveBeforeOtherMenuItem:function(n,r){return function(e,n){if(e!==n){var r=t("#"+e).detach(),i=t("#"+n);r.insertBefore(i)}}(n,r),e},disableMenuItem:function(n){return t("#"+n).attr("disabled",!0),e},enableMenuItem:function(n){return t("#"+n).attr("disabled",!1),e},hideMenuItem:function(n){return t("#"+n).data("show",!1),f(t("#"+n)),e},showMenuItem:function(n){return t("#"+n).data("show",!0),d(t("#"+n)),e},destroy:function(){return y(),e}}}(this)}))}};e.exports&&(e.exports=o),void 0===(r=function(){return o}.call(t,n,t,e))||(e.exports=r),"undefined"!=typeof cytoscape&&i&&o(cytoscape,i)}()},function(e,t,n){var r;r=function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,function(t){return 
e[t]}.bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=0)}([function(e,t,n){var r=n(1),i=function(e){e&&e("layout","dagre",r)};"undefined"!=typeof cytoscape&&i(cytoscape),e.exports=i},function(e,t,n){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var i=n(2),o=n(3),a=n(4);function s(e){this.options=o({},i,e)}s.prototype.run=function(){var e=this.options,t=e.cy,n=e.eles,i=function(e,t){return"function"==typeof t?t.apply(e,[e]):t},o=e.boundingBox||{x1:0,y1:0,w:t.width(),h:t.height()};void 0===o.x2&&(o.x2=o.x1+o.w),void 0===o.w&&(o.w=o.x2-o.x1),void 0===o.y2&&(o.y2=o.y1+o.h),void 0===o.h&&(o.h=o.y2-o.y1);var s=new a.graphlib.Graph({multigraph:!0,compound:!0}),l={},c=function(e,t){null!=t&&(l[e]=t)};c("nodesep",e.nodeSep),c("edgesep",e.edgeSep),c("ranksep",e.rankSep),c("rankdir",e.rankDir),c("ranker",e.ranker),s.setGraph(l),s.setDefaultEdgeLabel((function(){return{}})),s.setDefaultNodeLabel((function(){return{}}));for(var u=n.nodes(),d=0;d1?t-1:0),r=1;r-1}},function(e,t,n){var r=n(75);e.exports=function(e,t){var n=this.__data__,i=r(n,e);return i<0?(++this.size,n.push([e,t])):n[i][1]=t,this}},function(e,t,n){var r=n(74);e.exports=function(){this.__data__=new r,this.size=0}},function(e,t){e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},function(e,t){e.exports=function(e){return this.__data__.get(e)}},function(e,t){e.exports=function(e){return this.__data__.has(e)}},function(e,t,n){var r=n(74),i=n(117),o=n(118);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!i||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new o(a)}return 
n.set(e,t),this.size=n.size,this}},function(e,t,n){var r=n(64),i=n(262),o=n(23),a=n(151),s=/^\[object .+?Constructor\]$/,l=Function.prototype,c=Object.prototype,u=l.toString,d=c.hasOwnProperty,f=RegExp("^"+u.call(d).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");e.exports=function(e){return!(!o(e)||i(e))&&(r(e)?f:s).test(a(e))}},function(e,t,n){var r=n(58),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,s=r?r.toStringTag:void 0;e.exports=function(e){var t=o.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(e){}var i=a.call(e);return r&&(t?e[s]=n:delete e[s]),i}},function(e,t){var n=Object.prototype.toString;e.exports=function(e){return n.call(e)}},function(e,t,n){var r,i=n(263),o=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";e.exports=function(e){return!!o&&o in e}},function(e,t,n){var r=n(29)["__core-js_shared__"];e.exports=r},function(e,t){e.exports=function(e,t){return null==e?void 0:e[t]}},function(e,t,n){var r=n(266),i=n(74),o=n(117);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(o||i),string:new r}}},function(e,t,n){var r=n(267),i=n(268),o=n(269),a=n(270),s=n(271);function l(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t0){if(++t>=800)return arguments[0]}else t=0;return e.apply(void 0,arguments)}}},function(e,t,n){var r=n(173),i=n(340),o=n(344),a=n(174),s=n(345),l=n(129);e.exports=function(e,t,n){var c=-1,u=i,d=e.length,f=!0,p=[],h=p;if(n)f=!1,u=o;else if(d>=200){var g=t?null:s(e);if(g)return l(g);f=!1,u=a,h=new r}else h=t?[]:p;e:for(;++c-1}},function(e,t,n){var r=n(188),i=n(342),o=n(343);e.exports=function(e,t,n){return t==t?o(e,t,n):r(e,i,n)}},function(e,t){e.exports=function(e){return e!=e}},function(e,t){e.exports=function(e,t,n){for(var r=n-1,i=e.length;++r1||1===t.length&&e.hasEdge(t[0],t[0])}))}},function(e,t,n){var r=n(22);e.exports=function(e,t,n){return function(e,t,n){var r={},i=e.nodes();return 
i.forEach((function(e){r[e]={},r[e][e]={distance:0},i.forEach((function(t){e!==t&&(r[e][t]={distance:Number.POSITIVE_INFINITY})})),n(e).forEach((function(n){var i=n.v===e?n.w:n.v,o=t(n);r[e][i]={distance:o,predecessor:e}}))})),i.forEach((function(e){var t=r[e];i.forEach((function(n){var o=r[n];i.forEach((function(n){var r=o[e],i=t[n],a=o[n],s=r.distance+i.distance;s0;){if(n=l.removeMin(),r.has(s,n))a.setEdge(n,s[n]);else{if(u)throw new Error("Input graph is not connected: "+e);u=!0}e.nodeEdges(n).forEach(c)}return a}},function(e,t,n){"use strict";var r=n(11),i=n(399),o=n(402),a=n(403),s=n(20).normalizeRanks,l=n(405),c=n(20).removeEmptyRanks,u=n(406),d=n(407),f=n(408),p=n(409),h=n(418),g=n(20),m=n(28).Graph;e.exports=function(e,t){var n=t&&t.debugTiming?g.time:g.notime;n("layout",(function(){var t=n(" buildLayoutGraph",(function(){return function(e){var t=new m({multigraph:!0,compound:!0}),n=$(e.graph());return t.setGraph(r.merge({},b,S(n,v),r.pick(n,y))),r.forEach(e.nodes(),(function(n){var i=$(e.node(n));t.setNode(n,r.defaults(S(i,x),w)),t.setParent(n,e.parent(n))})),r.forEach(e.edges(),(function(n){var i=$(e.edge(n));t.setEdge(n,r.merge({},A,S(i,k),r.pick(i,E)))})),t}(e)}));n(" runLayout",(function(){!function(e,t){t(" makeSpaceForEdgeLabels",(function(){!function(e){var t=e.graph();t.ranksep/=2,r.forEach(e.edges(),(function(n){var r=e.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===t.rankdir||"BT"===t.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(e)})),t(" removeSelfEdges",(function(){!function(e){r.forEach(e.edges(),(function(t){if(t.v===t.w){var n=e.node(t.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:t,label:e.edge(t)}),e.removeEdge(t)}}))}(e)})),t(" acyclic",(function(){i.run(e)})),t(" nestingGraph.run",(function(){u.run(e)})),t(" rank",(function(){a(g.asNonCompoundGraph(e))})),t(" injectEdgeLabelProxies",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(n.width&&n.height){var 
r=e.node(t.v),i={rank:(e.node(t.w).rank-r.rank)/2+r.rank,e:t};g.addDummyNode(e,"edge-proxy",i,"_ep")}}))}(e)})),t(" removeEmptyRanks",(function(){c(e)})),t(" nestingGraph.cleanup",(function(){u.cleanup(e)})),t(" normalizeRanks",(function(){s(e)})),t(" assignRankMinMax",(function(){!function(e){var t=0;r.forEach(e.nodes(),(function(n){var i=e.node(n);i.borderTop&&(i.minRank=e.node(i.borderTop).rank,i.maxRank=e.node(i.borderBottom).rank,t=r.max(t,i.maxRank))})),e.graph().maxRank=t}(e)})),t(" removeEdgeLabelProxies",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);"edge-proxy"===n.dummy&&(e.edge(n.e).labelRank=n.rank,e.removeNode(t))}))}(e)})),t(" normalize.run",(function(){o.run(e)})),t(" parentDummyChains",(function(){l(e)})),t(" addBorderSegments",(function(){d(e)})),t(" order",(function(){p(e)})),t(" insertSelfEdges",(function(){!function(e){var t=g.buildLayerMatrix(e);r.forEach(t,(function(t){var n=0;r.forEach(t,(function(t,i){var o=e.node(t);o.order=i+n,r.forEach(o.selfEdges,(function(t){g.addDummyNode(e,"selfedge",{width:t.label.width,height:t.label.height,rank:o.rank,order:i+ ++n,e:t.e,label:t.label},"_se")})),delete o.selfEdges}))}))}(e)})),t(" adjustCoordinateSystem",(function(){f.adjust(e)})),t(" position",(function(){h(e)})),t(" positionSelfEdges",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);if("selfedge"===n.dummy){var r=e.node(n.e.v),i=r.x+r.width/2,o=r.y,a=n.x-i,s=r.height/2;e.setEdge(n.e,n.label),e.removeNode(t),n.label.points=[{x:i+2*a/3,y:o-s},{x:i+5*a/6,y:o-s},{x:i+a,y:o},{x:i+5*a/6,y:o+s},{x:i+2*a/3,y:o+s}],n.label.x=n.x,n.label.y=n.y}}))}(e)})),t(" removeBorderNodes",(function(){!function(e){r.forEach(e.nodes(),(function(t){if(e.children(t).length){var 
n=e.node(t),i=e.node(n.borderTop),o=e.node(n.borderBottom),a=e.node(r.last(n.borderLeft)),s=e.node(r.last(n.borderRight));n.width=Math.abs(s.x-a.x),n.height=Math.abs(o.y-i.y),n.x=a.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(e.nodes(),(function(t){"border"===e.node(t).dummy&&e.removeNode(t)}))}(e)})),t(" normalize.undo",(function(){o.undo(e)})),t(" fixupEdgeLabelCoords",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(e)})),t(" undoCoordinateSystem",(function(){f.undo(e)})),t(" translateGraph",(function(){!function(e){var t=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,o=0,a=e.graph(),s=a.marginx||0,l=a.marginy||0;function c(e){var r=e.x,a=e.y,s=e.width,l=e.height;t=Math.min(t,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,a-l/2),o=Math.max(o,a+l/2)}r.forEach(e.nodes(),(function(t){c(e.node(t))})),r.forEach(e.edges(),(function(t){var n=e.edge(t);r.has(n,"x")&&c(n)})),t-=s,i-=l,r.forEach(e.nodes(),(function(n){var r=e.node(n);r.x-=t,r.y-=i})),r.forEach(e.edges(),(function(n){var o=e.edge(n);r.forEach(o.points,(function(e){e.x-=t,e.y-=i})),r.has(o,"x")&&(o.x-=t),r.has(o,"y")&&(o.y-=i)})),a.width=n-t+s,a.height=o-i+l}(e)})),t(" assignNodeIntersects",(function(){!function(e){r.forEach(e.edges(),(function(t){var n,r,i=e.edge(t),o=e.node(t.v),a=e.node(t.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=a,r=o),i.points.unshift(g.intersectRect(o,n)),i.points.push(g.intersectRect(a,r))}))}(e)})),t(" reversePoints",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);n.reversed&&n.points.reverse()}))}(e)})),t(" acyclic.undo",(function(){i.undo(e)}))}(t,n)})),n(" updateInputGraph",(function(){!function(e,t){r.forEach(e.nodes(),(function(n){var 
r=e.node(n),i=t.node(n);r&&(r.x=i.x,r.y=i.y,t.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(e.edges(),(function(n){var i=e.edge(n),o=t.edge(n);i.points=o.points,r.has(o,"x")&&(i.x=o.x,i.y=o.y)})),e.graph().width=t.graph().width,e.graph().height=t.graph().height}(e,t)}))}))};var v=["nodesep","edgesep","ranksep","marginx","marginy"],b={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},y=["acyclicer","ranker","rankdir","align"],x=["width","height"],w={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],A={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function S(e,t){return r.mapValues(r.pick(e,t),Number)}function $(e){var t={};return r.forEach(e,(function(e,n){t[n.toLowerCase()]=e})),t}},function(e,t,n){var r=n(149);e.exports=function(e){return r(e,5)}},function(e,t,n){var r=n(89),i=n(57),o=n(90),a=n(48),s=Object.prototype,l=s.hasOwnProperty,c=r((function(e,t){e=Object(e);var n=-1,r=t.length,c=r>2?t[2]:void 0;for(c&&o(t[0],t[1],c)&&(r=1);++n-1?s[l?t[c]:c]:void 0}}},function(e,t,n){var r=n(188),i=n(37),o=n(365),a=Math.max;e.exports=function(e,t,n){var s=null==e?0:e.length;if(!s)return-1;var l=null==n?0:o(n);return l<0&&(l=a(s+l,0)),r(e,i(t,3),l)}},function(e,t,n){var r=n(196);e.exports=function(e){var t=r(e),n=t%1;return t==t?n?t-n:t:0}},function(e,t,n){var r=n(367),i=n(23),o=n(61),a=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,l=/^0o[0-7]+$/i,c=parseInt;e.exports=function(e){if("number"==typeof e)return e;if(o(e))return NaN;if(i(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=i(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=r(e);var n=s.test(e);return n||l.test(e)?c(e.slice(2),n?2:8):a.test(e)?NaN:+e}},function(e,t,n){var r=n(368),i=/^\s+/;e.exports=function(e){return e?e.slice(0,r(e)+1).replace(i,""):e}},function(e,t){var n=/\s/;e.exports=function(e){for(var t=e.length;t--&&n.test(e.charAt(t)););return t}},function(e,t,n){var r=n(128),i=n(169),o=n(48);e.exports=function(e,t){return 
null==e?e:r(e,i(t),o)}},function(e,t){e.exports=function(e){var t=null==e?0:e.length;return t?e[t-1]:void 0}},function(e,t,n){var r=n(79),i=n(127),o=n(37);e.exports=function(e,t){var n={};return t=o(t,3),i(e,(function(e,i,o){r(n,i,t(e,i,o))})),n}},function(e,t,n){var r=n(132),i=n(373),o=n(49);e.exports=function(e){return e&&e.length?r(e,o,i):void 0}},function(e,t){e.exports=function(e,t){return e>t}},function(e,t,n){var r=n(375),i=n(379)((function(e,t,n){r(e,t,n)}));e.exports=i},function(e,t,n){var r=n(73),i=n(198),o=n(128),a=n(376),s=n(23),l=n(48),c=n(199);e.exports=function e(t,n,u,d,f){t!==n&&o(n,(function(o,l){if(f||(f=new r),s(o))a(t,n,l,u,e,d,f);else{var p=d?d(c(t,l),o,l+"",t,n,f):void 0;void 0===p&&(p=o),i(t,l,p)}}),l)}},function(e,t,n){var r=n(198),i=n(155),o=n(164),a=n(156),s=n(165),l=n(66),c=n(13),u=n(189),d=n(59),f=n(64),p=n(23),h=n(377),g=n(67),m=n(199),v=n(378);e.exports=function(e,t,n,b,y,x,w){var k=m(e,n),A=m(t,n),E=w.get(A);if(E)r(e,n,E);else{var S=x?x(k,A,n+"",e,t,w):void 0,$=void 0===S;if($){var C=c(A),_=!C&&d(A),O=!C&&!_&&g(A);S=A,C||_||O?c(k)?S=k:u(k)?S=a(k):_?($=!1,S=i(A,!0)):O?($=!1,S=o(A,!0)):S=[]:h(A)||l(A)?(S=k,l(k)?S=v(k):p(k)&&!f(k)||(S=s(A))):$=!1}$&&(w.set(A,S),y(S,A,b,x,w),w.delete(A)),r(e,n,S)}}},function(e,t,n){var r=n(47),i=n(84),o=n(32),a=Function.prototype,s=Object.prototype,l=a.toString,c=s.hasOwnProperty,u=l.call(Object);e.exports=function(e){if(!o(e)||"[object Object]"!=r(e))return!1;var t=i(e);if(null===t)return!0;var n=c.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&l.call(n)==u}},function(e,t,n){var r=n(65),i=n(48);e.exports=function(e){return r(e,i(e))}},function(e,t,n){var r=n(89),i=n(90);e.exports=function(e){return r((function(t,n){var r=-1,o=n.length,a=o>1?n[o-1]:void 0,s=o>2?n[2]:void 0;for(a=e.length>3&&"function"==typeof a?(o--,a):void 0,s&&i(n[0],n[1],s)&&(a=o<3?void 
0:a,o=1),t=Object(t);++r1&&a(e,t[0],t[1])?t=[]:n>2&&a(t[0],t[1],t[2])&&(t=[t[0]]),i(e,r(t,1),[])}));e.exports=s},function(e,t,n){var r=n(88),i=n(86),o=n(37),a=n(184),s=n(393),l=n(82),c=n(394),u=n(49),d=n(13);e.exports=function(e,t,n){t=t.length?r(t,(function(e){return d(e)?function(t){return i(t,1===e.length?e[0]:e)}:e})):[u];var f=-1;t=r(t,l(o));var p=a(e,(function(e,n,i){return{criteria:r(t,(function(t){return t(e)})),index:++f,value:e}}));return s(p,(function(e,t){return c(e,t,n)}))}},function(e,t){e.exports=function(e,t){var n=e.length;for(e.sort(t);n--;)e[n]=e[n].value;return e}},function(e,t,n){var r=n(395);e.exports=function(e,t,n){for(var i=-1,o=e.criteria,a=t.criteria,s=o.length,l=n.length;++i=l?c:c*("desc"==n[i]?-1:1)}return e.index-t.index}},function(e,t,n){var r=n(61);e.exports=function(e,t){if(e!==t){var n=void 0!==e,i=null===e,o=e==e,a=r(e),s=void 0!==t,l=null===t,c=t==t,u=r(t);if(!l&&!u&&!a&&e>t||a&&s&&c&&!l&&!u||i&&s&&c||!n&&c||!o)return 1;if(!i&&!a&&!u&&e0;--l)if(r=t[l].dequeue()){i=i.concat(s(e,t,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(c,(function(t){return e.outEdges(t.v,t.w)})),!0)};var a=r.constant(1);function s(e,t,n,i,o){var a=o?[]:void 0;return r.forEach(e.inEdges(i.v),(function(r){var i=e.edge(r),s=e.node(r.v);o&&a.push({v:r.v,w:r.w}),s.out-=i,l(t,n,s)})),r.forEach(e.outEdges(i.v),(function(r){var i=e.edge(r),o=r.w,a=e.node(o);a.in-=i,l(t,n,a)})),e.removeNode(i.v),a}function l(e,t,n){n.out?n.in?e[n.out-n.in+t].enqueue(n):e[e.length-1].enqueue(n):e[0].enqueue(n)}},function(e,t){function n(){var e={};e._next=e._prev=e,this._sentinel=e}function r(e){e._prev._next=e._next,e._next._prev=e._prev,delete e._next,delete e._prev}function i(e,t){if("_next"!==e&&"_prev"!==e)return t}e.exports=n,n.prototype.dequeue=function(){var e=this._sentinel,t=e._prev;if(t!==e)return r(t),t},n.prototype.enqueue=function(e){var 
t=this._sentinel;e._prev&&e._next&&r(e),e._next=t._next,t._next._prev=e,t._next=e,e._prev=t},n.prototype.toString=function(){for(var e=[],t=this._sentinel,n=t._prev;n!==t;)e.push(JSON.stringify(n,i)),n=n._prev;return"["+e.join(", ")+"]"}},function(e,t,n){"use strict";var r=n(11),i=n(20);e.exports={run:function(e){e.graph().dummyChains=[],r.forEach(e.edges(),(function(t){!function(e,t){var n,r,o,a=t.v,s=e.node(a).rank,l=t.w,c=e.node(l).rank,u=t.name,d=e.edge(t),f=d.labelRank;if(c===s+1)return;for(e.removeEdge(t),o=0,++s;sl.lim&&(c=l,u=!0);var d=r.filter(t.edges(),(function(t){return u===b(e,e.node(t.v),c)&&u!==b(e,e.node(t.w),c)}));return r.minBy(d,(function(e){return o(t,e)}))}function v(e,t,n,i){var o=n.v,a=n.w;e.removeEdge(o,a),e.setEdge(i.v,i.w,{}),p(e),d(e,t),function(e,t){var n=r.find(e.nodes(),(function(e){return!t.node(e).parent})),i=s(e,n);i=i.slice(1),r.forEach(i,(function(n){var r=e.node(n).parent,i=t.edge(n,r),o=!1;i||(i=t.edge(r,n),o=!0),t.node(n).rank=t.node(r).rank+(o?i.minlen:-i.minlen)}))}(e,t)}function b(e,t,n){return n.low<=t.lim&&t.lim<=n.lim}e.exports=u,u.initLowLimValues=p,u.initCutValues=d,u.calcCutValue=f,u.leaveEdge=g,u.enterEdge=m,u.exchangeEdges=v},function(e,t,n){var r=n(11);e.exports=function(e){var t=function(e){var t={},n=0;function i(o){var a=n;r.forEach(e.children(o),i),t[o]={low:a,lim:n++}}return r.forEach(e.children(),i),t}(e);r.forEach(e.graph().dummyChains,(function(n){for(var r=e.node(n),i=r.edgeObj,o=function(e,t,n,r){var i,o,a=[],s=[],l=Math.min(t[n].low,t[r].low),c=Math.max(t[n].lim,t[r].lim);i=n;do{i=e.parent(i),a.push(i)}while(i&&(t[i].low>l||c>t[i].lim));o=i,i=r;for(;(i=e.parent(i))!==o;)s.push(i);return{path:a.concat(s.reverse()),lca:o}}(e,t,i.v,i.w),a=o.path,s=o.lca,l=0,c=a[l],u=!0;n!==i.w;){if(r=e.node(n),u){for(;(c=a[l])!==s&&e.node(c).maxRank=2),s=u.buildLayerMatrix(e);var m=o(e,s);m0;)t%2&&(n+=l[t+1]),l[t=t-1>>1]+=e.weight;c+=e.weight*n}))),c}e.exports=function(e,t){for(var n=0,r=1;r=e.barycenter)&&function(e,t){var 
n=0,r=0;e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.vs=t.vs.concat(e.vs),e.barycenter=n/r,e.weight=r,e.i=Math.min(t.i,e.i),t.merged=!0}(e,t)}}function i(t){return function(n){n.in.push(t),0==--n.indegree&&e.push(n)}}for(;e.length;){var o=e.pop();t.push(o),r.forEach(o.in.reverse(),n(o)),r.forEach(o.out,i(o))}return r.map(r.filter(t,(function(e){return!e.merged})),(function(e){return r.pick(e,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(e){return!e.indegree})))}},function(e,t,n){var r=n(11),i=n(20);function o(e,t,n){for(var i;t.length&&(i=r.last(t)).i<=n;)t.pop(),e.push(i.vs),n++;return n}e.exports=function(e,t){var n=i.partition(e,(function(e){return r.has(e,"barycenter")})),a=n.lhs,s=r.sortBy(n.rhs,(function(e){return-e.i})),l=[],c=0,u=0,d=0;a.sort((f=!!t,function(e,t){return e.barycentert.barycenter?1:f?t.i-e.i:e.i-t.i})),d=o(l,s,d),r.forEach(a,(function(e){d+=e.vs.length,l.push(e.vs),c+=e.barycenter*e.weight,u+=e.weight,d=o(l,s,d)}));var f;var p={vs:r.flatten(l,!0)};u&&(p.barycenter=c/u,p.weight=u);return p}},function(e,t,n){var r=n(11),i=n(28).Graph;e.exports=function(e,t,n){var o=function(e){var t;for(;e.hasNode(t=r.uniqueId("_root")););return t}(e),a=new i({compound:!0}).setGraph({root:o}).setDefaultNodeLabel((function(t){return e.node(t)}));return r.forEach(e.nodes(),(function(i){var s=e.node(i),l=e.parent(i);(s.rank===t||s.minRank<=t&&t<=s.maxRank)&&(a.setNode(i),a.setParent(i,l||o),r.forEach(e[n](i),(function(t){var n=t.v===i?t.w:t.v,o=a.edge(n,i),s=r.isUndefined(o)?0:o.weight;a.setEdge(n,i,{weight:e.edge(t).weight+s})})),r.has(s,"minRank")&&a.setNode(i,{borderLeft:s.borderLeft[t],borderRight:s.borderRight[t]}))})),a}},function(e,t,n){var r=n(11);e.exports=function(e,t,n){var i,o={};r.forEach(n,(function(n){for(var r,a,s=e.parent(n);s;){if((r=e.parent(s))?(a=o[r],o[r]=s):(a=i,i=s),a&&a!==s)return void t.setEdge(a,s);s=r}}))}},function(e,t,n){"use strict";var 
r=n(11),i=n(20),o=n(419).positionX;e.exports=function(e){(function(e){var t=i.buildLayerMatrix(e),n=e.graph().ranksep,o=0;r.forEach(t,(function(t){var i=r.max(r.map(t,(function(t){return e.node(t).height})));r.forEach(t,(function(t){e.node(t).y=o+i/2})),o+=i+n}))})(e=i.asNonCompoundGraph(e)),r.forEach(o(e),(function(t,n){e.node(n).x=t}))}},function(e,t,n){"use strict";var r=n(11),i=n(28).Graph,o=n(20);function a(e,t){var n={};return r.reduce(t,(function(t,i){var o=0,a=0,s=t.length,c=r.last(i);return r.forEach(i,(function(t,u){var d=function(e,t){if(e.node(t).dummy)return r.find(e.predecessors(t),(function(t){return e.node(t).dummy}))}(e,t),f=d?e.node(d).order:s;(d||t===c)&&(r.forEach(i.slice(a,u+1),(function(t){r.forEach(e.predecessors(t),(function(r){var i=e.node(r),a=i.order;!(as)&&l(n,t,c)}))}))}return r.reduce(t,(function(t,n){var o,a=-1,s=0;return r.forEach(n,(function(r,l){if("border"===e.node(r).dummy){var c=e.predecessors(r);c.length&&(o=e.node(c[0]).order,i(n,s,l,a,o),s=l,a=o)}i(n,s,n.length,o,t.length)})),n})),n}function l(e,t,n){if(t>n){var r=t;t=n,n=r}var i=e[t];i||(e[t]=i={}),i[n]=!0}function c(e,t,n){if(t>n){var i=t;t=n,n=i}return r.has(e[t],n)}function u(e,t,n,i){var o={},a={},s={};return r.forEach(t,(function(e){r.forEach(e,(function(e,t){o[e]=e,a[e]=e,s[e]=t}))})),r.forEach(t,(function(e){var t=-1;r.forEach(e,(function(e){var l=i(e);if(l.length)for(var u=((l=r.sortBy(l,(function(e){return s[e]}))).length-1)/2,d=Math.floor(u),f=Math.ceil(u);d<=f;++d){var p=l[d];a[e]===e&&t\n.menu ul ul {\n margin-left: 12px;\n}\n\n\n\n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(425),i=n(21);n(426),angular.module("dbt").directive("modelTreeLine",["$state",function(e){return{scope:{item:"=",depth:"<",resourceType:"@"},replace:!0,templateUrl:r,link:function(t,n,r,o){t.depth||(t.depth=0);var a=t.item.name;if(a){var 
s=i.last(a,15).join(""),l=i.initial(a,s.length).join("");t.name={name:a,start:l,end:s},t.name_start=l,t.name_end=s,t.onFolderClick=function(n){if(n.active=!n.active,"source"==t.resourceType){var r=n.name;e.go("dbt.source_list",{source:r})}else 0===t.depth&&"database"!==n.type&&e.go("dbt.project_overview",{project_name:n.name})},t.activate=function(n){t.$emit("clearSearch"),n.active=!0;var r="dbt."+n.node.resource_type;e.go(r,{unique_id:n.unique_id})},t.getIcon=function(e,t){return"#"+{header:{on:"icn-down",off:"icn-right"},database:{on:"icn-db-on",off:"icn-db"},schema:{on:"icn-tree-on",off:"icn-tree"},table:{on:"icn-doc-on",off:"icn-doc"},folder:{on:"icn-dir-on",off:"icn-dir"},file:{on:"icn-doc-on",off:"icn-doc"}}[e][t]},t.getClass=function(e){return{active:e.active,"menu-tree":"header"==e.type||"schema"==e.type||"folder"==e.type,"menu-main":"header"==e.type,"menu-node":"file"==e.type||"table"==e.type}}}}}}])},function(e,t){var n="/components/model_tree/model_tree_line.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
  • \n\n \n \n \n \n \n \n {{name.start}}\n {{name.end}}\n \n \n\n \n \n \n \n \n \n {{name.start}}\n {{name.end}}\n \n \n\n
      \n \n
    \n
  • \n')}]),e.exports=n},function(e,t,n){var r=n(427);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n.unselectable{\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n",""])},function(e,t,n){"use strict";const r=n(9),i=n(429);n(31);n(206),r.module("dbt").directive("docsSearch",["$sce","project",function(e,t){return{scope:{query:"=",results:"=",onSelect:"&"},replace:!0,templateUrl:i,link:function(n){n.max_results=20,n.show_all=!1,n.max_results_columns=3,n.limit_columns={},n.checkboxStatus={show_names:!1,show_descriptions:!1,show_columns:!1,show_code:!1,show_tags:!1},n.limit_search=function(e,t,r){return t0&&null!=n.query&&n.query.trim().length>0){let t=e.replace(/\s+/g," "),o=r(i(n.query)[0]),a=t.search(new RegExp(o)),s=a-75<0?0:a-75,l=a+75>t.length?t.length:a+75;return"..."+t.substring(s,l)+"..."}return e},n.highlight=function(t){if(!n.query||!t)return e.trustAsHtml(t);let o="("+i(n.query).map(e=>r(e)).join(")|(")+")";return e.trustAsHtml(t.replace(new RegExp(o,"gi"),'$&'))},n.$watch("query",(function(e,t){0==e.length&&(n.show_all=!1,n.limit_columns={})})),n.columnFilter=function(e){var t=[];let r=i(n.query);for(var o in e)r.every(e=>-1!=o.toLowerCase().indexOf(e))&&t.push(o);return t},n.limitColumns=function(e){return void 0!==n.limit_columns[e]?n.limit_columns[e]:3}}}}])},function(e,t){var n="/components/search/search.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n \n
    \n
    \n

    \n {{ query }}\n {{ results.length }} search results\n

    \n \n \n \n \n \n \n \n \n \n \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n \n
    \n
    \n

    \n \n {{result.model.resource_type}}\n

    \n

    \n
    \n
    \n
    \n \n columns:\n \n \n \n Show {{ columnFilter(result.model.columns).length - max_results_columns }} more\n
    \n
    \n \n \n \n
    \n
    \n \n tags:\n \n \n \n
    \n
    \n Show {{ results.length - max_results }} more\n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(431);n(432);const i=n(21);angular.module("dbt").directive("tableDetails",["$sce","$filter",function(e,t){return{scope:{model:"=",extras:"=",exclude:"<"},templateUrl:r,link:function(e){function n(e,t){if(0==e)return"0 bytes";if(e<1&&(e*=1e6),isNaN(parseFloat(e))||!isFinite(e))return"-";void 0===t&&(t=0);var n=Math.floor(Math.log(e)/Math.log(1024));return(e/Math.pow(1024,Math.floor(n))).toFixed(t)+" "+["bytes","KB","MB","GB","TB","PB"][n]}function r(e,n){return void 0===n&&(n=2),t("number")(100*e,n)+"%"}function o(e,n){return void 0===n&&(n=0),t("number")(e,n)}e.details=[],e.extended=[],e.exclude=e.exclude||[],e.meta=null,e._show_expanded=!1,e.show_expanded=function(t){return void 0!==t&&(e._show_expanded=t),e._show_expanded},e.hasData=function(e){return!(!e||i.isEmpty(e))&&(1!=e.length||0!=e[0].include)},e.$watch("model",(function(t,a){i.property(["metadata","type"])(t);var s,l,c,u=t.hasOwnProperty("sources")&&null!=t.sources[0]?t.sources[0].source_meta:null;if(e.meta=t.meta||u,e.details=function(e){var t,n,r=!e.metadata,o=e.metadata||{};t=e.database?e.database+".":"",n=r?void 0:"source"==e.resource_type?t+e.schema+"."+e.identifier:t+e.schema+"."+e.alias;var a,s=[{name:"Owner",value:o.owner},{name:"Type",value:r?void 0:(a=o.type,"BASE TABLE"==a?{type:"table",name:"table"}:"LATE BINDING VIEW"==a?{type:"view",name:"late binding view"}:{type:a.toLowerCase(),name:a.toLowerCase()}).name},{name:"Package",value:e.package_name},{name:"Language",value:e.language},{name:"Relation",value:n}];return i.filter(s,(function(e){return void 0!==e.value}))}(t),e.extended=(s=t.stats,l={rows:o,row_count:o,num_rows:o,max_varchar:o,pct_used:r,size:n,bytes:n,num_bytes:n},c=i.sortBy(i.values(s),"label"),i.map(c,(function(e){var t=i.clone(e),n=l[e.id];return n&&(t.value=n(e.value),t.label=e.label.replace("Approximate","~"),t.label=e.label.replace("Utilization","Used")),t}))),e.extras){var d=i.filter(e.extras,(function(e){return 
void 0!==e.value&&null!==e.value}));e.details=e.details.concat(d)}e.show_extended=i.where(e.extended,{include:!0}).length>0})),e.queryTag=function(t){e.$emit("query",t)}}}}])},function(e,t){var n="/components/table_details/table_details.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n
    Details
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    {{ k }}
    \n
    {{ v }}
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    Tags
    \n
    \n {{ tag }} \n
    \n
    untagged
    \n
    \n
    \n
    {{ item.name }}
    \n
    {{ item.value }}
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    {{ item.label }}
    \n
    {{ item.value }}
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){var r=n(433);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n\n.details-content {\n table-layout: fixed;\n}\n\n.detail-body {\n white-space: nowrap;\n overflow-x: scroll;\n}\n",""])},function(e,t,n){"use strict";const r=n(435),i=n(21);angular.module("dbt").directive("columnDetails",["project",function(e){return{scope:{model:"="},templateUrl:r,link:function(t){t.has_test=function(e,t){return-1!=i.pluck(e.tests,"short").indexOf(t)},t.has_more_info=function(e){var t=e.tests||[],n=e.description||"",r=e.meta||{};return t.length||n.length||!i.isEmpty(r)},t.toggle_column_expanded=function(e){t.has_more_info(e)&&(e.expanded=!e.expanded)},t.getState=function(e){return"dbt."+e.resource_type},t.get_col_name=function(t){return e.caseColumn(t)},t.get_columns=function(e){var t=i.chain(e.columns).values().sortBy("index").value();return i.each(t,(function(e,t){e.index=t})),t}}}}])},function(e,t){var n="/components/column_details/column_details.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n
    \n
    \n Column information is not available for this seed\n
    \n
    \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    ColumnTypeDescriptionTestsMore?
    \n
    \n {{ get_col_name(column.name) }}\n
    \n
    \n {{ column.type }}

    \n
    \n {{ column.description }}\n \n \n U\n N\n F\n A\n +\n \n \n \n \n \n \n \n \n \n \n
    \n
    \n
    \n
    Details
    \n
    \n
    \n
    \n
    {{ k }}
    \n
    {{ v }}
    \n
    \n
    \n
    \n
    \n\n
    \n
    Description
    \n \n
    \n\n
    \n
    Generic Tests
    \n \n
    \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(437);n(31),n(438);function i(e){return"python"===e?"language-python":"language-sql"}angular.module("dbt").directive("codeBlock",["code","$timeout",function(e,t){return{scope:{versions:"=",default:"<",language:"="},restrict:"E",templateUrl:r,link:function(n,r){n.selected_version=n.default,n.language_class=i(n.language),n.source=null,n.setSelected=function(r){n.selected_version=r,n.source=n.versions[r]||"";const i=n.source.trim();n.highlighted=e.highlight(i,n.language),t((function(){Prism.highlightAll()}))},n.titleCase=function(e){return e.charAt(0).toUpperCase()+e.substring(1)},n.copied=!1,n.copy_to_clipboard=function(){e.copy_to_clipboard(n.source),n.copied=!0,setTimeout((function(){n.$apply((function(){n.copied=!1}))}),1e3)},n.$watch("language",(function(e,t){e&&e!=t&&(n.language_class=i(e))}),!0),n.$watch("versions",(function(e,t){if(e)if(n.default)n.setSelected(n.default);else{var r=Object.keys(n.versions);r.length>0&&n.setSelected(r[0])}}),!0)}}}])},function(e,t){var n="/components/code_block/code_block.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    Code
    \n\n')}]),e.exports=n},function(e,t,n){var r=n(439);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"pre.code {\n border: none !important;\n overflow-y: visible !important;\n overflow-x: scroll !important;\n padding-bottom: 10px;\n}\n\npre.code code {\n font-family: Monaco, monospace !important;\n font-weight: 400 !important;\n}\n\n.line-numbers-rows {\n border: none !important;\n}\n",""])},function(e,t,n){"use strict";const r=n(441);angular.module("dbt").directive("macroArguments",[function(){return{scope:{macro:"="},templateUrl:r,link:function(e){_.each(e.macro.arguments,(function(e){e.expanded=!1}))}}}])},function(e,t){var n="/components/macro_arguments/index.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'\n\n
    \n
    \n
    \n Details are not available for this macro\n
    \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    ArgumentTypeDescriptionMore?
    \n
    \n {{ arg.name }}\n
    \n
    \n {{ arg.type }}

    \n
    \n {{ arg.description }}\n \n \n \n \n \n \n \n \n \n
    \n
    \n
    \n
    Description
    \n \n
    \n
    \n
    \n
    \n
    \n\n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(443);angular.module("dbt").directive("referenceList",["$state",function(e){return{scope:{references:"=",node:"="},restrict:"E",templateUrl:r,link:function(t){t.selected_type=null,t.setType=function(e){t.selected_type=e,t.nodes=t.references[t.selected_type]},t.getNodeUrl=function(t){var n="dbt."+t.resource_type;return e.href(n,{unique_id:t.unique_id,"#":null})},t.mapResourceType=function(e){return"model"==e?"Models":"seed"==e?"Seeds":"test"==e?"Tests":"snapshot"==e?"Snapshots":"analysis"==e?"Analyses":"macro"==e?"Macros":"exposure"==e?"Exposures":"metric"==e?"Metrics":"operation"==e?"Operations":"Nodes"},t.$watch("references",(function(e){e&&_.size(e)>0?(t.selected_type=_.keys(e)[0],t.has_references=!0,t.nodes=t.references[t.selected_type]):t.has_references=!1}))}}}])},function(e,t){var n="/components/references/index.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n
    \n No resources reference this {{ node.resource_type }}\n
    \n
    \n \n
    \n \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){n(445),n(447),n(448),n(449),n(450),n(451),n(452),n(453),n(454),n(455)},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("ModelCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.copied=!1,e.copy_to_clipboard=function(t){r.copy_to_clipboard(t),e.copied=!0,setTimeout((function(){e.$apply((function(){e.copied=!1}))}),1e3)},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n.nav-tabs li.nav-pull-right {\n flex: 1 0 auto;\n text-align: right;\n}\n\ntr.column-row-selected {\n\n}\n\ntd.column-expanded{\n padding: 0px !important;\n}\n\ntd.column-expanded > div {\n padding: 5px 10px;\n margin-left: 20px;\n height: 100%;\n\n border-left: 1px solid #ccc !important;\n}\n",""])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("SourceCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.versions={"Sample SQL":r.generateSourceSQL(e.model)},e.extra_table_fields=[{name:"Loader",value:e.model.loader},{name:"Source",value:e.model.source_name}]}))}])},function(e,t,n){"use strict";const 
r=n(9),i=n(33);n(34),r.module("dbt").controller("SeedCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,o,a,s){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.versions={"Example SQL":r.generateSourceSQL(e.model)}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("SnapshotCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"Compiled SQL is not available for this snapshot"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("TestCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";const 
r=n(9),i=n(21),o=n(33);n(34),r.module("dbt").controller("MacroCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,a,s,l){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.macro={},n.ready((function(t){let n=t.macros[e.model_uid];if(e.macro=n,e.references=o.getMacroReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=o.getMacroParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.macro.is_adapter_macro){var r=t.metadata.adapter_type;e.versions=n.impls,n.impls[r]?e.default_version=r:n.impls.default?e.default_version="default":e.default_version=i.keys(n.impls)[0]}else e.default_version="Source",e.versions={Source:e.macro.macro_sql}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("AnalysisCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,o,a,s){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.default_version="Source",e.versions={Source:"",Compiled:""},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language,e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("ExposureCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.exposure={},n.ready((function(t){let n=t.nodes[e.model_uid];e.exposure=n,e.parents=i.getParents(t,n),e.parentsLength=e.parents.length,e.language=n.language,e.extra_table_fields=[{name:"Maturity",value:e.exposure.maturity},{name:"Owner",value:e.exposure.owner.name},{name:"Owner 
email",value:e.exposure.owner.email},{name:"Exposure name",value:e.exposure.name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("MetricCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.metric={},n.ready((function(t){let n=t.nodes[e.model_uid];e.metric=n,e.parents=i.getParents(t,n),e.parentsLength=e.parents.length,e.versions={Definition:r.generateMetricSQL(e.metric)};const o="expression"===e.metric.type?"Expression metric":"Aggregate metric";e.extra_table_fields=[{name:"Metric Type",value:o},{name:"Metric name",value:e.metric.name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("OperationCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";n(9).module("dbt").controller("GraphCtrl",["$scope","$state","$window","graph","project","selectorService",function(e,t,n,r,i,o){function a(e){return e&&"source"==e.resource_type?"source:"+e.source_name+"."+e.name:e&&"exposure"==e.resource_type?"exposure:"+e.name:e&&"metric"==e.resource_type?"metric:"+e.name:e.name?e.name:"*"}e.graph=r.graph,e.graphService=r,e.graphRendered=function(e){r.setGraphReady(e)},e.$watch((function(){return 
t.params.unique_id}),(function(e,t){e&&e!=t&&i.find_by_id(e,(function(e){e&&("sidebar"==r.orientation?r.showVerticalGraph(a(e),!1):r.showFullGraph(a(e)))})),e||o.clearViewNode()}))}])},function(e,t,n){"use strict";const r=n(9),i=n(21),o=n(31),a=n(458);n(459),n(206),n(467),n(469),n(472),n(476),r.module("dbt").controller("MainController",["$scope","$route","$state","project","graph","selectorService","trackingService","locationService","$transitions",function(e,t,n,r,s,l,c,u,d){function f(t){e.model_uid=t;var n=r.node(t);n&&l.resetSelection(n)}function p(e){e&&setTimeout((function(){var t=o("*[data-nav-unique-id='"+e+"']");t.length&&t[0].scrollIntoView&&t[0].scrollIntoView({behavior:"smooth",block:"center",inline:"center"})}),1)}e.tree={database:{},project:{},sources:{}},e.search={query:"",results:[],is_focused:!1},e.logo=a,e.model_uid=null,e.project={},o("body").bind("keydown",(function(e){"t"==event.key&&"INPUT"!=event.target.tagName&&(console.log("Opening search"),o("#search").focus(),event.preventDefault())})),e.onSearchFocus=function(t,n){e.search.is_focused=n},e.clearSearch=function(){e.search.is_focused=!1,e.search.query="",e.search.results=[],o("#search").blur()},e.$on("clearSearch",(function(){e.clearSearch()})),e.$on("query",(function(t,n){e.search.is_focused=!0,e.search.query=n})),e.onSearchKeypress=function(t){"Escape"==t.key&&(e.clearSearch(),t.preventDefault())},r.getModelTree(n.params.unique_id,(function(t){e.tree.database=t.database,e.tree.project=t.project,e.tree.sources=t.sources,e.tree.exposures=t.exposures,e.tree.metrics=t.metrics,setTimeout((function(){p(e.model_uid)}))})),d.onSuccess({},(function(t,n){var i=t.router.globals.params,o=l.getViewNode(),a=o?o.unique_id:null,s=i.unique_id,u=!0;if(t.from().name==t.to().name&&a==s&&(u=!1),u&&i.unique_id){var d=r.updateSelected(i.unique_id);e.tree.database=d.database,e.tree.project=d.project,e.tree.sources=d.sources,e.search.query="",console.log("updating selected model to: 
",i),f(i.unique_id),setTimeout((function(){p(i.unique_id)}))}u&&c.track_pageview()})),e.$watch("search.query",(function(t){e.search.results=function(t){if(""===e.search.query)return t;let n={name:10,tags:5,description:3,raw_code:2,columns:1};return i.each(t,(function(t){t.overallWeight=0,i.each(Object.keys(n),(function(r){if(null!=t.model[r]){let o=0,a=t.model[r],s=e.search.query.toLowerCase();if("columns"===r)i.each(a,(function(e){if(e.name){let t=e.name.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}}));else if("tags"===r)i.each(a,(function(e){let t=e.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}));else{a=a.toLowerCase();let e=0;for(;-1!=e;)e=a.indexOf(s,e),-1!=e&&(o++,e++)}t.overallWeight+=o*n[r]}}))})),t}(r.search(t))})),r.init(),r.ready((function(t){e.project=t,e.search.results=r.search("");var o=i.unique(i.pluck(i.values(t.nodes),"package_name")).sort(),a=[null];i.each(t.nodes,(function(e){var t=e.tags;a=i.union(a,t).sort()})),l.init({packages:o,tags:a}),f(n.params.unique_id);var d=u.parseState(n.params);d.show_graph&&s.ready((function(){i.assign(l.selection.dirty,d.selected);var e=l.updateSelection();s.updateGraph(e)}));var p=t.metadata||{};c.init({track:p.send_anonymous_usage_stats,project_id:p.project_id})}))}])},function(e,t){e.exports="data:image/svg+xml,%3Csvg width='242' height='90' viewBox='0 0 242 90' fill='none' xmlns='http://www.w3.org/2000/svg'%3E %3Cpath d='M240.384 74.5122L239.905 75.8589H239.728L239.249 74.5156V75.8589H238.941V74.0234H239.324L239.816 75.3872L240.309 74.0234H240.691V75.8589H240.384V74.5122ZM238.671 74.3003H238.169V75.8589H237.858V74.3003H237.352V74.0234H238.671V74.3003Z' fill='%23262A38'/%3E %3Cpath d='M154.123 13.915V75.3527H141.672V69.0868C140.37 71.2839 138.499 73.0742 136.22 74.2134C133.779 75.434 131.012 76.085 128.246 76.085C124.828 76.1664 121.41 75.1899 118.562 73.2369C115.633 71.2839 113.354 68.5986 111.889 65.425C110.262 61.7631 109.448 57.8572 109.529 53.8698C109.448 49.8825 110.262 
45.9765 111.889 42.3961C113.354 39.3038 115.633 36.6185 118.481 34.7469C121.41 32.8753 124.828 31.9801 128.246 32.0615C130.931 32.0615 133.616 32.6311 135.976 33.8517C138.255 34.991 140.126 36.6999 141.428 38.8156V18.0651L154.123 13.915ZM139.15 63.2279C140.777 61.1121 141.672 58.0199 141.672 54.0326C141.672 50.0452 140.859 47.0344 139.15 44.9187C137.441 42.8029 134.755 41.5823 131.989 41.6637C129.222 41.5009 126.537 42.7215 124.746 44.8373C123.038 46.953 122.142 49.9639 122.142 53.8698C122.142 57.8572 123.038 60.9494 124.746 63.1465C126.455 65.3436 129.222 66.5642 131.989 66.4828C135.081 66.4828 137.522 65.3436 139.15 63.2279Z' fill='%23262A38'/%3E %3Cpath d='M198.635 34.6655C201.564 36.5371 203.843 39.2225 205.226 42.3147C206.853 45.8952 207.667 49.8011 207.586 53.7885C207.667 57.7758 206.853 61.7632 205.226 65.3436C203.761 68.5172 201.483 71.2026 198.553 73.1556C195.705 75.0272 192.287 76.0037 188.87 75.9223C186.103 76.0037 183.336 75.3527 180.895 74.0507C178.617 72.9114 176.745 71.1212 175.524 68.9241V75.2713H162.993V18.0651L175.606 13.915V38.9783C176.826 36.7812 178.698 34.991 180.976 33.8517C183.418 32.5498 186.103 31.8988 188.87 31.9801C192.287 31.8988 195.705 32.8753 198.635 34.6655ZM192.45 63.1465C194.159 60.9494 194.973 57.8572 194.973 53.7885C194.973 49.8825 194.159 46.8716 192.45 44.7559C190.741 42.6402 188.381 41.5823 185.289 41.5823C182.523 41.4196 179.837 42.6402 178.047 44.8373C176.338 47.0344 175.524 50.0452 175.524 53.9512C175.524 57.9386 176.338 61.0308 178.047 63.1465C179.756 65.3436 182.441 66.5642 185.289 66.4015C188.056 66.5642 190.741 65.3436 192.45 63.1465Z' fill='%23262A38'/%3E %3Cpath d='M225 42.4774V58.915C225 61.2749 225.651 62.9838 226.791 64.0416C228.093 65.1809 229.801 65.7505 231.592 65.6691C232.975 65.6691 234.44 65.425 235.742 65.0995V74.8644C233.382 75.6782 230.941 76.085 228.499 76.0037C223.292 76.0037 219.304 74.5389 216.537 71.6094C213.771 68.68 212.387 64.5299 212.387 59.1592V23.1103L225 19.0416V33.038H235.742V42.4774H225Z' 
fill='%23262A38'/%3E %3Cpath d='M86.1754 3.74322C88.2911 5.77758 89.6745 8.46293 90 11.3924C90 12.613 89.6745 13.4268 88.9421 14.9729C88.2098 16.519 79.1772 32.1429 76.4919 36.4557C74.9458 38.9783 74.132 41.9892 74.132 44.9186C74.132 47.9295 74.9458 50.859 76.4919 53.3816C79.1772 57.6944 88.2098 73.3996 88.9421 74.9457C89.6745 76.4919 90 77.2242 90 78.4448C89.6745 81.3743 88.3725 84.0597 86.2568 86.0127C84.2224 88.1284 81.5371 89.5118 78.689 89.7559C77.4684 89.7559 76.6546 89.4304 75.1899 88.698C73.7251 87.9656 57.7758 79.1772 53.4629 76.4919C53.1374 76.3291 52.8119 76.085 52.4051 75.9222L31.085 63.3092C31.5732 67.3779 33.3635 71.2839 36.2929 74.132C36.8626 74.7016 37.4322 75.1899 38.0832 75.6781C37.5949 75.9222 37.0253 76.1664 36.5371 76.4919C32.2242 79.1772 16.519 88.2098 14.9729 88.9421C13.4268 89.6745 12.6944 90 11.3924 90C8.46293 89.6745 5.77758 88.3725 3.82459 86.2568C1.70886 84.2224 0.325497 81.5371 0 78.6076C0.0813743 77.387 0.406872 76.1664 1.05787 75.1085C1.79024 73.5624 10.8228 57.8571 13.5081 53.5443C15.0542 51.0217 15.868 48.0922 15.868 45.0814C15.868 42.0705 15.0542 39.141 13.5081 36.6184C10.8228 32.1429 1.70886 16.4376 1.05787 14.8915C0.406872 13.8336 0.0813743 12.613 0 11.3924C0.325497 8.46293 1.62749 5.77758 3.74322 3.74322C5.77758 1.62749 8.46293 0.325497 11.3924 0C12.613 0.0813743 13.8336 0.406872 14.9729 1.05787C16.2749 1.62749 27.7486 8.30018 33.8517 11.8807L35.2351 12.6944C35.7233 13.0199 36.1302 13.264 36.4557 13.4268L37.1067 13.8336L58.8336 26.6908C58.3454 21.8083 55.8228 17.3327 51.9168 14.3219C52.4051 14.0778 52.9747 13.8336 53.4629 13.5081C57.7758 10.8228 73.481 1.70886 75.0271 1.05787C76.085 0.406872 77.3056 0.0813743 78.6076 0C81.4557 0.325497 84.1411 1.62749 86.1754 3.74322ZM46.1392 50.7776L50.7776 46.1392C51.4286 45.4882 51.4286 44.5118 50.7776 43.8608L46.1392 39.2224C45.4882 38.5714 44.5118 38.5714 43.8608 39.2224L39.2224 43.8608C38.5714 44.5118 38.5714 45.4882 39.2224 46.1392L43.8608 50.7776C44.4304 51.3472 45.4882 51.3472 46.1392 
50.7776Z' fill='%23FF694A'/%3E %3C/svg%3E"},function(e,t,n){"use strict";n.r(t);var r=n(63),i=n.n(r);n(460),n(461),n(462),n(463),n(465);const o=n(9),a=(n(31),n(21));window.Prism=i.a,o.module("dbt").factory("code",["$sce",function(e){var t={copied:!1,highlight:function(t,n="sql"){if("sql"==n)var r=i.a.highlight(t,i.a.languages.sql,"sql");else if("python"==n)r=i.a.highlight(t,i.a.languages.python,"python");return e.trustAsHtml(r)},copy_to_clipboard:function(e){var t=document.createElement("textarea");t.value=e,t.setAttribute("readonly",""),t.style.position="absolute",t.style.left="-9999px",document.body.appendChild(t),t.select(),document.execCommand("copy"),document.body.removeChild(t)},generateSourceSQL:function(e){var t=["select"],n=a.size(e.columns),r=a.keys(e.columns);a.each(r,(function(e,r){var i=" "+e;r+1!=n&&(i+=","),t.push(i)}));const i=(e.database?e.database+".":"")+e.schema+"."+e.identifier;return t.push("from "+i),t.join("\n")},generateMetricSQL:function(e){if("expression"==e.type)return e.sql;const t=[`select ${e.type}(${e.sql})`,`from {{ ${e.model} }}`];if(e.filters.length>0){const n=e.filters.map(e=>`${e.field} ${e.operator} ${e.value}`).join(" AND ");t.push("where "+n)}return t.join("\n")}};return 
t}])},function(e,t){Prism.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},identifier:{pattern:/(^|[^@\\])`(?:\\[\s\S]|[^`\\]|``)*`/,greedy:!0,lookbehind:!0,inside:{punctuation:/^`|`$/}},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH 
ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:COL|_INSERT)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:ING|S)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS 
LIKE|XOR)\b/i,punctuation:/[;[\]()`,.]/}},function(e,t){Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t ]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punct
uation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python},function(e,t){!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document){var e=/\n(?!$)/g,t=Prism.plugins.lineNumbers={getLine:function(e,t){if("PRE"===e.tagName&&e.classList.contains("line-numbers")){var n=e.querySelector(".line-numbers-rows");if(n){var r=parseInt(e.getAttribute("data-start"),10)||1,i=r+(n.children.length-1);ti&&(t=i);var o=t-r;return n.children[o]}}},resize:function(e){r([e])},assumeViewportIndependence:!0},n=void 0;window.addEventListener("resize",(function(){t.assumeViewportIndependence&&n===window.innerWidth||(n=window.innerWidth,r(Array.prototype.slice.call(document.querySelectorAll("pre.line-numbers"))))})),Prism.hooks.add("complete",(function(t){if(t.code){var n=t.element,i=n.parentNode;if(i&&/pre/i.test(i.nodeName)&&!n.querySelector(".line-numbers-rows")&&Prism.util.isActive(n,"line-numbers")){n.classList.remove("line-numbers"),i.classList.add("line-numbers");var o,a=t.code.match(e),s=a?a.length+1:1,l=new Array(s+1).join("");(o=document.createElement("span")).setAttribute("aria-hidden","true"),o.className="line-numbers-rows",o.innerHTML=l,i.hasAttribute("data-start")&&(i.style.counterReset="linenumber "+(parseInt(i.getAttribute("data-start"),10)-1)),t.element.appendChild(o),r([i]),Prism.hooks.run("line-numbers",t)}}})),Prism.hooks.add("line-numbers",(function(e){e.plugins=e.plugins||{},e.plugins.lineNumbers=!0}))}function r(t){if(0!=(t=t.filter((function(e){var t=function(e){if(!e)return null;return window.getComputedStyle?getComputedStyle(e):e.currentStyle||null}(e)["white-space"];return"pre-wrap"===t||"pre-line"===t}))).length){var n=t.map((function(t){var n=t.querySelector("code"),r=t.querySelector(".line-numbers-rows");if(n&&r){var 
i=t.querySelector(".line-numbers-sizer"),o=n.textContent.split(e);i||((i=document.createElement("span")).className="line-numbers-sizer",n.appendChild(i)),i.innerHTML="0",i.style.display="block";var a=i.getBoundingClientRect().height;return i.innerHTML="",{element:t,lines:o,lineHeights:[],oneLinerHeight:a,sizer:i}}})).filter(Boolean);n.forEach((function(e){var t=e.sizer,n=e.lines,r=e.lineHeights,i=e.oneLinerHeight;r[n.length-1]=void 0,n.forEach((function(e,n){if(e&&e.length>1){var o=t.appendChild(document.createElement("span"));o.style.display="block",o.textContent=e}else r[n]=i}))})),n.forEach((function(e){for(var t=e.sizer,n=e.lineHeights,r=0,i=0;i code {\n\tposition: relative;\n\twhite-space: inherit;\n}\n\n.line-numbers .line-numbers-rows {\n\tposition: absolute;\n\tpointer-events: none;\n\ttop: 0;\n\tfont-size: 100%;\n\tleft: -3.8em;\n\twidth: 3em; /* works for line-numbers below 1000 lines */\n\tletter-spacing: -1px;\n\tborder-right: 1px solid #999;\n\n\t-webkit-user-select: none;\n\t-moz-user-select: none;\n\t-ms-user-select: none;\n\tuser-select: none;\n\n}\n\n\t.line-numbers-rows > span {\n\t\tdisplay: block;\n\t\tcounter-increment: linenumber;\n\t}\n\n\t\t.line-numbers-rows > span:before {\n\t\t\tcontent: counter(linenumber);\n\t\t\tcolor: #999;\n\t\t\tdisplay: block;\n\t\t\tpadding-right: 0.8em;\n\t\t\ttext-align: right;\n\t\t}\n',""])},function(e,t,n){var r=n(466);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,'/**\n * GHColors theme by Avi Aryan (http://aviaryan.in)\n * Inspired by Github syntax coloring\n */\n\ncode[class*="language-"],\npre[class*="language-"] {\n\tcolor: #393A34;\n\tfont-family: "Consolas", "Bitstream Vera Sans Mono", "Courier New", Courier, monospace;\n\tdirection: ltr;\n\ttext-align: left;\n\twhite-space: pre;\n\tword-spacing: normal;\n\tword-break: normal;\n\tfont-size: .9em;\n\tline-height: 
1.2em;\n\n\t-moz-tab-size: 4;\n\t-o-tab-size: 4;\n\ttab-size: 4;\n\n\t-webkit-hyphens: none;\n\t-moz-hyphens: none;\n\t-ms-hyphens: none;\n\thyphens: none;\n}\n\npre > code[class*="language-"] {\n\tfont-size: 1em;\n}\n\npre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,\ncode[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {\n\tbackground: #b3d4fc;\n}\n\npre[class*="language-"]::selection, pre[class*="language-"] ::selection,\ncode[class*="language-"]::selection, code[class*="language-"] ::selection {\n\tbackground: #b3d4fc;\n}\n\n/* Code blocks */\npre[class*="language-"] {\n\tpadding: 1em;\n\tmargin: .5em 0;\n\toverflow: auto;\n\tborder: 1px solid #dddddd;\n\tbackground-color: white;\n}\n\n/* Inline code */\n:not(pre) > code[class*="language-"] {\n\tpadding: .2em;\n\tpadding-top: 1px;\n\tpadding-bottom: 1px;\n\tbackground: #f8f8f8;\n\tborder: 1px solid #dddddd;\n}\n\n.token.comment,\n.token.prolog,\n.token.doctype,\n.token.cdata {\n\tcolor: #999988;\n\tfont-style: italic;\n}\n\n.token.namespace {\n\topacity: .7;\n}\n\n.token.string,\n.token.attr-value {\n\tcolor: #e3116c;\n}\n\n.token.punctuation,\n.token.operator {\n\tcolor: #393A34; /* no highlight */\n}\n\n.token.entity,\n.token.url,\n.token.symbol,\n.token.number,\n.token.boolean,\n.token.variable,\n.token.constant,\n.token.property,\n.token.regex,\n.token.inserted {\n\tcolor: #36acaa;\n}\n\n.token.atrule,\n.token.keyword,\n.token.attr-name,\n.language-autohotkey .token.selector {\n\tcolor: #00a4db;\n}\n\n.token.function,\n.token.deleted,\n.language-autohotkey .token.tag {\n\tcolor: #9a050f;\n}\n\n.token.tag,\n.token.selector,\n.language-autohotkey .token.keyword {\n\tcolor: #00009f;\n}\n\n.token.important,\n.token.function,\n.token.bold {\n\tfont-weight: bold;\n}\n\n.token.italic {\n\tfont-style: italic;\n}\n',""])},function(e,t,n){n(31);const 
r=n(21),i=n(148),o=n(203),a=n(468);angular.module("dbt").factory("graph",["$state","$window","$q","selectorService","project","locationService",function(e,t,n,s,l,c){var u={vertical:{userPanningEnabled:!1,boxSelectionEnabled:!1,maxZoom:1.5},horizontal:{userPanningEnabled:!0,boxSelectionEnabled:!1,maxZoom:1,minZoom:.05}},d={none:{name:"null"},left_right:{name:"dagre",rankDir:"LR",rankSep:200,edgeSep:30,nodeSep:50},top_down:{name:"preset",positions:function(t){var n=e.params.unique_id;if(!n)return{x:0,y:0};var a=f.graph.pristine.dag,s=r.sortBy(o.ancestorNodes(a,n,1)),l=r.sortBy(o.descendentNodes(a,n,1)),c=r.partial(r.includes,s),u=r.partial(r.includes,l),d=a.filterNodes(c),p=a.filterNodes(u);return function(e,t,n,i){console.log("Getting position for ",i,". Primary: ",e);var o,a=100/(1+Math.max(t.length,n.length));if(e==i)return{x:0,y:0};if(r.includes(t,i))o={set:t,index:r.indexOf(t,i),factor:-1,type:"parent"};else{if(!r.includes(n,i))return{x:0,y:0};o={set:n,index:r.indexOf(n,i),factor:1,type:"child"}}var s=o.set.length;if("parent"==o.type)var l={x:(0+o.index)*a,y:-200-100*(s-o.index-1)};else l={x:(0+o.index)*a,y:200+100*(s-o.index-1)};return l}(n,i.alg.topsort(d),i.alg.topsort(p).reverse(),t.data("id"))}}},f={loading:!0,loaded:n.defer(),graph_element:null,orientation:"sidebar",expanded:!1,graph:{options:u.vertical,pristine:{nodes:{},edges:{},dag:null},elements:[],layout:d.none,style:[{selector:"edge.vertical",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#027599","arrow-scale":1.5,"line-color":"#027599",width:3,"target-distance-from-node":"5px","source-endpoint":"0% 50%","target-endpoint":"0deg"}},{selector:"edge.horizontal",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#006f8a","arrow-scale":1.5,"target-distance-from-node":"10px","source-distance-from-node":"5px","line-color":"#006f8a",width:3,"source-endpoint":"50% 
0%","target-endpoint":"270deg"}},{selector:"edge[selected=1]",style:{"line-color":"#bd6bb6","target-arrow-color":"#bd6bb6","z-index":1}},{selector:'node[display="none"]',style:{display:"none"}},{selector:"node.vertical",style:{"text-margin-x":"5px","background-color":"#0094b3","font-size":"16px",shape:"ellipse",color:"#fff",width:"5px",height:"5px",padding:"5px",content:"data(label)","font-weight":300,"text-valign":"center","text-halign":"right"}},{selector:"node.horizontal",style:{"background-color":"#0094b3","font-size":"24px",shape:"roundrectangle",color:"#fff",width:"label",height:"label",padding:"12px",content:"data(label)","font-weight":300,"font-family":'-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Fira Sans", "Droid Sans", "Helvetica Neue", Helvetica, Arial, sans-serif',"text-valign":"center","text-halign":"center",ghost:"yes","ghost-offset-x":"2px","ghost-offset-y":"4px","ghost-opacity":.5,"text-outline-color":"#000","text-outline-width":"1px","text-outline-opacity":.2}},{selector:'node[resource_type="source"]',style:{"background-color":"#5fb825"}},{selector:'node[resource_type="exposure"]',style:{"background-color":"#ff694b"}},{selector:'node[resource_type="metric"]',style:{"background-color":"#ff5688"}},{selector:"node[node_color]",style:{"background-color":"data(node_color)"}},{selector:"node[selected=1]",style:{"background-color":"#bd6bb6"}},{selector:"node.horizontal[selected=1]",style:{"background-color":"#88447d"}},{selector:"node.horizontal.dirty",style:{"background-color":"#919599"}},{selector:"node[hidden=1]",style:{"background-color":"#919599","background-opacity":.5}}],ready:function(e){console.log("graph ready")}}};function p(e,t,n){var i=r.map(e,(function(e){return f.graph.pristine.nodes[e]})),o=[];r.flatten(r.each(e,(function(t){var n=f.graph.pristine.edges[t];r.each(n,(function(t){r.includes(e,t.data.target)&&r.includes(e,t.data.source)&&o.push(t)}))})));var s=r.compact(i).concat(r.compact(o));return 
r.each(f.graph.elements,(function(e){e.data.display="none",e.data.selected=0,e.data.hidden=0,e.classes=n})),r.each(s,(function(e){e.data.display="element",e.classes=n,t&&r.includes(t,e.data.unique_id)&&(e.data.selected=1),r.get(e,["data","docs","show"],!0)||(e.data.hidden=1);var i=r.get(e,["data","docs","node_color"]);i&&a.isValidColor(i)&&(e.data.node_color=i)})),f.graph.elements=r.filter(s,(function(e){return"element"==e.data.display})),e}function h(e,t,n){var r=f.graph.pristine.dag;if(r){var i=f.graph.pristine.nodes,o=s.selectNodes(r,i,e),a=n?o.matched:[];return p(o.selected,a,t)}}return f.setGraphReady=function(e){f.loading=!1,f.loaded.resolve(),f.graph_element=e},f.ready=function(e){f.loaded.promise.then((function(){e(f)}))},f.manifest={},f.packages=[],f.selected_node=null,f.getCanvasHeight=function(){return.8*t.innerHeight+"px"},l.ready((function(e){f.manifest=e,f.packages=r.uniq(r.map(f.manifest.nodes,"package_name")),r.each(r.filter(f.manifest.nodes,(function(e){var t=r.includes(["model","seed","source","snapshot","analysis","exposure","metric","operation"],e.resource_type),n="test"==e.resource_type&&!e.hasOwnProperty("test_metadata");return t||n})),(function(e){var t={group:"nodes",data:r.assign(e,{parent:e.package_name,id:e.unique_id,is_group:"false"})};f.graph.pristine.nodes[e.unique_id]=t})),r.each(f.manifest.parent_map,(function(e,t){r.each(e,(function(e){var n=f.manifest.nodes[e],i=f.manifest.nodes[t];if(r.includes(["model","source","seed","snapshot","metric"],n.resource_type)&&("test"!=i.resource_type||!i.hasOwnProperty("test_metadata"))){var o=n.unique_id+"|"+i.unique_id,a={group:"edges",data:{source:n.unique_id,target:i.unique_id,unique_id:o}},s=i.unique_id;f.graph.pristine.edges[s]||(f.graph.pristine.edges[s]=[]),f.graph.pristine.edges[s].push(a)}}))}));var t=new 
i.Graph({directed:!0});r.each(f.graph.pristine.nodes,(function(e){t.setNode(e.data.unique_id,e.data.name)})),r.each(f.graph.pristine.edges,(function(e){r.each(e,(function(e){t.setEdge(e.data.source,e.data.target)}))})),f.graph.pristine.dag=t,f.graph.elements=r.flatten(r.values(f.graph.pristine.nodes).concat(r.values(f.graph.pristine.edges))),p(t.nodes())})),f.hideGraph=function(){f.orientation="sidebar",f.expanded=!1},f.showVerticalGraph=function(e,t){f.orientation="sidebar",t&&(f.expanded=!0);var n=h(r.assign({},s.options,{include:"+"+e+"+",exclude:"",hops:1}),"vertical",!0);return f.graph.layout=d.top_down,f.graph.options=u.vertical,n},f.showFullGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=r.assign({},s.options);e?(t.include="+"+e+"+",t.exclude=""):(t.include="",t.exclude="");var n=h(t,"horizontal",!0);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(t),n},f.updateGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=h(e,"horizontal",!1);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(e),t},f.deselectNodes=function(){"fullscreen"==f.orientation&&f.graph_element.elements().data("selected",0)},f.selectNode=function(e){if("fullscreen"==f.orientation){f.graph.pristine.nodes[e];var t=f.graph.pristine.dag,n=r.indexBy(o.ancestorNodes(t,e)),i=r.indexBy(o.descendentNodes(t,e));n[e]=e,i[e]=e;var a=f.graph_element;r.each(f.graph.elements,(function(t){var r=a.$id(t.data.id);n[t.data.source]&&n[t.data.target]||i[t.data.source]&&i[t.data.target]||t.data.unique_id==e?r.data("selected",1):r.data("selected",0)}))}},f.markDirty=function(e){f.markAllClean(),r.each(e,(function(e){f.graph_element.$id(e).addClass("dirty")}))},f.markAllClean=function(){f.graph_element&&f.graph_element.elements().removeClass("dirty")},f}])},function(e,t,n){"use strict";n.r(t),n.d(t,"isValidColor",(function(){return i}));const r=new 
Set(["aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","ghostwhite","gold","goldenrod","gray","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightpink","lightsalmon","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumslateblue","mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","rebeccapurple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen"]);function i(e){if(!e)return!1;const t=e.trim().toLowerCase();if(""===t)return!1;const n=t.match(/^#([A-Fa-f0-9]{3}){1,2}$/),i=r.has(t);return Boolean(n)||i}},function(e,t,n){n(31);const r=n(21),i=n(470);angular.module("dbt").factory("selectorService",["$state",function(e){var 
t={include:"",exclude:"",packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"],depth:1},n={view_node:null,selection:{clean:r.clone(t),dirty:r.clone(t)},options:{packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"]},init:function(e){r.each(e,(function(e,r){n.options[r]=e,t[r]=e,n.selection.clean[r]=e,n.selection.dirty[r]=e}))},resetSelection:function(e){var i={include:e&&r.includes(["model","seed","snapshot"],e.resource_type)?"+"+e.name+"+":e&&"source"==e.resource_type?"+source:"+e.source_name+"."+e.name+"+":e&&"exposure"==e.resource_type?"+exposure:"+e.name:e&&"metric"==e.resource_type?"+metric:"+e.name:e&&r.includes(["analysis","test"],e.resource_type)?"+"+e.name:""},o=r.assign({},t,i);n.selection.clean=r.clone(o),n.selection.dirty=r.clone(o),n.view_node=e},getViewNode:function(){return n.view_node},excludeNode:function(e,t){var r,i=n.selection.dirty.exclude,o=t.parents?"+":"",a=t.children?"+":"",s=i.length>0?" 
":"";"source"==e.resource_type?(o+="source:",r=e.source_name+"."+e.name):["exposure","metric"].indexOf(e.resource_type)>-1?(o+=e.resource_type+":",r=e.name):r=e.name;var l=i+s+o+r+a;return n.selection.dirty.exclude=l,n.updateSelection()},selectSource:function(e,t){var r="source:"+e+(t.children?"+":"");return n.selection.dirty.include=r,n.updateSelection()},clearViewNode:function(){n.view_node=null},isDirty:function(){return!r.isEqual(n.selection.clean,n.selection.dirty)},updateSelection:function(){return n.selection.clean=r.clone(n.selection.dirty),n.selection.clean},selectNodes:function(e,t,n){return i.selectNodes(e,t,n)}};return n}])},function(e,t,n){const r=n(21),i=n(471);function o(e,t){return t||(t=" "),r.filter(r.uniq(e.split(t)),(function(e){return e.length>0}))}function a(e){var t={raw:e,select_at:!1,select_children:!1,children_depth:null,select_parents:!1,parents_depth:null};const n=new RegExp(""+/^/.source+/(?(\@))?/.source+/(?((?(\d*))\+))?/.source+/((?([\w.]+)):)?/.source+/(?(.*?))/.source+/(?(\+(?(\d*))))?/.source+/$/.source).exec(e).groups;t.select_at="@"==n.childs_parents,t.select_parents=!!n.parents,t.select_children=!!n.children,n.parents_depth&&(t.parents_depth=parseInt(n.parents_depth)),n.children_depth&&(t.children_depth=parseInt(n.children_depth));var r=n.method,i=n.value;return r?-1!=r.indexOf(".")&&([r,selector_modifier]=r.split(".",2),i={config:selector_modifier,value:i}):r="implicit",t.selector_type=r,t.selector_value=i,t}function s(e){var t=o(e," ");return r.map(t,(function(e){var t=o(e,",");return t.length>1?{method:"intersect",selectors:r.map(t,a)}:{method:"none",selectors:r.map([e],a)}}))}function l(e,t){var n=s(e),i=null,o=null;return r.each(n,(function(e){var n="intersect"==e.method?r.intersection:r.union;r.each(e.selectors,(function(e){var 
r=t(e);null===i?(i=r.matched,o=r.selected):(i=n(i,r.matched),o=n(o,r.selected))}))})),{matched:i||[],selected:o||[]}}e.exports={splitSpecs:o,parseSpec:a,parseSpecs:s,buildSpec:function(e,t,n){return{include:s(e),exclude:s(t),hops:n}},applySpec:l,selectNodes:function(e,t,n){n.include,n.exclude;var o,a=r.partial(i.getNodesFromSpec,e,t,n.hops);r.values(t),o=0==n.include.trim().length?{selected:e.nodes(),matched:[]}:l(n.include,a);var s=l(n.exclude,a),c=o.selected,u=o.matched;c=r.difference(c,s.selected),u=r.difference(u,s.matched);var d=[];return r.each(c,(function(e){var i=t[e];i.data.tags||(i.data.tags=[]);var o=r.includes(n.packages,i.data.package_name),a=r.intersection(n.tags,i.data.tags).length>0,s=r.includes(n.tags,null)&&0==i.data.tags.length,l=r.includes(n.resource_types,i.data.resource_type);o&&(a||s)&&l||d.push(i.data.unique_id)})),{selected:r.difference(c,d),matched:r.difference(u,d)}}}},function(e,t,n){const r=n(21),i=n(203);var o="fqn",a="tag",s="source",l="exposure",c="metric",u="path",d="file",f="package",p="config",h="test_name",g="test_type",m={};function v(e,t){if(t===r.last(e))return!0;var n=e.reduce((e,t)=>e.concat(t.split(".")),[]),i=t.split(".");if(n.length-1||!r.hasOwnProperty("test_metadata")&&["data","singular"].indexOf(t)>-1)&&n.push(r)})),n}function $(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("source"==r.resource_type){var i,o,a=r.source_name,s=r.name;-1!=t.indexOf(".")?[i,o]=t.split(".",2):(i=t,o=null),("*"==i||i==a&&"*"===o||i==a&&o===s||i==a&&null===o)&&n.push(e.data)}})),n}m["implicit"]=function(e,t){var n=b(e,t),i=y(e,t),o=[];t.toLowerCase().endsWith(".sql")&&(o=x(e,t));var a=r.uniq([].concat(r.map(n,"unique_id"),r.map(i,"unique_id"),r.map(o,"unique_id")));return r.map(a,t=>e[t].data)},m[o]=b,m[a]=w,m[s]=$,m[l]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("exposure"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[c]=function(e,t){var n=[];return r.each(e,(function(e){var 
r=e.data;if("metric"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[u]=y,m[d]=x,m[f]=k,m[p]=A,m[h]=E,m[g]=S,e.exports={isFQNMatch:v,getNodesByFQN:b,getNodesByTag:w,getNodesBySource:$,getNodesByPath:y,getNodesByPackage:k,getNodesByConfig:A,getNodesByTestName:E,getNodesByTestType:S,getNodesFromSpec:function(e,t,n,o){const a=m[o.selector_type];if(!a)return console.log("Node matcher for selector",o.selector_type,"is invalid"),{selected:[],matched:[]};var s=a(t,o.selector_value),l=[],c=[];return r.each(s,(function(t){var a=t.unique_id;c.push(t.unique_id);var s=[],u=[],d=[];if(o.select_at&&(d=r.union(i.selectAt(e,a))),o.select_parents){var f=n||o.parents_depth;s=i.ancestorNodes(e,a,f)}if(o.select_children){f=n||o.children_depth;u=i.descendentNodes(e,a,f)}l=r.union([a],l,u,s,d)})),{selected:l,matched:c}}}},function(e,t,n){const r=n(9);n(473);r.module("dbt").factory("trackingService",["$location","selectorService","$rootScope",function(e,t,n){var r={initialized:!1,snowplow:null,project_id:null,init:function(e){r.initialized||(r.initialized=!0,r.project_id=e.project_id,!0===e.track&&r.turn_on_tracking())},isHosted:function(){return window.location.hostname.indexOf(".getdbt.com")>-1},turn_on_tracking:function(){var e,t,n,i,o,a;e=window,t=document,n="script",e[i="snowplow"]||(e.GlobalSnowplowNamespace=e.GlobalSnowplowNamespace||[],e.GlobalSnowplowNamespace.push(i),e[i]=function(){(e[i].q=e[i].q||[]).push(arguments)},e[i].q=e[i].q||[],o=t.createElement(n),a=t.getElementsByTagName(n)[0],o.async=1,o.src="//d1fc8wv8zag5ca.cloudfront.net/2.9.0/sp.js",a.parentNode.insertBefore(o,a));var 
s={appId:"dbt-docs",forceSecureTracker:!0,respectDoNotTrack:!0,userFingerprint:!1,contexts:{webPage:!0}};r.isHosted()&&(s.cookieDomain=".getdbt.com"),r.snowplow=window.snowplow,r.snowplow("newTracker","sp","fishtownanalytics.sinter-collect.com",s),r.snowplow("enableActivityTracking",30,30),r.track_pageview()},fuzzUrls:function(){r.isHosted()||(r.snowplow("setCustomUrl","https://fuzzed.getdbt.com/"),r.snowplow("setReferrerUrl","https://fuzzed.getdbt.com/"))},getContext:function(){return[{schema:"iglu:com.dbt/dbt_docs/jsonschema/1-0-0",data:{is_cloud_hosted:r.isHosted(),core_project_id:r.project_id}}]},track_pageview:function(){if(r.snowplow){r.fuzzUrls();r.snowplow("trackPageView",null,r.getContext())}},track_event:function(e,t,n,i){r.snowplow&&(r.fuzzUrls(),r.snowplow("trackStructEvent","dbt-docs",e,t,n,i,r.getContext()))},track_graph_interaction:function(e,t){r.snowplow&&(r.fuzzUrls(),r.track_event("graph","interact",e,t))}};return r}])},function(e,t,n){var r,i,o,a,s;r=n(474),i=n(204).utf8,o=n(475),a=n(204).bin,(s=function(e,t){e.constructor==String?e=t&&"binary"===t.encoding?a.stringToBytes(e):i.stringToBytes(e):o(e)?e=Array.prototype.slice.call(e,0):Array.isArray(e)||e.constructor===Uint8Array||(e=e.toString());for(var n=r.bytesToWords(e),l=8*e.length,c=1732584193,u=-271733879,d=-1732584194,f=271733878,p=0;p>>24)|4278255360&(n[p]<<24|n[p]>>>8);n[l>>>5]|=128<>>9<<4)]=l;var h=s._ff,g=s._gg,m=s._hh,v=s._ii;for(p=0;p>>0,u=u+y>>>0,d=d+x>>>0,f=f+w>>>0}return r.endian([c,u,d,f])})._ff=function(e,t,n,r,i,o,a){var s=e+(t&n|~t&r)+(i>>>0)+a;return(s<>>32-o)+t},s._gg=function(e,t,n,r,i,o,a){var s=e+(t&r|n&~r)+(i>>>0)+a;return(s<>>32-o)+t},s._hh=function(e,t,n,r,i,o,a){var s=e+(t^n^r)+(i>>>0)+a;return(s<>>32-o)+t},s._ii=function(e,t,n,r,i,o,a){var s=e+(n^(t|~r))+(i>>>0)+a;return(s<>>32-o)+t},s._blocksize=16,s._digestsize=16,e.exports=function(e,t){if(null==e)throw new Error("Illegal argument "+e);var n=r.wordsToBytes(s(e,t));return 
t&&t.asBytes?n:t&&t.asString?a.bytesToString(n):r.bytesToHex(n)}},function(e,t){var n,r;n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",r={rotl:function(e,t){return e<>>32-t},rotr:function(e,t){return e<<32-t|e>>>t},endian:function(e){if(e.constructor==Number)return 16711935&r.rotl(e,8)|4278255360&r.rotl(e,24);for(var t=0;t0;e--)t.push(Math.floor(256*Math.random()));return t},bytesToWords:function(e){for(var t=[],n=0,r=0;n>>5]|=e[n]<<24-r%32;return t},wordsToBytes:function(e){for(var t=[],n=0;n<32*e.length;n+=8)t.push(e[n>>>5]>>>24-n%32&255);return t},bytesToHex:function(e){for(var t=[],n=0;n>>4).toString(16)),t.push((15&e[n]).toString(16));return t.join("")},hexToBytes:function(e){for(var t=[],n=0;n>>6*(3-o)&63)):t.push("=");return t.join("")},base64ToBytes:function(e){e=e.replace(/[^A-Z0-9+\/]/gi,"");for(var t=[],r=0,i=0;r>>6-2*i);return t}},e.exports=r},function(e,t){function n(e){return!!e.constructor&&"function"==typeof e.constructor.isBuffer&&e.constructor.isBuffer(e)} +/*! Runge-Kutta spring physics function generator. Adapted from Framer.js, copyright Koen Bok. 
MIT License: http://en.wikipedia.org/wiki/MIT_License */var r=function(){function e(e){return-e.tension*e.x-e.friction*e.v}function t(t,n,r){var i={x:t.x+r.dx*n,v:t.v+r.dv*n,tension:t.tension,friction:t.friction};return{dx:i.v,dv:e(i)}}function n(n,r){var i={dx:n.v,dv:e(n)},o=t(n,.5*r,i),a=t(n,.5*r,o),s=t(n,r,a),l=1/6*(i.dx+2*(o.dx+a.dx)+s.dx),c=1/6*(i.dv+2*(o.dv+a.dv)+s.dv);return n.x=n.x+l*r,n.v=n.v+c*r,n}return function e(t,r,i){var o,a={x:-1,v:0,tension:null,friction:null},s=[0],l=0,c=void 0,u=void 0;for(t=parseFloat(t)||500,r=parseFloat(r)||20,i=i||null,a.tension=t,a.friction=r,c=(o=null!==i)?(l=e(t,r))/i*.016:.016;u=n(u||a,c),s.push(1+u.x),l+=16,Math.abs(u.x)>1e-4&&Math.abs(u.v)>1e-4;);return o?function(e){return s[e*(s.length-1)|0]}:l}}();e.exports=r},function(e,t,n){"use strict";var r=n(0);function i(e,t,n,r,i){if(1===r)return n;var o=i(t,n,r);return null==e||((e.roundValue||e.color)&&(o=Math.round(o)),void 0!==e.min&&(o=Math.max(o,e.min)),void 0!==e.max&&(o=Math.min(o,e.max))),o}function o(e,t){return null!=e.pfValue||null!=e.value?null==e.pfValue||null!=t&&"%"===t.type.units?e.value:e.pfValue:e}e.exports=function(e,t,n,a,s){var l=null!=s?s.type:null;n<0?n=0:n>1&&(n=1);var c=o(e,s),u=o(t,s);if(r.number(c)&&r.number(u))return i(l,c,u,n,a);if(r.array(c)&&r.array(u)){for(var d=[],f=0;f0},startBatch:function(){var e=this._private;return null==e.batchCount&&(e.batchCount=0),0===e.batchCount&&(e.batchingStyle=e.batchingNotify=!0,e.batchStyleEles=this.collection(),e.batchNotifyEles=this.collection(),e.batchNotifyTypes=[],e.batchNotifyTypes.ids={}),e.batchCount++,this},endBatch:function(){var e=this._private;return e.batchCount--,0===e.batchCount&&(e.batchingStyle=!1,e.batchStyleEles.updateStyle(),e.batchingNotify=!1,this.notify({type:e.batchNotifyTypes,eles:e.batchNotifyEles})),this},batch:function(e){return this.startBatch(),e(),this.endBatch(),this},batchData:function(e){var t=this;return this.batch((function(){for(var 
n=Object.keys(e),r=0;r0;)e.removeChild(e.childNodes[0]);this._private.renderer=null},onRender:function(e){return this.on("render",e)},offRender:function(e){return this.off("render",e)}};i.invalidateDimensions=i.resize,e.exports=i},function(e,t,n){"use strict";var r=n(0),i=n(7),o={collection:function(e,t){return r.string(e)?this.$(e):r.elementOrCollection(e)?e.collection():r.array(e)?new i(this,e,t):new i(this)},nodes:function(e){var t=this.$((function(e){return e.isNode()}));return e?t.filter(e):t},edges:function(e){var t=this.$((function(e){return e.isEdge()}));return e?t.filter(e):t},$:function(e){var t=this._private.elements;return e?t.filter(e):t.spawnSelf()},mutableElements:function(){return this._private.elements}};o.elements=o.filter=o.$,e.exports=o},function(e,t,n){"use strict";var r=n(0),i=n(18),o={style:function(e){return e&&this.setStyle(e).update(),this._private.style},setStyle:function(e){var t=this._private;return r.stylesheet(e)?t.style=e.generateStyle(this):r.array(e)?t.style=i.fromJson(this,e):r.string(e)?t.style=i.fromString(this,e):t.style=i(this),t.style}};e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(5),a={apply:function(e){var t=this._private,n=t.cy.collection();t.newStyle&&(t.contextStyles={},t.propDiffs={},this.cleanElements(e,!0));for(var r=0;r0;if(c||u){var d=void 0;c&&u||c?d=l.properties:u&&(d=l.mappedProperties);for(var f=0;f0){n=!0;break}t.hasPie=n;var 
i=e.pstyle("text-transform").strValue,o=e.pstyle("label").strValue,a=e.pstyle("source-label").strValue,s=e.pstyle("target-label").strValue,l=e.pstyle("font-style").strValue,c=e.pstyle("font-size").pfValue+"px",u=e.pstyle("font-family").strValue,d=e.pstyle("font-weight").strValue,f=l+"$"+c+"$"+u+"$"+d+"$"+i+"$"+e.pstyle("text-valign").strValue+"$"+e.pstyle("text-valign").strValue+"$"+e.pstyle("text-outline-width").pfValue+"$"+e.pstyle("text-wrap").strValue+"$"+e.pstyle("text-max-width").pfValue;t.labelStyleKey=f,t.sourceLabelKey=f+"$"+a,t.targetLabelKey=f+"$"+s,t.labelKey=f+"$"+o,t.fontKey=l+"$"+d+"$"+c+"$"+u,t.styleKey=Date.now()}},applyParsedProperty:function(e,t){var n=this,o=t,a=e._private.style,s=void 0,l=n.types,c=n.properties[o.name].type,u=o.bypass,d=a[o.name],f=d&&d.bypass,p=e._private,h=function(){n.checkZOrderTrigger(e,o.name,d?d.value:null,o.value)};if("curve-style"===t.name&&"haystack"===t.value&&e.isEdge()&&(e.isLoop()||e.source().isParent()||e.target().isParent())&&(o=t=this.parse(t.name,"bezier",u)),o.delete)return a[o.name]=void 0,h(),!0;if(o.deleteBypassed)return d?!!d.bypass&&(d.bypassed=void 0,h(),!0):(h(),!0);if(o.deleteBypass)return d?!!d.bypass&&(a[o.name]=d.bypassed,h(),!0):(h(),!0);var g=function(){r.error("Do not assign mappings to elements without corresponding data (e.g. 
ele `"+e.id()+"` for property `"+o.name+"` with data field `"+o.field+"`); try a `["+o.field+"]` selector to limit scope to elements with `"+o.field+"` defined")};switch(o.mapped){case l.mapData:for(var m=o.field.split("."),v=p.data,b=0;b1&&(y=1),c.color){var x=o.valueMin[0],w=o.valueMax[0],k=o.valueMin[1],A=o.valueMax[1],E=o.valueMin[2],S=o.valueMax[2],$=null==o.valueMin[3]?1:o.valueMin[3],C=null==o.valueMax[3]?1:o.valueMax[3],_=[Math.round(x+(w-x)*y),Math.round(k+(A-k)*y),Math.round(E+(S-E)*y),Math.round($+(C-$)*y)];s={bypass:o.bypass,name:o.name,value:_,strValue:"rgb("+_[0]+", "+_[1]+", "+_[2]+")"}}else{if(!c.number)return!1;var O=o.valueMin+(o.valueMax-o.valueMin)*y;s=this.parse(o.name,O,o.bypass,"mapping")}s||(s=this.parse(o.name,d.strValue,o.bypass,"mapping")),s||g(),s.mapping=o,o=s;break;case l.data:var j=o.field.split("."),T=p.data;if(T)for(var P=0;P0&&l>0){for(var u={},d=!1,f=0;f0?e.delayAnimation(c).play().promise().then(t):t()})).then((function(){return e.animation({style:u,duration:l,easing:e.pstyle("transition-timing-function").value,queue:!1}).play().promise()})).then((function(){r.removeBypasses(e,s),e.emitAndNotify("style"),a.transitioning=!1}))}else a.transitioning&&(this.removeBypasses(e,s),e.emitAndNotify("style"),a.transitioning=!1)},checkZOrderTrigger:function(e,t,n,r){var i=this.properties[t];null==i.triggersZOrder||null!=n&&!i.triggersZOrder(n,r)||this._private.cy.notify({type:"zorder",eles:e})}};e.exports=a},function(e,t,n){"use strict";var r=n(0),i=n(1),o={applyBypass:function(e,t,n,o){var a=[];if("*"===t||"**"===t){if(void 0!==n)for(var s=0;sn.length?t.substr(n.length):""}function l(){o=o.length>a.length?o.substr(a.length):""}for(t=t.replace(/[/][*](\s|.)+?[*][/]/g,"");!t.match(/^\s*$/);){var c=t.match(/^\s*((?:.|\s)+?)\s*\{((?:.|\s)+?)\}/);if(!c){r.error("Halting stylesheet parsing: String stylesheet contains more to parse but no selector and block found in: "+t);break}n=c[0];var u=c[1];if("core"!==u&&new 
i(u)._private.invalid)r.error("Skipping parsing of block: Invalid selector found in string stylesheet: "+u),s();else{var d=c[2],f=!1;o=d;for(var p=[];!o.match(/^\s*$/);){var h=o.match(/^\s*(.+?)\s*:\s*(.+?)\s*;/);if(!h){r.error("Skipping parsing of block: Invalid formatting of style property and value definitions found in:"+d),f=!0;break}a=h[0];var g=h[1],m=h[2];this.properties[g]?this.parse(g,m)?(p.push({name:g,val:m}),l()):(r.error("Skipping property: Invalid property definition in: "+a),l()):(r.error("Skipping property: Invalid property name in: "+a),l())}if(f){s();break}this.selector(u);for(var v=0;v node").css({shape:"rectangle",padding:10,"background-color":"#eee","border-color":"#ccc","border-width":1}).selector("edge").css({width:3,"curve-style":"haystack"}).selector(":parent <-> node").css({"curve-style":"bezier","source-endpoint":"outside-to-line","target-endpoint":"outside-to-line"}).selector(":selected").css({"background-color":"#0169D9","line-color":"#0169D9","source-arrow-color":"#0169D9","target-arrow-color":"#0169D9","mid-source-arrow-color":"#0169D9","mid-target-arrow-color":"#0169D9"}).selector("node:parent:selected").css({"background-color":"#CCE1F9","border-color":"#aec8e5"}).selector(":active").css({"overlay-color":"black","overlay-padding":10,"overlay-opacity":.25}).selector("core").css({"selection-box-color":"#ddd","selection-box-opacity":.65,"selection-box-border-color":"#aaa","selection-box-border-width":1,"active-bg-color":"black","active-bg-opacity":.15,"active-bg-size":30,"outside-texture-bg-color":"#000","outside-texture-bg-opacity":.125}),this.defaultLength=this.length},e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(2),a={parse:function(e,t,n,o){if(i.fn(t))return this.parseImplWarn(e,t,n,o);var a=[e,t,n,"mapping"===o||!0===o||!1===o||null==o?"dontcare":o].join("$"),s=this.propCache=this.propCache||{},l=void 
0;return(l=s[a])||(l=s[a]=this.parseImplWarn(e,t,n,o)),(n||"mapping"===o)&&(l=r.copy(l))&&(l.value=r.copy(l.value)),l},parseImplWarn:function(e,t,n,i){var o=this.parseImpl(e,t,n,i);return o||null==t||r.error("The style property `%s: %s` is invalid",e,t),o},parseImpl:function(e,t,n,a){e=r.camel2dash(e);var s=this.properties[e],l=t,c=this.types;if(!s)return null;if(void 0===t)return null;s.alias&&(s=s.pointsTo,e=s.name);var u=i.string(t);u&&(t=t.trim());var d=s.type;if(!d)return null;if(n&&(""===t||null===t))return{name:e,value:t,bypass:!0,deleteBypass:!0};if(i.fn(t))return{name:e,value:t,strValue:"fn",mapped:c.fn,bypass:n};var f=void 0,p=void 0;if(!u||a);else{if(f=new RegExp(c.data.regex).exec(t)){if(n)return!1;var h=c.data;return{name:e,value:f,strValue:""+t,mapped:h,field:f[1],bypass:n}}if(p=new RegExp(c.mapData.regex).exec(t)){if(n)return!1;if(d.multiple)return!1;var g=c.mapData;if(!d.color&&!d.number)return!1;var m=this.parse(e,p[4]);if(!m||m.mapped)return!1;var v=this.parse(e,p[5]);if(!v||v.mapped)return!1;if(m.value===v.value)return!1;if(d.color){var b=m.value,y=v.value;if(!(b[0]!==y[0]||b[1]!==y[1]||b[2]!==y[2]||b[3]!==y[3]&&(null!=b[3]&&1!==b[3]||null!=y[3]&&1!==y[3])))return!1}return{name:e,value:p,strValue:""+t,mapped:g,field:p[1],fieldMin:parseFloat(p[2]),fieldMax:parseFloat(p[3]),valueMin:m.value,valueMax:v.value,bypass:n}}}if(d.multiple&&"multiple"!==a){var x=void 0;if(x=u?t.split(/\s+/):i.array(t)?t:[t],d.evenMultiple&&x.length%2!=0)return null;for(var w=[],k=[],A=[],E=!1,S=0;Sd.max||d.strictMax&&t===d.max))return null;var P={name:e,value:t,strValue:""+t+(_||""),units:_,bypass:n};return d.unitless||"px"!==_&&"em"!==_?P.pfValue=t:P.pfValue="px"!==_&&_?this.getEmSizeInPixels()*t:t,"ms"!==_&&"s"!==_||(P.pfValue="ms"===_?t:1e3*t),"deg"!==_&&"rad"!==_||(P.pfValue="rad"===_?t:o.deg2rad(t)),"%"===_&&(P.pfValue=t/100),P}if(d.propList){var D=[],R=""+t;if("none"===R);else{for(var 
I=R.split(","),N=0;N0&&s>0&&!isNaN(n.w)&&!isNaN(n.h)&&n.w>0&&n.h>0)return{zoom:l=(l=(l=Math.min((a-2*t)/n.w,(s-2*t)/n.h))>this._private.maxZoom?this._private.maxZoom:l)t.maxZoom?t.maxZoom:s)t.maxZoom||!t.zoomingEnabled?a=!0:(t.zoom=l,o.push("zoom"))}if(i&&(!a||!e.cancelOnFailedZoom)&&t.panningEnabled){var c=e.pan;r.number(c.x)&&(t.pan.x=c.x,s=!1),r.number(c.y)&&(t.pan.y=c.y,s=!1),s||o.push("pan")}return o.length>0&&(o.push("viewport"),this.emit(o.join(" ")),this.notify({type:"viewport"})),this},center:function(e){var t=this.getCenterPan(e);return t&&(this._private.pan=t,this.emit("pan viewport"),this.notify({type:"viewport"})),this},getCenterPan:function(e,t){if(this._private.panningEnabled){if(r.string(e)){var n=e;e=this.mutableElements().filter(n)}else r.elementOrCollection(e)||(e=this.mutableElements());if(0!==e.length){var i=e.boundingBox(),o=this.width(),a=this.height();return{x:(o-(t=void 0===t?this._private.zoom:t)*(i.x1+i.x2))/2,y:(a-t*(i.y1+i.y2))/2}}}},reset:function(){return this._private.panningEnabled&&this._private.zoomingEnabled?(this.viewport({pan:{x:0,y:0},zoom:1}),this):this},invalidateSize:function(){this._private.sizeCache=null},size:function(){var e,t,n=this._private,r=n.container;return n.sizeCache=n.sizeCache||(r?(e=i.getComputedStyle(r),t=function(t){return parseFloat(e.getPropertyValue(t))},{width:r.clientWidth-t("padding-left")-t("padding-right"),height:r.clientHeight-t("padding-top")-t("padding-bottom")}):{width:1,height:1})},width:function(){return this.size().width},height:function(){return this.size().height},extent:function(){var e=this._private.pan,t=this._private.zoom,n=this.renderedExtent(),r={x1:(n.x1-e.x)/t,x2:(n.x2-e.x)/t,y1:(n.y1-e.y)/t,y2:(n.y2-e.y)/t};return r.w=r.x2-r.x1,r.h=r.y2-r.y1,r},renderedExtent:function(){var e=this.width(),t=this.height();return{x1:0,y1:0,x2:e,y2:t,w:e,h:t}}};a.centre=a.center,a.autolockNodes=a.autolock,a.autoungrabifyNodes=a.autoungrabify,e.exports=a},function(e,t,n){"use strict";var 
r=n(1),i=n(4),o=n(7),a=n(12),s=n(95),l=n(0),c=n(11),u={},d={};function f(e,t,n){var s=n,d=function(n){r.error("Can not register `"+t+"` for `"+e+"` since `"+n+"` already exists in the prototype and can not be overridden")};if("core"===e){if(a.prototype[t])return d(t);a.prototype[t]=n}else if("collection"===e){if(o.prototype[t])return d(t);o.prototype[t]=n}else if("layout"===e){for(var f=function(e){this.options=e,n.call(this,e),l.plainObject(this._private)||(this._private={}),this._private.cy=e.cy,this._private.listeners=[],this.createEmitter()},h=f.prototype=Object.create(n.prototype),g=[],m=0;m0;)m();c=n.collection();for(var v=function(e){var t=h[e],n=t.maxDegree(!1),r=t.filter((function(e){return e.degree(!1)===n}));c=c.add(r)},b=0;by.length-1;)y.push([]);y[J].push(X),Z.depth=J,Z.index=y[J].length-1}N()}var K=0;if(t.avoidOverlap)for(var ee=0;eec||0===t)&&(r+=l/u,i++)}return r/=i=Math.max(1,i),0===i&&(r=void 0),ie[e.id()]=r,r},ae=function(e,t){return oe(e)-oe(t)},se=0;se<3;se++){for(var le=0;le0&&y[0].length<=3?u/2:0),f=2*Math.PI/y[i].length*o;return 0===i&&1===y[0].length&&(d=1),{x:de+d*Math.cos(f),y:fe+d*Math.sin(f)}}return{x:de+(o+1-(a+1)/2)*s,y:(i+1)*c}}var p={x:de+(o+1-(a+1)/2)*s,y:(i+1)*c};return p},he={},ge=y.length-1;ge>=0;ge--)for(var me=y[ge],ve=0;ve1&&t.avoidOverlap){p*=1.75;var b=Math.cos(d)-Math.cos(0),y=Math.sin(d)-Math.sin(0),x=Math.sqrt(p*p/(b*b+y*y));f=Math.max(x,f)}return s.layoutPositions(this,t,(function(e,n){var r=t.startAngle+n*d*(a?1:-1),i=f*Math.cos(r),o=f*Math.sin(r);return{x:c+i,y:u+o}})),this},e.exports=s},function(e,t,n){"use strict";var r=n(1),i=n(2),o={fit:!0,padding:30,startAngle:1.5*Math.PI,sweep:void 0,clockwise:!0,equidistant:!1,minNodeSpacing:10,boundingBox:void 0,avoidOverlap:!0,nodeDimensionsIncludeLabels:!1,height:void 0,width:void 0,spacingFactor:void 0,concentric:function(e){return e.degree()},levelWidth:function(e){return e.maxDegree()/4},animate:!1,animationDuration:500,animationEasing:void 
0,animateFilter:function(e,t){return!0},ready:void 0,stop:void 0,transform:function(e,t){return t}};function a(e){this.options=r.extend({},o,e)}a.prototype.run=function(){for(var e=this.options,t=e,n=void 0!==t.counterclockwise?!t.counterclockwise:t.clockwise,r=e.cy,o=t.eles.nodes().not(":parent"),a=i.makeBoundingBox(t.boundingBox?t.boundingBox:{x1:0,y1:0,w:r.width(),h:r.height()}),s=a.x1+a.w/2,l=a.y1+a.h/2,c=[],u=(t.startAngle,0),d=0;d0&&Math.abs(b[0].value-x.value)>=m&&(b=[],v.push(b)),b.push(x)}var w=u+t.minNodeSpacing;if(!t.avoidOverlap){var k=v.length>0&&v[0].length>1,A=(Math.min(a.w,a.h)/2-w)/(v.length+k?1:0);w=Math.min(w,A)}for(var E=0,S=0;S1&&t.avoidOverlap){var O=Math.cos(_)-Math.cos(0),j=Math.sin(_)-Math.sin(0),T=Math.sqrt(w*w/(O*O+j*j));E=Math.max(T,E)}$.r=E,E+=w}if(t.equidistant){for(var P=0,D=0,R=0;R0)var c=(f=r.nodeOverlap*s)*i/(b=Math.sqrt(i*i+o*o)),d=f*o/b;else{var f,p=u(e,i,o),h=u(t,-1*i,-1*o),g=h.x-p.x,m=h.y-p.y,v=g*g+m*m,b=Math.sqrt(v);c=(f=(e.nodeRepulsion+t.nodeRepulsion)/v)*g/b,d=f*m/b}e.isLocked||(e.offsetX-=c,e.offsetY-=d),t.isLocked||(t.offsetX+=c,t.offsetY+=d)}},l=function(e,t,n,r){if(n>0)var i=e.maxX-t.minX;else i=t.maxX-e.minX;if(r>0)var o=e.maxY-t.minY;else o=t.maxY-e.minY;return i>=0&&o>=0?Math.sqrt(i*i+o*o):0},u=function(e,t,n){var r=e.positionX,i=e.positionY,o=e.height||1,a=e.width||1,s=n/t,l=o/a,c={};return 0===t&&0n?(c.x=r,c.y=i+o/2,c):0t&&-1*l<=s&&s<=l?(c.x=r-a/2,c.y=i-a*n/2/t,c):0=l)?(c.x=r+o*t/2/n,c.y=i+o/2,c):0>n&&(s<=-1*l||s>=l)?(c.x=r-o*t/2/n,c.y=i-o/2,c):c},d=function(e,t){for(var n=0;n1){var h=t.gravity*d/p,g=t.gravity*f/p;u.offsetX+=h,u.offsetY+=g}}}}},p=function(e,t){var n=[],r=0,i=-1;for(n.push.apply(n,e.graphSet[0]),i+=e.graphSet[0].length;r<=i;){var o=n[r++],a=e.idToIndex[o],s=e.layoutNodes[a],l=s.children;if(0n)var i={x:n*e/r,y:n*t/r};else i={x:e,y:t};return i},m=function e(t,n){var r=t.parentId;if(null!=r){var 
i=n.layoutNodes[n.idToIndex[r]],o=!1;return(null==i.maxX||t.maxX+i.padRight>i.maxX)&&(i.maxX=t.maxX+i.padRight,o=!0),(null==i.minX||t.minX-i.padLefti.maxY)&&(i.maxY=t.maxY+i.padBottom,o=!0),(null==i.minY||t.minY-i.padTopg&&(f+=h+t.componentSpacing,d=0,p=0,h=0)}}}(0,i),r})).then((function(e){d.layoutNodes=e.layoutNodes,o.stop(),b()}));var b=function(){!0===e.animate||!1===e.animate?v({force:!0,next:function(){n.one("layoutstop",e.stop),n.emit({type:"layoutstop",layout:n})}}):e.eles.nodes().layoutPositions(n,e,(function(e){var t=d.layoutNodes[d.idToIndex[e.data("id")]];return{x:t.positionX,y:t.positionY}}))};return this},c.prototype.stop=function(){return this.stopped=!0,this.thread&&this.thread.stop(),this.emit("layoutstop"),this},c.prototype.destroy=function(){return this.thread&&this.thread.stop(),this};var u=function(e,t,n){for(var r=n.eles.edges(),i=n.eles.nodes(),s={isCompound:e.hasCompoundNodes(),layoutNodes:[],idToIndex:{},nodeSize:i.size(),graphSet:[],indexToGraph:[],layoutEdges:[],edgeSize:r.size(),temperature:n.initialTemp,clientWidth:e.width(),clientHeight:e.width(),boundingBox:o.makeBoundingBox(n.boundingBox?n.boundingBox:{x1:0,y1:0,w:e.width(),h:e.height()})},l=n.eles.components(),c={},u=0;u0)for(s.graphSet.push(A),u=0;ur.count?0:r.graph},f=function e(t,n,r,i){var o=i.graphSet[r];if(-1a){var h=u(),g=d();(h-1)*g>=a?u(h-1):(g-1)*h>=a&&d(g-1)}else for(;c*l=a?d(v+1):u(m+1)}var b=o.w/c,y=o.h/l;if(t.condense&&(b=0,y=0),t.avoidOverlap)for(var x=0;x=c&&(T=0,j++)},D={},R=0;R(r=i.sqdistToFiniteLine(e,t,w[k],w[k+1],w[k+2],w[k+3])))return b(n,r),!0}else if("bezier"===a.edgeType||"multibezier"===a.edgeType||"self"===a.edgeType||"compound"===a.edgeType)for(w=a.allpts,k=0;k+5(r=i.sqdistToQuadraticBezier(e,t,w[k],w[k+1],w[k+2],w[k+3],w[k+4],w[k+5])))return b(n,r),!0;v=v||o.source,x=x||o.target;var 
A=l.getArrowWidth(s,u),E=[{name:"source",x:a.arrowStartX,y:a.arrowStartY,angle:a.srcArrowAngle},{name:"target",x:a.arrowEndX,y:a.arrowEndY,angle:a.tgtArrowAngle},{name:"mid-source",x:a.midX,y:a.midY,angle:a.midsrcArrowAngle},{name:"mid-target",x:a.midX,y:a.midY,angle:a.midtgtArrowAngle}];for(k=0;k0&&(y(v),y(x))}function w(e,t,n){return o.getPrefixedProperty(e,t,n)}function k(n,r){var o,a=n._private,s=m;o=r?r+"-":"";var l=n.pstyle(o+"label").value;if("yes"===n.pstyle("text-events").strValue&&l){var c=a.rstyle,u=n.pstyle("text-border-width").pfValue,d=n.pstyle("text-background-padding").pfValue,f=w(c,"labelWidth",r)+u+2*s+2*d,p=w(c,"labelHeight",r)+u+2*s+2*d,h=w(c,"labelX",r),g=w(c,"labelY",r),v=w(a.rscratch,"labelAngle",r),y=h-f/2,x=h+f/2,k=g-p/2,A=g+p/2;if(v){var E=Math.cos(v),S=Math.sin(v),$=function(e,t){return{x:(e-=h)*E-(t-=g)*S+h,y:e*S+t*E+g}},C=$(y,k),_=$(y,A),O=$(x,k),j=$(x,A),T=[C.x,C.y,O.x,O.y,j.x,j.y,_.x,_.y];if(i.pointInsidePolygonPoints(e,t,T))return b(n),!0}else{var P={w:f,h:p,x1:y,x2:x,y1:k,y2:A};if(i.inBoundingBox(P,e,t))return b(n),!0}}}n&&(u=u.interactive);for(var A=u.length-1;A>=0;A--){var E=u[A];E.isNode()?y(E)||k(E):x(E)||k(E)||k(E,"source")||k(E,"target")}return d},getAllInBox:function(e,t,n,r){var o=this.getCachedZSortedEles().interactive,a=[],s=Math.min(e,n),l=Math.max(e,n),c=Math.min(t,r),u=Math.max(t,r);e=s,n=l,t=c,r=u;for(var d=i.makeBoundingBox({x1:e,y1:t,x2:n,y2:r}),f=0;fb?b+"$-$"+v:v+"$-$"+b,g&&(t="unbundled$-$"+h.id);var y=u[t];null==y&&(y=u[t]=[],d.push(t)),y.push(Bt),g&&(y.hasUnbundled=!0),m&&(y.hasBezier=!0)}else f.push(Bt)}for(var x=0;xGt.id()){var k=Ht;Ht=Gt,Gt=k}Wt=Ht.position(),Yt=Gt.position(),Xt=Ht.outerWidth(),Qt=Ht.outerHeight(),Zt=Gt.outerWidth(),Jt=Gt.outerHeight(),n=l.nodeShapes[this.getNodeShape(Ht)],o=l.nodeShapes[this.getNodeShape(Gt)],s=!1;var 
A={north:0,west:0,south:0,east:0,northwest:0,southwest:0,northeast:0,southeast:0},E=Wt.x,S=Wt.y,$=Xt,C=Qt,_=Yt.x,O=Yt.y,j=Zt,T=Jt,P=w.length;for(p=0;p=d||w){p={cp:b,segment:x};break}}if(p)break}b=p.cp;var k=(d-g)/(x=p.segment).length,A=x.t1-x.t0,E=u?x.t0+A*k:x.t1-A*k;E=r.bound(0,E,1),t=r.qbezierPtAt(b.p0,b.p1,b.p2,E),c=function(e,t,n,i){var o=r.bound(0,i-.001,1),a=r.bound(0,i+.001,1),s=r.qbezierPtAt(e,t,n,o),l=r.qbezierPtAt(e,t,n,a);return f(s,l)}(b.p0,b.p1,b.p2,E);break;case"straight":case"segments":case"haystack":var S,$,C,_,O=0,j=i.allpts.length;for(v=0;v+3=d));v+=2);E=(d-$)/S,E=r.bound(0,E,1),t=r.lineAt(C,_,E),c=f(C,_)}l("labelX",o,t.x),l("labelY",o,t.y),l("labelAutoAngle",o,c)}};c("source"),c("target"),this.applyLabelDimensions(e)}},applyLabelDimensions:function(e){this.applyPrefixedLabelDimensions(e),e.isEdge()&&(this.applyPrefixedLabelDimensions(e,"source"),this.applyPrefixedLabelDimensions(e,"target"))},applyPrefixedLabelDimensions:function(e,t){var n=e._private,r=this.getLabelText(e,t),i=this.calculateLabelDimensions(e,r);o.setPrefixedProperty(n.rstyle,"labelWidth",t,i.width),o.setPrefixedProperty(n.rscratch,"labelWidth",t,i.width),o.setPrefixedProperty(n.rstyle,"labelHeight",t,i.height),o.setPrefixedProperty(n.rscratch,"labelHeight",t,i.height)},getLabelText:function(e,t){var n=e._private,r=t?t+"-":"",i=e.pstyle(r+"label").strValue,a=e.pstyle("text-transform").value,s=function(e,r){return r?(o.setPrefixedProperty(n.rscratch,e,t,r),r):o.getPrefixedProperty(n.rscratch,e,t)};"none"==a||("uppercase"==a?i=i.toUpperCase():"lowercase"==a&&(i=i.toLowerCase()));var l=e.pstyle("text-wrap").value;if("wrap"===l){var c=s("labelKey");if(c&&s("labelWrapKey")===c)return s("labelWrapCachedText");for(var u=i.split("\n"),d=e.pstyle("text-max-width").pfValue,f=[],p=0;pd){for(var g=h.split(/\s+/),m="",v=0;vd);k++)x+=i[k],k===i.length-1&&(w=!0);return w||(x+="…"),x}return i},calculateLabelDimensions:function(e,t,n){var r=e._private.labelStyleKey+"$@$"+t;n&&(r+="$@$"+n);var 
i=this.labelDimCache||(this.labelDimCache={});if(i[r])return i[r];var o=e.pstyle("font-style").strValue,a=1*e.pstyle("font-size").pfValue+"px",s=e.pstyle("font-family").strValue,l=e.pstyle("font-weight").strValue,c=this.labelCalcDiv;c||(c=this.labelCalcDiv=document.createElement("div"),document.body.appendChild(c));var u=c.style;return u.fontFamily=s,u.fontStyle=o,u.fontSize=a,u.fontWeight=l,u.position="absolute",u.left="-9999px",u.top="-9999px",u.zIndex="-1",u.visibility="hidden",u.pointerEvents="none",u.padding="0",u.lineHeight="1","wrap"===e.pstyle("text-wrap").value?u.whiteSpace="pre":u.whiteSpace="normal",c.textContent=t,i[r]={width:Math.ceil(c.clientWidth/1),height:Math.ceil(c.clientHeight/1)},i[r]},calculateLabelAngles:function(e){var t=e._private.rscratch,n=e.isEdge(),r=e.pstyle("text-rotation"),i=r.strValue;"none"===i?t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle=0:n&&"autorotate"===i?(t.labelAngle=Math.atan(t.midDispY/t.midDispX),t.sourceLabelAngle=t.sourceLabelAutoAngle,t.targetLabelAngle=t.targetLabelAutoAngle):t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle="autorotate"===i?0:r.pfValue}};e.exports=a},function(e,t,n){"use strict";var r={getNodeShape:function(e){var t=e.pstyle("shape").value;if(e.isParent())return"rectangle"===t||"roundrectangle"===t||"cutrectangle"===t||"barrel"===t?t:"rectangle";if("polygon"===t){var n=e.pstyle("shape-polygon-points").value;return this.nodeShapes.makePolygon(n).name}return t}};e.exports=r},function(e,t,n){"use strict";var r={registerCalculationListeners:function(){var e=this.cy,t=e.collection(),n=this,r=function(e,n){var r=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];t.merge(e);for(var i=0;i=e.desktopTapThreshold2}var C=n(i);b&&(e.hoverData.tapholdCancelled=!0),s=!0,t(v,["mousemove","vmousemove","tapdrag"],i,{position:{x:p[0],y:p[1]}});var _=function(){e.data.bgActivePosistion=void 
0,e.hoverData.selecting||l.emit("boxstart"),m[4]=1,e.hoverData.selecting=!0,e.redrawHint("select",!0),e.redraw()};if(3===e.hoverData.which){if(b){var O={originalEvent:i,type:"cxtdrag",position:{x:p[0],y:p[1]}};x?x.emit(O):l.emit(O),e.hoverData.cxtDragged=!0,e.hoverData.cxtOver&&v===e.hoverData.cxtOver||(e.hoverData.cxtOver&&e.hoverData.cxtOver.emit({originalEvent:i,type:"cxtdragout",position:{x:p[0],y:p[1]}}),e.hoverData.cxtOver=v,v&&v.emit({originalEvent:i,type:"cxtdragover",position:{x:p[0],y:p[1]}}))}}else if(e.hoverData.dragging){if(s=!0,l.panningEnabled()&&l.userPanningEnabled()){var T;if(e.hoverData.justStartedPan){var P=e.hoverData.mdownPos;T={x:(p[0]-P[0])*c,y:(p[1]-P[1])*c},e.hoverData.justStartedPan=!1}else T={x:w[0]*c,y:w[1]*c};l.panBy(T),e.hoverData.dragged=!0}p=e.projectIntoViewport(i.clientX,i.clientY)}else if(1!=m[4]||null!=x&&!x.isEdge()){if(x&&x.isEdge()&&x.active()&&x.unactivate(),x&&x.grabbed()||v==y||(y&&t(y,["mouseout","tapdragout"],i,{position:{x:p[0],y:p[1]}}),v&&t(v,["mouseover","tapdragover"],i,{position:{x:p[0],y:p[1]}}),e.hoverData.last=v),x)if(b){if(l.boxSelectionEnabled()&&C)x&&x.grabbed()&&(f(k),x.emit("free")),_();else if(x&&x.grabbed()&&e.nodeIsDraggable(x)){var D=!e.dragData.didDrag;D&&e.redrawHint("eles",!0),e.dragData.didDrag=!0;var R=[];e.hoverData.draggingEles||u(l.collection(k),{inDragLayer:!0});for(var I=0;I0&&e.redrawHint("eles",!0),e.dragData.possibleDragElements=l=[]),t(s,["mouseup","tapend","vmouseup"],r,{position:{x:o[0],y:o[1]}}),e.dragData.didDrag||e.hoverData.dragged||e.hoverData.selecting||e.hoverData.isOverThresholdDrag||t(c,["click","tap","vclick"],r,{position:{x:o[0],y:o[1]}}),s!=c||e.dragData.didDrag||e.hoverData.selecting||null!=s&&s._private.selectable&&(e.hoverData.dragging||("additive"===i.selectionType()||u?s.selected()?s.unselect():s.select():u||(i.$(":selected").unmerge(s).unselect(),s.select())),e.redrawHint("eles",!0)),e.hoverData.selecting){var 
h=i.collection(e.getAllInBox(a[0],a[1],a[2],a[3]));e.redrawHint("select",!0),h.length>0&&e.redrawHint("eles",!0),i.emit("boxend");var g=function(e){return e.selectable()&&!e.selected()};"additive"===i.selectionType()||u||i.$(":selected").unmerge(h).unselect(),h.emit("box").stdFilter(g).select().emit("boxselect"),e.redraw()}if(e.hoverData.dragging&&(e.hoverData.dragging=!1,e.redrawHint("select",!0),e.redrawHint("eles",!0),e.redraw()),!a[4]){e.redrawHint("drag",!0),e.redrawHint("eles",!0);var m=c&&c.grabbed();f(l),m&&c.emit("free")}}a[4]=0,e.hoverData.down=null,e.hoverData.cxtStarted=!1,e.hoverData.draggingEles=!1,e.hoverData.selecting=!1,e.hoverData.isOverThresholdDrag=!1,e.dragData.didDrag=!1,e.hoverData.dragged=!1,e.hoverData.dragDelta=[],e.hoverData.mdownPos=null,e.hoverData.mdownGPos=null}}),!1),e.registerBinding(e.container,"wheel",(function(t){if(!e.scrollingPage){var n,r=e.cy,i=e.projectIntoViewport(t.clientX,t.clientY),o=[i[0]*r.zoom()+r.pan().x,i[1]*r.zoom()+r.pan().y];e.hoverData.draggingEles||e.hoverData.dragging||e.hoverData.cxtStarted||0!==e.selection[4]?t.preventDefault():r.panningEnabled()&&r.userPanningEnabled()&&r.zoomingEnabled()&&r.userZoomingEnabled()&&(t.preventDefault(),e.data.wheelZooming=!0,clearTimeout(e.data.wheelTimeout),e.data.wheelTimeout=setTimeout((function(){e.data.wheelZooming=!1,e.redrawHint("eles",!0),e.redraw()}),150),n=null!=t.deltaY?t.deltaY/-250:null!=t.wheelDeltaY?t.wheelDeltaY/1e3:t.wheelDelta/1e3,n*=e.wheelSensitivity,1===t.deltaMode&&(n*=33),r.zoom({level:r.zoom()*Math.pow(10,n),renderedPosition:{x:o[0],y:o[1]}}))}}),!0),e.registerBinding(window,"scroll",(function(t){e.scrollingPage=!0,clearTimeout(e.scrollingPageTimeout),e.scrollingPageTimeout=setTimeout((function(){e.scrollingPage=!1}),250)}),!0),e.registerBinding(e.container,"mouseout",(function(t){var 
n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseout",position:{x:n[0],y:n[1]}})}),!1),e.registerBinding(e.container,"mouseover",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseover",position:{x:n[0],y:n[1]}})}),!1);var T,P,D,R,I=function(e,t,n,r){return Math.sqrt((n-e)*(n-e)+(r-t)*(r-t))},N=function(e,t,n,r){return(n-e)*(n-e)+(r-t)*(r-t)};if(e.registerBinding(e.container,"touchstart",T=function(n){if(j(n)){e.touchData.capture=!0,e.data.bgActivePosistion=void 0;var r=e.cy,i=e.touchData.now,o=e.touchData.earlier;if(n.touches[0]){var a=e.projectIntoViewport(n.touches[0].clientX,n.touches[0].clientY);i[0]=a[0],i[1]=a[1]}if(n.touches[1]&&(a=e.projectIntoViewport(n.touches[1].clientX,n.touches[1].clientY),i[2]=a[0],i[3]=a[1]),n.touches[2]&&(a=e.projectIntoViewport(n.touches[2].clientX,n.touches[2].clientY),i[4]=a[0],i[5]=a[1]),n.touches[1]){f(e.dragData.touchDragEles);var s=e.findContainerClientCoords();S=s[0],$=s[1],C=s[2],_=s[3],v=n.touches[0].clientX-S,b=n.touches[0].clientY-$,y=n.touches[1].clientX-S,x=n.touches[1].clientY-$,O=0<=v&&v<=C&&0<=y&&y<=C&&0<=b&&b<=_&&0<=x&&x<=_;var c=r.pan(),p=r.zoom();if(w=I(v,b,y,x),k=N(v,b,y,x),E=[((A=[(v+y)/2,(b+x)/2])[0]-c.x)/p,(A[1]-c.y)/p],k<4e4&&!n.touches[2]){var h=e.findNearestElement(i[0],i[1],!0,!0),g=e.findNearestElement(i[2],i[3],!0,!0);return h&&h.isNode()?(h.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=h):g&&g.isNode()?(g.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=g):r.emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxt=!0,e.touchData.cxtDragged=!1,e.data.bgActivePosistion=void 0,void e.redraw()}}if(n.touches[2]);else if(n.touches[1]);else if(n.touches[0]){var 
m=e.findNearestElements(i[0],i[1],!0,!0),T=m[0];if(null!=T&&(T.activate(),e.touchData.start=T,e.touchData.starts=m,e.nodeIsGrabbable(T))){var P=e.dragData.touchDragEles=[],D=null;e.redrawHint("eles",!0),e.redrawHint("drag",!0),T.selected()?(D=r.$((function(t){return t.selected()&&e.nodeIsGrabbable(t)})),u(D,{addToList:P})):d(T,{addToList:P}),l(T);var R=function(e){return{originalEvent:n,type:e,position:{x:i[0],y:i[1]}}};T.emit(R("grabon")),D?D.forEach((function(e){e.emit(R("grab"))})):T.emit(R("grab"))}t(T,["touchstart","tapstart","vmousedown"],n,{position:{x:i[0],y:i[1]}}),null==T&&(e.data.bgActivePosistion={x:a[0],y:a[1]},e.redrawHint("select",!0),e.redraw()),e.touchData.singleTouchMoved=!1,e.touchData.singleTouchStartTime=+new Date,clearTimeout(e.touchData.tapholdTimeout),e.touchData.tapholdTimeout=setTimeout((function(){!1!==e.touchData.singleTouchMoved||e.pinching||e.touchData.selecting||(t(e.touchData.start,["taphold"],n,{position:{x:i[0],y:i[1]}}),e.touchData.start||r.$(":selected").unselect())}),e.tapholdDuration)}if(n.touches.length>=1){for(var M=e.touchData.startPosition=[],z=0;z=e.touchTapThreshold2}if(i&&e.touchData.cxt){n.preventDefault();var D=n.touches[0].clientX-S,R=n.touches[0].clientY-$,M=n.touches[1].clientX-S,z=n.touches[1].clientY-$,L=N(D,R,M,z);if(L/k>=2.25||L>=22500){e.touchData.cxt=!1,e.data.bgActivePosistion=void 0,e.redrawHint("select",!0);var B={originalEvent:n,type:"cxttapend",position:{x:c[0],y:c[1]}};e.touchData.start?(e.touchData.start.unactivate().emit(B),e.touchData.start=null):l.emit(B)}}if(i&&e.touchData.cxt){B={originalEvent:n,type:"cxtdrag",position:{x:c[0],y:c[1]}},e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),e.touchData.start?e.touchData.start.emit(B):l.emit(B),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxtDragged=!0;var 
F=e.findNearestElement(c[0],c[1],!0,!0);e.touchData.cxtOver&&F===e.touchData.cxtOver||(e.touchData.cxtOver&&e.touchData.cxtOver.emit({originalEvent:n,type:"cxtdragout",position:{x:c[0],y:c[1]}}),e.touchData.cxtOver=F,F&&F.emit({originalEvent:n,type:"cxtdragover",position:{x:c[0],y:c[1]}}))}else if(i&&n.touches[2]&&l.boxSelectionEnabled())n.preventDefault(),e.data.bgActivePosistion=void 0,this.lastThreeTouch=+new Date,e.touchData.selecting||l.emit("boxstart"),e.touchData.selecting=!0,e.redrawHint("select",!0),s&&0!==s.length&&void 0!==s[0]?(s[2]=(c[0]+c[2]+c[4])/3,s[3]=(c[1]+c[3]+c[5])/3):(s[0]=(c[0]+c[2]+c[4])/3,s[1]=(c[1]+c[3]+c[5])/3,s[2]=(c[0]+c[2]+c[4])/3+1,s[3]=(c[1]+c[3]+c[5])/3+1),s[4]=1,e.touchData.selecting=!0,e.redraw();else if(i&&n.touches[1]&&l.zoomingEnabled()&&l.panningEnabled()&&l.userZoomingEnabled()&&l.userPanningEnabled()){if(n.preventDefault(),e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),ee=e.dragData.touchDragEles){e.redrawHint("drag",!0);for(var q=0;q0)return h[0]}return null},p=Object.keys(d),h=0;h0?f:r.roundRectangleIntersectLine(o,a,e,t,n,i,s)},checkPoint:function(e,t,n,i,o,a,s){var l=r.getRoundRectangleRadius(i,o),c=2*l;if(r.pointInsidePolygon(e,t,this.points,a,s,i,o-c,[0,-1],n))return!0;if(r.pointInsidePolygon(e,t,this.points,a,s,i-c,o,[0,-1],n))return!0;var u=i/2+2*n,d=o/2+2*n,f=[a-u,s-d,a-u,s,a+u,s,a+u,s-d];return!!r.pointInsidePolygonPoints(e,t,f)||!!r.checkInEllipse(e,t,c,c,a+i/2-l,s+o/2-l,n)||!!r.checkInEllipse(e,t,c,c,a-i/2+l,s+o/2-l,n)}}},registerNodeShapes:function(){var 
e=this.nodeShapes={},t=this;this.generateEllipse(),this.generatePolygon("triangle",r.generateUnitNgonPointsFitToSquare(3,0)),this.generatePolygon("rectangle",r.generateUnitNgonPointsFitToSquare(4,0)),e.square=e.rectangle,this.generateRoundRectangle(),this.generateCutRectangle(),this.generateBarrel(),this.generateBottomRoundrectangle(),this.generatePolygon("diamond",[0,1,1,0,0,-1,-1,0]),this.generatePolygon("pentagon",r.generateUnitNgonPointsFitToSquare(5,0)),this.generatePolygon("hexagon",r.generateUnitNgonPointsFitToSquare(6,0)),this.generatePolygon("heptagon",r.generateUnitNgonPointsFitToSquare(7,0)),this.generatePolygon("octagon",r.generateUnitNgonPointsFitToSquare(8,0));var n=new Array(20),i=r.generateUnitNgonPoints(5,0),o=r.generateUnitNgonPoints(5,Math.PI/5),a=.5*(3-Math.sqrt(5));a*=1.57;for(var s=0;s0&&t.data.lyrTxrCache.invalidateElements(n)}))}l.CANVAS_LAYERS=3,l.SELECT_BOX=0,l.DRAG=1,l.NODE=2,l.BUFFER_COUNT=3,l.TEXTURE_BUFFER=0,l.MOTIONBLUR_BUFFER_NODE=1,l.MOTIONBLUR_BUFFER_DRAG=2,l.redrawHint=function(e,t){var n=this;switch(e){case"eles":n.data.canvasNeedsRedraw[l.NODE]=t;break;case"drag":n.data.canvasNeedsRedraw[l.DRAG]=t;break;case"select":n.data.canvasNeedsRedraw[l.SELECT_BOX]=t}};var u="undefined"!=typeof Path2D;l.path2dEnabled=function(e){if(void 0===e)return this.pathsEnabled;this.pathsEnabled=!!e},l.usePaths=function(){return u&&this.pathsEnabled},[n(126),n(127),n(128),n(129),n(130),n(131),n(132),n(133),n(134),n(135)].forEach((function(e){r.extend(l,e)})),e.exports=s},function(e,t,n){"use strict";var r=n(2),i=n(1),o=n(9),a=n(19),s={dequeue:"dequeue",downscale:"downscale",highQuality:"highQuality"},l=function(e){this.renderer=e,this.onDequeues=[],this.setupDequeueing()},c=l.prototype;c.reasons=s,c.getTextureQueue=function(e){return this.eleImgCaches=this.eleImgCaches||{},this.eleImgCaches[e]=this.eleImgCaches[e]||[]},c.getRetiredTextureQueue=function(e){var t=this.eleImgCaches.retired=this.eleImgCaches.retired||{};return 
t[e]=t[e]||[]},c.getElementQueue=function(){return this.eleCacheQueue=this.eleCacheQueue||new o((function(e,t){return t.reqs-e.reqs}))},c.getElementIdToQueue=function(){return this.eleIdToCacheQueue=this.eleIdToCacheQueue||{}},c.getElement=function(e,t,n,i,o){var a=this,l=this.renderer,c=e._private.rscratch,u=l.cy.zoom();if(0===t.w||0===t.h||!e.visible())return null;if(null==i&&(i=Math.ceil(r.log2(u*n))),i<-4)i=-4;else if(u>=3.99||i>2)return null;var d,f=Math.pow(2,i),p=t.h*f,h=t.w*f,g=c.imgCaches=c.imgCaches||{},m=g[i];if(m)return m;if(d=p<=25?25:p<=50?50:50*Math.ceil(p/50),p>1024||h>1024||e.isEdge()||e.isParent())return null;var v=a.getTextureQueue(d),b=v[v.length-2],y=function(){return a.recycleTexture(d,h)||a.addTexture(d,h)};b||(b=v[v.length-1]),b||(b=y()),b.width-b.usedWidthi;$--)C=a.getElement(e,t,n,$,s.downscale);_()}else{var O;if(!A&&!E&&!S)for($=i-1;$>=-4;$--){var j;if(j=g[$]){O=j;break}}if(k(O))return a.queueElement(e,i),O;b.context.translate(b.usedWidth,0),b.context.scale(f,f),l.drawElement(b.context,e,t,w),b.context.scale(1/f,1/f),b.context.translate(-b.usedWidth,0)}return m=g[i]={ele:e,x:b.usedWidth,texture:b,level:i,scale:f,width:h,height:p,scaledLabelShown:w},b.usedWidth+=Math.ceil(h+8),b.eleCaches.push(m),a.checkTextureFullness(b),m},c.invalidateElement=function(e){var t=e._private.rscratch.imgCaches;if(t)for(var n=-4;n<=2;n++){var r=t[n];if(r){var o=r.texture;o.invalidatedWidth+=r.width,t[n]=null,i.removeFromArray(o.eleCaches,r),this.removeFromQueue(e),this.checkTextureUtility(o)}}},c.checkTextureUtility=function(e){e.invalidatedWidth>=.5*e.width&&this.retireTexture(e)},c.checkTextureFullness=function(e){var t=this.getTextureQueue(e.height);e.usedWidth/e.width>.8&&e.fullnessChecks>=10?i.removeFromArray(t,e):e.fullnessChecks++},c.retireTexture=function(e){var t=e.height,n=this.getTextureQueue(t);i.removeFromArray(n,e),e.retired=!0;for(var r=e.eleCaches,o=0;o=t)return 
a.retired=!1,a.usedWidth=0,a.invalidatedWidth=0,a.fullnessChecks=0,i.clearArray(a.eleCaches),a.context.setTransform(1,0,0,1,0,0),a.context.clearRect(0,0,a.width,a.height),i.removeFromArray(r,a),n.push(a),a}},c.queueElement=function(e,t){var n=this.getElementQueue(),r=this.getElementIdToQueue(),i=e.id(),o=r[i];if(o)o.level=Math.max(o.level,t),o.reqs++,n.updateItem(o);else{var a={ele:e,level:t,reqs:1};n.push(a),r[i]=a}},c.dequeue=function(e){for(var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=[],i=0;i<1&&t.size()>0;i++){var o=t.pop(),a=o.ele;if(null==a._private.rscratch.imgCaches[o.level]){n[a.id()]=null,r.push(o);var l=a.boundingBox();this.getElement(a,l,e,o.level,s.dequeue)}}return r},c.removeFromQueue=function(e){var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=n[e.id()];null!=r&&(r.reqs=i.MAX_INT,t.updateItem(r),t.pop(),n[e.id()]=null)},c.onDequeue=function(e){this.onDequeues.push(e)},c.offDequeue=function(e){i.removeFromArray(this.onDequeues,e)},c.setupDequeueing=a.setupDequeueing({deqRedrawThreshold:100,deqCost:.15,deqAvgCost:.1,deqNoDrawCost:.9,deqFastCost:.9,deq:function(e,t,n){return e.dequeue(t,n)},onDeqd:function(e,t){for(var n=0;n=3.99||n>2)return null;o.validateLayersElesOrdering(n,e);var l,c,u=o.layersByLevel,d=Math.pow(2,n),f=u[n]=u[n]||[];if(o.levelIsComplete(n,e))return f;!function(){var t=function(t){if(o.validateLayersElesOrdering(t,e),o.levelIsComplete(t,e))return c=u[t],!0},i=function(e){if(!c)for(var r=n+e;-4<=r&&r<=2&&!t(r);r+=e);};i(1),i(-1);for(var a=f.length-1;a>=0;a--){var s=f[a];s.invalid&&r.removeFromArray(f,s)}}();var p=function(t){var r=(t=t||{}).after;if(function(){if(!l){l=i.makeBoundingBox();for(var t=0;t16e6)return null;var a=o.makeLayer(l,n);if(null!=r){var s=f.indexOf(r)+1;f.splice(s,0,a)}else(void 0===t.insert||t.insert)&&f.unshift(a);return a};if(o.skipping&&!s)return null;for(var h=null,g=e.length/1,m=!s,v=0;v=g||!i.boundingBoxInBoundingBox(h.bb,b.boundingBox()))&&!(h=p({insert:!0,after:h})))return 
null;c||m?o.queueLayer(h,b):o.drawEleInLayer(h,b,n,t),h.eles.push(b),x[n]=h}}return c||(m?null:f)},c.getEleLevelForLayerLevel=function(e,t){return e},c.drawEleInLayer=function(e,t,n,r){var i=this.renderer,o=e.context,a=t.boundingBox();if(0!==a.w&&0!==a.h&&t.visible()){var s=this.eleTxrCache,l=s.reasons.highQuality;n=this.getEleLevelForLayerLevel(n,r);var c=s.getElement(t,a,null,n,l);c?(f(o,!1),o.drawImage(c.texture.canvas,c.x,0,c.width,c.height,a.x1,a.y1,a.w,a.h),f(o,!0)):i.drawElement(o,t)}},c.levelIsComplete=function(e,t){var n=this.layersByLevel[e];if(!n||0===n.length)return!1;for(var r=0,i=0;i0)return!1;if(o.invalid)return!1;r+=o.eles.length}return r===t.length},c.validateLayersElesOrdering=function(e,t){var n=this.layersByLevel[e];if(n)for(var r=0;r0){e=!0;break}}return e},c.invalidateElements=function(e){var t=this;t.lastInvalidationTime=r.performanceNow(),0!==e.length&&t.haveLayers()&&t.updateElementsInLayers(e,(function(e,n,r){t.invalidateLayer(e)}))},c.invalidateLayer=function(e){if(this.lastInvalidationTime=r.performanceNow(),!e.invalid){var t=e.level,n=e.eles,i=this.layersByLevel[t];r.removeFromArray(i,e),e.elesQueue=[],e.invalid=!0,e.replacement&&(e.replacement.invalid=!0);for(var o=0;o0&&void 0!==arguments[0]?arguments[0]:f;e.lineWidth=h,e.lineCap="butt",i.strokeStyle(e,d[0],d[1],d[2],n),i.drawEdgePath(t,e,o.allpts,p)},m=function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:f;i.drawArrowheads(e,t,n)};if(e.lineJoin="round","yes"===t.pstyle("ghost").value){var v=t.pstyle("ghost-offset-x").pfValue,b=t.pstyle("ghost-offset-y").pfValue,y=t.pstyle("ghost-opacity").value,x=f*y;e.translate(v,b),g(x),m(x),e.translate(-v,-b)}g(),m(),function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c;e.lineWidth=l,"self"!==o.edgeType||a?e.lineCap="round":e.lineCap="butt",i.strokeStyle(e,u[0],u[1],u[2],n),i.drawEdgePath(t,e,o.allpts,"solid")}(),i.drawElementText(e,t,r),n&&e.translate(s.x1,s.y1)}},drawEdgePath:function(e,t,n,r){var 
i=e._private.rscratch,o=t,a=void 0,s=!1,l=this.usePaths();if(l){var c=n.join("$");i.pathCacheKey&&i.pathCacheKey===c?(a=t=i.pathCache,s=!0):(a=t=new Path2D,i.pathCacheKey=c,i.pathCache=a)}if(o.setLineDash)switch(r){case"dotted":o.setLineDash([1,1]);break;case"dashed":o.setLineDash([6,3]);break;case"solid":o.setLineDash([])}if(!s&&!i.badLine)switch(t.beginPath&&t.beginPath(),t.moveTo(n[0],n[1]),i.edgeType){case"bezier":case"self":case"compound":case"multibezier":if(e.hasClass("horizontal")){var u=n[4],d=n[5],f=(n[0]+n[4])/2;t.lineTo(n[0]+10,n[1]),t.bezierCurveTo(f,n[1],f,n[5],n[4]-10,n[5]),t.lineTo(u,d)}else if(e.hasClass("vertical")){var p=n[4],h=n[5],g=(n[1]+n[5])/2;t.bezierCurveTo(n[0],g,n[4],g,n[4],n[5]-10),t.lineTo(p,h)}else for(var m=2;m+30||j>0&&O>0){var P=f-T;switch(k){case"left":P-=m;break;case"center":P-=m/2}var D=p-v-T,R=m+2*T,I=v+2*T;if(_>0){var N=e.fillStyle,M=t.pstyle("text-background-color").value;e.fillStyle="rgba("+M[0]+","+M[1]+","+M[2]+","+_*o+")","roundrectangle"==t.pstyle("text-background-shape").strValue?(s=P,l=D,c=R,u=I,d=(d=2)||5,(a=e).beginPath(),a.moveTo(s+d,l),a.lineTo(s+c-d,l),a.quadraticCurveTo(s+c,l,s+c,l+d),a.lineTo(s+c,l+u-d),a.quadraticCurveTo(s+c,l+u,s+c-d,l+u),a.lineTo(s+d,l+u),a.quadraticCurveTo(s,l+u,s,l+u-d),a.lineTo(s,l+d),a.quadraticCurveTo(s,l,s+d,l),a.closePath(),a.fill()):e.fillRect(P,D,R,I),e.fillStyle=N}if(j>0&&O>0){var z=e.strokeStyle,L=e.lineWidth,B=t.pstyle("text-border-color").value,F=t.pstyle("text-border-style").value;if(e.strokeStyle="rgba("+B[0]+","+B[1]+","+B[2]+","+O*o+")",e.lineWidth=j,e.setLineDash)switch(F){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"double":e.lineWidth=j/4,e.setLineDash([]);break;case"solid":e.setLineDash([])}if(e.strokeRect(P,D,R,I),"double"===F){var q=j/2;e.strokeRect(P+q,D+q,R-2*q,I-2*q)}e.setLineDash&&e.setLineDash([]),e.lineWidth=L,e.strokeStyle=z}}var 
V=2*t.pstyle("text-outline-width").pfValue;if(V>0&&(e.lineWidth=V),"wrap"===t.pstyle("text-wrap").value){var U=r.getPrefixedProperty(i,"labelWrapCachedLines",n),H=v/U.length;switch(A){case"top":p-=(U.length-1)*H;break;case"center":case"bottom":p-=(U.length-1)*H}for(var G=0;G0&&e.strokeText(U[G],f,p),e.fillText(U[G],f,p),p+=H}else V>0&&e.strokeText(h,f,p),e.fillText(h,f,p);0!==E&&(e.rotate(-E),e.translate(-$,-C))}}},e.exports=o},function(e,t,n){"use strict";var r=n(0),i={drawNode:function(e,t,n,i){var o,a,s=this,l=t._private,c=l.rscratch,u=t.position();if(r.number(u.x)&&r.number(u.y)&&t.visible()){var d=t.effectiveOpacity(),f=s.usePaths(),p=void 0,h=!1,g=t.padding();o=t.width()+2*g,a=t.height()+2*g;var m=void 0;n&&(m=n,e.translate(-m.x1,-m.y1));for(var v=t.pstyle("background-image").value,b=new Array(v.length),y=new Array(v.length),x=0,w=0;w0&&void 0!==arguments[0]?arguments[0]:C;s.fillStyle(e,$[0],$[1],$[2],t)},P=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:j;s.strokeStyle(e,_[0],_[1],_[2],t)},D=t.pstyle("shape").strValue,R=t.pstyle("shape-polygon-points").pfValue;if(f){var I=D+"$"+o+"$"+a+("polygon"===D?"$"+R.join("$"):"");e.translate(u.x,u.y),c.pathCacheKey===I?(p=c.pathCache,h=!0):(p=new Path2D,c.pathCacheKey=I,c.pathCache=p)}var N,M,z,L=function(){if(!h){var n=u;f&&(n={x:0,y:0}),s.nodeShapes[s.getNodeShape(t)].draw(p||e,n.x,n.y,o,a)}f?e.fill(p):e.fill()},B=function(){for(var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,r=l.backgrounding,i=0,o=0;o0&&void 0!==arguments[0]&&arguments[0],r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:d;s.hasPie(t)&&(s.drawPie(e,t,r),n&&(f||s.nodeShapes[s.getNodeShape(t)].draw(e,u.x,u.y,o,a)))},q=function(){var t=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:d,n=(E>0?E:-E)*t,r=E>0?0:255;0!==E&&(s.fillStyle(e,r,r,r,n),f?e.fill(p):e.fill())},V=function(){if(S>0){if(e.lineWidth=S,e.lineCap="butt",e.setLineDash)switch(O){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"solid":case"double":e.setLineDash([])}if(f?e.stroke(p):e.stroke(),"double"===O){e.lineWidth=S/3;var t=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",f?e.stroke(p):e.stroke(),e.globalCompositeOperation=t}e.setLineDash&&e.setLineDash([])}};if("yes"===t.pstyle("ghost").value){var U=t.pstyle("ghost-offset-x").pfValue,H=t.pstyle("ghost-offset-y").pfValue,G=t.pstyle("ghost-opacity").value,W=G*d;e.translate(U,H),T(G*C),L(),B(W),F(0!==E||0!==S),q(W),P(G*j),V(),e.translate(-U,-H)}T(),L(),B(),F(0!==E||0!==S),q(),P(),V(),f&&e.translate(-u.x,-u.y),s.drawElementText(e,t,i),N=t.pstyle("overlay-padding").pfValue,M=t.pstyle("overlay-opacity").value,z=t.pstyle("overlay-color").value,M>0&&(s.fillStyle(e,z[0],z[1],z[2],M),s.nodeShapes.roundrectangle.draw(e,u.x,u.y,o+2*N,a+2*N),e.fill()),n&&e.translate(m.x1,m.y1)}},hasPie:function(e){return(e=e[0])._private.hasPie},drawPie:function(e,t,n,r){t=t[0],r=r||t.position();var i=t.cy().style(),o=t.pstyle("pie-size"),a=r.x,s=r.y,l=t.width(),c=t.height(),u=Math.min(l,c)/2,d=0;this.usePaths()&&(a=0,s=0),"%"===o.units?u*=o.pfValue:void 0!==o.pfValue&&(u=o.pfValue/2);for(var f=1;f<=i.pieBackgroundN;f++){var p=t.pstyle("pie-"+f+"-background-size").value,h=t.pstyle("pie-"+f+"-background-color").value,g=t.pstyle("pie-"+f+"-background-opacity").value*n,m=p/100;m+d>1&&(m=1-d);var v=1.5*Math.PI+2*Math.PI*d,b=v+2*Math.PI*m;0===p||d>=1||d+m>1||(e.beginPath(),e.moveTo(a,s),e.arc(a,s,u,v,b),e.closePath(),this.fillStyle(e,h[0],h[1],h[2],g),e.fill(),d+=m)}}};e.exports=i},function(e,t,n){"use strict";var r={},i=n(1);r.getPixelRatio=function(){var e=this.data.contexts[0];if(null!=this.forcedPixelRatio)return this.forcedPixelRatio;var 
t=e.backingStorePixelRatio||e.webkitBackingStorePixelRatio||e.mozBackingStorePixelRatio||e.msBackingStorePixelRatio||e.oBackingStorePixelRatio||e.backingStorePixelRatio||1;return(window.devicePixelRatio||1)/t},r.paintCache=function(e){for(var t,n=this.paintCaches=this.paintCaches||[],r=!0,i=0;is.minMbLowQualFrames&&(s.motionBlurPxRatio=s.mbPxRBlurry)),s.clearingMotionBlur&&(s.motionBlurPxRatio=1),s.textureDrawLastFrame&&!f&&(d[s.NODE]=!0,d[s.SELECT_BOX]=!0);var y=c.style()._private.coreStyle,x=c.zoom(),w=void 0!==o?o:x,k=c.pan(),A={x:k.x,y:k.y},E={zoom:x,pan:{x:k.x,y:k.y}},S=s.prevViewport;void 0===S||E.zoom!==S.zoom||E.pan.x!==S.pan.x||E.pan.y!==S.pan.y||m&&!g||(s.motionBlurPxRatio=1),a&&(A=a),w*=l,A.x*=l,A.y*=l;var $=s.getCachedZSortedEles();function C(e,t,n,r,i){var o=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",s.fillStyle(e,255,255,255,s.motionBlurTransparency),e.fillRect(t,n,r,i),e.globalCompositeOperation=o}function _(e,r){var i,l,c,d;s.clearingMotionBlur||e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]&&e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]?(i=A,l=w,c=s.canvasWidth,d=s.canvasHeight):(i={x:k.x*h,y:k.y*h},l=x*h,c=s.canvasWidth*h,d=s.canvasHeight*h),e.setTransform(1,0,0,1,0,0),"motionBlur"===r?C(e,0,0,c,d):t||void 0!==r&&!r||e.clearRect(0,0,c,d),n||(e.translate(i.x,i.y),e.scale(l,l)),a&&e.translate(a.x,a.y),o&&e.scale(o,o)}if(f||(s.textureDrawLastFrame=!1),f){if(s.textureDrawLastFrame=!0,!s.textureCache){s.textureCache={},s.textureCache.bb=c.mutableElements().boundingBox(),s.textureCache.texture=s.data.bufferCanvases[s.TEXTURE_BUFFER];var 
O=s.data.bufferContexts[s.TEXTURE_BUFFER];O.setTransform(1,0,0,1,0,0),O.clearRect(0,0,s.canvasWidth*s.textureMult,s.canvasHeight*s.textureMult),s.render({forcedContext:O,drawOnlyNodeLayer:!0,forcedPxRatio:l*s.textureMult}),(E=s.textureCache.viewport={zoom:c.zoom(),pan:c.pan(),width:s.canvasWidth,height:s.canvasHeight}).mpan={x:(0-E.pan.x)/E.zoom,y:(0-E.pan.y)/E.zoom}}d[s.DRAG]=!1,d[s.NODE]=!1;var j=u.contexts[s.NODE],T=s.textureCache.texture;E=s.textureCache.viewport,s.textureCache.bb,j.setTransform(1,0,0,1,0,0),p?C(j,0,0,E.width,E.height):j.clearRect(0,0,E.width,E.height);var P=y["outside-texture-bg-color"].value,D=y["outside-texture-bg-opacity"].value;s.fillStyle(j,P[0],P[1],P[2],D),j.fillRect(0,0,E.width,E.height),x=c.zoom(),_(j,!1),j.clearRect(E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l),j.drawImage(T,E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l)}else s.textureOnViewport&&!t&&(s.textureCache=null);var R=c.extent(),I=s.pinching||s.hoverData.dragging||s.swipePanning||s.data.wheelZooming||s.hoverData.draggingEles,N=s.hideEdgesOnViewport&&I,M=[];if(M[s.NODE]=!d[s.NODE]&&p&&!s.clearedForMotionBlur[s.NODE]||s.clearingMotionBlur,M[s.NODE]&&(s.clearedForMotionBlur[s.NODE]=!0),M[s.DRAG]=!d[s.DRAG]&&p&&!s.clearedForMotionBlur[s.DRAG]||s.clearingMotionBlur,M[s.DRAG]&&(s.clearedForMotionBlur[s.DRAG]=!0),d[s.NODE]||n||r||M[s.NODE]){var z=p&&!M[s.NODE]&&1!==h;_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]:u.contexts[s.NODE]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.nondrag,l,R):s.drawLayeredElements(j,$.nondrag,l,R),s.debug&&s.drawDebugPoints(j,$.nondrag),n||p||(d[s.NODE]=!1)}if(!r&&(d[s.DRAG]||n||M[s.DRAG])&&(z=p&&!M[s.DRAG]&&1!==h,_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]:u.contexts[s.DRAG]),p&&!z?"motionBlur":void 
0),N?s.drawCachedNodes(j,$.drag,l,R):s.drawCachedElements(j,$.drag,l,R),s.debug&&s.drawDebugPoints(j,$.drag),n||p||(d[s.DRAG]=!1)),s.showFps||!r&&d[s.SELECT_BOX]&&!n){if(_(j=t||u.contexts[s.SELECT_BOX]),1==s.selection[4]&&(s.hoverData.selecting||s.touchData.selecting)){x=s.cy.zoom();var L=y["selection-box-border-width"].value/x;j.lineWidth=L,j.fillStyle="rgba("+y["selection-box-color"].value[0]+","+y["selection-box-color"].value[1]+","+y["selection-box-color"].value[2]+","+y["selection-box-opacity"].value+")",j.fillRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]),L>0&&(j.strokeStyle="rgba("+y["selection-box-border-color"].value[0]+","+y["selection-box-border-color"].value[1]+","+y["selection-box-border-color"].value[2]+","+y["selection-box-opacity"].value+")",j.strokeRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]))}if(u.bgActivePosistion&&!s.hoverData.selecting){x=s.cy.zoom();var B=u.bgActivePosistion;j.fillStyle="rgba("+y["active-bg-color"].value[0]+","+y["active-bg-color"].value[1]+","+y["active-bg-color"].value[2]+","+y["active-bg-opacity"].value+")",j.beginPath(),j.arc(B.x,B.y,y["active-bg-size"].pfValue/x,0,2*Math.PI),j.fill()}var F=s.lastRedrawTime;if(s.showFps&&F){F=Math.round(F);var q=Math.round(1e3/F);j.setTransform(1,0,0,1,0,0),j.fillStyle="rgba(255, 0, 0, 0.75)",j.strokeStyle="rgba(255, 0, 0, 0.75)",j.lineWidth=1,j.fillText("1 frame = "+F+" ms = "+q+" fps",0,20),j.strokeRect(0,30,250,20),j.fillRect(0,30,250*Math.min(q/60,1),20)}n||(d[s.SELECT_BOX]=!1)}if(p&&1!==h){var V=u.contexts[s.NODE],U=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_NODE],H=u.contexts[s.DRAG],G=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_DRAG],W=function(e,t,n){e.setTransform(1,0,0,1,0,0),n||!b?e.clearRect(0,0,s.canvasWidth,s.canvasHeight):C(e,0,0,s.canvasWidth,s.canvasHeight);var 
r=h;e.drawImage(t,0,0,s.canvasWidth*r,s.canvasHeight*r,0,0,s.canvasWidth,s.canvasHeight)};(d[s.NODE]||M[s.NODE])&&(W(V,U,M[s.NODE]),d[s.NODE]=!1),(d[s.DRAG]||M[s.DRAG])&&(W(H,G,M[s.DRAG]),d[s.DRAG]=!1)}s.prevViewport=E,s.clearingMotionBlur&&(s.clearingMotionBlur=!1,s.motionBlurCleared=!0,s.motionBlur=!0),p&&(s.motionBlurTimeout=setTimeout((function(){s.motionBlurTimeout=null,s.clearedForMotionBlur[s.NODE]=!1,s.clearedForMotionBlur[s.DRAG]=!1,s.motionBlur=!1,s.clearingMotionBlur=!f,s.mbFrames=0,d[s.NODE]=!0,d[s.DRAG]=!0,s.redraw()}),100)),t||c.emit("render")},e.exports=r},function(e,t,n){"use strict";for(var r=n(2),i={drawPolygonPath:function(e,t,n,r,i,o){var a=r/2,s=i/2;e.beginPath&&e.beginPath(),e.moveTo(t+a*o[0],n+s*o[1]);for(var l=1;l0&&a>0){p.clearRect(0,0,o,a),p.globalCompositeOperation="source-over";var h=this.getCachedZSortedEles();if(e.full)p.translate(-n.x1*c,-n.y1*c),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(n.x1*c,n.y1*c);else{var g=t.pan(),m={x:g.x*c,y:g.y*c};c*=t.zoom(),p.translate(m.x,m.y),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(-m.x,-m.y)}e.bg&&(p.globalCompositeOperation="destination-over",p.fillStyle=e.bg,p.rect(0,0,o,a),p.fill())}return f},i.png=function(e){return a(e,this.bufferCanvasImage(e),"image/png")},i.jpg=function(e){return a(e,this.bufferCanvasImage(e),"image/jpeg")},e.exports=i},function(e,t,n){"use strict";var r={nodeShapeImpl:function(e,t,n,r,i,o,a){switch(e){case"ellipse":return this.drawEllipsePath(t,n,r,i,o);case"polygon":return this.drawPolygonPath(t,n,r,i,o,a);case"roundrectangle":return this.drawRoundRectanglePath(t,n,r,i,o);case"cutrectangle":return this.drawCutRectanglePath(t,n,r,i,o);case"bottomroundrectangle":return this.drawBottomRoundRectanglePath(t,n,r,i,o);case"barrel":return this.drawBarrelPath(t,n,r,i,o)}}};e.exports=r},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(18),a=function e(){if(!(this instanceof e))return new 
e;this.length=0},s=a.prototype;s.instanceString=function(){return"stylesheet"},s.selector=function(e){return this[this.length++]={selector:e,properties:[]},this},s.css=function(e,t){var n=this.length-1;if(r.string(e))this[n].properties.push({name:e,value:t});else if(r.plainObject(e))for(var a=e,s=0;s=0&&(e._idleTimeoutId=setTimeout((function(){e._onTimeout&&e._onTimeout()}),t))},n(239),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,n(35))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,i,o,a,s,l=1,c={},u=!1,d=e.document,f=Object.getPrototypeOf&&Object.getPrototypeOf(e);f=f&&f.setTimeout?f:e,"[object process]"==={}.toString.call(e.process)?r=function(e){t.nextTick((function(){h(e)}))}:!function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?e.MessageChannel?((o=new MessageChannel).port1.onmessage=function(e){h(e.data)},r=function(e){o.port2.postMessage(e)}):d&&"onreadystatechange"in d.createElement("script")?(i=d.documentElement,r=function(e){var t=d.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,i.removeChild(t),t=null},i.appendChild(t)}):r=function(e){setTimeout(h,0,e)}:(a="setImmediate$"+Math.random()+"$",s=function(t){t.source===e&&"string"==typeof t.data&&0===t.data.indexOf(a)&&h(+t.data.slice(a.length))},e.addEventListener?e.addEventListener("message",s,!1):e.attachEvent("onmessage",s),r=function(t){e.postMessage(a+t,"*")}),f.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n1)for(var n=1;n=t||n<0||m&&e-c>=o}function w(){var e=p();if(x(e))return k(e);s=setTimeout(w,function(e){var n=t-(e-l);return m?f(n,o-(e-c)):n}(e))}function 
k(e){return s=void 0,v&&r?b(e):(r=i=void 0,a)}function A(){var e=p(),n=x(e);if(r=arguments,i=this,l=e,n){if(void 0===s)return y(l);if(m)return s=setTimeout(w,t),b(l)}return void 0===s&&(s=setTimeout(w,t)),a}return t=g(t)||0,h(n)&&(u=!!n.leading,o=(m="maxWait"in n)?d(g(n.maxWait)||0,t):o,v="trailing"in n?!!n.trailing:v),A.cancel=function(){void 0!==s&&clearTimeout(s),c=0,r=l=i=s=void 0},A.flush=function(){return void 0===s?a:k(p())},A}}).call(this,n(35))},function(e,t,n){e.exports=n(243)},function(e,t,n){var r,i,o;(function(){var n,a,s,l,c,u,d,f,p,h,g,m,v,b,y;s=Math.floor,h=Math.min,a=function(e,t){return et?1:0},p=function(e,t,n,r,i){var o;if(null==n&&(n=0),null==i&&(i=a),n<0)throw new Error("lo must be non-negative");for(null==r&&(r=e.length);nn;0<=n?t++:t--)c.push(t);return c}.apply(this).reverse()).length;rg;0<=g?++u:--u)m.push(c(e,n));return m},b=function(e,t,n,r){var i,o,s;for(null==r&&(r=a),i=e[n];n>t&&r(i,o=e[s=n-1>>1])<0;)e[n]=o,n=s;return e[n]=i},y=function(e,t,n){var r,i,o,s,l;for(null==n&&(n=a),i=e.length,l=t,o=e[t],r=2*t+1;r'+e.content+"":s+=">"+e.content+"";var l=t(s);return l.data("selector",e.selector),l.data("on-click-function",e.onClickFunction),l.data("show",void 0===e.show||e.show),l}function y(){var e;l("active")&&(e=s.children(),t(e).each((function(){x(t(this))})),i.off("tapstart",n),s.remove(),c(s=void 0,void 0),c("active",!1),c("anyVisibleChild",!1))}function x(e){var n="string"==typeof e?t("#"+e):e,r=n.data("cy-context-menus-cxtfcn"),o=n.data("selector"),a=n.data("call-on-click-function"),s=n.data("cy-context-menus-cxtcorefcn");r&&i.off("cxttap",o,r),s&&i.off("cxttap",s),a&&n.off("click",a),n.remove()}"get"!==e&&(c("options",a=function(e,t){var n={};for(var r in e)n[r]=e[r];for(var r in t)n[r]=t[r];return n}(r,e)),l("active")&&y(),c("active",!0),o=u(a.contextMenuClasses),(s=t("
    ")).addClass("cy-context-menus-cxt-menu"),c("cxtMenu",s),t("body").append(s),s=s,g(a.menuItems),i.on("tapstart",n=function(){f(s),c("cxtMenuPosition",void 0),c("currentCyEvent",void 0)}),t(".cy-context-menus-cxt-menu").contextmenu((function(){return!1})));return function(e){return{isActive:function(){return l("active")},appendMenuItem:function(t){return m(t),e},appendMenuItems:function(t){return g(t),e},removeMenuItem:function(t){return x(t),e},setTrailingDivider:function(n,r){return function(e,n){var r=t("#"+e);n?r.addClass("cy-context-menus-divider"):r.removeClass("cy-context-menus-divider")}(n,r),e},insertBeforeMenuItem:function(t,n){return v(t,n),e},moveBeforeOtherMenuItem:function(n,r){return function(e,n){if(e!==n){var r=t("#"+e).detach(),i=t("#"+n);r.insertBefore(i)}}(n,r),e},disableMenuItem:function(n){return t("#"+n).attr("disabled",!0),e},enableMenuItem:function(n){return t("#"+n).attr("disabled",!1),e},hideMenuItem:function(n){return t("#"+n).data("show",!1),f(t("#"+n)),e},showMenuItem:function(n){return t("#"+n).data("show",!0),d(t("#"+n)),e},destroy:function(){return y(),e}}}(this)}))}};e.exports&&(e.exports=o),void 0===(r=function(){return o}.call(t,n,t,e))||(e.exports=r),"undefined"!=typeof cytoscape&&i&&o(cytoscape,i)}()},function(e,t,n){var r;r=function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,function(t){return 
e[t]}.bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=0)}([function(e,t,n){var r=n(1),i=function(e){e&&e("layout","dagre",r)};"undefined"!=typeof cytoscape&&i(cytoscape),e.exports=i},function(e,t,n){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var i=n(2),o=n(3),a=n(4);function s(e){this.options=o({},i,e)}s.prototype.run=function(){var e=this.options,t=e.cy,n=e.eles,i=function(e,t){return"function"==typeof t?t.apply(e,[e]):t},o=e.boundingBox||{x1:0,y1:0,w:t.width(),h:t.height()};void 0===o.x2&&(o.x2=o.x1+o.w),void 0===o.w&&(o.w=o.x2-o.x1),void 0===o.y2&&(o.y2=o.y1+o.h),void 0===o.h&&(o.h=o.y2-o.y1);var s=new a.graphlib.Graph({multigraph:!0,compound:!0}),l={},c=function(e,t){null!=t&&(l[e]=t)};c("nodesep",e.nodeSep),c("edgesep",e.edgeSep),c("ranksep",e.rankSep),c("rankdir",e.rankDir),c("ranker",e.ranker),s.setGraph(l),s.setDefaultEdgeLabel((function(){return{}})),s.setDefaultNodeLabel((function(){return{}}));for(var u=n.nodes(),d=0;d1?t-1:0),r=1;r-1}},function(e,t,n){var r=n(75);e.exports=function(e,t){var n=this.__data__,i=r(n,e);return i<0?(++this.size,n.push([e,t])):n[i][1]=t,this}},function(e,t,n){var r=n(74);e.exports=function(){this.__data__=new r,this.size=0}},function(e,t){e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},function(e,t){e.exports=function(e){return this.__data__.get(e)}},function(e,t){e.exports=function(e){return this.__data__.has(e)}},function(e,t,n){var r=n(74),i=n(117),o=n(118);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!i||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new o(a)}return 
n.set(e,t),this.size=n.size,this}},function(e,t,n){var r=n(64),i=n(262),o=n(23),a=n(151),s=/^\[object .+?Constructor\]$/,l=Function.prototype,c=Object.prototype,u=l.toString,d=c.hasOwnProperty,f=RegExp("^"+u.call(d).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");e.exports=function(e){return!(!o(e)||i(e))&&(r(e)?f:s).test(a(e))}},function(e,t,n){var r=n(58),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,s=r?r.toStringTag:void 0;e.exports=function(e){var t=o.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(e){}var i=a.call(e);return r&&(t?e[s]=n:delete e[s]),i}},function(e,t){var n=Object.prototype.toString;e.exports=function(e){return n.call(e)}},function(e,t,n){var r,i=n(263),o=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";e.exports=function(e){return!!o&&o in e}},function(e,t,n){var r=n(29)["__core-js_shared__"];e.exports=r},function(e,t){e.exports=function(e,t){return null==e?void 0:e[t]}},function(e,t,n){var r=n(266),i=n(74),o=n(117);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(o||i),string:new r}}},function(e,t,n){var r=n(267),i=n(268),o=n(269),a=n(270),s=n(271);function l(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t0){if(++t>=800)return arguments[0]}else t=0;return e.apply(void 0,arguments)}}},function(e,t,n){var r=n(173),i=n(340),o=n(344),a=n(174),s=n(345),l=n(129);e.exports=function(e,t,n){var c=-1,u=i,d=e.length,f=!0,p=[],h=p;if(n)f=!1,u=o;else if(d>=200){var g=t?null:s(e);if(g)return l(g);f=!1,u=a,h=new r}else h=t?[]:p;e:for(;++c-1}},function(e,t,n){var r=n(188),i=n(342),o=n(343);e.exports=function(e,t,n){return t==t?o(e,t,n):r(e,i,n)}},function(e,t){e.exports=function(e){return e!=e}},function(e,t){e.exports=function(e,t,n){for(var r=n-1,i=e.length;++r1||1===t.length&&e.hasEdge(t[0],t[0])}))}},function(e,t,n){var r=n(22);e.exports=function(e,t,n){return function(e,t,n){var r={},i=e.nodes();return 
i.forEach((function(e){r[e]={},r[e][e]={distance:0},i.forEach((function(t){e!==t&&(r[e][t]={distance:Number.POSITIVE_INFINITY})})),n(e).forEach((function(n){var i=n.v===e?n.w:n.v,o=t(n);r[e][i]={distance:o,predecessor:e}}))})),i.forEach((function(e){var t=r[e];i.forEach((function(n){var o=r[n];i.forEach((function(n){var r=o[e],i=t[n],a=o[n],s=r.distance+i.distance;s0;){if(n=l.removeMin(),r.has(s,n))a.setEdge(n,s[n]);else{if(u)throw new Error("Input graph is not connected: "+e);u=!0}e.nodeEdges(n).forEach(c)}return a}},function(e,t,n){"use strict";var r=n(11),i=n(399),o=n(402),a=n(403),s=n(20).normalizeRanks,l=n(405),c=n(20).removeEmptyRanks,u=n(406),d=n(407),f=n(408),p=n(409),h=n(418),g=n(20),m=n(28).Graph;e.exports=function(e,t){var n=t&&t.debugTiming?g.time:g.notime;n("layout",(function(){var t=n(" buildLayoutGraph",(function(){return function(e){var t=new m({multigraph:!0,compound:!0}),n=$(e.graph());return t.setGraph(r.merge({},b,S(n,v),r.pick(n,y))),r.forEach(e.nodes(),(function(n){var i=$(e.node(n));t.setNode(n,r.defaults(S(i,x),w)),t.setParent(n,e.parent(n))})),r.forEach(e.edges(),(function(n){var i=$(e.edge(n));t.setEdge(n,r.merge({},A,S(i,k),r.pick(i,E)))})),t}(e)}));n(" runLayout",(function(){!function(e,t){t(" makeSpaceForEdgeLabels",(function(){!function(e){var t=e.graph();t.ranksep/=2,r.forEach(e.edges(),(function(n){var r=e.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===t.rankdir||"BT"===t.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(e)})),t(" removeSelfEdges",(function(){!function(e){r.forEach(e.edges(),(function(t){if(t.v===t.w){var n=e.node(t.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:t,label:e.edge(t)}),e.removeEdge(t)}}))}(e)})),t(" acyclic",(function(){i.run(e)})),t(" nestingGraph.run",(function(){u.run(e)})),t(" rank",(function(){a(g.asNonCompoundGraph(e))})),t(" injectEdgeLabelProxies",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(n.width&&n.height){var 
r=e.node(t.v),i={rank:(e.node(t.w).rank-r.rank)/2+r.rank,e:t};g.addDummyNode(e,"edge-proxy",i,"_ep")}}))}(e)})),t(" removeEmptyRanks",(function(){c(e)})),t(" nestingGraph.cleanup",(function(){u.cleanup(e)})),t(" normalizeRanks",(function(){s(e)})),t(" assignRankMinMax",(function(){!function(e){var t=0;r.forEach(e.nodes(),(function(n){var i=e.node(n);i.borderTop&&(i.minRank=e.node(i.borderTop).rank,i.maxRank=e.node(i.borderBottom).rank,t=r.max(t,i.maxRank))})),e.graph().maxRank=t}(e)})),t(" removeEdgeLabelProxies",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);"edge-proxy"===n.dummy&&(e.edge(n.e).labelRank=n.rank,e.removeNode(t))}))}(e)})),t(" normalize.run",(function(){o.run(e)})),t(" parentDummyChains",(function(){l(e)})),t(" addBorderSegments",(function(){d(e)})),t(" order",(function(){p(e)})),t(" insertSelfEdges",(function(){!function(e){var t=g.buildLayerMatrix(e);r.forEach(t,(function(t){var n=0;r.forEach(t,(function(t,i){var o=e.node(t);o.order=i+n,r.forEach(o.selfEdges,(function(t){g.addDummyNode(e,"selfedge",{width:t.label.width,height:t.label.height,rank:o.rank,order:i+ ++n,e:t.e,label:t.label},"_se")})),delete o.selfEdges}))}))}(e)})),t(" adjustCoordinateSystem",(function(){f.adjust(e)})),t(" position",(function(){h(e)})),t(" positionSelfEdges",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);if("selfedge"===n.dummy){var r=e.node(n.e.v),i=r.x+r.width/2,o=r.y,a=n.x-i,s=r.height/2;e.setEdge(n.e,n.label),e.removeNode(t),n.label.points=[{x:i+2*a/3,y:o-s},{x:i+5*a/6,y:o-s},{x:i+a,y:o},{x:i+5*a/6,y:o+s},{x:i+2*a/3,y:o+s}],n.label.x=n.x,n.label.y=n.y}}))}(e)})),t(" removeBorderNodes",(function(){!function(e){r.forEach(e.nodes(),(function(t){if(e.children(t).length){var 
n=e.node(t),i=e.node(n.borderTop),o=e.node(n.borderBottom),a=e.node(r.last(n.borderLeft)),s=e.node(r.last(n.borderRight));n.width=Math.abs(s.x-a.x),n.height=Math.abs(o.y-i.y),n.x=a.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(e.nodes(),(function(t){"border"===e.node(t).dummy&&e.removeNode(t)}))}(e)})),t(" normalize.undo",(function(){o.undo(e)})),t(" fixupEdgeLabelCoords",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(e)})),t(" undoCoordinateSystem",(function(){f.undo(e)})),t(" translateGraph",(function(){!function(e){var t=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,o=0,a=e.graph(),s=a.marginx||0,l=a.marginy||0;function c(e){var r=e.x,a=e.y,s=e.width,l=e.height;t=Math.min(t,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,a-l/2),o=Math.max(o,a+l/2)}r.forEach(e.nodes(),(function(t){c(e.node(t))})),r.forEach(e.edges(),(function(t){var n=e.edge(t);r.has(n,"x")&&c(n)})),t-=s,i-=l,r.forEach(e.nodes(),(function(n){var r=e.node(n);r.x-=t,r.y-=i})),r.forEach(e.edges(),(function(n){var o=e.edge(n);r.forEach(o.points,(function(e){e.x-=t,e.y-=i})),r.has(o,"x")&&(o.x-=t),r.has(o,"y")&&(o.y-=i)})),a.width=n-t+s,a.height=o-i+l}(e)})),t(" assignNodeIntersects",(function(){!function(e){r.forEach(e.edges(),(function(t){var n,r,i=e.edge(t),o=e.node(t.v),a=e.node(t.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=a,r=o),i.points.unshift(g.intersectRect(o,n)),i.points.push(g.intersectRect(a,r))}))}(e)})),t(" reversePoints",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);n.reversed&&n.points.reverse()}))}(e)})),t(" acyclic.undo",(function(){i.undo(e)}))}(t,n)})),n(" updateInputGraph",(function(){!function(e,t){r.forEach(e.nodes(),(function(n){var 
r=e.node(n),i=t.node(n);r&&(r.x=i.x,r.y=i.y,t.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(e.edges(),(function(n){var i=e.edge(n),o=t.edge(n);i.points=o.points,r.has(o,"x")&&(i.x=o.x,i.y=o.y)})),e.graph().width=t.graph().width,e.graph().height=t.graph().height}(e,t)}))}))};var v=["nodesep","edgesep","ranksep","marginx","marginy"],b={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},y=["acyclicer","ranker","rankdir","align"],x=["width","height"],w={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],A={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function S(e,t){return r.mapValues(r.pick(e,t),Number)}function $(e){var t={};return r.forEach(e,(function(e,n){t[n.toLowerCase()]=e})),t}},function(e,t,n){var r=n(149);e.exports=function(e){return r(e,5)}},function(e,t,n){var r=n(89),i=n(57),o=n(90),a=n(48),s=Object.prototype,l=s.hasOwnProperty,c=r((function(e,t){e=Object(e);var n=-1,r=t.length,c=r>2?t[2]:void 0;for(c&&o(t[0],t[1],c)&&(r=1);++n-1?s[l?t[c]:c]:void 0}}},function(e,t,n){var r=n(188),i=n(37),o=n(365),a=Math.max;e.exports=function(e,t,n){var s=null==e?0:e.length;if(!s)return-1;var l=null==n?0:o(n);return l<0&&(l=a(s+l,0)),r(e,i(t,3),l)}},function(e,t,n){var r=n(196);e.exports=function(e){var t=r(e),n=t%1;return t==t?n?t-n:t:0}},function(e,t,n){var r=n(367),i=n(23),o=n(61),a=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,l=/^0o[0-7]+$/i,c=parseInt;e.exports=function(e){if("number"==typeof e)return e;if(o(e))return NaN;if(i(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=i(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=r(e);var n=s.test(e);return n||l.test(e)?c(e.slice(2),n?2:8):a.test(e)?NaN:+e}},function(e,t,n){var r=n(368),i=/^\s+/;e.exports=function(e){return e?e.slice(0,r(e)+1).replace(i,""):e}},function(e,t){var n=/\s/;e.exports=function(e){for(var t=e.length;t--&&n.test(e.charAt(t)););return t}},function(e,t,n){var r=n(128),i=n(169),o=n(48);e.exports=function(e,t){return 
null==e?e:r(e,i(t),o)}},function(e,t){e.exports=function(e){var t=null==e?0:e.length;return t?e[t-1]:void 0}},function(e,t,n){var r=n(79),i=n(127),o=n(37);e.exports=function(e,t){var n={};return t=o(t,3),i(e,(function(e,i,o){r(n,i,t(e,i,o))})),n}},function(e,t,n){var r=n(132),i=n(373),o=n(49);e.exports=function(e){return e&&e.length?r(e,o,i):void 0}},function(e,t){e.exports=function(e,t){return e>t}},function(e,t,n){var r=n(375),i=n(379)((function(e,t,n){r(e,t,n)}));e.exports=i},function(e,t,n){var r=n(73),i=n(198),o=n(128),a=n(376),s=n(23),l=n(48),c=n(199);e.exports=function e(t,n,u,d,f){t!==n&&o(n,(function(o,l){if(f||(f=new r),s(o))a(t,n,l,u,e,d,f);else{var p=d?d(c(t,l),o,l+"",t,n,f):void 0;void 0===p&&(p=o),i(t,l,p)}}),l)}},function(e,t,n){var r=n(198),i=n(155),o=n(164),a=n(156),s=n(165),l=n(66),c=n(13),u=n(189),d=n(59),f=n(64),p=n(23),h=n(377),g=n(67),m=n(199),v=n(378);e.exports=function(e,t,n,b,y,x,w){var k=m(e,n),A=m(t,n),E=w.get(A);if(E)r(e,n,E);else{var S=x?x(k,A,n+"",e,t,w):void 0,$=void 0===S;if($){var C=c(A),_=!C&&d(A),O=!C&&!_&&g(A);S=A,C||_||O?c(k)?S=k:u(k)?S=a(k):_?($=!1,S=i(A,!0)):O?($=!1,S=o(A,!0)):S=[]:h(A)||l(A)?(S=k,l(k)?S=v(k):p(k)&&!f(k)||(S=s(A))):$=!1}$&&(w.set(A,S),y(S,A,b,x,w),w.delete(A)),r(e,n,S)}}},function(e,t,n){var r=n(47),i=n(84),o=n(32),a=Function.prototype,s=Object.prototype,l=a.toString,c=s.hasOwnProperty,u=l.call(Object);e.exports=function(e){if(!o(e)||"[object Object]"!=r(e))return!1;var t=i(e);if(null===t)return!0;var n=c.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&l.call(n)==u}},function(e,t,n){var r=n(65),i=n(48);e.exports=function(e){return r(e,i(e))}},function(e,t,n){var r=n(89),i=n(90);e.exports=function(e){return r((function(t,n){var r=-1,o=n.length,a=o>1?n[o-1]:void 0,s=o>2?n[2]:void 0;for(a=e.length>3&&"function"==typeof a?(o--,a):void 0,s&&i(n[0],n[1],s)&&(a=o<3?void 
0:a,o=1),t=Object(t);++r1&&a(e,t[0],t[1])?t=[]:n>2&&a(t[0],t[1],t[2])&&(t=[t[0]]),i(e,r(t,1),[])}));e.exports=s},function(e,t,n){var r=n(88),i=n(86),o=n(37),a=n(184),s=n(393),l=n(82),c=n(394),u=n(49),d=n(13);e.exports=function(e,t,n){t=t.length?r(t,(function(e){return d(e)?function(t){return i(t,1===e.length?e[0]:e)}:e})):[u];var f=-1;t=r(t,l(o));var p=a(e,(function(e,n,i){return{criteria:r(t,(function(t){return t(e)})),index:++f,value:e}}));return s(p,(function(e,t){return c(e,t,n)}))}},function(e,t){e.exports=function(e,t){var n=e.length;for(e.sort(t);n--;)e[n]=e[n].value;return e}},function(e,t,n){var r=n(395);e.exports=function(e,t,n){for(var i=-1,o=e.criteria,a=t.criteria,s=o.length,l=n.length;++i=l?c:c*("desc"==n[i]?-1:1)}return e.index-t.index}},function(e,t,n){var r=n(61);e.exports=function(e,t){if(e!==t){var n=void 0!==e,i=null===e,o=e==e,a=r(e),s=void 0!==t,l=null===t,c=t==t,u=r(t);if(!l&&!u&&!a&&e>t||a&&s&&c&&!l&&!u||i&&s&&c||!n&&c||!o)return 1;if(!i&&!a&&!u&&e0;--l)if(r=t[l].dequeue()){i=i.concat(s(e,t,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(c,(function(t){return e.outEdges(t.v,t.w)})),!0)};var a=r.constant(1);function s(e,t,n,i,o){var a=o?[]:void 0;return r.forEach(e.inEdges(i.v),(function(r){var i=e.edge(r),s=e.node(r.v);o&&a.push({v:r.v,w:r.w}),s.out-=i,l(t,n,s)})),r.forEach(e.outEdges(i.v),(function(r){var i=e.edge(r),o=r.w,a=e.node(o);a.in-=i,l(t,n,a)})),e.removeNode(i.v),a}function l(e,t,n){n.out?n.in?e[n.out-n.in+t].enqueue(n):e[e.length-1].enqueue(n):e[0].enqueue(n)}},function(e,t){function n(){var e={};e._next=e._prev=e,this._sentinel=e}function r(e){e._prev._next=e._next,e._next._prev=e._prev,delete e._next,delete e._prev}function i(e,t){if("_next"!==e&&"_prev"!==e)return t}e.exports=n,n.prototype.dequeue=function(){var e=this._sentinel,t=e._prev;if(t!==e)return r(t),t},n.prototype.enqueue=function(e){var 
t=this._sentinel;e._prev&&e._next&&r(e),e._next=t._next,t._next._prev=e,t._next=e,e._prev=t},n.prototype.toString=function(){for(var e=[],t=this._sentinel,n=t._prev;n!==t;)e.push(JSON.stringify(n,i)),n=n._prev;return"["+e.join(", ")+"]"}},function(e,t,n){"use strict";var r=n(11),i=n(20);e.exports={run:function(e){e.graph().dummyChains=[],r.forEach(e.edges(),(function(t){!function(e,t){var n,r,o,a=t.v,s=e.node(a).rank,l=t.w,c=e.node(l).rank,u=t.name,d=e.edge(t),f=d.labelRank;if(c===s+1)return;for(e.removeEdge(t),o=0,++s;sl.lim&&(c=l,u=!0);var d=r.filter(t.edges(),(function(t){return u===b(e,e.node(t.v),c)&&u!==b(e,e.node(t.w),c)}));return r.minBy(d,(function(e){return o(t,e)}))}function v(e,t,n,i){var o=n.v,a=n.w;e.removeEdge(o,a),e.setEdge(i.v,i.w,{}),p(e),d(e,t),function(e,t){var n=r.find(e.nodes(),(function(e){return!t.node(e).parent})),i=s(e,n);i=i.slice(1),r.forEach(i,(function(n){var r=e.node(n).parent,i=t.edge(n,r),o=!1;i||(i=t.edge(r,n),o=!0),t.node(n).rank=t.node(r).rank+(o?i.minlen:-i.minlen)}))}(e,t)}function b(e,t,n){return n.low<=t.lim&&t.lim<=n.lim}e.exports=u,u.initLowLimValues=p,u.initCutValues=d,u.calcCutValue=f,u.leaveEdge=g,u.enterEdge=m,u.exchangeEdges=v},function(e,t,n){var r=n(11);e.exports=function(e){var t=function(e){var t={},n=0;function i(o){var a=n;r.forEach(e.children(o),i),t[o]={low:a,lim:n++}}return r.forEach(e.children(),i),t}(e);r.forEach(e.graph().dummyChains,(function(n){for(var r=e.node(n),i=r.edgeObj,o=function(e,t,n,r){var i,o,a=[],s=[],l=Math.min(t[n].low,t[r].low),c=Math.max(t[n].lim,t[r].lim);i=n;do{i=e.parent(i),a.push(i)}while(i&&(t[i].low>l||c>t[i].lim));o=i,i=r;for(;(i=e.parent(i))!==o;)s.push(i);return{path:a.concat(s.reverse()),lca:o}}(e,t,i.v,i.w),a=o.path,s=o.lca,l=0,c=a[l],u=!0;n!==i.w;){if(r=e.node(n),u){for(;(c=a[l])!==s&&e.node(c).maxRank=2),s=u.buildLayerMatrix(e);var m=o(e,s);m0;)t%2&&(n+=l[t+1]),l[t=t-1>>1]+=e.weight;c+=e.weight*n}))),c}e.exports=function(e,t){for(var n=0,r=1;r=e.barycenter)&&function(e,t){var 
n=0,r=0;e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.vs=t.vs.concat(e.vs),e.barycenter=n/r,e.weight=r,e.i=Math.min(t.i,e.i),t.merged=!0}(e,t)}}function i(t){return function(n){n.in.push(t),0==--n.indegree&&e.push(n)}}for(;e.length;){var o=e.pop();t.push(o),r.forEach(o.in.reverse(),n(o)),r.forEach(o.out,i(o))}return r.map(r.filter(t,(function(e){return!e.merged})),(function(e){return r.pick(e,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(e){return!e.indegree})))}},function(e,t,n){var r=n(11),i=n(20);function o(e,t,n){for(var i;t.length&&(i=r.last(t)).i<=n;)t.pop(),e.push(i.vs),n++;return n}e.exports=function(e,t){var n=i.partition(e,(function(e){return r.has(e,"barycenter")})),a=n.lhs,s=r.sortBy(n.rhs,(function(e){return-e.i})),l=[],c=0,u=0,d=0;a.sort((f=!!t,function(e,t){return e.barycentert.barycenter?1:f?t.i-e.i:e.i-t.i})),d=o(l,s,d),r.forEach(a,(function(e){d+=e.vs.length,l.push(e.vs),c+=e.barycenter*e.weight,u+=e.weight,d=o(l,s,d)}));var f;var p={vs:r.flatten(l,!0)};u&&(p.barycenter=c/u,p.weight=u);return p}},function(e,t,n){var r=n(11),i=n(28).Graph;e.exports=function(e,t,n){var o=function(e){var t;for(;e.hasNode(t=r.uniqueId("_root")););return t}(e),a=new i({compound:!0}).setGraph({root:o}).setDefaultNodeLabel((function(t){return e.node(t)}));return r.forEach(e.nodes(),(function(i){var s=e.node(i),l=e.parent(i);(s.rank===t||s.minRank<=t&&t<=s.maxRank)&&(a.setNode(i),a.setParent(i,l||o),r.forEach(e[n](i),(function(t){var n=t.v===i?t.w:t.v,o=a.edge(n,i),s=r.isUndefined(o)?0:o.weight;a.setEdge(n,i,{weight:e.edge(t).weight+s})})),r.has(s,"minRank")&&a.setNode(i,{borderLeft:s.borderLeft[t],borderRight:s.borderRight[t]}))})),a}},function(e,t,n){var r=n(11);e.exports=function(e,t,n){var i,o={};r.forEach(n,(function(n){for(var r,a,s=e.parent(n);s;){if((r=e.parent(s))?(a=o[r],o[r]=s):(a=i,i=s),a&&a!==s)return void t.setEdge(a,s);s=r}}))}},function(e,t,n){"use strict";var 
r=n(11),i=n(20),o=n(419).positionX;e.exports=function(e){(function(e){var t=i.buildLayerMatrix(e),n=e.graph().ranksep,o=0;r.forEach(t,(function(t){var i=r.max(r.map(t,(function(t){return e.node(t).height})));r.forEach(t,(function(t){e.node(t).y=o+i/2})),o+=i+n}))})(e=i.asNonCompoundGraph(e)),r.forEach(o(e),(function(t,n){e.node(n).x=t}))}},function(e,t,n){"use strict";var r=n(11),i=n(28).Graph,o=n(20);function a(e,t){var n={};return r.reduce(t,(function(t,i){var o=0,a=0,s=t.length,c=r.last(i);return r.forEach(i,(function(t,u){var d=function(e,t){if(e.node(t).dummy)return r.find(e.predecessors(t),(function(t){return e.node(t).dummy}))}(e,t),f=d?e.node(d).order:s;(d||t===c)&&(r.forEach(i.slice(a,u+1),(function(t){r.forEach(e.predecessors(t),(function(r){var i=e.node(r),a=i.order;!(as)&&l(n,t,c)}))}))}return r.reduce(t,(function(t,n){var o,a=-1,s=0;return r.forEach(n,(function(r,l){if("border"===e.node(r).dummy){var c=e.predecessors(r);c.length&&(o=e.node(c[0]).order,i(n,s,l,a,o),s=l,a=o)}i(n,s,n.length,o,t.length)})),n})),n}function l(e,t,n){if(t>n){var r=t;t=n,n=r}var i=e[t];i||(e[t]=i={}),i[n]=!0}function c(e,t,n){if(t>n){var i=t;t=n,n=i}return r.has(e[t],n)}function u(e,t,n,i){var o={},a={},s={};return r.forEach(t,(function(e){r.forEach(e,(function(e,t){o[e]=e,a[e]=e,s[e]=t}))})),r.forEach(t,(function(e){var t=-1;r.forEach(e,(function(e){var l=i(e);if(l.length)for(var u=((l=r.sortBy(l,(function(e){return s[e]}))).length-1)/2,d=Math.floor(u),f=Math.ceil(u);d<=f;++d){var p=l[d];a[e]===e&&t\n.menu ul ul {\n margin-left: 12px;\n}\n\n\n\n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(425),i=n(21);n(426),angular.module("dbt").directive("modelTreeLine",["$state",function(e){return{scope:{item:"=",depth:"<",resourceType:"@"},replace:!0,templateUrl:r,link:function(t,n,r,o){t.depth||(t.depth=0);var a=t.item.name;if(a){var 
s=i.last(a,15).join(""),l=i.initial(a,s.length).join("");t.name={name:a,start:l,end:s},t.name_start=l,t.name_end=s,t.onFolderClick=function(n){if(n.active=!n.active,"source"==t.resourceType){var r=n.name;e.go("dbt.source_list",{source:r})}else 0===t.depth&&"database"!==n.type&&e.go("dbt.project_overview",{project_name:n.name})},t.activate=function(n){t.$emit("clearSearch"),n.active=!0;var r="dbt."+n.node.resource_type;e.go(r,{unique_id:n.unique_id})},t.getIcon=function(e,t){return"#"+{header:{on:"icn-down",off:"icn-right"},database:{on:"icn-db-on",off:"icn-db"},schema:{on:"icn-tree-on",off:"icn-tree"},table:{on:"icn-doc-on",off:"icn-doc"},folder:{on:"icn-dir-on",off:"icn-dir"},file:{on:"icn-doc-on",off:"icn-doc"}}[e][t]},t.getClass=function(e){return{active:e.active,"menu-tree":"header"==e.type||"schema"==e.type||"folder"==e.type,"menu-main":"header"==e.type,"menu-node":"file"==e.type||"table"==e.type}}}}}}])},function(e,t){var n="/components/model_tree/model_tree_line.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
  • \n\n \n \n \n \n \n \n {{name.start}}\n {{name.end}}\n \n \n\n \n \n \n \n \n \n {{name.start}}\n {{name.end}}\n \n \n\n
      \n \n
    \n
  • \n')}]),e.exports=n},function(e,t,n){var r=n(427);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n.unselectable{\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n",""])},function(e,t,n){"use strict";const r=n(9),i=n(429);n(31);n(206),r.module("dbt").directive("docsSearch",["$sce","project",function(e,t){return{scope:{query:"=",results:"=",onSelect:"&"},replace:!0,templateUrl:i,link:function(n){n.max_results=20,n.show_all=!1,n.max_results_columns=3,n.limit_columns={},n.checkboxStatus={show_names:!1,show_descriptions:!1,show_columns:!1,show_code:!1,show_tags:!1},n.limit_search=function(e,t,r){return t0&&null!=n.query&&n.query.trim().length>0){let t=e.replace(/\s+/g," "),o=r(i(n.query)[0]),a=t.search(new RegExp(o)),s=a-75<0?0:a-75,l=a+75>t.length?t.length:a+75;return"..."+t.substring(s,l)+"..."}return e},n.highlight=function(t){if(!n.query||!t)return e.trustAsHtml(t);let o="("+i(n.query).map(e=>r(e)).join(")|(")+")";return e.trustAsHtml(t.replace(new RegExp(o,"gi"),'$&'))},n.$watch("query",(function(e,t){0==e.length&&(n.show_all=!1,n.limit_columns={})})),n.columnFilter=function(e){var t=[];let r=i(n.query);for(var o in e)r.every(e=>-1!=o.toLowerCase().indexOf(e))&&t.push(o);return t},n.limitColumns=function(e){return void 0!==n.limit_columns[e]?n.limit_columns[e]:3}}}}])},function(e,t){var n="/components/search/search.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n \n
    \n
    \n

    \n {{ query }}\n {{ results.length }} search results\n

    \n \n \n \n \n \n \n \n \n \n \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n \n
    \n
    \n

    \n \n {{result.model.resource_type}}\n

    \n

    \n
    \n
    \n
    \n \n columns:\n \n \n \n Show {{ columnFilter(result.model.columns).length - max_results_columns }} more\n
    \n
    \n \n \n \n
    \n
    \n \n tags:\n \n \n \n
    \n
    \n Show {{ results.length - max_results }} more\n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(431);n(432);const i=n(21);angular.module("dbt").directive("tableDetails",["$sce","$filter",function(e,t){return{scope:{model:"=",extras:"=",exclude:"<"},templateUrl:r,link:function(e){function n(e,t){if(0==e)return"0 bytes";if(e<1&&(e*=1e6),isNaN(parseFloat(e))||!isFinite(e))return"-";void 0===t&&(t=0);var n=Math.floor(Math.log(e)/Math.log(1024));return(e/Math.pow(1024,Math.floor(n))).toFixed(t)+" "+["bytes","KB","MB","GB","TB","PB"][n]}function r(e,n){return void 0===n&&(n=2),t("number")(100*e,n)+"%"}function o(e,n){return void 0===n&&(n=0),t("number")(e,n)}e.details=[],e.extended=[],e.exclude=e.exclude||[],e.meta=null,e._show_expanded=!1,e.show_expanded=function(t){return void 0!==t&&(e._show_expanded=t),e._show_expanded},e.hasData=function(e){return!(!e||i.isEmpty(e))&&(1!=e.length||0!=e[0].include)},e.$watch("model",(function(t,a){i.property(["metadata","type"])(t);var s,l,c,u=t.hasOwnProperty("sources")&&null!=t.sources[0]?t.sources[0].source_meta:null;if(e.meta=t.meta||u,e.details=function(e){var t,n,r=!e.metadata,o=e.metadata||{};t=e.database?e.database+".":"",n=r?void 0:"source"==e.resource_type?t+e.schema+"."+e.identifier:t+e.schema+"."+e.alias;var a,s=[{name:"Owner",value:o.owner},{name:"Type",value:r?void 0:(a=o.type,"BASE TABLE"==a?{type:"table",name:"table"}:"LATE BINDING VIEW"==a?{type:"view",name:"late binding view"}:{type:a.toLowerCase(),name:a.toLowerCase()}).name},{name:"Package",value:e.package_name},{name:"Language",value:e.language},{name:"Relation",value:n}];return i.filter(s,(function(e){return void 0!==e.value}))}(t),e.extended=(s=t.stats,l={rows:o,row_count:o,num_rows:o,max_varchar:o,pct_used:r,size:n,bytes:n,num_bytes:n},c=i.sortBy(i.values(s),"label"),i.map(c,(function(e){var t=i.clone(e),n=l[e.id];return n&&(t.value=n(e.value),t.label=e.label.replace("Approximate","~"),t.label=e.label.replace("Utilization","Used")),t}))),e.extras){var d=i.filter(e.extras,(function(e){return 
void 0!==e.value&&null!==e.value}));e.details=e.details.concat(d)}e.show_extended=i.where(e.extended,{include:!0}).length>0})),e.queryTag=function(t){e.$emit("query",t)}}}}])},function(e,t){var n="/components/table_details/table_details.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n
    Details
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    {{ k }}
    \n
    {{ v }}
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    Tags
    \n
    \n {{ tag }} \n
    \n
    untagged
    \n
    \n
    \n
    {{ item.name }}
    \n
    {{ item.value }}
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    {{ item.label }}
    \n
    {{ item.value }}
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){var r=n(433);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n\n.details-content {\n table-layout: fixed;\n}\n\n.detail-body {\n white-space: nowrap;\n overflow-x: scroll;\n}\n",""])},function(e,t,n){"use strict";const r=n(435),i=n(21);angular.module("dbt").directive("columnDetails",["project",function(e){return{scope:{model:"="},templateUrl:r,link:function(t){t.has_test=function(e,t){return-1!=i.pluck(e.tests,"short").indexOf(t)},t.has_more_info=function(e){var t=e.tests||[],n=e.description||"",r=e.meta||{};return t.length||n.length||!i.isEmpty(r)},t.toggle_column_expanded=function(e){t.has_more_info(e)&&(e.expanded=!e.expanded)},t.getState=function(e){return"dbt."+e.resource_type},t.get_col_name=function(t){return e.caseColumn(t)},t.get_columns=function(e){var t=i.chain(e.columns).values().sortBy("index").value();return i.each(t,(function(e,t){e.index=t})),t}}}}])},function(e,t){var n="/components/column_details/column_details.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n
    \n
    \n Column information is not available for this seed\n
    \n
    \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    ColumnTypeDescriptionTestsMore?
    \n
    \n {{ get_col_name(column.name) }}\n
    \n
    \n {{ column.type }}

    \n
    \n {{ column.description }}\n \n \n U\n N\n F\n A\n +\n \n \n \n \n \n \n \n \n \n \n
    \n
    \n
    \n
    Details
    \n
    \n
    \n
    \n
    {{ k }}
    \n
    {{ v }}
    \n
    \n
    \n
    \n
    \n\n
    \n
    Description
    \n \n
    \n\n
    \n
    Generic Tests
    \n \n
    \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(437);n(31),n(438);function i(e){return"python"===e?"language-python":"language-sql"}angular.module("dbt").directive("codeBlock",["code","$timeout",function(e,t){return{scope:{versions:"=",default:"<",language:"="},restrict:"E",templateUrl:r,link:function(n,r){n.selected_version=n.default,n.language_class=i(n.language),n.source=null,n.setSelected=function(r){n.selected_version=r,n.source=n.versions[r]||"";const i=n.source.trim();n.highlighted=e.highlight(i,n.language),t((function(){Prism.highlightAll()}))},n.titleCase=function(e){return e.charAt(0).toUpperCase()+e.substring(1)},n.copied=!1,n.copy_to_clipboard=function(){e.copy_to_clipboard(n.source),n.copied=!0,setTimeout((function(){n.$apply((function(){n.copied=!1}))}),1e3)},n.$watch("language",(function(e,t){e&&e!=t&&(n.language_class=i(e))}),!0),n.$watch("versions",(function(e,t){if(e)if(n.default)n.setSelected(n.default);else{var r=Object.keys(n.versions);r.length>0&&n.setSelected(r[0])}}),!0)}}}])},function(e,t){var n="/components/code_block/code_block.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    Code
    \n\n')}]),e.exports=n},function(e,t,n){var r=n(439);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"pre.code {\n border: none !important;\n overflow-y: visible !important;\n overflow-x: scroll !important;\n padding-bottom: 10px;\n}\n\npre.code code {\n font-family: Monaco, monospace !important;\n font-weight: 400 !important;\n}\n\n.line-numbers-rows {\n border: none !important;\n}\n",""])},function(e,t,n){"use strict";const r=n(441);angular.module("dbt").directive("macroArguments",[function(){return{scope:{macro:"="},templateUrl:r,link:function(e){_.each(e.macro.arguments,(function(e){e.expanded=!1}))}}}])},function(e,t){var n="/components/macro_arguments/index.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'\n\n
    \n
    \n
    \n Details are not available for this macro\n
    \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    ArgumentTypeDescriptionMore?
    \n
    \n {{ arg.name }}\n
    \n
    \n {{ arg.type }}

    \n
    \n {{ arg.description }}\n \n \n \n \n \n \n \n \n \n
    \n
    \n
    \n
    Description
    \n \n
    \n
    \n
    \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){"use strict";const r=n(443);angular.module("dbt").directive("referenceList",["$state",function(e){return{scope:{references:"=",node:"="},restrict:"E",templateUrl:r,link:function(t){t.selected_type=null,t.setType=function(e){t.selected_type=e,t.nodes=t.references[t.selected_type]},t.getNodeUrl=function(t){var n="dbt."+t.resource_type;return e.href(n,{unique_id:t.unique_id,"#":null})},t.mapResourceType=function(e){return"model"==e?"Models":"seed"==e?"Seeds":"test"==e?"Tests":"snapshot"==e?"Snapshots":"analysis"==e?"Analyses":"macro"==e?"Macros":"exposure"==e?"Exposures":"metric"==e?"Metrics":"operation"==e?"Operations":"Nodes"},t.$watch("references",(function(e){e&&_.size(e)>0?(t.selected_type=_.keys(e)[0],t.has_references=!0,t.nodes=t.references[t.selected_type]):t.has_references=!1}))}}}])},function(e,t){var n="/components/references/index.html";window.angular.module("ng").run(["$templateCache",function(e){e.put(n,'
    \n
    \n No resources reference this {{ node.resource_type }}\n
    \n
    \n \n
    \n \n
    \n
    \n
    \n')}]),e.exports=n},function(e,t,n){n(445),n(447),n(448),n(449),n(450),n(451),n(452),n(453),n(454),n(455)},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("ModelCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.copied=!1,e.copy_to_clipboard=function(t){r.copy_to_clipboard(t),e.copied=!0,setTimeout((function(){e.$apply((function(){e.copied=!1}))}),1e3)},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,"\n.nav-tabs li.nav-pull-right {\n flex: 1 0 auto;\n text-align: right;\n}\n\ntr.column-row-selected {\n\n}\n\ntd.column-expanded{\n padding: 0px !important;\n}\n\ntd.column-expanded > div {\n padding: 5px 10px;\n margin-left: 20px;\n height: 100%;\n\n border-left: 1px solid #ccc !important;\n}\n",""])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("SourceCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.versions={"Sample SQL":r.generateSourceSQL(e.model)},e.extra_table_fields=[{name:"Loader",value:e.model.loader},{name:"Source",value:e.model.source_name}]}))}])},function(e,t,n){"use strict";const 
r=n(9),i=n(33);n(34),r.module("dbt").controller("SeedCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,o,a,s){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.versions={"Example SQL":r.generateSourceSQL(e.model)}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("SnapshotCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"Compiled SQL is not available for this snapshot"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("TestCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";const 
r=n(9),i=n(21),o=n(33);n(34),r.module("dbt").controller("MacroCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,a,s,l){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.macro={},n.ready((function(t){let n=t.macros[e.model_uid];if(e.macro=n,e.references=o.getMacroReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=o.getMacroParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.macro.is_adapter_macro){var r=t.metadata.adapter_type;e.versions=n.impls,n.impls[r]?e.default_version=r:n.impls.default?e.default_version="default":e.default_version=i.keys(n.impls)[0]}else e.default_version="Source",e.versions={Source:e.macro.macro_sql}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("AnalysisCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,o,a,s){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.default_version="Source",e.versions={Source:"",Compiled:""},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language,e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("ExposureCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.exposure={},n.ready((function(t){let n=t.nodes[e.model_uid];e.exposure=n,e.parents=i.getParents(t,n),e.parentsLength=e.parents.length,e.language=n.language,e.extra_table_fields=[{name:"Maturity",value:e.exposure.maturity},{name:"Owner",value:e.exposure.owner.name},{name:"Owner 
email",value:e.exposure.owner.email},{name:"Exposure name",value:e.exposure.name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("MetricCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.metric={},n.ready((function(t){let n=t.nodes[e.model_uid];e.metric=n,e.parents=i.getParents(t,n),e.parentsLength=e.parents.length,e.versions={Definition:r.generateMetricSQL(e.metric)};const o="expression"===e.metric.type?"Expression metric":"Aggregate metric";e.extra_table_fields=[{name:"Metric Type",value:o},{name:"Metric name",value:e.metric.name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("OperationCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";n(9).module("dbt").controller("GraphCtrl",["$scope","$state","$window","graph","project","selectorService",function(e,t,n,r,i,o){function a(e){return e&&"source"==e.resource_type?"source:"+e.source_name+"."+e.name:e&&"exposure"==e.resource_type?"exposure:"+e.name:e&&"metric"==e.resource_type?"metric:"+e.name:e.name?e.name:"*"}e.graph=r.graph,e.graphService=r,e.graphRendered=function(e){r.setGraphReady(e)},e.$watch((function(){return 
t.params.unique_id}),(function(e,t){e&&e!=t&&i.find_by_id(e,(function(e){e&&("sidebar"==r.orientation?r.showVerticalGraph(a(e),!1):r.showFullGraph(a(e)))})),e||o.clearViewNode()}))}])},function(e,t,n){"use strict";const r=n(9),i=n(21),o=n(31),a=n(458);n(459),n(206),n(467),n(469),n(472),n(476),r.module("dbt").controller("MainController",["$scope","$route","$state","project","graph","selectorService","trackingService","locationService","$transitions",function(e,t,n,r,s,l,c,u,d){function f(t){e.model_uid=t;var n=r.node(t);n&&l.resetSelection(n)}function p(e){e&&setTimeout((function(){var t=o("*[data-nav-unique-id='"+e+"']");t.length&&t[0].scrollIntoView&&t[0].scrollIntoView({behavior:"smooth",block:"center",inline:"center"})}),1)}e.tree={database:{},project:{},sources:{}},e.search={query:"",results:[],is_focused:!1},e.logo=a,e.model_uid=null,e.project={},o("body").bind("keydown",(function(e){"t"==event.key&&"INPUT"!=event.target.tagName&&(console.log("Opening search"),o("#search").focus(),event.preventDefault())})),e.onSearchFocus=function(t,n){e.search.is_focused=n},e.clearSearch=function(){e.search.is_focused=!1,e.search.query="",e.search.results=[],o("#search").blur()},e.$on("clearSearch",(function(){e.clearSearch()})),e.$on("query",(function(t,n){e.search.is_focused=!0,e.search.query=n})),e.onSearchKeypress=function(t){"Escape"==t.key&&(e.clearSearch(),t.preventDefault())},r.getModelTree(n.params.unique_id,(function(t){e.tree.database=t.database,e.tree.project=t.project,e.tree.sources=t.sources,e.tree.exposures=t.exposures,e.tree.metrics=t.metrics,setTimeout((function(){p(e.model_uid)}))})),d.onSuccess({},(function(t,n){var i=t.router.globals.params,o=l.getViewNode(),a=o?o.unique_id:null,s=i.unique_id,u=!0;if(t.from().name==t.to().name&&a==s&&(u=!1),u&&i.unique_id){var d=r.updateSelected(i.unique_id);e.tree.database=d.database,e.tree.project=d.project,e.tree.sources=d.sources,e.search.query="",console.log("updating selected model to: 
",i),f(i.unique_id),setTimeout((function(){p(i.unique_id)}))}u&&c.track_pageview()})),e.$watch("search.query",(function(t){e.search.results=function(t){if(""===e.search.query)return t;let n={name:10,tags:5,description:3,raw_code:2,columns:1};return i.each(t,(function(t){t.overallWeight=0,i.each(Object.keys(n),(function(r){if(null!=t.model[r]){let o=0,a=t.model[r],s=e.search.query.toLowerCase();if("columns"===r)i.each(a,(function(e){if(e.name){let t=e.name.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}}));else if("tags"===r)i.each(a,(function(e){let t=e.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}));else{a=a.toLowerCase();let e=0;for(;-1!=e;)e=a.indexOf(s,e),-1!=e&&(o++,e++)}t.overallWeight+=o*n[r]}}))})),t}(r.search(t))})),r.init(),r.ready((function(t){e.project=t,e.search.results=r.search("");var o=i.unique(i.pluck(i.values(t.nodes),"package_name")).sort(),a=[null];i.each(t.nodes,(function(e){var t=e.tags;a=i.union(a,t).sort()})),l.init({packages:o,tags:a}),f(n.params.unique_id);var d=u.parseState(n.params);d.show_graph&&s.ready((function(){i.assign(l.selection.dirty,d.selected);var e=l.updateSelection();s.updateGraph(e)}));var p=t.metadata||{};c.init({track:p.send_anonymous_usage_stats,project_id:p.project_id})}))}])},function(e,t){e.exports="data:image/svg+xml,%3Csvg width='242' height='90' viewBox='0 0 242 90' fill='none' xmlns='http://www.w3.org/2000/svg'%3E %3Cpath d='M240.384 74.5122L239.905 75.8589H239.728L239.249 74.5156V75.8589H238.941V74.0234H239.324L239.816 75.3872L240.309 74.0234H240.691V75.8589H240.384V74.5122ZM238.671 74.3003H238.169V75.8589H237.858V74.3003H237.352V74.0234H238.671V74.3003Z' fill='%23262A38'/%3E %3Cpath d='M154.123 13.915V75.3527H141.672V69.0868C140.37 71.2839 138.499 73.0742 136.22 74.2134C133.779 75.434 131.012 76.085 128.246 76.085C124.828 76.1664 121.41 75.1899 118.562 73.2369C115.633 71.2839 113.354 68.5986 111.889 65.425C110.262 61.7631 109.448 57.8572 109.529 53.8698C109.448 49.8825 110.262 
45.9765 111.889 42.3961C113.354 39.3038 115.633 36.6185 118.481 34.7469C121.41 32.8753 124.828 31.9801 128.246 32.0615C130.931 32.0615 133.616 32.6311 135.976 33.8517C138.255 34.991 140.126 36.6999 141.428 38.8156V18.0651L154.123 13.915ZM139.15 63.2279C140.777 61.1121 141.672 58.0199 141.672 54.0326C141.672 50.0452 140.859 47.0344 139.15 44.9187C137.441 42.8029 134.755 41.5823 131.989 41.6637C129.222 41.5009 126.537 42.7215 124.746 44.8373C123.038 46.953 122.142 49.9639 122.142 53.8698C122.142 57.8572 123.038 60.9494 124.746 63.1465C126.455 65.3436 129.222 66.5642 131.989 66.4828C135.081 66.4828 137.522 65.3436 139.15 63.2279Z' fill='%23262A38'/%3E %3Cpath d='M198.635 34.6655C201.564 36.5371 203.843 39.2225 205.226 42.3147C206.853 45.8952 207.667 49.8011 207.586 53.7885C207.667 57.7758 206.853 61.7632 205.226 65.3436C203.761 68.5172 201.483 71.2026 198.553 73.1556C195.705 75.0272 192.287 76.0037 188.87 75.9223C186.103 76.0037 183.336 75.3527 180.895 74.0507C178.617 72.9114 176.745 71.1212 175.524 68.9241V75.2713H162.993V18.0651L175.606 13.915V38.9783C176.826 36.7812 178.698 34.991 180.976 33.8517C183.418 32.5498 186.103 31.8988 188.87 31.9801C192.287 31.8988 195.705 32.8753 198.635 34.6655ZM192.45 63.1465C194.159 60.9494 194.973 57.8572 194.973 53.7885C194.973 49.8825 194.159 46.8716 192.45 44.7559C190.741 42.6402 188.381 41.5823 185.289 41.5823C182.523 41.4196 179.837 42.6402 178.047 44.8373C176.338 47.0344 175.524 50.0452 175.524 53.9512C175.524 57.9386 176.338 61.0308 178.047 63.1465C179.756 65.3436 182.441 66.5642 185.289 66.4015C188.056 66.5642 190.741 65.3436 192.45 63.1465Z' fill='%23262A38'/%3E %3Cpath d='M225 42.4774V58.915C225 61.2749 225.651 62.9838 226.791 64.0416C228.093 65.1809 229.801 65.7505 231.592 65.6691C232.975 65.6691 234.44 65.425 235.742 65.0995V74.8644C233.382 75.6782 230.941 76.085 228.499 76.0037C223.292 76.0037 219.304 74.5389 216.537 71.6094C213.771 68.68 212.387 64.5299 212.387 59.1592V23.1103L225 19.0416V33.038H235.742V42.4774H225Z' 
fill='%23262A38'/%3E %3Cpath d='M86.1754 3.74322C88.2911 5.77758 89.6745 8.46293 90 11.3924C90 12.613 89.6745 13.4268 88.9421 14.9729C88.2098 16.519 79.1772 32.1429 76.4919 36.4557C74.9458 38.9783 74.132 41.9892 74.132 44.9186C74.132 47.9295 74.9458 50.859 76.4919 53.3816C79.1772 57.6944 88.2098 73.3996 88.9421 74.9457C89.6745 76.4919 90 77.2242 90 78.4448C89.6745 81.3743 88.3725 84.0597 86.2568 86.0127C84.2224 88.1284 81.5371 89.5118 78.689 89.7559C77.4684 89.7559 76.6546 89.4304 75.1899 88.698C73.7251 87.9656 57.7758 79.1772 53.4629 76.4919C53.1374 76.3291 52.8119 76.085 52.4051 75.9222L31.085 63.3092C31.5732 67.3779 33.3635 71.2839 36.2929 74.132C36.8626 74.7016 37.4322 75.1899 38.0832 75.6781C37.5949 75.9222 37.0253 76.1664 36.5371 76.4919C32.2242 79.1772 16.519 88.2098 14.9729 88.9421C13.4268 89.6745 12.6944 90 11.3924 90C8.46293 89.6745 5.77758 88.3725 3.82459 86.2568C1.70886 84.2224 0.325497 81.5371 0 78.6076C0.0813743 77.387 0.406872 76.1664 1.05787 75.1085C1.79024 73.5624 10.8228 57.8571 13.5081 53.5443C15.0542 51.0217 15.868 48.0922 15.868 45.0814C15.868 42.0705 15.0542 39.141 13.5081 36.6184C10.8228 32.1429 1.70886 16.4376 1.05787 14.8915C0.406872 13.8336 0.0813743 12.613 0 11.3924C0.325497 8.46293 1.62749 5.77758 3.74322 3.74322C5.77758 1.62749 8.46293 0.325497 11.3924 0C12.613 0.0813743 13.8336 0.406872 14.9729 1.05787C16.2749 1.62749 27.7486 8.30018 33.8517 11.8807L35.2351 12.6944C35.7233 13.0199 36.1302 13.264 36.4557 13.4268L37.1067 13.8336L58.8336 26.6908C58.3454 21.8083 55.8228 17.3327 51.9168 14.3219C52.4051 14.0778 52.9747 13.8336 53.4629 13.5081C57.7758 10.8228 73.481 1.70886 75.0271 1.05787C76.085 0.406872 77.3056 0.0813743 78.6076 0C81.4557 0.325497 84.1411 1.62749 86.1754 3.74322ZM46.1392 50.7776L50.7776 46.1392C51.4286 45.4882 51.4286 44.5118 50.7776 43.8608L46.1392 39.2224C45.4882 38.5714 44.5118 38.5714 43.8608 39.2224L39.2224 43.8608C38.5714 44.5118 38.5714 45.4882 39.2224 46.1392L43.8608 50.7776C44.4304 51.3472 45.4882 51.3472 46.1392 
50.7776Z' fill='%23FF694A'/%3E %3C/svg%3E"},function(e,t,n){"use strict";n.r(t);var r=n(63),i=n.n(r);n(460),n(461),n(462),n(463),n(465);const o=n(9),a=(n(31),n(21));window.Prism=i.a,o.module("dbt").factory("code",["$sce",function(e){var t={copied:!1,highlight:function(t,n="sql"){if("sql"==n)var r=i.a.highlight(t,i.a.languages.sql,"sql");else if("python"==n)r=i.a.highlight(t,i.a.languages.python,"python");return e.trustAsHtml(r)},copy_to_clipboard:function(e){var t=document.createElement("textarea");t.value=e,t.setAttribute("readonly",""),t.style.position="absolute",t.style.left="-9999px",document.body.appendChild(t),t.select(),document.execCommand("copy"),document.body.removeChild(t)},generateSourceSQL:function(e){var t=["select"],n=a.size(e.columns),r=a.keys(e.columns);a.each(r,(function(e,r){var i=" "+e;r+1!=n&&(i+=","),t.push(i)}));const i=(e.database?e.database+".":"")+e.schema+"."+e.identifier;return t.push("from "+i),t.join("\n")},generateMetricSQL:function(e){if("derived"==e.calculation_method)return"-- derived\n"+e.expression;const t=[`select ${e.calculation_method}(${e.expression})`,`from {{ ${e.model} }}`];if(e.filters.length>0){const n=e.filters.map(e=>`${e.field} ${e.operator} ${e.value}`).join(" AND ");t.push("where "+n)}return t.join("\n")}};return 
t}])},function(e,t){Prism.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},identifier:{pattern:/(^|[^@\\])`(?:\\[\s\S]|[^`\\]|``)*`/,greedy:!0,lookbehind:!0,inside:{punctuation:/^`|`$/}},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH 
ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:COL|_INSERT)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:ING|S)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS 
LIKE|XOR)\b/i,punctuation:/[;[\]()`,.]/}},function(e,t){Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t ]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punct
uation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python},function(e,t){!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document){var e=/\n(?!$)/g,t=Prism.plugins.lineNumbers={getLine:function(e,t){if("PRE"===e.tagName&&e.classList.contains("line-numbers")){var n=e.querySelector(".line-numbers-rows");if(n){var r=parseInt(e.getAttribute("data-start"),10)||1,i=r+(n.children.length-1);ti&&(t=i);var o=t-r;return n.children[o]}}},resize:function(e){r([e])},assumeViewportIndependence:!0},n=void 0;window.addEventListener("resize",(function(){t.assumeViewportIndependence&&n===window.innerWidth||(n=window.innerWidth,r(Array.prototype.slice.call(document.querySelectorAll("pre.line-numbers"))))})),Prism.hooks.add("complete",(function(t){if(t.code){var n=t.element,i=n.parentNode;if(i&&/pre/i.test(i.nodeName)&&!n.querySelector(".line-numbers-rows")&&Prism.util.isActive(n,"line-numbers")){n.classList.remove("line-numbers"),i.classList.add("line-numbers");var o,a=t.code.match(e),s=a?a.length+1:1,l=new Array(s+1).join("");(o=document.createElement("span")).setAttribute("aria-hidden","true"),o.className="line-numbers-rows",o.innerHTML=l,i.hasAttribute("data-start")&&(i.style.counterReset="linenumber "+(parseInt(i.getAttribute("data-start"),10)-1)),t.element.appendChild(o),r([i]),Prism.hooks.run("line-numbers",t)}}})),Prism.hooks.add("line-numbers",(function(e){e.plugins=e.plugins||{},e.plugins.lineNumbers=!0}))}function r(t){if(0!=(t=t.filter((function(e){var t=function(e){if(!e)return null;return window.getComputedStyle?getComputedStyle(e):e.currentStyle||null}(e)["white-space"];return"pre-wrap"===t||"pre-line"===t}))).length){var n=t.map((function(t){var n=t.querySelector("code"),r=t.querySelector(".line-numbers-rows");if(n&&r){var 
i=t.querySelector(".line-numbers-sizer"),o=n.textContent.split(e);i||((i=document.createElement("span")).className="line-numbers-sizer",n.appendChild(i)),i.innerHTML="0",i.style.display="block";var a=i.getBoundingClientRect().height;return i.innerHTML="",{element:t,lines:o,lineHeights:[],oneLinerHeight:a,sizer:i}}})).filter(Boolean);n.forEach((function(e){var t=e.sizer,n=e.lines,r=e.lineHeights,i=e.oneLinerHeight;r[n.length-1]=void 0,n.forEach((function(e,n){if(e&&e.length>1){var o=t.appendChild(document.createElement("span"));o.style.display="block",o.textContent=e}else r[n]=i}))})),n.forEach((function(e){for(var t=e.sizer,n=e.lineHeights,r=0,i=0;i code {\n\tposition: relative;\n\twhite-space: inherit;\n}\n\n.line-numbers .line-numbers-rows {\n\tposition: absolute;\n\tpointer-events: none;\n\ttop: 0;\n\tfont-size: 100%;\n\tleft: -3.8em;\n\twidth: 3em; /* works for line-numbers below 1000 lines */\n\tletter-spacing: -1px;\n\tborder-right: 1px solid #999;\n\n\t-webkit-user-select: none;\n\t-moz-user-select: none;\n\t-ms-user-select: none;\n\tuser-select: none;\n\n}\n\n\t.line-numbers-rows > span {\n\t\tdisplay: block;\n\t\tcounter-increment: linenumber;\n\t}\n\n\t\t.line-numbers-rows > span:before {\n\t\t\tcontent: counter(linenumber);\n\t\t\tcolor: #999;\n\t\t\tdisplay: block;\n\t\t\tpadding-right: 0.8em;\n\t\t\ttext-align: right;\n\t\t}\n',""])},function(e,t,n){var r=n(466);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,'/**\n * GHColors theme by Avi Aryan (http://aviaryan.in)\n * Inspired by Github syntax coloring\n */\n\ncode[class*="language-"],\npre[class*="language-"] {\n\tcolor: #393A34;\n\tfont-family: "Consolas", "Bitstream Vera Sans Mono", "Courier New", Courier, monospace;\n\tdirection: ltr;\n\ttext-align: left;\n\twhite-space: pre;\n\tword-spacing: normal;\n\tword-break: normal;\n\tfont-size: .9em;\n\tline-height: 
1.2em;\n\n\t-moz-tab-size: 4;\n\t-o-tab-size: 4;\n\ttab-size: 4;\n\n\t-webkit-hyphens: none;\n\t-moz-hyphens: none;\n\t-ms-hyphens: none;\n\thyphens: none;\n}\n\npre > code[class*="language-"] {\n\tfont-size: 1em;\n}\n\npre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,\ncode[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {\n\tbackground: #b3d4fc;\n}\n\npre[class*="language-"]::selection, pre[class*="language-"] ::selection,\ncode[class*="language-"]::selection, code[class*="language-"] ::selection {\n\tbackground: #b3d4fc;\n}\n\n/* Code blocks */\npre[class*="language-"] {\n\tpadding: 1em;\n\tmargin: .5em 0;\n\toverflow: auto;\n\tborder: 1px solid #dddddd;\n\tbackground-color: white;\n}\n\n/* Inline code */\n:not(pre) > code[class*="language-"] {\n\tpadding: .2em;\n\tpadding-top: 1px;\n\tpadding-bottom: 1px;\n\tbackground: #f8f8f8;\n\tborder: 1px solid #dddddd;\n}\n\n.token.comment,\n.token.prolog,\n.token.doctype,\n.token.cdata {\n\tcolor: #999988;\n\tfont-style: italic;\n}\n\n.token.namespace {\n\topacity: .7;\n}\n\n.token.string,\n.token.attr-value {\n\tcolor: #e3116c;\n}\n\n.token.punctuation,\n.token.operator {\n\tcolor: #393A34; /* no highlight */\n}\n\n.token.entity,\n.token.url,\n.token.symbol,\n.token.number,\n.token.boolean,\n.token.variable,\n.token.constant,\n.token.property,\n.token.regex,\n.token.inserted {\n\tcolor: #36acaa;\n}\n\n.token.atrule,\n.token.keyword,\n.token.attr-name,\n.language-autohotkey .token.selector {\n\tcolor: #00a4db;\n}\n\n.token.function,\n.token.deleted,\n.language-autohotkey .token.tag {\n\tcolor: #9a050f;\n}\n\n.token.tag,\n.token.selector,\n.language-autohotkey .token.keyword {\n\tcolor: #00009f;\n}\n\n.token.important,\n.token.function,\n.token.bold {\n\tfont-weight: bold;\n}\n\n.token.italic {\n\tfont-style: italic;\n}\n',""])},function(e,t,n){n(31);const 
r=n(21),i=n(148),o=n(203),a=n(468);angular.module("dbt").factory("graph",["$state","$window","$q","selectorService","project","locationService",function(e,t,n,s,l,c){var u={vertical:{userPanningEnabled:!1,boxSelectionEnabled:!1,maxZoom:1.5},horizontal:{userPanningEnabled:!0,boxSelectionEnabled:!1,maxZoom:1,minZoom:.05}},d={none:{name:"null"},left_right:{name:"dagre",rankDir:"LR",rankSep:200,edgeSep:30,nodeSep:50},top_down:{name:"preset",positions:function(t){var n=e.params.unique_id;if(!n)return{x:0,y:0};var a=f.graph.pristine.dag,s=r.sortBy(o.ancestorNodes(a,n,1)),l=r.sortBy(o.descendentNodes(a,n,1)),c=r.partial(r.includes,s),u=r.partial(r.includes,l),d=a.filterNodes(c),p=a.filterNodes(u);return function(e,t,n,i){console.log("Getting position for ",i,". Primary: ",e);var o,a=100/(1+Math.max(t.length,n.length));if(e==i)return{x:0,y:0};if(r.includes(t,i))o={set:t,index:r.indexOf(t,i),factor:-1,type:"parent"};else{if(!r.includes(n,i))return{x:0,y:0};o={set:n,index:r.indexOf(n,i),factor:1,type:"child"}}var s=o.set.length;if("parent"==o.type)var l={x:(0+o.index)*a,y:-200-100*(s-o.index-1)};else l={x:(0+o.index)*a,y:200+100*(s-o.index-1)};return l}(n,i.alg.topsort(d),i.alg.topsort(p).reverse(),t.data("id"))}}},f={loading:!0,loaded:n.defer(),graph_element:null,orientation:"sidebar",expanded:!1,graph:{options:u.vertical,pristine:{nodes:{},edges:{},dag:null},elements:[],layout:d.none,style:[{selector:"edge.vertical",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#027599","arrow-scale":1.5,"line-color":"#027599",width:3,"target-distance-from-node":"5px","source-endpoint":"0% 50%","target-endpoint":"0deg"}},{selector:"edge.horizontal",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#006f8a","arrow-scale":1.5,"target-distance-from-node":"10px","source-distance-from-node":"5px","line-color":"#006f8a",width:3,"source-endpoint":"50% 
0%","target-endpoint":"270deg"}},{selector:"edge[selected=1]",style:{"line-color":"#bd6bb6","target-arrow-color":"#bd6bb6","z-index":1}},{selector:'node[display="none"]',style:{display:"none"}},{selector:"node.vertical",style:{"text-margin-x":"5px","background-color":"#0094b3","font-size":"16px",shape:"ellipse",color:"#fff",width:"5px",height:"5px",padding:"5px",content:"data(label)","font-weight":300,"text-valign":"center","text-halign":"right"}},{selector:"node.horizontal",style:{"background-color":"#0094b3","font-size":"24px",shape:"roundrectangle",color:"#fff",width:"label",height:"label",padding:"12px",content:"data(label)","font-weight":300,"font-family":'-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Fira Sans", "Droid Sans", "Helvetica Neue", Helvetica, Arial, sans-serif',"text-valign":"center","text-halign":"center",ghost:"yes","ghost-offset-x":"2px","ghost-offset-y":"4px","ghost-opacity":.5,"text-outline-color":"#000","text-outline-width":"1px","text-outline-opacity":.2}},{selector:'node[resource_type="source"]',style:{"background-color":"#5fb825"}},{selector:'node[resource_type="exposure"]',style:{"background-color":"#ff694b"}},{selector:'node[resource_type="metric"]',style:{"background-color":"#ff5688"}},{selector:'node[language="python"]',style:{"background-color":"#6a5acd"}},{selector:"node[node_color]",style:{"background-color":"data(node_color)"}},{selector:"node[selected=1]",style:{"background-color":"#bd6bb6"}},{selector:"node.horizontal[selected=1]",style:{"background-color":"#88447d"}},{selector:"node.horizontal.dirty",style:{"background-color":"#919599"}},{selector:"node[hidden=1]",style:{"background-color":"#919599","background-opacity":.5}}],ready:function(e){console.log("graph ready")}}};function p(e,t,n){var i=r.map(e,(function(e){return f.graph.pristine.nodes[e]})),o=[];r.flatten(r.each(e,(function(t){var 
n=f.graph.pristine.edges[t];r.each(n,(function(t){r.includes(e,t.data.target)&&r.includes(e,t.data.source)&&o.push(t)}))})));var s=r.compact(i).concat(r.compact(o));return r.each(f.graph.elements,(function(e){e.data.display="none",e.data.selected=0,e.data.hidden=0,e.classes=n})),r.each(s,(function(e){e.data.display="element",e.classes=n,t&&r.includes(t,e.data.unique_id)&&(e.data.selected=1),r.get(e,["data","docs","show"],!0)||(e.data.hidden=1);var i=r.get(e,["data","docs","node_color"]);i&&a.isValidColor(i)&&(e.data.node_color=i)})),f.graph.elements=r.filter(s,(function(e){return"element"==e.data.display})),e}function h(e,t,n){var r=f.graph.pristine.dag;if(r){var i=f.graph.pristine.nodes,o=s.selectNodes(r,i,e),a=n?o.matched:[];return p(o.selected,a,t)}}return f.setGraphReady=function(e){f.loading=!1,f.loaded.resolve(),f.graph_element=e},f.ready=function(e){f.loaded.promise.then((function(){e(f)}))},f.manifest={},f.packages=[],f.selected_node=null,f.getCanvasHeight=function(){return.8*t.innerHeight+"px"},l.ready((function(e){f.manifest=e,f.packages=r.uniq(r.map(f.manifest.nodes,"package_name")),r.each(r.filter(f.manifest.nodes,(function(e){var t=r.includes(["model","seed","source","snapshot","analysis","exposure","metric","operation"],e.resource_type),n="test"==e.resource_type&&!e.hasOwnProperty("test_metadata");return t||n})),(function(e){var t={group:"nodes",data:r.assign(e,{parent:e.package_name,id:e.unique_id,is_group:"false"})};f.graph.pristine.nodes[e.unique_id]=t})),r.each(f.manifest.parent_map,(function(e,t){r.each(e,(function(e){var n=f.manifest.nodes[e],i=f.manifest.nodes[t];if(r.includes(["model","source","seed","snapshot","metric"],n.resource_type)&&("test"!=i.resource_type||!i.hasOwnProperty("test_metadata"))){var o=n.unique_id+"|"+i.unique_id,a={group:"edges",data:{source:n.unique_id,target:i.unique_id,unique_id:o}},s=i.unique_id;f.graph.pristine.edges[s]||(f.graph.pristine.edges[s]=[]),f.graph.pristine.edges[s].push(a)}}))}));var t=new 
i.Graph({directed:!0});r.each(f.graph.pristine.nodes,(function(e){t.setNode(e.data.unique_id,e.data.name)})),r.each(f.graph.pristine.edges,(function(e){r.each(e,(function(e){t.setEdge(e.data.source,e.data.target)}))})),f.graph.pristine.dag=t,f.graph.elements=r.flatten(r.values(f.graph.pristine.nodes).concat(r.values(f.graph.pristine.edges))),p(t.nodes())})),f.hideGraph=function(){f.orientation="sidebar",f.expanded=!1},f.showVerticalGraph=function(e,t){f.orientation="sidebar",t&&(f.expanded=!0);var n=h(r.assign({},s.options,{include:"+"+e+"+",exclude:"",hops:1}),"vertical",!0);return f.graph.layout=d.top_down,f.graph.options=u.vertical,n},f.showFullGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=r.assign({},s.options);e?(t.include="+"+e+"+",t.exclude=""):(t.include="",t.exclude="");var n=h(t,"horizontal",!0);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(t),n},f.updateGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=h(e,"horizontal",!1);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(e),t},f.deselectNodes=function(){"fullscreen"==f.orientation&&f.graph_element.elements().data("selected",0)},f.selectNode=function(e){if("fullscreen"==f.orientation){f.graph.pristine.nodes[e];var t=f.graph.pristine.dag,n=r.indexBy(o.ancestorNodes(t,e)),i=r.indexBy(o.descendentNodes(t,e));n[e]=e,i[e]=e;var a=f.graph_element;r.each(f.graph.elements,(function(t){var r=a.$id(t.data.id);n[t.data.source]&&n[t.data.target]||i[t.data.source]&&i[t.data.target]||t.data.unique_id==e?r.data("selected",1):r.data("selected",0)}))}},f.markDirty=function(e){f.markAllClean(),r.each(e,(function(e){f.graph_element.$id(e).addClass("dirty")}))},f.markAllClean=function(){f.graph_element&&f.graph_element.elements().removeClass("dirty")},f}])},function(e,t,n){"use strict";n.r(t),n.d(t,"isValidColor",(function(){return i}));const r=new 
Set(["aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","ghostwhite","gold","goldenrod","gray","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightpink","lightsalmon","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumslateblue","mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","rebeccapurple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen"]);function i(e){if(!e)return!1;const t=e.trim().toLowerCase();if(""===t)return!1;const n=t.match(/^#([A-Fa-f0-9]{3}){1,2}$/),i=r.has(t);return Boolean(n)||i}},function(e,t,n){n(31);const r=n(21),i=n(470);angular.module("dbt").factory("selectorService",["$state",function(e){var 
t={include:"",exclude:"",packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"],depth:1},n={view_node:null,selection:{clean:r.clone(t),dirty:r.clone(t)},options:{packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"]},init:function(e){r.each(e,(function(e,r){n.options[r]=e,t[r]=e,n.selection.clean[r]=e,n.selection.dirty[r]=e}))},resetSelection:function(e){var i={include:e&&r.includes(["model","seed","snapshot"],e.resource_type)?"+"+e.name+"+":e&&"source"==e.resource_type?"+source:"+e.source_name+"."+e.name+"+":e&&"exposure"==e.resource_type?"+exposure:"+e.name:e&&"metric"==e.resource_type?"+metric:"+e.name:e&&r.includes(["analysis","test"],e.resource_type)?"+"+e.name:""},o=r.assign({},t,i);n.selection.clean=r.clone(o),n.selection.dirty=r.clone(o),n.view_node=e},getViewNode:function(){return n.view_node},excludeNode:function(e,t){var r,i=n.selection.dirty.exclude,o=t.parents?"+":"",a=t.children?"+":"",s=i.length>0?" 
":"";"source"==e.resource_type?(o+="source:",r=e.source_name+"."+e.name):["exposure","metric"].indexOf(e.resource_type)>-1?(o+=e.resource_type+":",r=e.name):r=e.name;var l=i+s+o+r+a;return n.selection.dirty.exclude=l,n.updateSelection()},selectSource:function(e,t){var r="source:"+e+(t.children?"+":"");return n.selection.dirty.include=r,n.updateSelection()},clearViewNode:function(){n.view_node=null},isDirty:function(){return!r.isEqual(n.selection.clean,n.selection.dirty)},updateSelection:function(){return n.selection.clean=r.clone(n.selection.dirty),n.selection.clean},selectNodes:function(e,t,n){return i.selectNodes(e,t,n)}};return n}])},function(e,t,n){const r=n(21),i=n(471);function o(e,t){return t||(t=" "),r.filter(r.uniq(e.split(t)),(function(e){return e.length>0}))}function a(e){var t={raw:e,select_at:!1,select_children:!1,children_depth:null,select_parents:!1,parents_depth:null};const n=new RegExp(""+/^/.source+/(?(\@))?/.source+/(?((?(\d*))\+))?/.source+/((?([\w.]+)):)?/.source+/(?(.*?))/.source+/(?(\+(?(\d*))))?/.source+/$/.source).exec(e).groups;t.select_at="@"==n.childs_parents,t.select_parents=!!n.parents,t.select_children=!!n.children,n.parents_depth&&(t.parents_depth=parseInt(n.parents_depth)),n.children_depth&&(t.children_depth=parseInt(n.children_depth));var r=n.method,i=n.value;return r?-1!=r.indexOf(".")&&([r,selector_modifier]=r.split(".",2),i={config:selector_modifier,value:i}):r="implicit",t.selector_type=r,t.selector_value=i,t}function s(e){var t=o(e," ");return r.map(t,(function(e){var t=o(e,",");return t.length>1?{method:"intersect",selectors:r.map(t,a)}:{method:"none",selectors:r.map([e],a)}}))}function l(e,t){var n=s(e),i=null,o=null;return r.each(n,(function(e){var n="intersect"==e.method?r.intersection:r.union;r.each(e.selectors,(function(e){var 
r=t(e);null===i?(i=r.matched,o=r.selected):(i=n(i,r.matched),o=n(o,r.selected))}))})),{matched:i||[],selected:o||[]}}e.exports={splitSpecs:o,parseSpec:a,parseSpecs:s,buildSpec:function(e,t,n){return{include:s(e),exclude:s(t),hops:n}},applySpec:l,selectNodes:function(e,t,n){n.include,n.exclude;var o,a=r.partial(i.getNodesFromSpec,e,t,n.hops);r.values(t),o=0==n.include.trim().length?{selected:e.nodes(),matched:[]}:l(n.include,a);var s=l(n.exclude,a),c=o.selected,u=o.matched;c=r.difference(c,s.selected),u=r.difference(u,s.matched);var d=[];return r.each(c,(function(e){var i=t[e];i.data.tags||(i.data.tags=[]);var o=r.includes(n.packages,i.data.package_name),a=r.intersection(n.tags,i.data.tags).length>0,s=r.includes(n.tags,null)&&0==i.data.tags.length,l=r.includes(n.resource_types,i.data.resource_type);o&&(a||s)&&l||d.push(i.data.unique_id)})),{selected:r.difference(c,d),matched:r.difference(u,d)}}}},function(e,t,n){const r=n(21),i=n(203);var o="fqn",a="tag",s="source",l="exposure",c="metric",u="path",d="file",f="package",p="config",h="test_name",g="test_type",m={};function v(e,t){if(t===r.last(e))return!0;var n=e.reduce((e,t)=>e.concat(t.split(".")),[]),i=t.split(".");if(n.length-1||!r.hasOwnProperty("test_metadata")&&["data","singular"].indexOf(t)>-1)&&n.push(r)})),n}function $(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("source"==r.resource_type){var i,o,a=r.source_name,s=r.name;-1!=t.indexOf(".")?[i,o]=t.split(".",2):(i=t,o=null),("*"==i||i==a&&"*"===o||i==a&&o===s||i==a&&null===o)&&n.push(e.data)}})),n}m["implicit"]=function(e,t){var n=b(e,t),i=y(e,t),o=[];t.toLowerCase().endsWith(".sql")&&(o=x(e,t));var a=r.uniq([].concat(r.map(n,"unique_id"),r.map(i,"unique_id"),r.map(o,"unique_id")));return r.map(a,t=>e[t].data)},m[o]=b,m[a]=w,m[s]=$,m[l]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("exposure"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[c]=function(e,t){var n=[];return r.each(e,(function(e){var 
r=e.data;if("metric"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[u]=y,m[d]=x,m[f]=k,m[p]=A,m[h]=E,m[g]=S,e.exports={isFQNMatch:v,getNodesByFQN:b,getNodesByTag:w,getNodesBySource:$,getNodesByPath:y,getNodesByPackage:k,getNodesByConfig:A,getNodesByTestName:E,getNodesByTestType:S,getNodesFromSpec:function(e,t,n,o){const a=m[o.selector_type];if(!a)return console.log("Node matcher for selector",o.selector_type,"is invalid"),{selected:[],matched:[]};var s=a(t,o.selector_value),l=[],c=[];return r.each(s,(function(t){var a=t.unique_id;c.push(t.unique_id);var s=[],u=[],d=[];if(o.select_at&&(d=r.union(i.selectAt(e,a))),o.select_parents){var f=n||o.parents_depth;s=i.ancestorNodes(e,a,f)}if(o.select_children){f=n||o.children_depth;u=i.descendentNodes(e,a,f)}l=r.union([a],l,u,s,d)})),{selected:l,matched:c}}}},function(e,t,n){const r=n(9);n(473);r.module("dbt").factory("trackingService",["$location","selectorService","$rootScope",function(e,t,n){var r={initialized:!1,snowplow:null,project_id:null,init:function(e){r.initialized||(r.initialized=!0,r.project_id=e.project_id,!0===e.track&&r.turn_on_tracking())},isHosted:function(){return window.location.hostname.indexOf(".getdbt.com")>-1},turn_on_tracking:function(){var e,t,n,i,o,a;e=window,t=document,n="script",e[i="snowplow"]||(e.GlobalSnowplowNamespace=e.GlobalSnowplowNamespace||[],e.GlobalSnowplowNamespace.push(i),e[i]=function(){(e[i].q=e[i].q||[]).push(arguments)},e[i].q=e[i].q||[],o=t.createElement(n),a=t.getElementsByTagName(n)[0],o.async=1,o.src="//d1fc8wv8zag5ca.cloudfront.net/2.9.0/sp.js",a.parentNode.insertBefore(o,a));var 
s={appId:"dbt-docs",forceSecureTracker:!0,respectDoNotTrack:!0,userFingerprint:!1,contexts:{webPage:!0}};r.isHosted()&&(s.cookieDomain=".getdbt.com"),r.snowplow=window.snowplow,r.snowplow("newTracker","sp","fishtownanalytics.sinter-collect.com",s),r.snowplow("enableActivityTracking",30,30),r.track_pageview()},fuzzUrls:function(){r.isHosted()||(r.snowplow("setCustomUrl","https://fuzzed.getdbt.com/"),r.snowplow("setReferrerUrl","https://fuzzed.getdbt.com/"))},getContext:function(){return[{schema:"iglu:com.dbt/dbt_docs/jsonschema/1-0-0",data:{is_cloud_hosted:r.isHosted(),core_project_id:r.project_id}}]},track_pageview:function(){if(r.snowplow){r.fuzzUrls();r.snowplow("trackPageView",null,r.getContext())}},track_event:function(e,t,n,i){r.snowplow&&(r.fuzzUrls(),r.snowplow("trackStructEvent","dbt-docs",e,t,n,i,r.getContext()))},track_graph_interaction:function(e,t){r.snowplow&&(r.fuzzUrls(),r.track_event("graph","interact",e,t))}};return r}])},function(e,t,n){var r,i,o,a,s;r=n(474),i=n(204).utf8,o=n(475),a=n(204).bin,(s=function(e,t){e.constructor==String?e=t&&"binary"===t.encoding?a.stringToBytes(e):i.stringToBytes(e):o(e)?e=Array.prototype.slice.call(e,0):Array.isArray(e)||e.constructor===Uint8Array||(e=e.toString());for(var n=r.bytesToWords(e),l=8*e.length,c=1732584193,u=-271733879,d=-1732584194,f=271733878,p=0;p>>24)|4278255360&(n[p]<<24|n[p]>>>8);n[l>>>5]|=128<>>9<<4)]=l;var h=s._ff,g=s._gg,m=s._hh,v=s._ii;for(p=0;p>>0,u=u+y>>>0,d=d+x>>>0,f=f+w>>>0}return r.endian([c,u,d,f])})._ff=function(e,t,n,r,i,o,a){var s=e+(t&n|~t&r)+(i>>>0)+a;return(s<>>32-o)+t},s._gg=function(e,t,n,r,i,o,a){var s=e+(t&r|n&~r)+(i>>>0)+a;return(s<>>32-o)+t},s._hh=function(e,t,n,r,i,o,a){var s=e+(t^n^r)+(i>>>0)+a;return(s<>>32-o)+t},s._ii=function(e,t,n,r,i,o,a){var s=e+(n^(t|~r))+(i>>>0)+a;return(s<>>32-o)+t},s._blocksize=16,s._digestsize=16,e.exports=function(e,t){if(null==e)throw new Error("Illegal argument "+e);var n=r.wordsToBytes(s(e,t));return 
t&&t.asBytes?n:t&&t.asString?a.bytesToString(n):r.bytesToHex(n)}},function(e,t){var n,r;n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",r={rotl:function(e,t){return e<>>32-t},rotr:function(e,t){return e<<32-t|e>>>t},endian:function(e){if(e.constructor==Number)return 16711935&r.rotl(e,8)|4278255360&r.rotl(e,24);for(var t=0;t0;e--)t.push(Math.floor(256*Math.random()));return t},bytesToWords:function(e){for(var t=[],n=0,r=0;n>>5]|=e[n]<<24-r%32;return t},wordsToBytes:function(e){for(var t=[],n=0;n<32*e.length;n+=8)t.push(e[n>>>5]>>>24-n%32&255);return t},bytesToHex:function(e){for(var t=[],n=0;n>>4).toString(16)),t.push((15&e[n]).toString(16));return t.join("")},hexToBytes:function(e){for(var t=[],n=0;n>>6*(3-o)&63)):t.push("=");return t.join("")},base64ToBytes:function(e){e=e.replace(/[^A-Z0-9+\/]/gi,"");for(var t=[],r=0,i=0;r>>6-2*i);return t}},e.exports=r},function(e,t){function n(e){return!!e.constructor&&"function"==typeof e.constructor.isBuffer&&e.constructor.isBuffer(e)} /*! * Determine if an object is a Buffer * diff --git a/core/dbt/lib.py b/core/dbt/lib.py index ff8f06c88a8..f4b9ab5be0e 100644 --- a/core/dbt/lib.py +++ b/core/dbt/lib.py @@ -1,4 +1,6 @@ import os +from dbt.config.project import Project +from dbt.config.renderer import DbtProjectYamlRenderer from dbt.contracts.results import RunningStatus, collect_timing_info from dbt.events.functions import fire_event from dbt.events.types import NodeCompiling, NodeExecuting @@ -29,11 +31,10 @@ def compile_and_execute(self, manifest, ctx): method. Once conditional credential usage is enabled, this should be removed. 
""" result = None - ctx.node._event_status["node_status"] = RunningStatus.Compiling + ctx.node.update_event_status(node_status=RunningStatus.Compiling) fire_event( NodeCompiling( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("compile") as timing_info: @@ -45,11 +46,10 @@ def compile_and_execute(self, manifest, ctx): # for ephemeral nodes, we only want to compile, not run if not ctx.node.is_ephemeral_model: - ctx.node._event_status["node_status"] = RunningStatus.Executing + ctx.node.update_event_status(node_status=RunningStatus.Executing) fire_event( NodeExecuting( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("execute") as timing_info: @@ -71,16 +71,22 @@ def get_dbt_config(project_dir, args=None, single_threaded=False): else: profiles_dir = flags.DEFAULT_PROFILES_DIR + profile_name = getattr(args, "profile", None) + runtime_args = RuntimeArgs( project_dir=project_dir, profiles_dir=profiles_dir, single_threaded=single_threaded, - profile=getattr(args, "profile", None), + profile=profile_name, target=getattr(args, "target", None), ) - # Construct a RuntimeConfig from phony args - config = RuntimeConfig.from_args(runtime_args) + profile = RuntimeConfig.collect_profile(args=runtime_args, profile_name=profile_name) + project_renderer = DbtProjectYamlRenderer(profile, None) + project = RuntimeConfig.collect_project(args=runtime_args, project_renderer=project_renderer) + assert type(project) is Project + + config = RuntimeConfig.from_parts(project, profile, runtime_args) # Set global flags from arguments flags.set_from_args(args, config) diff --git a/core/dbt/logger.py b/core/dbt/logger.py index 4bbcfca4c06..0c7ba2fe8f2 100644 --- a/core/dbt/logger.py +++ b/core/dbt/logger.py @@ -191,11 +191,6 @@ def process(self, record): record.level = self.target_level -class JsonOnly(logbook.Processor): - def process(self, record): - record.extra["json_only"] = True - - class 
TextOnly(logbook.Processor): def process(self, record): record.extra["text_only"] = True diff --git a/core/dbt/main.py b/core/dbt/main.py index 24b053d0997..3c23cfec4b3 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -211,7 +211,8 @@ def run_from_args(parsed): if task.config is not None: log_path = getattr(task.config, "log_path", None) log_manager.set_path(log_path) - setup_event_logger(log_path or "logs", "json", False, True) + # WHY WE SET DEBUG TO BE TRUE HERE previously? + setup_event_logger(log_path or "logs", "json", False, False) fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION)) fire_event(MainReportArgs(args=args_to_dict(parsed))) @@ -482,6 +483,20 @@ def _add_defer_argument(*subparsers): ) +def _add_favor_state_argument(*subparsers): + for sub in subparsers: + sub.add_optional_argument_inverse( + "--favor-state", + enable_help=""" + If set, defer to the state variable for resolving unselected nodes, even if node exist as a database object in the current environment. + """, + disable_help=""" + If defer is set, expect standard defer behaviour. 
+ """, + default=flags.FAVOR_STATE_MODE, + ) + + def _build_run_subparser(subparsers, base_subparser): run_sub = subparsers.add_parser( "run", @@ -1072,14 +1087,6 @@ def parse_args(args, cls=DBTArgumentParser): """, ) - p.add_argument( - "--event-buffer-size", - dest="event_buffer_size", - help=""" - Sets the max number of events to buffer in EVENT_HISTORY - """, - ) - p.add_argument( "-q", "--quiet", @@ -1154,6 +1161,8 @@ def parse_args(args, cls=DBTArgumentParser): _add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub) # --defer _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub) + # --favor-state + _add_favor_state_argument(run_sub, test_sub, build_sub, snapshot_sub) # --full-refresh _add_table_mutability_arguments(run_sub, compile_sub, build_sub) diff --git a/core/dbt/node_types.py b/core/dbt/node_types.py index a6fa5ff4f84..ec7517d2029 100644 --- a/core/dbt/node_types.py +++ b/core/dbt/node_types.py @@ -13,7 +13,7 @@ class NodeType(StrEnum): # TODO: rm? RPCCall = "rpc" SqlOperation = "sql operation" - Documentation = "docs block" + Documentation = "doc" Source = "source" Macro = "macro" Exposure = "exposure" diff --git a/core/dbt/parser/README.md b/core/dbt/parser/README.md index 6ab326c42a6..7e4c208cdf9 100644 --- a/core/dbt/parser/README.md +++ b/core/dbt/parser/README.md @@ -126,17 +126,17 @@ These have executable SQL attached. Models - Are generated from SQL files in the 'models' directory - have a unique_id starting with 'model.' -- Final object is a ParsedModelNode +- Final object is a ModelNode -Data Tests +Singular Tests - Are generated from SQL files in 'tests' directory - have a unique_id starting with 'test.' -- Final object is a ParsedDataTestNode +- Final object is a SingularTestNode -Schema Tests +Generic Tests - Are generated from 'tests' in schema yaml files, which ultimately derive from tests in the 'macros' directory - Have a unique_id starting with 'test.' 
-- Final object is a ParsedSchemaTestNode +- Final object is a GenericTestNode - fqn is .schema_test. Hooks @@ -146,35 +146,35 @@ Hooks Analysis - comes from SQL files in 'analysis' directory -- Final object is a ParsedAnalysisNode +- Final object is a AnalysisNode RPC Node - This is a "node" representing the bit of Jinja-SQL that gets passed into the run_sql or compile_sql methods. When you're using the Cloud IDE, and you're working in a scratch tab, and you just want to compile/run what you have there: it needs to be parsed and executed, but it's not actually a model/node in the project, so it's this special thing. This is a temporary addition to the running manifest. -- Object is a ParsedRPCNode +- Object is a RPCNode ### sources - comes from 'sources' sections in yaml files -- Final object is a ParsedSourceDefinition node +- Final object is a SourceDefinition node - have a unique_id starting with 'source.' ### macros - comes from SQL files in 'macros' directory -- Final object is a ParsedMacro node +- Final object is a Macro node - have a unique_id starting with 'macro.' 
- Test macros are used in schema tests ### docs - comes from .md files in 'docs' directory -- Final object is a ParsedDocumentation +- Final object is a Documentation ### exposures - comes from 'exposures' sections in yaml files -- Final object is a ParsedExposure node +- Final object is a Exposure node ## Temporary patch files diff --git a/core/dbt/parser/analysis.py b/core/dbt/parser/analysis.py index 17eadb8783b..2102a76ac2e 100644 --- a/core/dbt/parser/analysis.py +++ b/core/dbt/parser/analysis.py @@ -1,16 +1,16 @@ import os -from dbt.contracts.graph.parsed import ParsedAnalysisNode +from dbt.contracts.graph.nodes import AnalysisNode from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock -class AnalysisParser(SimpleSQLParser[ParsedAnalysisNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedAnalysisNode: +class AnalysisParser(SimpleSQLParser[AnalysisNode]): + def parse_from_dict(self, dct, validate=True) -> AnalysisNode: if validate: - ParsedAnalysisNode.validate(dct) - return ParsedAnalysisNode.from_dict(dct) + AnalysisNode.validate(dct) + return AnalysisNode.from_dict(dct) @property def resource_type(self) -> NodeType: diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 4b9e666a421..9c245214d83 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -16,9 +16,9 @@ from dbt.config import Project, RuntimeConfig from dbt.context.context_config import ContextConfig from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import HasUniqueID, ManifestNodes +from dbt.contracts.graph.nodes import ManifestNode, BaseNode from dbt.contracts.graph.unparsed import UnparsedNode, Docs -from dbt.exceptions import ParsingException, validator_error_message, InternalException +from dbt.exceptions import InternalException, InvalidConfigUpdate, InvalidDictParse from dbt import hooks from dbt.node_types import NodeType, ModelLanguage from 
dbt.parser.search import FileBlock @@ -26,11 +26,11 @@ # internally, the parser may store a less-restrictive type that will be # transformed into the final type. But it will have to be derived from # ParsedNode to be operable. -FinalValue = TypeVar("FinalValue", bound=HasUniqueID) -IntermediateValue = TypeVar("IntermediateValue", bound=HasUniqueID) +FinalValue = TypeVar("FinalValue", bound=BaseNode) +IntermediateValue = TypeVar("IntermediateValue", bound=BaseNode) IntermediateNode = TypeVar("IntermediateNode", bound=Any) -FinalNode = TypeVar("FinalNode", bound=ManifestNodes) +FinalNode = TypeVar("FinalNode", bound=ManifestNode) ConfiguredBlockType = TypeVar("ConfiguredBlockType", bound=FileBlock) @@ -169,7 +169,6 @@ def _create_error_node( resource_type=self.resource_type, path=path, original_file_path=original_file_path, - root_path=self.project.project_root, package_name=self.project.project_name, raw_code=raw_code, language=language, @@ -192,6 +191,7 @@ def _create_parsetime_node( name = block.name if block.path.relative_path.endswith(".py"): language = ModelLanguage.python + config.add_config_call({"materialized": "table"}) else: # this is not ideal but we have a lot of tests to adjust if don't do it language = ModelLanguage.sql @@ -202,7 +202,6 @@ def _create_parsetime_node( "database": self.default_database, "fqn": fqn, "name": name, - "root_path": self.project.project_root, "resource_type": self.resource_type, "path": path, "original_file_path": block.path.original_file_path, @@ -217,7 +216,6 @@ def _create_parsetime_node( try: return self.parse_from_dict(dct, validate=True) except ValidationError as exc: - msg = validator_error_message(exc) # this is a bit silly, but build an UnparsedNode just for error # message reasons node = self._create_error_node( @@ -226,7 +224,7 @@ def _create_parsetime_node( original_file_path=block.path.original_file_path, raw_code=block.contents, ) - raise ParsingException(msg, node=node) + raise InvalidDictParse(exc, node=node) 
def _context_for(self, parsed_node: IntermediateNode, config: ContextConfig) -> Dict[str, Any]: return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config) @@ -255,12 +253,13 @@ def update_parsed_node_config_dict( self._mangle_hooks(final_config_dict) parsed_node.config = parsed_node.config.from_dict(final_config_dict) - def update_parsed_node_name( + def update_parsed_node_relation_names( self, parsed_node: IntermediateNode, config_dict: Dict[str, Any] ) -> None: self._update_node_database(parsed_node, config_dict) self._update_node_schema(parsed_node, config_dict) self._update_node_alias(parsed_node, config_dict) + self._update_node_relation_name(parsed_node) def update_parsed_node_config( self, @@ -319,7 +318,7 @@ def update_parsed_node_config( # parsed_node.config is what it would be if they did nothing self.update_parsed_node_config_dict(parsed_node, config_dict) # This updates the node database/schema/alias - self.update_parsed_node_name(parsed_node, config_dict) + self.update_parsed_node_relation_names(parsed_node, config_dict) # tests don't have hooks if parsed_node.resource_type == NodeType.Test: @@ -364,10 +363,9 @@ def render_update(self, node: IntermediateNode, config: ContextConfig) -> None: self.update_parsed_node_config(node, config, context=context) except ValidationError as exc: # we got a ValidationError - probably bad types in config() - msg = validator_error_message(exc) - raise ParsingException(msg, node=node) from exc + raise InvalidConfigUpdate(exc, node=node) from exc - def add_result_node(self, block: FileBlock, node: ManifestNodes): + def add_result_node(self, block: FileBlock, node: ManifestNode): if node.config.enabled: self.manifest.add_node(block.file, node) else: @@ -390,6 +388,19 @@ def parse_node(self, block: ConfiguredBlockType) -> FinalNode: self.add_result_node(block, result) return result + def _update_node_relation_name(self, node: ManifestNode): + # Seed and Snapshot nodes and Models that are 
not ephemeral, + # and TestNodes that store_failures. + # TestNodes do not get a relation_name without store failures + # because no schema is created. + if node.is_relational and not node.is_ephemeral_model: + adapter = get_adapter(self.root_project) + relation_cls = adapter.Relation + node.relation_name = str(relation_cls.create_from(self.root_project, node)) + else: + # Set it to None in case it changed with a config update + node.relation_name = None + @abc.abstractmethod def parse_file(self, file_block: FileBlock) -> None: pass diff --git a/core/dbt/parser/docs.py b/core/dbt/parser/docs.py index f24f70544d5..edc7f83acfc 100644 --- a/core/dbt/parser/docs.py +++ b/core/dbt/parser/docs.py @@ -4,7 +4,7 @@ from dbt.clients.jinja import get_rendered from dbt.contracts.files import SourceFile -from dbt.contracts.graph.parsed import ParsedDocumentation +from dbt.contracts.graph.nodes import Documentation from dbt.node_types import NodeType from dbt.parser.base import Parser from dbt.parser.search import BlockContents, FileBlock, BlockSearcher @@ -13,7 +13,7 @@ SHOULD_PARSE_RE = re.compile(r"{[{%]") -class DocumentationParser(Parser[ParsedDocumentation]): +class DocumentationParser(Parser[Documentation]): @property def resource_type(self) -> NodeType: return NodeType.Documentation @@ -23,22 +23,21 @@ def get_compiled_path(cls, block: FileBlock): return block.path.relative_path def generate_unique_id(self, resource_name: str, _: Optional[str] = None) -> str: - # because docs are in their own graph namespace, node type doesn't - # need to be part of the unique ID. 
- return "{}.{}".format(self.project.project_name, resource_name) + # For consistency, use the same format for doc unique_ids + return f"doc.{self.project.project_name}.{resource_name}" - def parse_block(self, block: BlockContents) -> Iterable[ParsedDocumentation]: + def parse_block(self, block: BlockContents) -> Iterable[Documentation]: unique_id = self.generate_unique_id(block.name) contents = get_rendered(block.contents, {}).strip() - doc = ParsedDocumentation( - root_path=self.project.project_root, + doc = Documentation( path=block.file.path.relative_path, original_file_path=block.path.original_file_path, package_name=self.project.project_name, unique_id=unique_id, name=block.name, block_contents=contents, + resource_type=NodeType.Documentation, ) return [doc] diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py index 4706119585b..822dd5b2d85 100644 --- a/core/dbt/parser/generic_test.py +++ b/core/dbt/parser/generic_test.py @@ -4,9 +4,8 @@ from dbt.exceptions import ParsingException from dbt.clients import jinja -from dbt.contracts.graph.parsed import ParsedGenericTestNode +from dbt.contracts.graph.nodes import GenericTestNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro -from dbt.contracts.graph.parsed import ParsedMacro from dbt.contracts.files import SourceFile from dbt.events.functions import fire_event from dbt.events.types import GenericTestFileParse @@ -14,9 +13,10 @@ from dbt.parser.base import BaseParser from dbt.parser.search import FileBlock from dbt.utils import MACRO_PREFIX +from dbt import flags -class GenericTestParser(BaseParser[ParsedGenericTestNode]): +class GenericTestParser(BaseParser[GenericTestNode]): @property def resource_type(self) -> NodeType: return NodeType.Macro @@ -27,21 +27,20 @@ def get_compiled_path(cls, block: FileBlock): def parse_generic_test( self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str - ) -> ParsedMacro: + ) -> Macro: unique_id = self.generate_unique_id(name) - 
return ParsedMacro( + return Macro( path=base_node.path, macro_sql=block.full_block, original_file_path=base_node.original_file_path, package_name=base_node.package_name, - root_path=base_node.root_path, resource_type=base_node.resource_type, name=name, unique_id=unique_id, ) - def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[ParsedMacro]: + def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macro]: try: blocks: List[jinja.BlockTag] = [ t @@ -88,7 +87,8 @@ def parse_file(self, block: FileBlock): source_file = block.file assert isinstance(source_file.contents, str) original_file_path = source_file.path.original_file_path - fire_event(GenericTestFileParse(path=original_file_path)) + if flags.MACRO_DEBUGGING: + fire_event(GenericTestFileParse(path=original_file_path)) # this is really only used for error messages base_node = UnparsedMacro( @@ -96,7 +96,6 @@ def parse_file(self, block: FileBlock): original_file_path=original_file_path, package_name=self.project.project_name, raw_code=source_file.contents, - root_path=self.project.project_root, resource_type=NodeType.Macro, language="sql", ) diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index 3dfb541cb8f..af0282c953f 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -13,7 +13,7 @@ ) from dbt.clients.jinja import get_rendered, GENERIC_TEST_KWARGS_NAME -from dbt.contracts.graph.parsed import UnpatchedSourceDefinition +from dbt.contracts.graph.nodes import UnpatchedSourceDefinition from dbt.contracts.graph.unparsed import ( TestDef, UnparsedAnalysisUpdate, @@ -21,7 +21,19 @@ UnparsedNodeUpdate, UnparsedExposure, ) -from dbt.exceptions import raise_compiler_error, raise_parsing_error, UndefinedMacroException +from dbt.exceptions import ( + CustomMacroPopulatingConfigValues, + SameKeyNested, + TagNotString, + TagsNotListOfStrings, + TestArgIncludesModel, + 
TestArgsNotDict, + TestDefinitionDictLength, + TestInvalidType, + TestNameNotString, + UnexpectedTestNamePattern, + UndefinedMacroException, +) from dbt.parser.search import FileBlock @@ -222,9 +234,7 @@ def __init__( test_name, test_args = self.extract_test_args(test, column_name) self.args: Dict[str, Any] = test_args if "model" in self.args: - raise_compiler_error( - 'Test arguments include "model", which is a reserved argument', - ) + raise TestArgIncludesModel() self.package_name: str = package_name self.target: Testable = target @@ -232,9 +242,7 @@ def __init__( match = self.TEST_NAME_PATTERN.match(test_name) if match is None: - raise_compiler_error( - "Test name string did not match expected pattern: {}".format(test_name) - ) + raise UnexpectedTestNamePattern(test_name) groups = match.groupdict() self.name: str = groups["test_name"] @@ -251,9 +259,7 @@ def __init__( value = self.args.pop(key, None) # 'modifier' config could be either top level arg or in config if value and "config" in self.args and key in self.args["config"]: - raise_compiler_error( - "Test cannot have the same key at the top-level and in config" - ) + raise SameKeyNested() if not value and "config" in self.args: value = self.args["config"].pop(key, None) if isinstance(value, str): @@ -261,22 +267,12 @@ def __init__( try: value = get_rendered(value, render_ctx, native=True) except UndefinedMacroException as e: - - # Generic tests do not include custom macros in the Jinja - # rendering context, so this will almost always fail. As it - # currently stands, the error message is inscrutable, which - # has caused issues for some projects migrating from - # pre-0.20.0 to post-0.20.0. - # See https://github.com/dbt-labs/dbt-core/issues/4103 - # and https://github.com/dbt-labs/dbt-core/issues/5294 - raise_compiler_error( - f"The {self.target.name}.{column_name} column's " - f'"{self.name}" test references an undefined ' - f"macro in its {key} configuration argument. 
" - f"The macro {e.msg}.\n" - "Please note that the generic test configuration parser " - "currently does not support using custom macros to " - "populate configuration values" + raise CustomMacroPopulatingConfigValues( + target_name=self.target.name, + column_name=column_name, + name=self.name, + key=key, + err_msg=e.msg ) if value is not None: @@ -314,9 +310,7 @@ def _bad_type(self) -> TypeError: @staticmethod def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: if not isinstance(test, dict): - raise_parsing_error( - "test must be dict or str, got {} (value {})".format(type(test), test) - ) + raise TestInvalidType(test) # If the test is a dictionary with top-level keys, the test name is "test_name" # and the rest are arguments @@ -330,20 +324,13 @@ def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: else: test = list(test.items()) if len(test) != 1: - raise_parsing_error( - "test definition dictionary must have exactly one key, got" - " {} instead ({} keys)".format(test, len(test)) - ) + raise TestDefinitionDictLength(test) test_name, test_args = test[0] if not isinstance(test_args, dict): - raise_parsing_error( - "test arguments must be dict, got {} (value {})".format(type(test_args), test_args) - ) + raise TestArgsNotDict(test_args) if not isinstance(test_name, str): - raise_parsing_error( - "test name must be a str, got {} (value {})".format(type(test_name), test_name) - ) + raise TestNameNotString(test_name) test_args = deepcopy(test_args) if name is not None: test_args["column_name"] = name @@ -434,12 +421,10 @@ def tags(self) -> List[str]: if isinstance(tags, str): tags = [tags] if not isinstance(tags, list): - raise_compiler_error( - f"got {tags} ({type(tags)}) for tags, expected a list of strings" - ) + raise TagsNotListOfStrings(tags) for tag in tags: if not isinstance(tag, str): - raise_compiler_error(f"got {tag} ({type(tag)}) for tag, expected a str") + raise TagNotString(tag) return tags[:] def macro_name(self) -> 
str: diff --git a/core/dbt/parser/hooks.py b/core/dbt/parser/hooks.py index 2ac8bfda0ef..d05ea136dc5 100644 --- a/core/dbt/parser/hooks.py +++ b/core/dbt/parser/hooks.py @@ -3,7 +3,7 @@ from dbt.context.context_config import ContextConfig from dbt.contracts.files import FilePath -from dbt.contracts.graph.parsed import ParsedHookNode +from dbt.contracts.graph.nodes import HookNode from dbt.exceptions import InternalException from dbt.node_types import NodeType, RunHookType from dbt.parser.base import SimpleParser @@ -65,7 +65,7 @@ def __iter__(self) -> Iterator[HookBlock]: ) -class HookParser(SimpleParser[HookBlock, ParsedHookNode]): +class HookParser(SimpleParser[HookBlock, HookNode]): def transform(self, node): return node @@ -81,10 +81,10 @@ def get_path(self) -> FilePath: ) return path - def parse_from_dict(self, dct, validate=True) -> ParsedHookNode: + def parse_from_dict(self, dct, validate=True) -> HookNode: if validate: - ParsedHookNode.validate(dct) - return ParsedHookNode.from_dict(dct) + HookNode.validate(dct) + return HookNode.from_dict(dct) @classmethod def get_compiled_path(cls, block: HookBlock): @@ -98,7 +98,7 @@ def _create_parsetime_node( fqn: List[str], name=None, **kwargs, - ) -> ParsedHookNode: + ) -> HookNode: return super()._create_parsetime_node( block=block, diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py index 4fe6b422595..7c5336b8ccf 100644 --- a/core/dbt/parser/macros.py +++ b/core/dbt/parser/macros.py @@ -4,7 +4,7 @@ from dbt.clients import jinja from dbt.contracts.graph.unparsed import UnparsedMacro -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro from dbt.contracts.files import FilePath, SourceFile from dbt.exceptions import ParsingException from dbt.events.functions import fire_event @@ -13,9 +13,10 @@ from dbt.parser.base import BaseParser from dbt.parser.search import FileBlock, filesystem_search from dbt.utils import MACRO_PREFIX +from dbt import flags -class 
MacroParser(BaseParser[ParsedMacro]): +class MacroParser(BaseParser[Macro]): # This is only used when creating a MacroManifest separate # from the normal parsing flow. def get_paths(self) -> List[FilePath]: @@ -31,23 +32,20 @@ def resource_type(self) -> NodeType: def get_compiled_path(cls, block: FileBlock): return block.path.relative_path - def parse_macro( - self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str - ) -> ParsedMacro: + def parse_macro(self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str) -> Macro: unique_id = self.generate_unique_id(name) - return ParsedMacro( + return Macro( path=base_node.path, macro_sql=block.full_block, original_file_path=base_node.original_file_path, package_name=base_node.package_name, - root_path=base_node.root_path, resource_type=base_node.resource_type, name=name, unique_id=unique_id, ) - def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[ParsedMacro]: + def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]: try: blocks: List[jinja.BlockTag] = [ t @@ -95,7 +93,8 @@ def parse_file(self, block: FileBlock): source_file = block.file assert isinstance(source_file.contents, str) original_file_path = source_file.path.original_file_path - fire_event(MacroFileParse(path=original_file_path)) + if flags.MACRO_DEBUGGING: + fire_event(MacroFileParse(path=original_file_path)) # this is really only used for error messages base_node = UnparsedMacro( @@ -103,7 +102,6 @@ def parse_file(self, block: FileBlock): original_file_path=original_file_path, package_name=self.project.project_name, raw_code=source_file.contents, - root_path=self.project.project_root, resource_type=NodeType.Macro, language="sql", ) diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 29f93b5bae2..787b70cfeaf 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -18,7 +18,7 @@ get_adapter_package_names, ) from dbt.helper_types import PathSet -from 
dbt.events.functions import fire_event, get_invocation_id +from dbt.events.functions import fire_event, get_invocation_id, warn_or_error from dbt.events.types import ( PartialParsingFullReparseBecauseOfError, PartialParsingExceptionFile, @@ -35,10 +35,10 @@ PartialParsingNotEnabled, ParsedFileLoadFailed, PartialParseSaveFileNotFound, - InvalidDisabledSourceInTestNode, - InvalidRefInTestNode, + InvalidDisabledTargetInTestNode, PartialParsingProjectEnvVarsChanged, PartialParsingProfileEnvVarsChanged, + NodeNotFoundOrDisabled, ) from dbt.logger import DbtProcessState from dbt.node_types import NodeType @@ -53,7 +53,6 @@ from dbt.contracts.files import FileHash, ParseFileType, SchemaSourceFile from dbt.parser.read_files import read_files, load_source_file from dbt.parser.partial import PartialParsing, special_override_macros -from dbt.contracts.graph.compiled import ManifestNode from dbt.contracts.graph.manifest import ( Manifest, Disabled, @@ -61,22 +60,18 @@ ManifestStateCheck, ParsingInfo, ) -from dbt.contracts.graph.parsed import ( - ParsedSourceDefinition, - ParsedNode, - ParsedMacro, +from dbt.contracts.graph.nodes import ( + SourceDefinition, + Macro, ColumnInfo, - ParsedExposure, - ParsedMetric, + Exposure, + Metric, + SeedNode, + ManifestNode, + ResultNode, ) from dbt.contracts.util import Writable -from dbt.exceptions import ( - ref_target_not_found, - get_target_not_found_or_disabled_msg, - target_not_found, - get_not_found_or_disabled_msg, - warn_or_error, -) +from dbt.exceptions import TargetNotFound, AmbiguousAlias from dbt.parser.base import Parser from dbt.parser.analysis import AnalysisParser from dbt.parser.generic_test import GenericTestParser @@ -90,7 +85,6 @@ from dbt.parser.seeds import SeedParser from dbt.parser.snapshots import SnapshotParser from dbt.parser.sources import SourcePatcher -from dbt.ui import warning_tag from dbt.version import __version__ from dbt.dataclass_schema import StrEnum, dbtClassMixin @@ -371,7 +365,7 @@ def load(self): 
self._perf_info.parse_project_elapsed = time.perf_counter() - start_parse_projects # patch_sources converts the UnparsedSourceDefinitions in the - # Manifest.sources to ParsedSourceDefinition via 'patch_source' + # Manifest.sources to SourceDefinition via 'patch_source' # in SourcePatcher start_patch = time.perf_counter() patcher = SourcePatcher(self.root_project, self.manifest) @@ -542,7 +536,9 @@ def macro_depends_on(self): macro.depends_on.add_macro(dep_macro_id) # will check for dupes def write_manifest_for_partial_parse(self): - path = os.path.join(self.root_project.target_path, PARTIAL_PARSE_FILE_NAME) + path = os.path.join( + self.root_project.project_root, self.root_project.target_path, PARTIAL_PARSE_FILE_NAME + ) try: # This shouldn't be necessary, but we have gotten bug reports (#3757) of the # saved manifest not matching the code version. @@ -708,7 +704,7 @@ def build_manifest_state_check(self): vars_hash = FileHash.from_contents( "\x00".join( [ - getattr(config.args, "vars", "{}") or "{}", + str(getattr(config.args, "vars", "{}") or "{}"), getattr(config.args, "profile", "") or "", getattr(config.args, "target", "") or "", __version__, @@ -856,6 +852,10 @@ def process_metrics(self, config: RuntimeConfig): if metric.created_at < self.started_at: continue _process_metrics_for_node(self.manifest, current_project, metric) + for exposure in self.manifest.exposures.values(): + if exposure.created_at < self.started_at: + continue + _process_metrics_for_node(self.manifest, current_project, exposure) # nodes: node and column descriptions # sources: source and table descriptions, column descriptions @@ -920,7 +920,7 @@ def process_sources(self, current_project: str): for node in self.manifest.nodes.values(): if node.resource_type == NodeType.Source: continue - assert not isinstance(node, ParsedSourceDefinition) + assert not isinstance(node, SourceDefinition) if node.created_at < self.started_at: continue _process_sources_for_node(self.manifest, current_project, 
node) @@ -955,65 +955,43 @@ def process_nodes(self): self.manifest.rebuild_ref_lookup() -def invalid_ref_fail_unless_test(node, target_model_name, target_model_package, disabled): - - if node.resource_type == NodeType.Test: - msg = get_target_not_found_or_disabled_msg( - node=node, - target_name=target_model_name, - target_package=target_model_package, - disabled=disabled, - ) - if disabled: - fire_event(InvalidRefInTestNode(msg=msg)) - else: - warn_or_error(msg, log_fmt=warning_tag("{}")) - else: - ref_target_not_found( - node, - target_model_name, - target_model_package, - disabled=disabled, - ) - - -def invalid_source_fail_unless_test(node, target_name, target_table_name, disabled): +def invalid_target_fail_unless_test( + node, + target_name: str, + target_kind: str, + target_package: Optional[str] = None, + disabled: Optional[bool] = None, +): if node.resource_type == NodeType.Test: - msg = get_not_found_or_disabled_msg( - node=node, - target_name=f"{target_name}.{target_table_name}", - target_kind="source", - disabled=disabled, - ) if disabled: - fire_event(InvalidDisabledSourceInTestNode(msg=msg)) + fire_event( + InvalidDisabledTargetInTestNode( + resource_type_title=node.resource_type.title(), + unique_id=node.unique_id, + original_file_path=node.original_file_path, + target_kind=target_kind, + target_name=target_name, + target_package=target_package if target_package else "", + ) + ) else: - warn_or_error(msg, log_fmt=warning_tag("{}")) - else: - target_not_found( - node=node, - target_name=f"{target_name}.{target_table_name}", - target_kind="source", - disabled=disabled, - ) - - -def invalid_metric_fail_unless_test(node, target_metric_name, target_metric_package, disabled): - - if node.resource_type == NodeType.Test: - msg = get_target_not_found_or_disabled_msg( - node=node, - target_name=target_metric_name, - target_package=target_metric_package, - disabled=disabled, - ) - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error( + 
NodeNotFoundOrDisabled( + original_file_path=node.original_file_path, + unique_id=node.unique_id, + resource_type_title=node.resource_type.title(), + target_name=target_name, + target_kind=target_kind, + target_package=target_package if target_package else "", + disabled=str(disabled), + ) + ) else: - target_not_found( + raise TargetNotFound( node=node, - target_name=target_metric_name, - target_kind="metric", - target_package=target_metric_package, + target_name=target_name, + target_kind=target_kind, + target_package=target_package, disabled=disabled, ) @@ -1037,11 +1015,11 @@ def _check_resource_uniqueness( existing_node = names_resources.get(name) if existing_node is not None: - dbt.exceptions.raise_duplicate_resource_name(existing_node, node) + raise dbt.exceptions.DuplicateResourceName(existing_node, node) existing_alias = alias_resources.get(full_node_name) if existing_alias is not None: - dbt.exceptions.raise_ambiguous_alias(existing_alias, node, full_node_name) + raise AmbiguousAlias(node_1=existing_alias, node_2=node, duped_name=full_node_name) names_resources[name] = node alias_resources[full_node_name] = node @@ -1061,7 +1039,7 @@ def _check_manifest(manifest: Manifest, config: RuntimeConfig) -> None: def _get_node_column(node, column_name): - """Given a ParsedNode, add some fields that might be missing. Return a + """Given a ManifestNode, add some fields that might be missing. Return a reference to the dict that refers to the given column, creating it if it doesn't yet exist. 
""" @@ -1074,7 +1052,7 @@ def _get_node_column(node, column_name): return column -DocsContextCallback = Callable[[Union[ParsedNode, ParsedSourceDefinition]], Dict[str, Any]] +DocsContextCallback = Callable[[ResultNode], Dict[str, Any]] # node and column descriptions @@ -1090,7 +1068,7 @@ def _process_docs_for_node( # source and table descriptions, column descriptions def _process_docs_for_source( context: Dict[str, Any], - source: ParsedSourceDefinition, + source: SourceDefinition, ): table_description = source.description source_description = source.source_description @@ -1106,27 +1084,22 @@ def _process_docs_for_source( # macro argument descriptions -def _process_docs_for_macro(context: Dict[str, Any], macro: ParsedMacro) -> None: +def _process_docs_for_macro(context: Dict[str, Any], macro: Macro) -> None: macro.description = get_rendered(macro.description, context) for arg in macro.arguments: arg.description = get_rendered(arg.description, context) # exposure descriptions -def _process_docs_for_exposure(context: Dict[str, Any], exposure: ParsedExposure) -> None: +def _process_docs_for_exposure(context: Dict[str, Any], exposure: Exposure) -> None: exposure.description = get_rendered(exposure.description, context) -def _process_docs_for_metrics(context: Dict[str, Any], metric: ParsedMetric) -> None: +def _process_docs_for_metrics(context: Dict[str, Any], metric: Metric) -> None: metric.description = get_rendered(metric.description, context) -# TODO: this isn't actually referenced anywhere? 
-def _process_derived_metrics(context: Dict[str, Any], metric: ParsedMetric) -> None: - metric.description = get_rendered(metric.description, context) - - -def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: ParsedExposure): +def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: Exposure): """Given a manifest and exposure in that manifest, process its refs""" for ref in exposure.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None @@ -1153,10 +1126,11 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur # This may raise. Even if it doesn't, we don't want to add # this exposure to the graph b/c there is no destination exposure exposure.config.enabled = False - invalid_ref_fail_unless_test( - exposure, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=exposure, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) @@ -1168,7 +1142,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur manifest.update_exposure(exposure) -def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric): +def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: Metric): """Given a manifest and a metric in that manifest, process its refs""" for ref in metric.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None @@ -1195,13 +1169,13 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P # This may raise. 
Even if it doesn't, we don't want to add # this metric to the graph b/c there is no destination metric metric.config.enabled = False - invalid_ref_fail_unless_test( - metric, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=metric, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) - continue target_model_id = target_model.unique_id @@ -1211,11 +1185,17 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P def _process_metrics_for_node( - manifest: Manifest, current_project: str, node: Union[ManifestNode, ParsedMetric] + manifest: Manifest, + current_project: str, + node: Union[ManifestNode, Metric, Exposure], ): """Given a manifest and a node in that manifest, process its metrics""" + + if isinstance(node, SeedNode): + return + for metric in node.metrics: - target_metric: Optional[Union[Disabled, ParsedMetric]] = None + target_metric: Optional[Union[Disabled, Metric]] = None target_metric_name: str target_metric_package: Optional[str] = None @@ -1239,13 +1219,13 @@ def _process_metrics_for_node( # This may raise. 
Even if it doesn't, we don't want to add # this node to the graph b/c there is no destination node node.config.enabled = False - invalid_metric_fail_unless_test( - node, - target_metric_name, - target_metric_package, + invalid_target_fail_unless_test( + node=node, + target_name=target_metric_name, + target_kind="source", + target_package=target_metric_package, disabled=(isinstance(target_metric, Disabled)), ) - continue target_metric_id = target_metric.unique_id @@ -1255,6 +1235,10 @@ def _process_metrics_for_node( def _process_refs_for_node(manifest: Manifest, current_project: str, node: ManifestNode): """Given a manifest and a node in that manifest, process its refs""" + + if isinstance(node, SeedNode): + return + for ref in node.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None target_model_name: str @@ -1280,13 +1264,13 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif # This may raise. Even if it doesn't, we don't want to add # this node to the graph b/c there is no destination node node.config.enabled = False - invalid_ref_fail_unless_test( - node, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=node, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) - continue target_model_id = target_model.unique_id @@ -1299,10 +1283,8 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif manifest.update_node(node) -def _process_sources_for_exposure( - manifest: Manifest, current_project: str, exposure: ParsedExposure -): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None +def _process_sources_for_exposure(manifest: Manifest, current_project: str, exposure: Exposure): + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in exposure.sources: target_source = manifest.resolve_source( source_name, @@ 
-1312,8 +1294,11 @@ def _process_sources_for_exposure( ) if target_source is None or isinstance(target_source, Disabled): exposure.config.enabled = False - invalid_source_fail_unless_test( - exposure, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=exposure, + target_name=f"{source_name}.{table_name}", + target_kind="source", + disabled=(isinstance(target_source, Disabled)), ) continue target_source_id = target_source.unique_id @@ -1321,8 +1306,8 @@ def _process_sources_for_exposure( manifest.update_exposure(exposure) -def _process_sources_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None +def _process_sources_for_metric(manifest: Manifest, current_project: str, metric: Metric): + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in metric.sources: target_source = manifest.resolve_source( source_name, @@ -1332,8 +1317,11 @@ def _process_sources_for_metric(manifest: Manifest, current_project: str, metric ) if target_source is None or isinstance(target_source, Disabled): metric.config.enabled = False - invalid_source_fail_unless_test( - metric, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=metric, + target_name=f"{source_name}.{table_name}", + target_kind="source", + disabled=(isinstance(target_source, Disabled)), ) continue target_source_id = target_source.unique_id @@ -1342,7 +1330,11 @@ def _process_sources_for_metric(manifest: Manifest, current_project: str, metric def _process_sources_for_node(manifest: Manifest, current_project: str, node: ManifestNode): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None + + if isinstance(node, SeedNode): + return + + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in node.sources: 
target_source = manifest.resolve_source( source_name, @@ -1354,8 +1346,11 @@ def _process_sources_for_node(manifest: Manifest, current_project: str, node: Ma if target_source is None or isinstance(target_source, Disabled): # this folows the same pattern as refs node.config.enabled = False - invalid_source_fail_unless_test( - node, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=node, + target_name=f"{source_name}.{table_name}", + target_kind="source", + disabled=(isinstance(target_source, Disabled)), ) continue target_source_id = target_source.unique_id @@ -1365,7 +1360,7 @@ def _process_sources_for_node(manifest: Manifest, current_project: str, node: Ma # This is called in task.rpc.sql_commands when a "dynamic" node is # created in the manifest, in 'add_refs' -def process_macro(config: RuntimeConfig, manifest: Manifest, macro: ParsedMacro) -> None: +def process_macro(config: RuntimeConfig, manifest: Manifest, macro: Macro) -> None: ctx = generate_runtime_docs_context( config, macro, diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index aaf6a0d016e..39bb18be714 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -1,6 +1,6 @@ from copy import deepcopy from dbt.context.context_config import ContextConfig -from dbt.contracts.graph.parsed import ParsedModelNode +from dbt.contracts.graph.nodes import ModelNode import dbt.flags as flags from dbt.events.functions import fire_event from dbt.events.types import ( @@ -29,8 +29,13 @@ # New for Python models :p import ast from dbt.dataclass_schema import ValidationError -from dbt.exceptions import ParsingException, validator_error_message, UndefinedMacroException - +from dbt.exceptions import ( + InvalidModelConfig, + ParsingException, + PythonLiteralEval, + PythonParsingException, + UndefinedMacroException, +) dbt_function_key_words = set(["ref", "source", "config", "get"]) dbt_function_full_names = set(["dbt.ref", 
"dbt.source", "dbt.config", "dbt.config.get"]) @@ -61,7 +66,11 @@ def visit_FunctionDef(self, node: ast.FunctionDef) -> None: def check_error(self, node): if self.num_model_def != 1: - raise ParsingException("dbt only allow one model defined per python file", node=node) + raise ParsingException( + f"dbt allows exactly one model defined per python file, found {self.num_model_def}", + node=node, + ) + if len(self.dbt_errors) != 0: raise ParsingException("\n".join(self.dbt_errors), node=node) @@ -87,12 +96,7 @@ def _safe_eval(self, node): try: return ast.literal_eval(node) except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc: - msg = validator_error_message( - f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{exc}\n" - "https://docs.python.org/3/library/ast.html#ast.literal_eval\n" - "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures" - ) - raise ParsingException(msg, node=self.dbt_node) from exc + raise PythonLiteralEval(exc, node=self.dbt_node) from exc def _get_call_literals(self, node): # List of literals @@ -177,11 +181,11 @@ def verify_python_model_code(node): raise ParsingException("No jinja in python model code is allowed", node=node) -class ModelParser(SimpleSQLParser[ParsedModelNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedModelNode: +class ModelParser(SimpleSQLParser[ModelNode]): + def parse_from_dict(self, dct, validate=True) -> ModelNode: if validate: - ParsedModelNode.validate(dct) - return ParsedModelNode.from_dict(dct) + ModelNode.validate(dct) + return ModelNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -192,32 +196,54 @@ def get_compiled_path(cls, block: FileBlock): return block.path.relative_path def parse_python_model(self, node, config, context): + config_keys_used = [] + config_keys_defaults = [] + try: tree = ast.parse(node.raw_code, 
filename=node.original_file_path) except SyntaxError as exc: - msg = validator_error_message(exc) - raise ParsingException(f"{msg}\n{exc.text}", node=node) from exc - - # We are doing a validator and a parser because visit_FunctionDef in parser - # would actually make the parser not doing the visit_Calls any more - dbtValidator = PythonValidationVisitor() - dbtValidator.visit(tree) - dbtValidator.check_error(node) - - dbtParser = PythonParseVisitor(node) - dbtParser.visit(tree) - config_keys_used = [] - for (func, args, kwargs) in dbtParser.dbt_function_calls: - if func == "get": - config_keys_used.append(args[0]) - continue + raise PythonParsingException(exc, node=node) from exc + + # Only parse if AST tree has instructions in body + if tree.body: + # We are doing a validator and a parser because visit_FunctionDef in parser + # would actually make the parser not doing the visit_Calls any more + dbt_validator = PythonValidationVisitor() + dbt_validator.visit(tree) + dbt_validator.check_error(node) + + dbt_parser = PythonParseVisitor(node) + dbt_parser.visit(tree) + + for (func, args, kwargs) in dbt_parser.dbt_function_calls: + if func == "get": + num_args = len(args) + if num_args == 0: + raise ParsingException( + "dbt.config.get() requires at least one argument", + node=node, + ) + if num_args > 2: + raise ParsingException( + f"dbt.config.get() takes at most 2 arguments ({num_args} given)", + node=node, + ) + key = args[0] + default_value = args[1] if num_args == 2 else None + config_keys_used.append(key) + config_keys_defaults.append(default_value) + continue + + context[func](*args, **kwargs) - context[func](*args, **kwargs) if config_keys_used: # this is being used in macro build_config_dict - context["config"](config_keys_used=config_keys_used) + context["config"]( + config_keys_used=config_keys_used, + config_keys_defaults=config_keys_defaults, + ) - def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: + def render_update(self, node: 
ModelNode, config: ContextConfig) -> None: self.manifest._parsing_info.static_analysis_path_count += 1 if node.language == ModelLanguage.python: @@ -229,8 +255,7 @@ def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: except ValidationError as exc: # we got a ValidationError - probably bad types in config() - msg = validator_error_message(exc) - raise ParsingException(msg, node=node) from exc + raise InvalidModelConfig(exc, node=node) from exc return elif not flags.STATIC_PARSER: @@ -262,9 +287,9 @@ def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: # top-level declaration of variables statically_parsed: Optional[Union[str, Dict[str, List[Any]]]] = None experimental_sample: Optional[Union[str, Dict[str, List[Any]]]] = None - exp_sample_node: Optional[ParsedModelNode] = None + exp_sample_node: Optional[ModelNode] = None exp_sample_config: Optional[ContextConfig] = None - jinja_sample_node: Optional[ParsedModelNode] = None + jinja_sample_node: Optional[ModelNode] = None jinja_sample_config: Optional[ContextConfig] = None result: List[str] = [] @@ -365,9 +390,7 @@ def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: } ) - def run_static_parser( - self, node: ParsedModelNode - ) -> Optional[Union[str, Dict[str, List[Any]]]]: + def run_static_parser(self, node: ModelNode) -> Optional[Union[str, Dict[str, List[Any]]]]: # if any banned macros have been overridden by the user, we cannot use the static parser. if self._has_banned_macro(node): # this log line is used for integration testing. If you change @@ -389,7 +412,7 @@ def run_static_parser( return "cannot_parse" def run_experimental_parser( - self, node: ParsedModelNode + self, node: ModelNode ) -> Optional[Union[str, Dict[str, List[Any]]]]: # if any banned macros have been overridden by the user, we cannot use the static parser. 
if self._has_banned_macro(node): @@ -415,7 +438,7 @@ def run_experimental_parser( return "cannot_parse" # checks for banned macros - def _has_banned_macro(self, node: ParsedModelNode) -> bool: + def _has_banned_macro(self, node: ModelNode) -> bool: # first check if there is a banned macro defined in scope for this model file root_project_name = self.root_project.project_name project_name = node.package_name @@ -435,9 +458,7 @@ def _has_banned_macro(self, node: ParsedModelNode) -> bool: # this method updates the model node rendered and unrendered config as well # as the node object. Used to populate these values when circumventing jinja # rendering like the static parser. - def populate( - self, node: ParsedModelNode, config: ContextConfig, statically_parsed: Dict[str, Any] - ): + def populate(self, node: ModelNode, config: ContextConfig, statically_parsed: Dict[str, Any]): # manually fit configs in config._config_call_dict = _get_config_call_dict(statically_parsed) @@ -485,9 +506,9 @@ def _shift_sources(static_parser_result: Dict[str, List[Any]]) -> Dict[str, List # returns a list of string codes to be sent as a tracking event def _get_exp_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[str]: result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config) @@ -501,9 +522,9 @@ def process(codemsg): # returns a list of string codes to be sent as a tracking event def _get_stable_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[str]: result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config) @@ -518,9 +539,9 @@ def process(codemsg): # returns a list of string codes that need a single digit prefix to be prepended # before being sent as a tracking 
event def _get_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[Tuple[int, str]]: result: List[Tuple[int, str]] = [] diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py index 1a8c7e8193e..63ef33429c4 100644 --- a/core/dbt/parser/partial.py +++ b/core/dbt/parser/partial.py @@ -873,7 +873,7 @@ def delete_schema_source(self, schema_file, source_dict): source_name = source_dict["name"] # There may be multiple sources for each source dict, since # there will be a separate source node for each table. - # ParsedSourceDefinition name = table name, dict name is source_name + # SourceDefinition name = table name, dict name is source_name sources = schema_file.sources.copy() for unique_id in sources: if unique_id in self.saved_manifest.sources: diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 8b22427cb39..b5fd8558889 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -27,14 +27,14 @@ from dbt.context.macro_resolver import MacroResolver from dbt.contracts.files import FileHash, SchemaSourceFile from dbt.contracts.graph.model_config import MetricConfig, ExposureConfig -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( ParsedNodePatch, ColumnInfo, - ParsedGenericTestNode, + GenericTestNode, ParsedMacroPatch, UnpatchedSourceDefinition, - ParsedExposure, - ParsedMetric, + Exposure, + Metric, ) from dbt.contracts.graph.unparsed import ( HasColumnDocs, @@ -50,19 +50,25 @@ UnparsedSourceDefinition, ) from dbt.exceptions import ( - warn_invalid_patch, - validator_error_message, + CompilationException, + DuplicateMacroPatchName, + DuplicatePatchPath, + DuplicateSourcePatchName, JSONValidationException, - raise_invalid_property_yml_version, - ValidationException, - ParsingException, - raise_duplicate_patch_name, - raise_duplicate_macro_patch_name, 
InternalException, - raise_duplicate_source_patch_name, - warn_or_error, - CompilationException, + InvalidSchemaConfig, + InvalidTestConfig, + ParsingException, + PropertyYMLInvalidTag, + PropertyYMLMissingVersion, + PropertyYMLVersionNotInt, + ValidationException, + YamlLoadFailure, + YamlParseDictFailure, + YamlParseListFailure, ) +from dbt.events.functions import warn_or_error +from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroPatchNotFound from dbt.node_types import NodeType from dbt.parser.base import SimpleParser from dbt.parser.search import FileBlock @@ -74,7 +80,6 @@ TestBlock, Testable, ) -from dbt.ui import warning_tag from dbt.utils import get_pseudo_test_path, coerce_dict_str @@ -92,34 +97,13 @@ ) -def error_context( - path: str, - key: str, - data: Any, - cause: Union[str, ValidationException, JSONValidationException], -) -> str: - """Provide contextual information about an error while parsing""" - if isinstance(cause, str): - reason = cause - elif isinstance(cause, ValidationError): - reason = validator_error_message(cause) - else: - reason = cause.msg - return "Invalid {key} config given in {path} @ {key}: {data} - {reason}".format( - key=key, path=path, data=data, reason=reason - ) - - def yaml_from_file(source_file: SchemaSourceFile) -> Dict[str, Any]: """If loading the yaml fails, raise an exception.""" path = source_file.path.relative_path try: return load_yaml_text(source_file.contents, source_file.path) except ValidationException as e: - reason = validator_error_message(e) - raise ParsingException( - "Error reading {}: {} - {}".format(source_file.project_name, path, reason) - ) + raise YamlLoadFailure(source_file.project_name, path, e) class ParserRef: @@ -169,7 +153,7 @@ def _trimmed(inp: str) -> str: return inp[:44] + "..." 
+ inp[-3:] -class SchemaParser(SimpleParser[GenericTestBlock, ParsedGenericTestNode]): +class SchemaParser(SimpleParser[GenericTestBlock, GenericTestNode]): def __init__( self, project, @@ -196,10 +180,10 @@ def get_compiled_path(cls, block: FileBlock) -> str: def resource_type(self) -> NodeType: return NodeType.Test - def parse_from_dict(self, dct, validate=True) -> ParsedGenericTestNode: + def parse_from_dict(self, dct, validate=True) -> GenericTestNode: if validate: - ParsedGenericTestNode.validate(dct) - return ParsedGenericTestNode.from_dict(dct) + GenericTestNode.validate(dct) + return GenericTestNode.from_dict(dct) def parse_column_tests(self, block: TestBlock, column: UnparsedColumn) -> None: if not column.tests: @@ -220,7 +204,7 @@ def create_test_node( test_metadata: Dict[str, Any], file_key_name: str, column_name: Optional[str], - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: HASH_LENGTH = 10 @@ -245,7 +229,6 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List "database": self.default_database, "fqn": fqn, "name": name, - "root_path": self.project.project_root, "resource_type": self.resource_type, "tags": tags, "path": path, @@ -261,10 +244,9 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List "file_key_name": file_key_name, } try: - ParsedGenericTestNode.validate(dct) - return ParsedGenericTestNode.from_dict(dct) + GenericTestNode.validate(dct) + return GenericTestNode.from_dict(dct) except ValidationError as exc: - msg = validator_error_message(exc) # this is a bit silly, but build an UnparsedNode just for error # message reasons node = self._create_error_node( @@ -273,7 +255,7 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List original_file_path=target.original_file_path, raw_code=raw_code, ) - raise ParsingException(msg, node=node) from exc + raise InvalidTestConfig(exc, node) # lots of time spent in this method def _parse_generic_test( @@ -283,7 
+265,7 @@ def _parse_generic_test( tags: List[str], column_name: Optional[str], schema_file_id: str, - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: try: builder = TestBuilder( test=test, @@ -415,10 +397,9 @@ def render_test_update(self, node, config, builder, schema_file_id): # env_vars should have been updated in the context env_var method except ValidationError as exc: # we got a ValidationError - probably bad types in config() - msg = validator_error_message(exc) - raise ParsingException(msg, node=node) from exc + raise InvalidSchemaConfig(exc, node=node) from exc - def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode: + def parse_node(self, block: GenericTestBlock) -> GenericTestNode: """In schema parsing, we rewrite most of the part of parse_node that builds the initial node to be parsed, but rendering is basically the same @@ -433,7 +414,7 @@ def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode: self.add_test_node(block, node) return node - def add_test_node(self, block: GenericTestBlock, node: ParsedGenericTestNode): + def add_test_node(self, block: GenericTestBlock, node: GenericTestNode): test_from = {"key": block.target.yaml_key, "name": block.target.name} if node.config.enabled: self.manifest.add_node(block.file, node, test_from) @@ -442,7 +423,7 @@ def add_test_node(self, block: GenericTestBlock, node: ParsedGenericTestNode): def render_with_context( self, - node: ParsedGenericTestNode, + node: GenericTestNode, config: ContextConfig, ) -> None: """Given the parsed node and a ContextConfig to use during @@ -556,25 +537,16 @@ def parse_file(self, block: FileBlock, dct: Dict = None) -> None: def check_format_version(file_path, yaml_dct) -> None: if "version" not in yaml_dct: - raise_invalid_property_yml_version( - file_path, - "the yml property file {} is missing a version tag".format(file_path), - ) + raise PropertyYMLMissingVersion(file_path) version = yaml_dct["version"] # if it's not an integer, the version is 
malformed, or not # set. Either way, only 'version: 2' is supported. if not isinstance(version, int): - raise_invalid_property_yml_version( - file_path, - "its 'version:' tag must be an integer (e.g. version: 2)." - " {} is not an integer".format(version), - ) + raise PropertyYMLVersionNotInt(file_path, version) + if version != 2: - raise_invalid_property_yml_version( - file_path, - "its 'version:' tag is set to {}. Only 2 is supported".format(version), - ) + raise PropertyYMLInvalidTag(file_path, version) Parsed = TypeVar("Parsed", UnpatchedSourceDefinition, ParsedNodePatch, ParsedMacroPatch) @@ -635,8 +607,9 @@ def get_key_dicts(self) -> Iterable[Dict[str, Any]]: # check that entry is a dict and that all dict values # are strings if coerce_dict_str(entry) is None: - msg = error_context(path, self.key, data, "expected a dict with string keys") - raise ParsingException(msg) + raise YamlParseListFailure( + path, self.key, data, "expected a dict with string keys" + ) if "name" not in entry: raise ParsingException("Entry did not contain a name") @@ -683,8 +656,7 @@ def _target_from_dict(self, cls: Type[T], data: Dict[str, Any]) -> T: cls.validate(data) return cls.from_dict(data) except (ValidationError, JSONValidationException) as exc: - msg = error_context(path, self.key, data, exc) - raise ParsingException(msg) from exc + raise YamlParseDictFailure(path, self.key, data, exc) # The other parse method returns TestBlocks. This one doesn't. 
# This takes the yaml dictionaries in 'sources' keys and uses them @@ -705,7 +677,7 @@ def parse(self) -> List[TestBlock]: # source patches must be unique key = (patch.overrides, patch.name) if key in self.manifest.source_patches: - raise_duplicate_source_patch_name(patch, self.manifest.source_patches[key]) + raise DuplicateSourcePatchName(patch, self.manifest.source_patches[key]) self.manifest.source_patches[key] = patch source_file.source_patches.append(key) else: @@ -729,11 +701,11 @@ def add_source_definitions(self, source: UnparsedSourceDefinition) -> None: table=table, path=original_file_path, original_file_path=original_file_path, - root_path=self.project.project_root, package_name=package_name, unique_id=unique_id, resource_type=NodeType.Source, fqn=fqn, + name=f"{source.name}_{table.name}", ) self.manifest.add_source(self.yaml.file, source_def) @@ -809,8 +781,7 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]: self.normalize_docs_attribute(data, path) node = self._target_type().from_dict(data) except (ValidationError, JSONValidationException) as exc: - msg = error_context(path, self.key, data, exc) - raise ParsingException(msg) from exc + raise YamlParseDictFailure(path, self.key, data, exc) else: yield node @@ -873,7 +844,15 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: if unique_id: resource_type = NodeType(unique_id.split(".")[0]) if resource_type.pluralize() != patch.yaml_key: - warn_invalid_patch(patch, resource_type) + warn_or_error( + WrongResourceSchemaFile( + patch_name=patch.name, + resource_type=resource_type, + plural_resource_type=resource_type.pluralize(), + yaml_key=patch.yaml_key, + file_path=patch.original_file_path, + ) + ) return elif patch.yaml_key == "analyses": @@ -912,12 +891,13 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: node.patch(patch) else: - msg = ( - f"Did not find matching node for patch with name '{patch.name}' " - f"in the 
'{patch.yaml_key}' section of " - f"file '{source_file.path.original_file_path}'" + warn_or_error( + NoNodeForYamlKey( + patch_name=patch.name, + yaml_key=patch.yaml_key, + file_path=source_file.path.original_file_path, + ) ) - warn_or_error(msg, log_fmt=warning_tag("{}")) return # patches can't be overwritten @@ -925,7 +905,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: if node: if node.patch_path: package_name, existing_file_path = node.patch_path.split("://") - raise_duplicate_patch_name(patch, existing_file_path) + raise DuplicatePatchPath(patch, existing_file_path) source_file.append_patch(patch.yaml_key, node.unique_id) # re-calculate the node config with the patch config. Always do this @@ -977,12 +957,11 @@ def parse_patch(self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef) unique_id = f"macro.{patch.package_name}.{patch.name}" macro = self.manifest.macros.get(unique_id) if not macro: - msg = f'Found patch for macro "{patch.name}" ' f"which was not found" - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error(MacroPatchNotFound(patch_name=patch.name)) return if macro.patch_path: package_name, existing_file_path = macro.patch_path.split("://") - raise_duplicate_macro_patch_name(patch, existing_file_path) + raise DuplicateMacroPatchName(patch, existing_file_path) source_file.macro_patches[patch.name] = unique_id macro.patch(patch) @@ -1022,9 +1001,9 @@ def parse_exposure(self, unparsed: UnparsedExposure): f"Calculated a {type(config)} for an exposure, but expected an ExposureConfig" ) - parsed = ParsedExposure( + parsed = Exposure( + resource_type=NodeType.Exposure, package_name=package_name, - root_path=self.project.project_root, path=path, original_file_path=self.yaml.path.original_file_path, unique_id=unique_id, @@ -1049,7 +1028,7 @@ def parse_exposure(self, unparsed: UnparsedExposure): ) depends_on_jinja = "\n".join("{{ " + line + "}}" for line in unparsed.depends_on) 
get_rendered(depends_on_jinja, ctx, parsed, capture_macros=True) - # parsed now has a populated refs/sources + # parsed now has a populated refs/sources/metrics if parsed.config.enabled: self.manifest.add_exposure(self.yaml.file, parsed) @@ -1085,8 +1064,7 @@ def parse(self): UnparsedExposure.validate(data) unparsed = UnparsedExposure.from_dict(data) except (ValidationError, JSONValidationException) as exc: - msg = error_context(self.yaml.path, self.key, data, exc) - raise ParsingException(msg) from exc + raise YamlParseDictFailure(self.yaml.path, self.key, data, exc) self.parse_exposure(unparsed) @@ -1126,9 +1104,9 @@ def parse_metric(self, unparsed: UnparsedMetric): f"Calculated a {type(config)} for a metric, but expected a MetricConfig" ) - parsed = ParsedMetric( + parsed = Metric( + resource_type=NodeType.Metric, package_name=package_name, - root_path=self.project.project_root, path=path, original_file_path=self.yaml.path.original_file_path, unique_id=unique_id, @@ -1203,6 +1181,5 @@ def parse(self): unparsed = UnparsedMetric.from_dict(data) except (ValidationError, JSONValidationException) as exc: - msg = error_context(self.yaml.path, self.key, data, exc) - raise ParsingException(msg) from exc + raise YamlParseDictFailure(self.yaml.path, self.key, data, exc) self.parse_metric(unparsed) diff --git a/core/dbt/parser/seeds.py b/core/dbt/parser/seeds.py index 63550e3f30f..23c77e1ed7c 100644 --- a/core/dbt/parser/seeds.py +++ b/core/dbt/parser/seeds.py @@ -1,15 +1,20 @@ from dbt.context.context_config import ContextConfig -from dbt.contracts.graph.parsed import ParsedSeedNode +from dbt.contracts.graph.nodes import SeedNode from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock -class SeedParser(SimpleSQLParser[ParsedSeedNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSeedNode: +class SeedParser(SimpleSQLParser[SeedNode]): + def parse_from_dict(self, dct, validate=True) -> SeedNode: + 
# seeds need the root_path because the contents are not loaded + dct["root_path"] = self.project.project_root + if "language" in dct: + del dct["language"] + # raw_code is not currently used, but it might be in the future if validate: - ParsedSeedNode.validate(dct) - return ParsedSeedNode.from_dict(dct) + SeedNode.validate(dct) + return SeedNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -19,5 +24,5 @@ def resource_type(self) -> NodeType: def get_compiled_path(cls, block: FileBlock): return block.path.relative_path - def render_with_context(self, parsed_node: ParsedSeedNode, config: ContextConfig) -> None: + def render_with_context(self, parsed_node: SeedNode, config: ContextConfig) -> None: """Seeds don't need to do any rendering.""" diff --git a/core/dbt/parser/singular_test.py b/core/dbt/parser/singular_test.py index 22d203a8ebc..fbb3c8ce8fa 100644 --- a/core/dbt/parser/singular_test.py +++ b/core/dbt/parser/singular_test.py @@ -1,15 +1,15 @@ -from dbt.contracts.graph.parsed import ParsedSingularTestNode +from dbt.contracts.graph.nodes import SingularTestNode from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock from dbt.utils import get_pseudo_test_path -class SingularTestParser(SimpleSQLParser[ParsedSingularTestNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSingularTestNode: +class SingularTestParser(SimpleSQLParser[SingularTestNode]): + def parse_from_dict(self, dct, validate=True) -> SingularTestNode: if validate: - ParsedSingularTestNode.validate(dct) - return ParsedSingularTestNode.from_dict(dct) + SingularTestNode.validate(dct) + return SingularTestNode.from_dict(dct) @property def resource_type(self) -> NodeType: diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py index 71e7bba955f..dffc7d90641 100644 --- a/core/dbt/parser/snapshots.py +++ b/core/dbt/parser/snapshots.py @@ -3,15 +3,15 @@ from dbt.dataclass_schema import 
ValidationError -from dbt.contracts.graph.parsed import IntermediateSnapshotNode, ParsedSnapshotNode -from dbt.exceptions import ParsingException, validator_error_message +from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode +from dbt.exceptions import InvalidSnapshopConfig from dbt.node_types import NodeType from dbt.parser.base import SQLParser from dbt.parser.search import BlockContents, BlockSearcher, FileBlock from dbt.utils import split_path -class SnapshotParser(SQLParser[IntermediateSnapshotNode, ParsedSnapshotNode]): +class SnapshotParser(SQLParser[IntermediateSnapshotNode, SnapshotNode]): def parse_from_dict(self, dct, validate=True) -> IntermediateSnapshotNode: if validate: IntermediateSnapshotNode.validate(dct) @@ -38,6 +38,8 @@ def set_snapshot_attributes(self, node): # the target schema must be set if we got here, so overwrite the node's # schema node.schema = node.config.target_schema + # We need to set relation_name again, since database/schema might have changed + self._update_node_relation_name(node) return node @@ -53,7 +55,7 @@ def get_fqn(self, path: str, name: str) -> List[str]: fqn.append(name) return fqn - def transform(self, node: IntermediateSnapshotNode) -> ParsedSnapshotNode: + def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode: try: # The config_call_dict is not serialized, because normally # it is not needed after parsing. But since the snapshot node @@ -61,12 +63,12 @@ def transform(self, node: IntermediateSnapshotNode) -> ParsedSnapshotNode: # the model config when there is also schema config. 
config_call_dict = node.config_call_dict dct = node.to_dict(omit_none=True) - parsed_node = ParsedSnapshotNode.from_dict(dct) + parsed_node = SnapshotNode.from_dict(dct) parsed_node.config_call_dict = config_call_dict self.set_snapshot_attributes(parsed_node) return parsed_node except ValidationError as exc: - raise ParsingException(validator_error_message(exc), node) + raise InvalidSnapshopConfig(exc, node) def parse_file(self, file_block: FileBlock) -> None: blocks = BlockSearcher( diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 1c55281db56..cc9acea98c3 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -1,6 +1,6 @@ import itertools from pathlib import Path -from typing import Iterable, Dict, Optional, Set, Any +from typing import Iterable, Dict, Optional, Set, Any, List from dbt.adapters.factory import get_adapter from dbt.config import RuntimeConfig from dbt.context.context_config import ( @@ -10,10 +10,10 @@ ) from dbt.contracts.graph.manifest import Manifest, SourceKey from dbt.contracts.graph.model_config import SourceConfig -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( UnpatchedSourceDefinition, - ParsedSourceDefinition, - ParsedGenericTestNode, + SourceDefinition, + GenericTestNode, ) from dbt.contracts.graph.unparsed import ( UnparsedSourceDefinition, @@ -24,11 +24,12 @@ UnparsedColumn, Time, ) -from dbt.exceptions import warn_or_error, InternalException +from dbt.events.functions import warn_or_error +from dbt.events.types import UnusedTables +from dbt.exceptions import InternalException from dbt.node_types import NodeType from dbt.parser.schemas import SchemaParser, ParserRef -from dbt import ui # An UnparsedSourceDefinition is taken directly from the yaml @@ -37,7 +38,7 @@ # generate multiple UnpatchedSourceDefinition nodes (one per # table) in the SourceParser.add_source_definitions. 
The # SourcePatcher takes an UnparsedSourceDefinition and the -# SourcePatch and produces a ParsedSourceDefinition. Each +# SourcePatch and produces a SourceDefinition. Each # SourcePatch can be applied to multiple UnpatchedSourceDefinitions. class SourcePatcher: def __init__( @@ -49,16 +50,16 @@ def __init__( self.manifest = manifest self.schema_parsers: Dict[str, SchemaParser] = {} self.patches_used: Dict[SourceKey, Set[str]] = {} - self.sources: Dict[str, ParsedSourceDefinition] = {} + self.sources: Dict[str, SourceDefinition] = {} # This method calls the 'parse_source' method which takes # the UnpatchedSourceDefinitions in the manifest and combines them - # with SourcePatches to produce ParsedSourceDefinitions. + # with SourcePatches to produce SourceDefinitions. def construct_sources(self) -> None: for unique_id, unpatched in self.manifest.sources.items(): schema_file = self.manifest.files[unpatched.file_id] - if isinstance(unpatched, ParsedSourceDefinition): - # In partial parsing, there will be ParsedSourceDefinitions + if isinstance(unpatched, SourceDefinition): + # In partial parsing, there will be SourceDefinitions # which must be retained. 
self.sources[unpatched.unique_id] = unpatched continue @@ -79,7 +80,7 @@ def construct_sources(self) -> None: test_from = {"key": "sources", "name": patched.source.name} schema_file.add_test(test.unique_id, test_from) - # Convert UnpatchedSourceDefinition to a ParsedSourceDefinition + # Convert UnpatchedSourceDefinition to a SourceDefinition parsed = self.parse_source(patched) if parsed.config.enabled: self.sources[unique_id] = parsed @@ -117,8 +118,8 @@ def patch_source( table = UnparsedSourceTableDefinition.from_dict(table_dct) return unpatched.replace(source=source, table=table, patch_path=patch_path) - # This converts an UnpatchedSourceDefinition to a ParsedSourceDefinition - def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinition: + # This converts an UnpatchedSourceDefinition to a SourceDefinition + def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition: source = target.source table = target.table refs = ParserRef.from_target(table) @@ -155,12 +156,11 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinit default_database = self.root_project.credentials.database - parsed_source = ParsedSourceDefinition( + parsed_source = SourceDefinition( package_name=target.package_name, database=(source.database or default_database), schema=(source.schema or source.name), identifier=(table.identifier or table.name), - root_path=target.root_path, path=target.path, original_file_path=target.original_file_path, columns=refs.column_info, @@ -201,9 +201,7 @@ def get_schema_parser_for(self, package_name: str) -> "SchemaParser": self.schema_parsers[package_name] = schema_parser return schema_parser - def get_source_tests( - self, target: UnpatchedSourceDefinition - ) -> Iterable[ParsedGenericTestNode]: + def get_source_tests(self, target: UnpatchedSourceDefinition) -> Iterable[GenericTestNode]: for test, column in target.get_tests(): yield self.parse_source_test( target=target, @@ -215,7 +213,7 @@ def 
get_patch_for( self, unpatched: UnpatchedSourceDefinition, ) -> Optional[SourcePatch]: - if isinstance(unpatched, ParsedSourceDefinition): + if isinstance(unpatched, SourceDefinition): return None key = (unpatched.package_name, unpatched.source.name) patch: Optional[SourcePatch] = self.manifest.source_patches.get(key) @@ -234,7 +232,7 @@ def parse_source_test( target: UnpatchedSourceDefinition, test: Dict[str, Any], column: Optional[UnparsedColumn], - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: column_name: Optional[str] if column is None: column_name = None @@ -286,7 +284,7 @@ def _generate_source_config(self, target: UnpatchedSourceDefinition, rendered: b patch_config_dict=precedence_configs, ) - def _get_relation_name(self, node: ParsedSourceDefinition): + def _get_relation_name(self, node: SourceDefinition): adapter = get_adapter(self.root_project) relation_cls = adapter.Relation return str(relation_cls.create_from(self.root_project, node)) @@ -307,28 +305,27 @@ def warn_unused(self) -> None: unused_tables[key] = unused if unused_tables: - msg = self.get_unused_msg(unused_tables) - warn_or_error(msg, log_fmt=ui.warning_tag("{}")) + unused_tables_formatted = self.get_unused_msg(unused_tables) + warn_or_error(UnusedTables(unused_tables=unused_tables_formatted)) self.manifest.source_patches = {} def get_unused_msg( self, unused_tables: Dict[SourceKey, Optional[Set[str]]], - ) -> str: - msg = [ - "During parsing, dbt encountered source overrides that had no target:", - ] + ) -> List: + unused_tables_formatted = [] for key, table_names in unused_tables.items(): patch = self.manifest.source_patches[key] patch_name = f"{patch.overrides}.{patch.name}" if table_names is None: - msg.append(f" - Source {patch_name} (in {patch.path})") + unused_tables_formatted.append(f" - Source {patch_name} (in {patch.path})") else: for table_name in sorted(table_names): - msg.append(f" - Source table {patch_name}.{table_name} " f"(in {patch.path})") - msg.append("") - return 
"\n".join(msg) + unused_tables_formatted.append( + f" - Source table {patch_name}.{table_name} " f"(in {patch.path})" + ) + return unused_tables_formatted def merge_freshness_time_thresholds( diff --git a/core/dbt/parser/sql.py b/core/dbt/parser/sql.py index 35c8f3072dd..82d09c12d6b 100644 --- a/core/dbt/parser/sql.py +++ b/core/dbt/parser/sql.py @@ -3,7 +3,7 @@ from typing import Iterable from dbt.contracts.graph.manifest import SourceFile -from dbt.contracts.graph.parsed import ParsedSqlNode, ParsedMacro +from dbt.contracts.graph.nodes import SqlNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro from dbt.exceptions import InternalException from dbt.node_types import NodeType @@ -21,11 +21,11 @@ def name(self): return self.block_name -class SqlBlockParser(SimpleSQLParser[ParsedSqlNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSqlNode: +class SqlBlockParser(SimpleSQLParser[SqlNode]): + def parse_from_dict(self, dct, validate=True) -> SqlNode: if validate: - ParsedSqlNode.validate(dct) - return ParsedSqlNode.from_dict(dct) + SqlNode.validate(dct) + return SqlNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -42,21 +42,20 @@ def get_compiled_path(block: FileBlock): return os.path.join("sql", block.name) - def parse_remote(self, sql: str, name: str) -> ParsedSqlNode: + def parse_remote(self, sql: str, name: str) -> SqlNode: source_file = SourceFile.remote(sql, self.project.project_name, "sql") contents = SqlBlock(block_name=name, file=source_file) return self.parse_node(contents) class SqlMacroParser(MacroParser): - def parse_remote(self, contents) -> Iterable[ParsedMacro]: + def parse_remote(self, contents) -> Iterable[Macro]: base = UnparsedMacro( path="from remote system", original_file_path="from remote system", package_name=self.project.project_name, raw_code=contents, language="sql", - root_path=self.project.project_root, resource_type=NodeType.Macro, ) for node in self.parse_unparsed_macros(base): diff --git 
a/core/dbt/task/base.py b/core/dbt/task/base.py index 45f8c0fd0fd..ef78c8d90bf 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -22,7 +22,6 @@ InternalException, ) from dbt.logger import log_manager -import dbt.events.functions as event_logger from dbt.events.functions import fire_event from dbt.events.types import ( DbtProjectError, @@ -37,12 +36,13 @@ InternalExceptionOnRun, GenericExceptionOnRun, NodeConnectionReleaseError, - PrintDebugStackTrace, + LogDebugStackTrace, SkippingDetails, - PrintSkipBecauseError, + LogSkipBecauseError, NodeCompiling, NodeExecuting, ) +from dbt.events.contextvars import get_node_info from .printer import print_run_result_error from dbt.adapters.factory import register_adapter @@ -85,9 +85,6 @@ def pre_init_hook(cls, args): """A hook called before the task is initialized.""" if args.log_format == "json": log_manager.format_json() - # we're mutating the initialized, but not-yet-configured event logger - # because it's being configured too late -- bad! TODO refactor! - event_logger.format_json = True else: log_manager.format_text() @@ -95,9 +92,6 @@ def pre_init_hook(cls, args): def set_log_format(cls): if flags.LOG_FORMAT == "json": log_manager.format_json() - # we're mutating the initialized, but not-yet-configured event logger - # because it's being configured too late -- bad! TODO refactor! 
- event_logger.format_json = True else: log_manager.format_text() @@ -312,11 +306,10 @@ def skip_result(self, node, message): def compile_and_execute(self, manifest, ctx): result = None with self.adapter.connection_for(self.node): - ctx.node._event_status["node_status"] = RunningStatus.Compiling + ctx.node.update_event_status(node_status=RunningStatus.Compiling) fire_event( NodeCompiling( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("compile") as timing_info: @@ -328,11 +321,10 @@ def compile_and_execute(self, manifest, ctx): # for ephemeral nodes, we only want to compile, not run if not ctx.node.is_ephemeral_model: - ctx.node._event_status["node_status"] = RunningStatus.Executing + ctx.node.update_event_status(node_status=RunningStatus.Executing) fire_event( NodeExecuting( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("execute") as timing_info: @@ -347,7 +339,11 @@ def _handle_catchable_exception(self, e, ctx): if e.node is None: e.add_node(ctx.node) - fire_event(CatchableExceptionOnRun(exc=str(e), exc_info=traceback.format_exc())) + fire_event( + CatchableExceptionOnRun( + exc=str(e), exc_info=traceback.format_exc(), node_info=get_node_info() + ) + ) return str(e) def _handle_internal_exception(self, e, ctx): @@ -362,7 +358,7 @@ def _handle_generic_exception(self, e, ctx): exc=str(e), ) ) - fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc())) + fire_event(LogDebugStackTrace(exc_info=traceback.format_exc())) return str(e) @@ -451,7 +447,7 @@ def on_skip(self): # failure, print a special 'error skip' message. 
if self._skip_caused_by_ephemeral_failure(): fire_event( - PrintSkipBecauseError( + LogSkipBecauseError( schema=schema_name, relation=node_name, index=self.node_index, diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py index 10e3a1ad2a6..853a01ebd1e 100644 --- a/core/dbt/task/debug.py +++ b/core/dbt/task/debug.py @@ -256,9 +256,9 @@ def _load_profile(self): profile: Profile = Profile.render( renderer, profile_name, - self.args.threads, - self.args.target, self.args.profile, + self.args.target, + self.args.threads, ) except dbt.exceptions.DbtConfigError as exc: profile_errors.append(str(exc)) diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py index 6b3bc5fb7c4..d03ec3748dc 100644 --- a/core/dbt/task/deps.py +++ b/core/dbt/task/deps.py @@ -1,4 +1,4 @@ -from typing import Dict, Any +from typing import Dict, Any, Optional from dbt import flags @@ -12,7 +12,9 @@ from dbt.config.utils import parse_cli_vars from dbt.deps.base import downloads_directory from dbt.deps.resolver import resolve_packages +from dbt.deps.registry import RegistryPinnedPackage +from dbt.events.proto_types import ListOfStrings from dbt.events.functions import fire_event from dbt.events.types import ( DepsNoPackagesFound, @@ -45,22 +47,27 @@ def __init__( super().__init__(args=args, config=None, project=project) self.cli_vars = cli_vars - def track_package_install(self, package_name: str, source_type: str, version: str) -> None: + def track_package_install( + self, package_name: str, source_type: str, version: Optional[str] + ) -> None: # Hub packages do not need to be hashed, as they are public - # Use the string 'local' for local package versions if source_type == "local": package_name = dbt.utils.md5(package_name) version = "local" + elif source_type == "tarball": + package_name = dbt.utils.md5(package_name) + version = "tarball" elif source_type != "hub": package_name = dbt.utils.md5(package_name) version = dbt.utils.md5(version) + dbt.tracking.track_package_install( "deps", 
self.project.hashed_name(), {"name": package_name, "source": source_type, "version": version}, ) - def run(self): + def run(self) -> None: system.make_directory(self.project.packages_install_path) packages = self.project.packages.packages if not packages: @@ -81,7 +88,7 @@ def run(self): fire_event(DepsStartPackageInstall(package_name=package_name)) package.install(self.project, renderer) fire_event(DepsInstallInfo(version_name=package.nice_version_name())) - if source_type == "hub": + if isinstance(package, RegistryPinnedPackage): version_latest = package.get_version_latest() if version_latest != version: packages_to_upgrade.append(package_name) @@ -96,7 +103,7 @@ def run(self): ) if packages_to_upgrade: fire_event(EmptyLine()) - fire_event(DepsNotifyUpdatesAvailable(packages=packages_to_upgrade)) + fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade))) @classmethod def _get_unset_profile(cls) -> UnsetProfile: diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index ab256334271..704368cf24f 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -16,19 +16,16 @@ FreshnessStatus, ) from dbt.exceptions import RuntimeException, InternalException -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, info from dbt.events.types import ( FreshnessCheckComplete, - PrintStartLine, - PrintFreshnessErrorLine, - PrintFreshnessErrorStaleLine, - PrintFreshnessWarnLine, - PrintFreshnessPassLine, + LogStartLine, + LogFreshnessResult, ) from dbt.node_types import NodeType from dbt.graph import ResourceTypeSelector -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition RESULT_FILE_NAME = "sources.json" @@ -41,7 +38,7 @@ def on_skip(self): def before_execute(self): description = "freshness of {0.source_name}.{0.name}".format(self.node) fire_event( - PrintStartLine( + LogStartLine( description=description, 
index=self.node_index, total=self.num_nodes, @@ -56,50 +53,19 @@ def after_execute(self, result): else: source_name = result.source_name table_name = result.table_name - if result.status == FreshnessStatus.RuntimeErr: - fire_event( - PrintFreshnessErrorLine( - source_name=source_name, - table_name=table_name, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) - ) - elif result.status == FreshnessStatus.Error: - fire_event( - PrintFreshnessErrorStaleLine( - source_name=source_name, - table_name=table_name, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) - ) - elif result.status == FreshnessStatus.Warn: - fire_event( - PrintFreshnessWarnLine( - source_name=source_name, - table_name=table_name, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) - ) - else: - fire_event( - PrintFreshnessPassLine( - source_name=source_name, - table_name=table_name, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) + level = LogFreshnessResult.status_to_level(str(result.status)) + fire_event( + LogFreshnessResult( + info=info(level=level), + status=result.status, + source_name=source_name, + table_name=table_name, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + node_info=self.node.node_info, ) + ) def error_result(self, node, message, start_time, timing_info): return self._build_run_result( @@ -175,7 +141,7 @@ class FreshnessSelector(ResourceTypeSelector): def node_is_match(self, node): if not super().node_is_match(node): return False - if not isinstance(node, ParsedSourceDefinition): + if not isinstance(node, SourceDefinition): return False return node.has_freshness diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py index 
0bc6f3f9527..87723a530a1 100644 --- a/core/dbt/task/generate.py +++ b/core/dbt/task/generate.py @@ -8,7 +8,7 @@ from .compile import CompileTask from dbt.adapters.factory import get_adapter -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest from dbt.contracts.results import ( NodeStatus, @@ -22,7 +22,7 @@ ColumnMetadata, CatalogArtifact, ) -from dbt.exceptions import InternalException +from dbt.exceptions import InternalException, AmbiguousCatalogMatch from dbt.include.global_project import DOCS_INDEX_FILE_PATH from dbt.events.functions import fire_event from dbt.events.types import ( @@ -119,7 +119,7 @@ def make_unique_id_map( unique_ids = source_map.get(table.key(), set()) for unique_id in unique_ids: if unique_id in sources: - dbt.exceptions.raise_ambiguous_catalog_match( + raise AmbiguousCatalogMatch( unique_id, sources[unique_id].to_dict(omit_none=True), table.to_dict(omit_none=True), @@ -174,7 +174,7 @@ def format_stats(stats: PrimitiveDict) -> StatsDict: return stats_collector -def mapping_key(node: CompileResultNode) -> CatalogKey: +def mapping_key(node: ResultNode) -> CatalogKey: dkey = dbt.utils.lowercase(node.database) return CatalogKey(dkey, node.schema.lower(), node.identifier.lower()) diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index df0a181ba5c..e1be8f214d3 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -1,14 +1,15 @@ import json -from dbt.contracts.graph.parsed import ParsedExposure, ParsedSourceDefinition, ParsedMetric +from dbt.contracts.graph.nodes import Exposure, SourceDefinition, Metric from dbt.graph import ResourceTypeSelector from dbt.task.runnable import GraphRunnableTask, ManifestTask from dbt.task.test import TestSelector from dbt.node_types import NodeType -from dbt.exceptions import RuntimeException, InternalException, warn_or_error +from dbt.events.functions import warn_or_error +from 
dbt.events.types import NoNodesSelected +from dbt.exceptions import RuntimeException, InternalException from dbt.logger import log_manager -import logging -import dbt.events.functions as event_logger +from dbt.events.eventmgr import EventLevel class ListTask(GraphRunnableTask): @@ -60,16 +61,15 @@ def pre_init_hook(cls, args): # - mutating the initialized, not-yet-configured STDOUT event logger # because it's being configured too late -- bad! TODO refactor! log_manager.stderr_console() - event_logger.STDOUT_LOG.level = logging.WARN super().pre_init_hook(args) - return logging.WARN + return EventLevel.WARN def _iterate_selected_nodes(self): selector = self.get_node_selector() spec = self.get_selection_spec() nodes = sorted(selector.get_selected(spec)) if not nodes: - warn_or_error("No nodes selected!") + warn_or_error(NoNodesSelected()) return if self.manifest is None: raise InternalException("manifest is None in _iterate_selected_nodes") @@ -91,17 +91,17 @@ def _iterate_selected_nodes(self): def generate_selectors(self): for node in self._iterate_selected_nodes(): if node.resource_type == NodeType.Source: - assert isinstance(node, ParsedSourceDefinition) + assert isinstance(node, SourceDefinition) # sources are searched for by pkg.source_name.table_name source_selector = ".".join([node.package_name, node.source_name, node.name]) yield f"source:{source_selector}" elif node.resource_type == NodeType.Exposure: - assert isinstance(node, ParsedExposure) + assert isinstance(node, Exposure) # exposures are searched for by pkg.exposure_name exposure_selector = ".".join([node.package_name, node.name]) yield f"exposure:{exposure_selector}" elif node.resource_type == NodeType.Metric: - assert isinstance(node, ParsedMetric) + assert isinstance(node, Metric) # metrics are searched for by pkg.metric_name metric_selector = ".".join([node.package_name, node.name]) yield f"metric:{metric_selector}" diff --git a/core/dbt/task/printer.py b/core/dbt/task/printer.py index 
3861b41bef2..edb2592d194 100644 --- a/core/dbt/task/printer.py +++ b/core/dbt/task/printer.py @@ -120,6 +120,8 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals elif result.message is not None: first = True for line in result.message.split("\n"): + # TODO: why do we format like this? Is there a reason this needs to + # be split instead of sending it as a single log line? if first: fire_event(FirstRunResultError(msg=line)) first = False diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 21550017202..bc8f9a2de75 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -17,28 +17,26 @@ from dbt.adapters.base import BaseRelation from dbt.clients.jinja import MacroGenerator from dbt.context.providers import generate_runtime_model_context -from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.model_config import Hook -from dbt.contracts.graph.parsed import ParsedHookNode -from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus +from dbt.contracts.graph.nodes import HookNode, ResultNode +from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus, BaseResult from dbt.exceptions import ( CompilationException, InternalException, + MissingMaterialization, RuntimeException, ValidationException, - missing_materialization, ) -from dbt.events.functions import fire_event, get_invocation_id +from dbt.events.functions import fire_event, get_invocation_id, info from dbt.events.types import ( DatabaseErrorRunningHook, EmptyLine, HooksRunning, HookFinished, - PrintModelErrorResultLine, - PrintModelResultLine, - PrintStartLine, - PrintHookEndLine, - PrintHookStartLine, + LogModelResult, + LogStartLine, + LogHookEndLine, + LogHookStartLine, ) from dbt.logger import ( TextOnly, @@ -80,17 +78,17 @@ def __eq__(self, other): return isinstance(other, self.__class__) -def _hook_list() -> List[ParsedHookNode]: +def _hook_list() -> List[HookNode]: return [] def 
get_hooks_by_tags( - nodes: Iterable[CompileResultNode], + nodes: Iterable[ResultNode], match_tags: Set[str], -) -> List[ParsedHookNode]: +) -> List[HookNode]: matched_nodes = [] for node in nodes: - if not isinstance(node, ParsedHookNode): + if not isinstance(node, HookNode): continue node_tags = node.tags if len(set(node_tags) & match_tags): @@ -176,7 +174,7 @@ def describe_node(self): def print_start_line(self): fire_event( - PrintStartLine( + LogStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, @@ -187,27 +185,22 @@ def print_start_line(self): def print_result_line(self, result): description = self.describe_node() if result.status == NodeStatus.Error: - fire_event( - PrintModelErrorResultLine( - description=description, - status=result.status, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) - ) + status = result.status + level = "error" else: - fire_event( - PrintModelResultLine( - description=description, - status=result.message, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) + status = result.message + level = "info" + fire_event( + LogModelResult( + description=description, + status=status, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + node_info=self.node.node_info, + info=info(level=level), ) + ) def before_execute(self): self.print_start_line() @@ -259,7 +252,7 @@ def execute(self, model, manifest): ) if materialization_macro is None: - missing_materialization(model, self.adapter.type()) + raise MissingMaterialization(model=model, adapter_type=self.adapter.type()) if "config" not in context: raise InternalException( @@ -310,20 +303,20 @@ def get_hook_sql(self, adapter, hook, idx, num_hooks, extra_context): hook_obj = get_hook(statement, index=hook_index) return hook_obj.sql or "" - def _hook_keyfunc(self, hook: 
ParsedHookNode) -> Tuple[str, Optional[int]]: + def _hook_keyfunc(self, hook: HookNode) -> Tuple[str, Optional[int]]: package_name = hook.package_name if package_name == self.config.project_name: package_name = BiggestName("") return package_name, hook.index - def get_hooks_by_type(self, hook_type: RunHookType) -> List[ParsedHookNode]: + def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]: if self.manifest is None: raise InternalException("self.manifest was None in get_hooks_by_type") nodes = self.manifest.nodes.values() # find all hooks defined in the manifest (could be multiple projects) - hooks: List[ParsedHookNode] = get_hooks_by_tags(nodes, {hook_type}) + hooks: List[HookNode] = get_hooks_by_tags(nodes, {hook_type}) hooks.sort(key=self._hook_keyfunc) return hooks @@ -346,8 +339,9 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): finishctx = TimestampNamed("node_finished_at") for idx, hook in enumerate(ordered_hooks, start=1): - hook._event_status["started_at"] = datetime.utcnow().isoformat() - hook._event_status["node_status"] = RunningStatus.Started + hook.update_event_status( + started_at=datetime.utcnow().isoformat(), node_status=RunningStatus.Started + ) sql = self.get_hook_sql(adapter, hook, idx, num_hooks, extra_context) hook_text = "{}.{}.{}".format(hook.package_name, hook_type, hook.index) @@ -355,7 +349,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): with UniqueID(hook.unique_id): with hook_meta_ctx, startctx: fire_event( - PrintHookStartLine( + LogHookStartLine( statement=hook_text, index=idx, total=num_hooks, @@ -371,11 +365,11 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): status = "OK" self.ran_hooks.append(hook) - hook._event_status["finished_at"] = datetime.utcnow().isoformat() + hook.update_event_status(finished_at=datetime.utcnow().isoformat()) with finishctx, DbtModelState({"node_status": "passed"}): - hook._event_status["node_status"] = 
RunStatus.Success + hook.update_event_status(node_status=RunStatus.Success) fire_event( - PrintHookEndLine( + LogHookEndLine( statement=hook_text, status=status, index=idx, @@ -386,9 +380,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): ) # `_event_status` dict is only used for logging. Make sure # it gets deleted when we're done with it - del hook._event_status["started_at"] - del hook._event_status["finished_at"] - del hook._event_status["node_status"] + hook.clear_event_status() self._total_executed += len(ordered_hooks) @@ -400,12 +392,22 @@ def safe_run_hooks( ) -> None: try: self.run_hooks(adapter, hook_type, extra_context) - except RuntimeException: + except RuntimeException as exc: fire_event(DatabaseErrorRunningHook(hook_type=hook_type.value)) - raise + self.node_results.append( + BaseResult( + status=RunStatus.Error, + thread_id="main", + timing=[], + message=f"{hook_type.value} failed, error:\n {exc.msg}", + adapter_response={}, + execution_time=0, + failures=1, + ) + ) def print_results_line(self, results, execution_time): - nodes = [r.node for r in results] + self.ran_hooks + nodes = [r.node for r in results if hasattr(r, "node")] + self.ran_hooks stat_line = get_counts(nodes) execution = "" @@ -450,9 +452,6 @@ def after_run(self, adapter, results): with adapter.connection_named("master"): self.safe_run_hooks(adapter, RunHookType.End, extras) - def after_hooks(self, adapter, results, elapsed): - self.print_results_line(results, elapsed) - def get_node_selector(self) -> ResourceTypeSelector: if self.manifest is None or self.graph is None: raise InternalException("manifest and graph must be set to get perform node selection") diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py index e7b43a837b0..b9d3115482e 100644 --- a/core/dbt/task/run_operation.py +++ b/core/dbt/task/run_operation.py @@ -15,7 +15,7 @@ from dbt.events.types import ( RunningOperationCaughtError, RunningOperationUncaughtError, - 
PrintDebugStackTrace, + LogDebugStackTrace, ) @@ -62,11 +62,11 @@ def run(self) -> RunOperationResultsArtifact: self._run_unsafe() except dbt.exceptions.Exception as exc: fire_event(RunningOperationCaughtError(exc=str(exc))) - fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc())) + fire_event(LogDebugStackTrace(exc_info=traceback.format_exc())) success = False except Exception as exc: fire_event(RunningOperationUncaughtError(exc=str(exc))) - fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc())) + fire_event(LogDebugStackTrace(exc_info=traceback.format_exc())) success = False else: success = True diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index c6866cde2e1..fa8fdb724a8 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -26,20 +26,21 @@ ModelMetadata, NodeCount, ) -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( EmptyLine, - PrintCancelLine, + LogCancelLine, DefaultSelector, NodeStart, NodeFinished, QueryCancelationUnsupported, ConcurrencyLine, EndRunResult, + NothingToDo, ) -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.events.contextvars import log_contextvars from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition, ResultNode from dbt.contracts.results import NodeStatus, RunExecutionResult, RunningStatus from dbt.contracts.state import PreviousState from dbt.exceptions import ( @@ -47,7 +48,6 @@ NotImplementedException, RuntimeException, FailFastException, - warn_or_error, ) from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, parse_difference, Graph @@ -57,7 +57,6 @@ import dbt.exceptions from dbt import flags import dbt.utils -from dbt.ui import warning_tag RESULT_FILE_NAME = "run_results.json" MANIFEST_FILE_NAME = "manifest.json" @@ -108,7 +107,7 @@ class 
GraphRunnableTask(ManifestTask): def __init__(self, args, config): super().__init__(args, config) self.job_queue: Optional[GraphQueue] = None - self._flattened_nodes: Optional[List[CompileResultNode]] = None + self._flattened_nodes: Optional[List[ResultNode]] = None self.run_count: int = 0 self.num_nodes: int = 0 @@ -213,47 +212,45 @@ def get_runner(self, node): def call_runner(self, runner): uid_context = UniqueID(runner.node.unique_id) - with RUNNING_STATE, uid_context: + with RUNNING_STATE, uid_context, log_contextvars(node_info=runner.node.node_info): startctx = TimestampNamed("node_started_at") index = self.index_offset(runner.node_index) - runner.node._event_status["started_at"] = datetime.utcnow().isoformat() - runner.node._event_status["node_status"] = RunningStatus.Started + runner.node.update_event_status( + started_at=datetime.utcnow().isoformat(), node_status=RunningStatus.Started + ) extended_metadata = ModelMetadata(runner.node, index) with startctx, extended_metadata: fire_event( NodeStart( node_info=runner.node.node_info, - unique_id=runner.node.unique_id, ) ) status: Dict[str, str] = {} try: result = runner.run_with_hooks(self.manifest) status = runner.get_result_status(result) - runner.node._event_status["node_status"] = result.status - runner.node._event_status["finished_at"] = datetime.utcnow().isoformat() + runner.node.update_event_status( + node_status=result.status, finished_at=datetime.utcnow().isoformat() + ) finally: finishctx = TimestampNamed("finished_at") with finishctx, DbtModelState(status): fire_event( NodeFinished( node_info=runner.node.node_info, - unique_id=runner.node.unique_id, run_result=result.to_msg(), ) ) # `_event_status` dict is only used for logging. 
Make sure # it gets deleted when we're done with it - del runner.node._event_status["started_at"] - del runner.node._event_status["finished_at"] - del runner.node._event_status["node_status"] + runner.node.clear_event_status() fail_fast = flags.FAIL_FAST if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast: self._raise_next_tick = FailFastException( - message="Failing early due to test failure or runtime error", + msg="Failing early due to test failure or runtime error", result=result, node=getattr(result, "node", None), ) @@ -339,7 +336,7 @@ def _handle_result(self, result): if self.manifest is None: raise InternalException("manifest was None in _handle_result") - if isinstance(node, ParsedSourceDefinition): + if isinstance(node, SourceDefinition): self.manifest.update_source(node) else: self.manifest.update_node(node) @@ -371,7 +368,7 @@ def _cancel_connections(self, pool): continue # if we don't have a manifest/don't have a node, print # anyway. - fire_event(PrintCancelLine(conn_name=conn_name)) + fire_event(LogCancelLine(conn_name=conn_name)) pool.join() @@ -379,8 +376,13 @@ def execute_nodes(self): num_threads = self.config.threads target_name = self.config.target_name + # following line can be removed when legacy logger is removed with NodeCount(self.num_nodes): - fire_event(ConcurrencyLine(num_threads=num_threads, target_name=target_name)) + fire_event( + ConcurrencyLine( + num_threads=num_threads, target_name=target_name, node_count=self.num_nodes + ) + ) with TextOnly(): fire_event(EmptyLine()) @@ -421,9 +423,6 @@ def populate_adapter_cache(self, adapter, required_schemas: Set[BaseRelation] = {"adapter_cache_construction_elapsed": cache_populate_time} ) - def before_hooks(self, adapter): - pass - def before_run(self, adapter, selected_uids: AbstractSet[str]): with adapter.connection_named("master"): self.populate_adapter_cache(adapter) @@ -431,24 +430,24 @@ def before_run(self, adapter, selected_uids: AbstractSet[str]): def after_run(self, 
adapter, results): pass - def after_hooks(self, adapter, results, elapsed): + def print_results_line(self, node_results, elapsed): pass def execute_with_hooks(self, selected_uids: AbstractSet[str]): adapter = get_adapter(self.config) + started = time.time() try: - self.before_hooks(adapter) - started = time.time() self.before_run(adapter, selected_uids) res = self.execute_nodes() self.after_run(adapter, res) - elapsed = time.time() - started - self.after_hooks(adapter, res, elapsed) - finally: adapter.cleanup_connections() + elapsed = time.time() - started + self.print_results_line(self.node_results, elapsed) + result = self.get_result( + results=self.node_results, elapsed_time=elapsed, generated_at=datetime.utcnow() + ) - result = self.get_result(results=res, elapsed_time=elapsed, generated_at=datetime.utcnow()) return result def write_result(self, result): @@ -466,8 +465,7 @@ def run(self): if len(self._flattened_nodes) == 0: with TextOnly(): fire_event(EmptyLine()) - msg = "Nothing to do. 
Try checking your model configs and model specification args" - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error(NothingToDo()) result = self.get_result( results=[], generated_at=datetime.utcnow(), diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py index 01535916ad8..5c922a5ba90 100644 --- a/core/dbt/task/seed.py +++ b/core/dbt/task/seed.py @@ -9,14 +9,13 @@ from dbt.exceptions import InternalException from dbt.graph import ResourceTypeSelector from dbt.logger import TextOnly -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, info from dbt.events.types import ( SeedHeader, SeedHeaderSeparator, EmptyLine, - PrintSeedErrorResultLine, - PrintSeedResultLine, - PrintStartLine, + LogSeedResult, + LogStartLine, ) from dbt.node_types import NodeType from dbt.contracts.results import NodeStatus @@ -28,7 +27,7 @@ def describe_node(self): def before_execute(self): fire_event( - PrintStartLine( + LogStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, @@ -47,30 +46,20 @@ def compile(self, manifest): def print_result_line(self, result): model = result.node - if result.status == NodeStatus.Error: - fire_event( - PrintSeedErrorResultLine( - status=result.status, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - schema=self.node.schema, - relation=model.alias, - node_info=model.node_info, - ) - ) - else: - fire_event( - PrintSeedResultLine( - status=result.message, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - schema=self.node.schema, - relation=model.alias, - node_info=model.node_info, - ) + level = "error" if result.status == NodeStatus.Error else "info" + fire_event( + LogSeedResult( + info=info(level=level), + status=result.status, + result_message=result.message, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + schema=self.node.schema, + 
relation=model.alias, + node_info=model.node_info, ) + ) class SeedTask(RunTask): diff --git a/core/dbt/task/snapshot.py b/core/dbt/task/snapshot.py index 7bd62ffb55b..44ccbd88361 100644 --- a/core/dbt/task/snapshot.py +++ b/core/dbt/task/snapshot.py @@ -1,8 +1,8 @@ from .run import ModelRunner, RunTask from dbt.exceptions import InternalException -from dbt.events.functions import fire_event -from dbt.events.types import PrintSnapshotErrorResultLine, PrintSnapshotResultLine +from dbt.events.functions import fire_event, info +from dbt.events.types import LogSnapshotResult from dbt.graph import ResourceTypeSelector from dbt.node_types import NodeType from dbt.contracts.results import NodeStatus @@ -15,30 +15,19 @@ def describe_node(self): def print_result_line(self, result): model = result.node cfg = model.config.to_dict(omit_none=True) - if result.status == NodeStatus.Error: - fire_event( - PrintSnapshotErrorResultLine( - status=result.status, - description=self.get_node_representation(), - cfg=cfg, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - else: - fire_event( - PrintSnapshotResultLine( - status=result.message, - description=self.get_node_representation(), - cfg=cfg, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) + level = "error" if result.status == NodeStatus.Error else "info" + fire_event( + LogSnapshotResult( + info=info(level=level), + status=result.status, + description=self.get_node_representation(), + cfg=cfg, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + node_info=model.node_info, ) + ) class SnapshotTask(RunTask): diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index ee871b6179d..26d6d46f028 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -5,29 +5,27 @@ from dbt.events.format import pluralize from dbt.dataclass_schema import 
dbtClassMixin import threading -from typing import Union from .compile import CompileRunner from .run import RunTask -from dbt.contracts.graph.compiled import ( - CompiledSingularTestNode, - CompiledGenericTestNode, - CompiledTestNode, +from dbt.contracts.graph.nodes import ( + TestNode, ) from dbt.contracts.graph.manifest import Manifest from dbt.contracts.results import TestStatus, PrimitiveDict, RunResult from dbt.context.providers import generate_runtime_model_context from dbt.clients.jinja import MacroGenerator -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, info from dbt.events.types import ( - PrintErrorTestResult, - PrintPassTestResult, - PrintWarnTestResult, - PrintFailureTestResult, - PrintStartLine, + LogTestResult, + LogStartLine, +) +from dbt.exceptions import ( + InternalException, + InvalidBoolean, + MissingMaterialization, ) -from dbt.exceptions import InternalException, invalid_bool_error, missing_materialization from dbt.graph import ( ResourceTypeSelector, ) @@ -53,7 +51,7 @@ def convert_bool_type(field) -> bool: try: return bool(strtobool(field)) # type: ignore except ValueError: - raise invalid_bool_error(field, "get_test_sql") + raise InvalidBoolean(field, "get_test_sql") # need this so we catch both true bools and 0/1 return bool(field) @@ -67,54 +65,22 @@ def describe_node(self): def print_result_line(self, result): model = result.node - if result.status == TestStatus.Error: - fire_event( - PrintErrorTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Pass: - fire_event( - PrintPassTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Warn: - fire_event( - PrintWarnTestResult( - name=model.name, - 
index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - num_failures=result.failures, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Fail: - fire_event( - PrintFailureTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - num_failures=result.failures, - node_info=model.node_info, - ) + fire_event( + LogTestResult( + name=model.name, + info=info(level=LogTestResult.status_to_level(str(result.status))), + status=str(result.status), + index=self.node_index, + num_models=self.num_nodes, + execution_time=result.execution_time, + node_info=model.node_info, + num_failures=result.failures, ) - else: - raise RuntimeError("unexpected status: {}".format(result.status)) + ) def print_start_line(self): fire_event( - PrintStartLine( + LogStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, @@ -126,7 +92,7 @@ def before_execute(self): self.print_start_line() def execute_test( - self, test: Union[CompiledSingularTestNode, CompiledGenericTestNode], manifest: Manifest + self, test: TestNode, manifest: Manifest ) -> TestResultData: context = generate_runtime_model_context(test, self.config, manifest) @@ -135,7 +101,7 @@ def execute_test( ) if materialization_macro is None: - missing_materialization(test, self.adapter.type()) + raise MissingMaterialization(model=test, adapter_type=self.adapter.type()) if "config" not in context: raise InternalException( @@ -174,7 +140,7 @@ def execute_test( TestResultData.validate(test_result_dct) return TestResultData.from_dict(test_result_dct) - def execute(self, test: CompiledTestNode, manifest: Manifest): + def execute(self, test: TestNode, manifest: Manifest): result = self.execute_test(test, manifest) severity = test.config.severity.upper() diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index ffea566f4db..a8c640ef116 100644 --- 
a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -6,11 +6,11 @@ import warnings import yaml -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationException, DatabaseException import dbt.flags as flags from dbt.config.runtime import RuntimeConfig from dbt.adapters.factory import get_adapter, register_adapter, reset_adapters, get_adapter_by_type -from dbt.events.functions import setup_event_logger +from dbt.events.functions import setup_event_logger, cleanup_event_logger from dbt.tests.util import ( write_file, run_sql_with_adapter, @@ -229,6 +229,15 @@ def selectors_yml(project_root, selectors): write_file(data, project_root, "selectors.yml") +# This fixture ensures that the logging infrastructure does not accidentally +# reuse streams configured on previous test runs, which might now be closed. +# It should be run before (and so included as a parameter by) any other fixture +# which runs dbt-core functions that might fire events. +@pytest.fixture(scope="class") +def clean_up_logging(): + cleanup_event_logger() + + # This creates an adapter that is used for running test setup, such as creating # the test schema, and sql commands that are run in tests prior to the first # dbt command. After a dbt command is run, the project.adapter property will @@ -240,7 +249,7 @@ def selectors_yml(project_root, selectors): # otherwise this will fail. So to test errors in those areas, you need to copy the files # into the project in the tests instead of putting them in the fixtures. 
@pytest.fixture(scope="class") -def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml): +def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging): # The profiles.yml and dbt_project.yml should already be written out args = Namespace( profiles_dir=str(profiles_root), project_dir=str(project_root), target=None, profile=None, threads=None @@ -438,6 +447,7 @@ def get_tables_in_schema(self): # to pull in the other fixtures individually to access their information. @pytest.fixture(scope="class") def project( + clean_up_logging, project_root, profiles_root, request, @@ -484,9 +494,10 @@ def project( # a `load_dependencies` method. # Macros gets executed as part of drop_scheme in core/dbt/adapters/sql/impl.py. When # the macros have errors (which is what we're actually testing for...) they end up - # throwing CompilationExceptions + # throwing CompilationExceptions or DatabaseExceptions try: project.drop_test_schema() - except (KeyError, AttributeError, CompilationException): + except (KeyError, AttributeError, CompilationException, DatabaseException): pass os.chdir(orig_cwd) + cleanup_event_logger() diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py index 6cdc4ee5b77..824e6f88630 100644 --- a/core/dbt/tests/util.py +++ b/core/dbt/tests/util.py @@ -1,3 +1,4 @@ +from io import StringIO import os import shutil import yaml @@ -88,7 +89,8 @@ def run_dbt(args: List[str] = None, expect_pass=True): # will turn the logs into json, so you have to be prepared for that. def run_dbt_and_capture(args: List[str] = None, expect_pass=True): try: - stringbuf = capture_stdout_logs() + stringbuf = StringIO() + capture_stdout_logs(stringbuf) res = run_dbt(args, expect_pass=expect_pass) stdout = stringbuf.getvalue() @@ -235,7 +237,7 @@ def run_sql_with_adapter(adapter, sql, fetch=None): return adapter.run_sql_for_tests(sql, fetch, conn) -# Get a Relation object from the identifer (name of table/view). 
+# Get a Relation object from the identifier (name of table/view). # Uses the default database and schema. If you need a relation # with a different schema, it should be constructed in the test. # Uses: diff --git a/core/dbt/utils.py b/core/dbt/utils.py index b7cc6475319..987371b6b02 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -15,7 +15,7 @@ from pathlib import PosixPath, WindowsPath from contextlib import contextmanager -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionException, DuplicateAlias from dbt.events.functions import fire_event from dbt.events.types import RetryExternalCall, RecordRetryException from dbt import flags @@ -365,7 +365,7 @@ def translate_mapping(self, kwargs: Mapping[str, Any]) -> Dict[str, Any]: for key, value in kwargs.items(): canonical_key = self.aliases.get(key, key) if canonical_key in result: - dbt.exceptions.raise_duplicate_alias(kwargs, self.aliases, canonical_key) + raise DuplicateAlias(kwargs, self.aliases, canonical_key) result[canonical_key] = self.translate_value(value) return result diff --git a/core/dbt/version.py b/core/dbt/version.py index 65b3a08c476..d668a902ae6 100644 --- a/core/dbt/version.py +++ b/core/dbt/version.py @@ -235,5 +235,5 @@ def _get_adapter_plugin_names() -> Iterator[str]: yield plugin_name -__version__ = "1.4.0a1" +__version__ = "1.4.0b1" installed = get_installed_version() diff --git a/core/setup.py b/core/setup.py index eaad87423c2..241a70ab6bb 100644 --- a/core/setup.py +++ b/core/setup.py @@ -25,7 +25,7 @@ package_name = "dbt-core" -package_version = "1.4.0a1" +package_version = "1.4.0b1" description = """With dbt, data analysts and engineers can build analytics \ the way engineers build applications.""" @@ -50,11 +50,11 @@ "agate>=1.6,<1.6.4", "betterproto==1.2.5", "click>=7.0,<9", - "colorama>=0.3.9,<0.4.6", + "colorama>=0.3.9,<0.4.7", "hologram>=0.0.14,<=0.0.15", "isodate>=0.6,<0.7", "logbook>=1.5,<1.6", - "mashumaro[msgpack]==3.0.4", + 
"mashumaro[msgpack]==3.2", "minimal-snowplow-tracker==0.0.2", "networkx>=2.3,<2.8.1;python_version<'3.8'", "networkx>=2.3,<3;python_version>='3.8'", @@ -63,7 +63,7 @@ "dbt-extractor~=0.4.1", "typing-extensions>=3.7.4", "werkzeug>=1,<3", - "pathspec~=0.9.0", + "pathspec>=0.9,<0.11", # the following are all to match snowflake-connector-python "requests<3.0.0", "idna>=2.5,<4", @@ -81,6 +81,7 @@ "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", ], python_requires=">=3.7.2", ) diff --git a/docker/Dockerfile b/docker/Dockerfile index 8d3756ca786..72332c35de9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -14,12 +14,12 @@ FROM --platform=$build_for python:3.10.7-slim-bullseye as base # N.B. The refs updated automagically every release via bumpversion # N.B. dbt-postgres is currently found in the core codebase so a value of dbt-core@ is correct -ARG dbt_core_ref=dbt-core@v1.4.0a1 -ARG dbt_postgres_ref=dbt-core@v1.4.0a1 -ARG dbt_redshift_ref=dbt-redshift@v1.4.0a1 -ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0a1 -ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0a1 -ARG dbt_spark_ref=dbt-spark@v1.4.0a1 +ARG dbt_core_ref=dbt-core@v1.4.0b1 +ARG dbt_postgres_ref=dbt-core@v1.4.0b1 +ARG dbt_redshift_ref=dbt-redshift@v1.4.0b1 +ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0b1 +ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0b1 +ARG dbt_spark_ref=dbt-spark@v1.4.0b1 # special case args ARG dbt_spark_version=all ARG dbt_third_party diff --git a/docker/README.md b/docker/README.md index 4adde533d37..7a48010b7d3 100644 --- a/docker/README.md +++ b/docker/README.md @@ -105,7 +105,7 @@ The `ENTRYPOINT` for this Dockerfile is the command `dbt` so you can bind-mount docker run \ --network=host --mount type=bind,source=path/to/project,target=/usr/app \ ---mount type=bind,source=path/to/profiles.yml,target=/root/.dbt/ \ +--mount 
type=bind,source=path/to/profiles.yml,target=/root/.dbt/profiles.yml \ my-dbt \ ls ``` diff --git a/plugins/postgres/dbt/adapters/postgres/__version__.py b/plugins/postgres/dbt/adapters/postgres/__version__.py index 70ba273f562..27cfeecd9e8 100644 --- a/plugins/postgres/dbt/adapters/postgres/__version__.py +++ b/plugins/postgres/dbt/adapters/postgres/__version__.py @@ -1 +1 @@ -version = "1.4.0a1" +version = "1.4.0b1" diff --git a/plugins/postgres/dbt/adapters/postgres/impl.py b/plugins/postgres/dbt/adapters/postgres/impl.py index 3664e8d2a51..78b86234eae 100644 --- a/plugins/postgres/dbt/adapters/postgres/impl.py +++ b/plugins/postgres/dbt/adapters/postgres/impl.py @@ -8,7 +8,13 @@ from dbt.adapters.postgres import PostgresColumn from dbt.adapters.postgres import PostgresRelation from dbt.dataclass_schema import dbtClassMixin, ValidationError -import dbt.exceptions +from dbt.exceptions import ( + CrossDbReferenceProhibited, + IndexConfigNotDict, + InvalidIndexConfig, + RuntimeException, + UnexpectedDbReference, +) import dbt.utils @@ -40,14 +46,9 @@ def parse(cls, raw_index) -> Optional["PostgresIndexConfig"]: cls.validate(raw_index) return cls.from_dict(raw_index) except ValidationError as exc: - msg = dbt.exceptions.validator_error_message(exc) - dbt.exceptions.raise_compiler_error(f"Could not parse index config: {msg}") + raise InvalidIndexConfig(exc) except TypeError: - dbt.exceptions.raise_compiler_error( - f"Invalid index config:\n" - f" Got: {raw_index}\n" - f' Expected a dictionary with at minimum a "columns" key' - ) + raise IndexConfigNotDict(raw_index) @dataclass @@ -73,11 +74,7 @@ def verify_database(self, database): database = database.strip('"') expected = self.config.credentials.database if database.lower() != expected.lower(): - raise dbt.exceptions.NotImplementedException( - "Cross-db references not allowed in {} ({} vs {})".format( - self.type(), database, expected - ) - ) + raise UnexpectedDbReference(self.type(), database, expected) # return an 
empty string on success so macros can call this return "" @@ -110,12 +107,8 @@ def _get_catalog_schemas(self, manifest): schemas = super()._get_catalog_schemas(manifest) try: return schemas.flatten() - except dbt.exceptions.RuntimeException as exc: - dbt.exceptions.raise_compiler_error( - "Cross-db references not allowed in adapter {}: Got {}".format( - self.type(), exc.msg - ) - ) + except RuntimeException as exc: + raise CrossDbReferenceProhibited(self.type(), exc.msg) def _link_cached_relations(self, manifest): schemas: Set[str] = set() diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py index 6b76e5cc375..00a91759aec 100644 --- a/plugins/postgres/setup.py +++ b/plugins/postgres/setup.py @@ -41,7 +41,7 @@ def _dbt_psycopg2_name(): package_name = "dbt-postgres" -package_version = "1.4.0a1" +package_version = "1.4.0b1" description = """The postgres adapter plugin for dbt (data build tool)""" this_directory = os.path.abspath(os.path.dirname(__file__)) @@ -83,6 +83,7 @@ def _dbt_psycopg2_name(): "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", ], python_requires=">=3.7", ) diff --git a/schemas/dbt/manifest/v8.json b/schemas/dbt/manifest/v8.json new file mode 100644 index 00000000000..d92dc46b79c --- /dev/null +++ b/schemas/dbt/manifest/v8.json @@ -0,0 +1,4362 @@ +{ + "type": "object", + "required": [ + "metadata", + "nodes", + "sources", + "macros", + "docs", + "exposures", + "metrics", + "selectors" + ], + "properties": { + "metadata": { + "$ref": "#/definitions/ManifestMetadata", + "description": "Metadata about the manifest" + }, + "nodes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/AnalysisNode" + }, + { + "$ref": "#/definitions/SingularTestNode" + }, + { + "$ref": "#/definitions/HookNode" + }, + { + "$ref": "#/definitions/ModelNode" + }, + { + "$ref": "#/definitions/RPCNode" + }, + 
{ + "$ref": "#/definitions/SqlNode" + }, + { + "$ref": "#/definitions/GenericTestNode" + }, + { + "$ref": "#/definitions/SnapshotNode" + }, + { + "$ref": "#/definitions/SeedNode" + } + ] + }, + "description": "The nodes defined in the dbt project and its dependencies" + }, + "sources": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/SourceDefinition" + }, + "description": "The sources defined in the dbt project and its dependencies" + }, + "macros": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Macro" + }, + "description": "The macros defined in the dbt project and its dependencies" + }, + "docs": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Documentation" + }, + "description": "The docs defined in the dbt project and its dependencies" + }, + "exposures": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Exposure" + }, + "description": "The exposures defined in the dbt project and its dependencies" + }, + "metrics": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/Metric" + }, + "description": "The metrics defined in the dbt project and its dependencies" + }, + "selectors": { + "type": "object", + "description": "The selectors defined in selectors.yml" + }, + "disabled": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/AnalysisNode" + }, + { + "$ref": "#/definitions/SingularTestNode" + }, + { + "$ref": "#/definitions/HookNode" + }, + { + "$ref": "#/definitions/ModelNode" + }, + { + "$ref": "#/definitions/RPCNode" + }, + { + "$ref": "#/definitions/SqlNode" + }, + { + "$ref": "#/definitions/GenericTestNode" + }, + { + "$ref": "#/definitions/SnapshotNode" + }, + { + "$ref": "#/definitions/SeedNode" + }, + { + "$ref": "#/definitions/SourceDefinition" + } + ] + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping of the disabled 
nodes in the target" + }, + "parent_map": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping from\u00a0child nodes to their dependencies" + }, + "child_map": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping from parent nodes to their dependents" + } + }, + "additionalProperties": false, + "description": "WritableManifest(metadata: dbt.contracts.graph.manifest.ManifestMetadata, nodes: Mapping[str, Union[dbt.contracts.graph.nodes.AnalysisNode, dbt.contracts.graph.nodes.SingularTestNode, dbt.contracts.graph.nodes.HookNode, dbt.contracts.graph.nodes.ModelNode, dbt.contracts.graph.nodes.RPCNode, dbt.contracts.graph.nodes.SqlNode, dbt.contracts.graph.nodes.GenericTestNode, dbt.contracts.graph.nodes.SnapshotNode, dbt.contracts.graph.nodes.SeedNode]], sources: Mapping[str, dbt.contracts.graph.nodes.SourceDefinition], macros: Mapping[str, dbt.contracts.graph.nodes.Macro], docs: Mapping[str, dbt.contracts.graph.nodes.Documentation], exposures: Mapping[str, dbt.contracts.graph.nodes.Exposure], metrics: Mapping[str, dbt.contracts.graph.nodes.Metric], selectors: Mapping[str, Any], disabled: Optional[Mapping[str, List[Union[dbt.contracts.graph.nodes.AnalysisNode, dbt.contracts.graph.nodes.SingularTestNode, dbt.contracts.graph.nodes.HookNode, dbt.contracts.graph.nodes.ModelNode, dbt.contracts.graph.nodes.RPCNode, dbt.contracts.graph.nodes.SqlNode, dbt.contracts.graph.nodes.GenericTestNode, dbt.contracts.graph.nodes.SnapshotNode, dbt.contracts.graph.nodes.SeedNode, dbt.contracts.graph.nodes.SourceDefinition]]]], parent_map: Optional[Dict[str, List[str]]], child_map: Optional[Dict[str, List[str]]])", + "definitions": { + "ManifestMetadata": { + "type": "object", + "required": [], + "properties": { + 
"dbt_schema_version": { + "type": "string", + "default": "https://schemas.getdbt.com/dbt/manifest/v8.json" + }, + "dbt_version": { + "type": "string", + "default": "1.4.0a1" + }, + "generated_at": { + "type": "string", + "format": "date-time", + "default": "2022-12-13T03:30:15.966964Z" + }, + "invocation_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "4f2b967b-7e02-46de-a7ea-268a05e3fab1" + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "default": {} + }, + "project_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A unique identifier for the project" + }, + "user_id": { + "oneOf": [ + { + "type": "string", + "pattern": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + { + "type": "null" + } + ], + "description": "A unique identifier for the user" + }, + "send_anonymous_usage_stats": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "Whether dbt is configured to send anonymous usage statistics" + }, + "adapter_type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The type name of the adapter" + } + }, + "additionalProperties": false, + "description": "Metadata for the manifest." 
+ }, + "AnalysisNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "analysis" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": 
{} + }, + "created_at": { + "type": "number", + "default": 1670902215.970579 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "AnalysisNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , 
config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "FileHash": { + "type": "object", + "required": [ + "name", + "checksum" + ], + "properties": { + "name": { + "type": "string" + }, + "checksum": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "FileHash(name: str, checksum: str)" + }, + "NodeConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "view" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" 
+ } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + } + }, + "additionalProperties": true, + "description": "NodeConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'view', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Union[str, List[str], NoneType] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = )" + }, + "Hook": { + "type": "object", + "required": [ + "sql" + ], + "properties": { + "sql": { + "type": "string" + }, + "transaction": { + "type": "boolean", + "default": true + }, + "index": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Hook(sql: str, transaction: bool = True, index: Optional[int] = None)" + }, + "Docs": { + "type": "object", + "required": [], + "properties": { + "show": { + "type": "boolean", + "default": true + }, + "node_color": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": 
"Docs(show: bool = True, node_color: Optional[str] = None)" + }, + "ColumnInfo": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + "default": {} + }, + "data_type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "quote": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": true, + "description": "Used in all ManifestNodes and SourceDefinition" + }, + "DependsOn": { + "type": "object", + "required": [], + "properties": { + "macros": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "nodes": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "DependsOn(macros: List[str] = , nodes: List[str] = )" + }, + "InjectedCTE": { + "type": "object", + "required": [ + "id", + "sql" + ], + "properties": { + "id": { + "type": "string" + }, + "sql": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "Used in CompiledNodes as part of ephemeral model processing" + }, + "SingularTestNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "test" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + 
"alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/TestConfig", + "default": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.973521 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + 
"default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "SingularTestNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "TestConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + 
"default": "dbt_test__audit" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "test" + }, + "severity": { + "type": "string", + "pattern": "^([Ww][Aa][Rr][Nn]|[Ee][Rr][Rr][Oo][Rr])$", + "default": "ERROR" + }, + "store_failures": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "where": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "limit": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "fail_calc": { + "type": "string", + "default": "count(*)" + }, + "warn_if": { + "type": "string", + "default": "!= 0" + }, + "error_if": { + "type": "string", + "default": "!= 0" + } + }, + "additionalProperties": true, + "description": "TestConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = 'dbt_test__audit', database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'test', severity: dbt.contracts.graph.model_config.Severity = 'ERROR', store_failures: Optional[bool] = None, where: Optional[str] = None, limit: Optional[int] = None, fail_calc: str = 'count(*)', warn_if: str = '!= 0', error_if: str = '!= 0')" + }, + "HookNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "operation" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": 
"string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.975156 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + 
"sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "index": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "HookNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: 
List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None, index: Optional[int] = None)" + }, + "ModelNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "model" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + 
"deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.976732 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "ModelNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: 
Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "RPCNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "rpc" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + 
"columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.978195 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "RPCNode(database: str, schema: str, name: str, resource_type: 
dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "SqlNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "sql operation" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, 
+ "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.979718 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": 
"null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "SqlNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "GenericTestNode": { + "type": "object", + "required": [ + "test_metadata", + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "test_metadata": { + "$ref": "#/definitions/TestMetadata" + }, + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + 
"resource_type": { + "type": "string", + "enum": [ + "test" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/TestConfig", + "default": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.981434 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + 
"sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "column_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "file_key_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "GenericTestNode(test_metadata: dbt.contracts.graph.nodes.TestMetadata, database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn 
= , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None, column_name: Optional[str] = None, file_key_name: Optional[str] = None)" + }, + "TestMetadata": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "kwargs": { + "type": "object", + "default": {} + }, + "namespace": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "TestMetadata(name: str, kwargs: Dict[str, Any] = , namespace: Optional[str] = None)" + }, + "SnapshotNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum", + "config" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "snapshot" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/SnapshotConfig" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + 
"patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.984685 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "SnapshotNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SnapshotConfig, _event_status: Dict[str, Any] = , tags: List[str] = , 
description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" + }, + "SnapshotConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "snapshot" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + 
"type": "boolean" + }, + { + "type": "null" + } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "target_schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "target_database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "updated_at": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "check_cols": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "SnapshotConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'snapshot', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Optional[str] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , strategy: Optional[str] = None, target_schema: Optional[str] = None, target_database: Optional[str] = None, updated_at: Optional[str] = None, check_cols: Union[str, List[str], 
NoneType] = None)" + }, + "SeedNode": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "alias", + "checksum" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "seed" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/SeedConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "seed", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "quote_columns": null, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + 
"unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1670902215.987447 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "root_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "SeedNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SeedConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', root_path: Optional[str] = None)" + }, + "SeedConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "seed" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": 
"string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "quote_columns": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "SeedConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'seed', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Union[str, List[str], NoneType] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , quote_columns: Optional[bool] = None)" + }, + 
"SourceDefinition": { + "type": "object", + "required": [ + "database", + "schema", + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "source_name", + "source_description", + "loader", + "identifier" + ], + "properties": { + "database": { + "type": "string" + }, + "schema": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "source" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "source_name": { + "type": "string" + }, + "source_description": { + "type": "string" + }, + "loader": { + "type": "string" + }, + "identifier": { + "type": "string" + }, + "quoting": { + "$ref": "#/definitions/Quoting", + "default": { + "database": null, + "schema": null, + "identifier": null, + "column": null + } + }, + "loaded_at_field": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "freshness": { + "oneOf": [ + { + "$ref": "#/definitions/FreshnessThreshold" + }, + { + "type": "null" + } + ] + }, + "external": { + "oneOf": [ + { + "$ref": "#/definitions/ExternalTable" + }, + { + "type": "null" + } + ] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "source_meta": { + "type": "object", + "default": {} + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/SourceConfig", + "default": { + "enabled": true + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "unrendered_config": { + "type": "object", + "default": {} 
+ }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "created_at": { + "type": "number", + "default": 1670902215.989922 + } + }, + "additionalProperties": false, + "description": "SourceDefinition(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], source_name: str, source_description: str, loader: str, identifier: str, _event_status: Dict[str, Any] = , quoting: dbt.contracts.graph.unparsed.Quoting = , loaded_at_field: Optional[str] = None, freshness: Optional[dbt.contracts.graph.unparsed.FreshnessThreshold] = None, external: Optional[dbt.contracts.graph.unparsed.ExternalTable] = None, description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , source_meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.SourceConfig = , patch_path: Optional[str] = None, unrendered_config: Dict[str, Any] = , relation_name: Optional[str] = None, created_at: float = )" + }, + "Quoting": { + "type": "object", + "required": [], + "properties": { + "database": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "identifier": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "column": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Quoting(database: Optional[bool] = None, schema: Optional[bool] = None, identifier: Optional[bool] = None, column: Optional[bool] = None)" + }, + "FreshnessThreshold": { + "type": "object", + "required": [], + "properties": { + "warn_after": { + "oneOf": [ + { + "$ref": "#/definitions/Time" + }, + { + "type": "null" + } + ], + "default": { + "count": null, + "period": null + } + }, + 
"error_after": { + "oneOf": [ + { + "$ref": "#/definitions/Time" + }, + { + "type": "null" + } + ], + "default": { + "count": null, + "period": null + } + }, + "filter": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "FreshnessThreshold(warn_after: Optional[dbt.contracts.graph.unparsed.Time] = , error_after: Optional[dbt.contracts.graph.unparsed.Time] = , filter: Optional[str] = None)" + }, + "FreshnessMetadata": { + "type": "object", + "required": [], + "properties": { + "dbt_schema_version": { + "type": "string", + "default": "https://schemas.getdbt.com/dbt/sources/v3.json" + }, + "dbt_version": { + "type": "string", + "default": "1.4.0a1" + }, + "generated_at": { + "type": "string", + "format": "date-time", + "default": "2022-12-13T03:30:15.961825Z" + }, + "invocation_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "4f2b967b-7e02-46de-a7ea-268a05e3fab1" + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "default": {} + } + }, + "additionalProperties": false, + "description": "FreshnessMetadata(dbt_schema_version: str = , dbt_version: str = '1.4.0a1', generated_at: datetime.datetime = , invocation_id: Optional[str] = , env: Dict[str, str] = )" + }, + "SourceFreshnessRuntimeError": { + "type": "object", + "required": [ + "unique_id", + "status" + ], + "properties": { + "unique_id": { + "type": "string" + }, + "error": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "status": { + "type": "string", + "enum": [ + "runtime error" + ] + } + }, + "additionalProperties": false, + "description": "SourceFreshnessRuntimeError(unique_id: str, error: Union[str, int, NoneType], status: dbt.contracts.results.FreshnessErrorEnum)" + }, + "SourceFreshnessOutput": { + "type": "object", + "required": [ + "unique_id", + "max_loaded_at", + "snapshotted_at", + 
"max_loaded_at_time_ago_in_s", + "status", + "criteria", + "adapter_response", + "timing", + "thread_id", + "execution_time" + ], + "properties": { + "unique_id": { + "type": "string" + }, + "max_loaded_at": { + "type": "string", + "format": "date-time" + }, + "snapshotted_at": { + "type": "string", + "format": "date-time" + }, + "max_loaded_at_time_ago_in_s": { + "type": "number" + }, + "status": { + "type": "string", + "enum": [ + "pass", + "warn", + "error", + "runtime error" + ] + }, + "criteria": { + "$ref": "#/definitions/FreshnessThreshold" + }, + "adapter_response": { + "type": "object" + }, + "timing": { + "type": "array", + "items": { + "$ref": "#/definitions/TimingInfo" + } + }, + "thread_id": { + "type": "string" + }, + "execution_time": { + "type": "number" + } + }, + "additionalProperties": false, + "description": "SourceFreshnessOutput(unique_id: str, max_loaded_at: datetime.datetime, snapshotted_at: datetime.datetime, max_loaded_at_time_ago_in_s: float, status: dbt.contracts.results.FreshnessStatus, criteria: dbt.contracts.graph.unparsed.FreshnessThreshold, adapter_response: Dict[str, Any], timing: List[dbt.contracts.results.TimingInfo], thread_id: str, execution_time: float)" + }, + "Time": { + "type": "object", + "required": [], + "properties": { + "count": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "period": { + "oneOf": [ + { + "type": "string", + "enum": [ + "minute", + "hour", + "day" + ] + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Time(count: Optional[int] = None, period: Optional[dbt.contracts.graph.unparsed.TimePeriod] = None)" + }, + "TimingInfo": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "started_at": { + "oneOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ] + }, + "completed_at": { + "oneOf": [ + { + "type": "string", + "format": "date-time" + }, + 
{ + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "TimingInfo(name: str, started_at: Optional[datetime.datetime] = None, completed_at: Optional[datetime.datetime] = None)" + }, + "ExternalTable": { + "type": "object", + "required": [], + "properties": { + "location": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "file_format": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "row_format": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tbl_properties": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "partitions": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/ExternalPartition" + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "ExternalTable(_extra: Dict[str, Any] = , location: Optional[str] = None, file_format: Optional[str] = None, row_format: Optional[str] = None, tbl_properties: Optional[str] = None, partitions: Union[List[str], List[dbt.contracts.graph.unparsed.ExternalPartition], NoneType] = None)" + }, + "ExternalPartition": { + "type": "object", + "required": [], + "properties": { + "name": { + "type": "string", + "default": "" + }, + "description": { + "type": "string", + "default": "" + }, + "data_type": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + "default": {} + } + }, + "additionalProperties": true, + "description": "ExternalPartition(_extra: Dict[str, Any] = , name: str = '', description: str = '', data_type: str = '', meta: Dict[str, Any] = )" + }, + "SourceConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "SourceConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + }, + "Macro": { + "type": 
"object", + "required": [ + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "macro_sql" + ], + "properties": { + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "macro" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "macro_sql": { + "type": "string" + }, + "depends_on": { + "$ref": "#/definitions/MacroDependsOn", + "default": { + "macros": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "arguments": { + "type": "array", + "items": { + "$ref": "#/definitions/MacroArgument" + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1670902215.990816 + }, + "supported_languages": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string", + "enum": [ + "python", + "sql" + ] + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Macro(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, macro_sql: str, depends_on: dbt.contracts.graph.nodes.MacroDependsOn = , description: str = '', meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, arguments: List[dbt.contracts.graph.unparsed.MacroArgument] = , created_at: float = , supported_languages: Optional[List[dbt.node_types.ModelLanguage]] = None)" + }, + "MacroDependsOn": { + "type": "object", + "required": [], + "properties": { + "macros": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": 
false, + "description": "Used only in the Macro class" + }, + "MacroArgument": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "description": { + "type": "string", + "default": "" + } + }, + "additionalProperties": false, + "description": "MacroArgument(name: str, type: Optional[str] = None, description: str = '')" + }, + "Documentation": { + "type": "object", + "required": [ + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "block_contents" + ], + "properties": { + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "doc" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "block_contents": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "Documentation(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, block_contents: str)" + }, + "Exposure": { + "type": "object", + "required": [ + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "type", + "owner" + ], + "properties": { + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "exposure" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "dashboard", + "notebook", + "analysis", + "ml", + "application" + ] + }, + "owner": { + "$ref": "#/definitions/ExposureOwner" + }, + "description": { + "type": "string", + "default": "" + }, + 
"label": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "maturity": { + "oneOf": [ + { + "type": "string", + "enum": [ + "low", + "medium", + "high" + ] + }, + { + "type": "null" + } + ] + }, + "meta": { + "type": "object", + "default": {} + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/ExposureConfig", + "default": { + "enabled": true + } + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "url": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1670902215.993354 + } + }, + "additionalProperties": false, + "description": "Exposure(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, description: str = '', label: Optional[str] = None, maturity: Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.ExposureConfig = , unrendered_config: Dict[str, Any] = , url: Optional[str] = None, depends_on: dbt.contracts.graph.nodes.DependsOn = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" + }, + "ExposureOwner": { + "type": "object", + "required": [ + 
"email" + ], + "properties": { + "email": { + "type": "string" + }, + "name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "ExposureOwner(email: str, name: Optional[str] = None)" + }, + "ExposureConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "ExposureConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + }, + "Metric": { + "type": "object", + "required": [ + "name", + "resource_type", + "package_name", + "path", + "original_file_path", + "unique_id", + "fqn", + "description", + "label", + "calculation_method", + "timestamp", + "expression", + "filters", + "time_grains", + "dimensions" + ], + "properties": { + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "metric" + ] + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "description": { + "type": "string" + }, + "label": { + "type": "string" + }, + "calculation_method": { + "type": "string" + }, + "timestamp": { + "type": "string" + }, + "expression": { + "type": "string" + }, + "filters": { + "type": "array", + "items": { + "$ref": "#/definitions/MetricFilter" + } + }, + "time_grains": { + "type": "array", + "items": { + "type": "string" + } + }, + "dimensions": { + "type": "array", + "items": { + "type": "string" + } + }, + "window": { + "oneOf": [ + { + "$ref": "#/definitions/MetricTime" + }, + { + "type": "null" + } + ] + }, + "model": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "model_unique_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "meta": { + "type": "object", + "default": {} + }, 
+ "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/MetricConfig", + "default": { + "enabled": true + } + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1670902215.995033 + } + }, + "additionalProperties": false, + "description": "Metric(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], description: str, label: str, calculation_method: str, timestamp: str, expression: str, filters: List[dbt.contracts.graph.unparsed.MetricFilter], time_grains: List[str], dimensions: List[str], window: Optional[dbt.contracts.graph.unparsed.MetricTime] = None, model: Optional[str] = None, model_unique_id: Optional[str] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.MetricConfig = , unrendered_config: Dict[str, Any] = , sources: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , refs: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" + }, + "MetricFilter": { + "type": "object", + "required": [ + "field", + "operator", + "value" + ], + "properties": { + "field": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "MetricFilter(field: str, operator: str, value: str)" + }, + 
"MetricTime": { + "type": "object", + "required": [], + "properties": { + "count": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "period": { + "oneOf": [ + { + "type": "string", + "enum": [ + "day", + "week", + "month", + "year" + ] + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "MetricTime(count: Optional[int] = None, period: Optional[dbt.contracts.graph.unparsed.MetricTimePeriod] = None)" + }, + "MetricConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "MetricConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://schemas.getdbt.com/dbt/manifest/v8.json" +} diff --git a/test/integration/023_exit_codes_tests/models/bad.sql b/test/integration/023_exit_codes_tests/models/bad.sql deleted file mode 100644 index dad7fe5fc10..00000000000 --- a/test/integration/023_exit_codes_tests/models/bad.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select bad sql here diff --git a/test/integration/023_exit_codes_tests/models/dupe.sql b/test/integration/023_exit_codes_tests/models/dupe.sql deleted file mode 100644 index f7bb37c8b71..00000000000 --- a/test/integration/023_exit_codes_tests/models/dupe.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select 1 as id, current_date as updated_at -union all -select 2 as id, current_date as updated_at -union all -select 3 as id, current_date as updated_at -union all -select 4 as id, current_date as updated_at diff --git a/test/integration/023_exit_codes_tests/models/good.sql b/test/integration/023_exit_codes_tests/models/good.sql deleted file mode 100644 index f7bb37c8b71..00000000000 --- a/test/integration/023_exit_codes_tests/models/good.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select 1 as id, current_date as updated_at -union all -select 2 as id, current_date as updated_at 
-union all -select 3 as id, current_date as updated_at -union all -select 4 as id, current_date as updated_at diff --git a/test/integration/023_exit_codes_tests/models/schema.yml b/test/integration/023_exit_codes_tests/models/schema.yml deleted file mode 100644 index f7243286b7b..00000000000 --- a/test/integration/023_exit_codes_tests/models/schema.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 -models: -- name: good - columns: - - name: updated_at - tests: - - not_null -- name: bad - columns: - - name: updated_at - tests: - - not_null -- name: dupe - columns: - - name: updated_at - tests: - - unique diff --git a/test/integration/023_exit_codes_tests/seeds-bad/data.csv b/test/integration/023_exit_codes_tests/seeds-bad/data.csv deleted file mode 100644 index fcc8e001bbd..00000000000 --- a/test/integration/023_exit_codes_tests/seeds-bad/data.csv +++ /dev/null @@ -1,2 +0,0 @@ -a,b,c -1,\2,3,a,a,a diff --git a/test/integration/023_exit_codes_tests/seeds-good/data.csv b/test/integration/023_exit_codes_tests/seeds-good/data.csv deleted file mode 100644 index bfde6bfa0b8..00000000000 --- a/test/integration/023_exit_codes_tests/seeds-good/data.csv +++ /dev/null @@ -1,2 +0,0 @@ -a,b,c -1,2,3 diff --git a/test/integration/023_exit_codes_tests/snapshots-bad/b.sql b/test/integration/023_exit_codes_tests/snapshots-bad/b.sql deleted file mode 100644 index 52425b7c9bc..00000000000 --- a/test/integration/023_exit_codes_tests/snapshots-bad/b.sql +++ /dev/null @@ -1,4 +0,0 @@ -{% snapshot good_snapshot %} - {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at_not_real')}} - select * from {{ schema }}.good -{% endsnapshot %} diff --git a/test/integration/023_exit_codes_tests/snapshots-good/g.sql b/test/integration/023_exit_codes_tests/snapshots-good/g.sql deleted file mode 100644 index 0c1205d9441..00000000000 --- a/test/integration/023_exit_codes_tests/snapshots-good/g.sql +++ /dev/null @@ -1,4 +0,0 @@ -{% snapshot 
good_snapshot %} - {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}} - select * from {{ schema }}.good -{% endsnapshot %} diff --git a/test/integration/023_exit_codes_tests/test_exit_codes.py b/test/integration/023_exit_codes_tests/test_exit_codes.py deleted file mode 100644 index 7da8d85e321..00000000000 --- a/test/integration/023_exit_codes_tests/test_exit_codes.py +++ /dev/null @@ -1,200 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - -import dbt.exceptions - - -class TestExitCodes(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "snapshot-paths": ['snapshots-good'], - } - - @use_profile('postgres') - def test_postgres_exit_code_run_succeed(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - self.assertTableDoesExist('good') - - @use_profile('postgres') - def test__postgres_exit_code_run_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'bad']) - self.assertEqual(len(results.results), 1) - self.assertFalse(success) - self.assertTableDoesNotExist('bad') - - @use_profile('postgres') - def test__postgres_schema_test_pass(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - results, success = self.run_dbt_and_check(['test', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - - @use_profile('postgres') - def test__postgres_schema_test_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'dupe']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - results, success = self.run_dbt_and_check(['test', 
'--model', 'dupe']) - self.assertEqual(len(results.results), 1) - self.assertFalse(success) - - @use_profile('postgres') - def test__postgres_compile(self): - results, success = self.run_dbt_and_check(['compile']) - self.assertEqual(len(results.results), 7) - self.assertTrue(success) - - @use_profile('postgres') - def test__postgres_snapshot_pass(self): - self.run_dbt_and_check(['run', '--model', 'good']) - results, success = self.run_dbt_and_check(['snapshot']) - self.assertEqual(len(results.results), 1) - self.assertTableDoesExist('good_snapshot') - self.assertTrue(success) - - -class TestExitCodesSnapshotFail(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "snapshot-paths": ['snapshots-bad'], - } - - @use_profile('postgres') - def test__postgres_snapshot_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertTrue(success) - self.assertEqual(len(results.results), 1) - - results, success = self.run_dbt_and_check(['snapshot']) - self.assertEqual(len(results.results), 1) - self.assertTableDoesNotExist('good_snapshot') - self.assertFalse(success) - -class TestExitCodesDeps(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def packages_config(self): - return { - "packages": [ - { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'dbt/1.0.0', - } - ] - } - - @use_profile('postgres') - def test_postgres_deps(self): - _, success = self.run_dbt_and_check(['deps']) - self.assertTrue(success) - - -class TestExitCodesDepsFail(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def packages_config(self): - return { - "packages": [ - { - 
'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'bad-branch', - }, - ] - } - - @use_profile('postgres') - def test_postgres_deps(self): - with self.assertRaises(dbt.exceptions.InternalException): - # this should fail - self.run_dbt_and_check(['deps']) - - -class TestExitCodesSeed(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds-good'], - 'seeds': { - 'quote_columns': False, - }, - } - - @use_profile('postgres') - def test_postgres_seed(self): - results, success = self.run_dbt_and_check(['seed']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - - -class TestExitCodesSeedFail(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds-bad'], - 'seeds': { - 'quote_columns': False, - }, - } - - @use_profile('postgres') - def test_postgres_seed(self): - _, success = self.run_dbt_and_check(['seed']) - self.assertFalse(success) diff --git a/test/integration/030_statement_tests/models/statement_actual.sql b/test/integration/030_statement_tests/models/statement_actual.sql deleted file mode 100644 index 8c550bc5dc1..00000000000 --- a/test/integration/030_statement_tests/models/statement_actual.sql +++ /dev/null @@ -1,23 +0,0 @@ - --- {{ ref('seed') }} - -{%- call statement('test_statement', fetch_result=True) -%} - - select - count(*) as "num_records" - - from {{ ref('seed') }} - -{%- endcall -%} - -{% set result = load_result('test_statement') %} - -{% set res_table = result['table'] %} -{% set res_matrix = result['data'] %} - -{% set matrix_value = res_matrix[0][0] %} -{% set table_value = res_table[0]['num_records'] %} - -select 'matrix' as source, {{ matrix_value 
}} as value -union all -select 'table' as source, {{ table_value }} as value diff --git a/test/integration/030_statement_tests/seed/statement_expected.csv b/test/integration/030_statement_tests/seed/statement_expected.csv deleted file mode 100644 index cf9d9af15ac..00000000000 --- a/test/integration/030_statement_tests/seed/statement_expected.csv +++ /dev/null @@ -1,3 +0,0 @@ -source,value -matrix,100 -table,100 diff --git a/test/integration/030_statement_tests/test_statements.py b/test/integration/030_statement_tests/test_statements.py deleted file mode 100644 index 4278f394580..00000000000 --- a/test/integration/030_statement_tests/test_statements.py +++ /dev/null @@ -1,36 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestStatements(DBTIntegrationTest): - - @property - def schema(self): - return "statements_030" - - @staticmethod - def dir(path): - return path.lstrip("/") - - @property - def models(self): - return self.dir("models") - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'quote_columns': False, - } - } - - @use_profile("postgres") - def test_postgres_statements(self): - self.use_default_project({"seed-paths": [self.dir("seed")]}) - - results = self.run_dbt(["seed"]) - self.assertEqual(len(results), 2) - results = self.run_dbt() - self.assertEqual(len(results), 1) - - self.assertTablesEqual("statement_actual", "statement_expected") diff --git a/test/integration/031_thread_count_tests/models/.gitkeep b/test/integration/031_thread_count_tests/models/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/031_thread_count_tests/models/do_nothing_1.sql b/test/integration/031_thread_count_tests/models/do_nothing_1.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_1.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file 
diff --git a/test/integration/031_thread_count_tests/models/do_nothing_10.sql b/test/integration/031_thread_count_tests/models/do_nothing_10.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_10.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_11.sql b/test/integration/031_thread_count_tests/models/do_nothing_11.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_11.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_12.sql b/test/integration/031_thread_count_tests/models/do_nothing_12.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_12.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_13.sql b/test/integration/031_thread_count_tests/models/do_nothing_13.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_13.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_14.sql b/test/integration/031_thread_count_tests/models/do_nothing_14.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_14.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_15.sql b/test/integration/031_thread_count_tests/models/do_nothing_15.sql deleted 
file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_15.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_16.sql b/test/integration/031_thread_count_tests/models/do_nothing_16.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_16.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_17.sql b/test/integration/031_thread_count_tests/models/do_nothing_17.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_17.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_18.sql b/test/integration/031_thread_count_tests/models/do_nothing_18.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_18.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_19.sql b/test/integration/031_thread_count_tests/models/do_nothing_19.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_19.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_2.sql b/test/integration/031_thread_count_tests/models/do_nothing_2.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_2.sql +++ /dev/null @@ -1 +0,0 @@ -with x as 
(select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_20.sql b/test/integration/031_thread_count_tests/models/do_nothing_20.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_20.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_3.sql b/test/integration/031_thread_count_tests/models/do_nothing_3.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_3.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_4.sql b/test/integration/031_thread_count_tests/models/do_nothing_4.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_4.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_5.sql b/test/integration/031_thread_count_tests/models/do_nothing_5.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_5.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_6.sql b/test/integration/031_thread_count_tests/models/do_nothing_6.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_6.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_7.sql 
b/test/integration/031_thread_count_tests/models/do_nothing_7.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_7.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_8.sql b/test/integration/031_thread_count_tests/models/do_nothing_8.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_8.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_9.sql b/test/integration/031_thread_count_tests/models/do_nothing_9.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_9.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/test_thread_count.py b/test/integration/031_thread_count_tests/test_thread_count.py deleted file mode 100644 index 042e2cd8a94..00000000000 --- a/test/integration/031_thread_count_tests/test_thread_count.py +++ /dev/null @@ -1,28 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestThreadCount(DBTIntegrationTest): - - @property - def project_config(self): - return {'config-version': 2} - - @property - def profile_config(self): - return { - 'threads': 2, - } - - @property - def schema(self): - return "thread_tests_031" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_threading_8x(self): - results = self.run_dbt(args=['run', '--threads', '16']) - self.assertTrue(len(results), 20) diff --git a/test/integration/044_run_operations_tests/macros/sad_macros.sql b/test/integration/044_run_operations_tests/macros/sad_macros.sql 
deleted file mode 100644 index 4f2c80bc40f..00000000000 --- a/test/integration/044_run_operations_tests/macros/sad_macros.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% macro syntax_error() %} - {% if execute %} - {% call statement() %} - select NOPE NOT A VALID QUERY - {% endcall %} - {% endif %} -{% endmacro %} diff --git a/test/integration/044_run_operations_tests/models/model.sql b/test/integration/044_run_operations_tests/models/model.sql deleted file mode 100644 index 43258a71464..00000000000 --- a/test/integration/044_run_operations_tests/models/model.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as id diff --git a/test/integration/044_run_operations_tests/test_run_operations.py b/test/integration/044_run_operations_tests/test_run_operations.py deleted file mode 100644 index d0308abe9b9..00000000000 --- a/test/integration/044_run_operations_tests/test_run_operations.py +++ /dev/null @@ -1,76 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import yaml - - -class TestOperations(DBTIntegrationTest): - @property - def schema(self): - return "run_operations_044" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "macro-paths": ['macros'], - } - - def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs): - args = ['run-operation', macro] - if kwargs: - args.extend(('--args', yaml.safe_dump(kwargs))) - if extra_args: - args.extend(extra_args) - return self.run_dbt(args, expect_pass=expect_pass) - - @use_profile('postgres') - def test__postgres_macro_noargs(self): - self.run_operation('no_args') - self.assertTableDoesExist('no_args') - - @use_profile('postgres') - def test__postgres_macro_args(self): - self.run_operation('table_name_args', table_name='my_fancy_table') - self.assertTableDoesExist('my_fancy_table') - - @use_profile('postgres') - def test__postgres_macro_exception(self): - self.run_operation('syntax_error', False) - - 
@use_profile('postgres') - def test__postgres_macro_missing(self): - self.run_operation('this_macro_does_not_exist', False) - - @use_profile('postgres') - def test__postgres_cannot_connect(self): - self.run_operation('no_args', - extra_args=['--target', 'noaccess'], - expect_pass=False) - - @use_profile('postgres') - def test__postgres_vacuum(self): - self.run_dbt(['run']) - # this should succeed - self.run_operation('vacuum', table_name='model') - - @use_profile('postgres') - def test__postgres_vacuum_ref(self): - self.run_dbt(['run']) - # this should succeed - self.run_operation('vacuum_ref', ref_target='model') - - @use_profile('postgres') - def test__postgres_select(self): - self.run_operation('select_something', name='world') - - @use_profile('postgres') - def test__postgres_access_graph(self): - self.run_operation('log_graph') - - @use_profile('postgres') - def test__postgres_print(self): - # Tests that calling the `print()` macro does not cause an exception - self.run_operation('print_something') diff --git a/test/integration/049_dbt_debug_tests/models/model.sql b/test/integration/049_dbt_debug_tests/models/model.sql deleted file mode 100644 index 2c2d9c8de90..00000000000 --- a/test/integration/049_dbt_debug_tests/models/model.sql +++ /dev/null @@ -1 +0,0 @@ -seled 1 as id diff --git a/test/integration/049_dbt_debug_tests/test_debug.py b/test/integration/049_dbt_debug_tests/test_debug.py deleted file mode 100644 index 8a5fbd774f3..00000000000 --- a/test/integration/049_dbt_debug_tests/test_debug.py +++ /dev/null @@ -1,158 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os -import re -import yaml - -import pytest - - -class TestDebug(DBTIntegrationTest): - @property - def schema(self): - return 'dbt_debug_049' - - @staticmethod - def dir(value): - return os.path.normpath(value) - - @property - def models(self): - return self.dir('models') - - def postgres_profile(self): - profile = super(TestDebug, self).postgres_profile() - 
profile['test']['outputs'].update({ - 'nopass': { - 'type': 'postgres', - 'threads': 4, - 'host': self.database_host, - 'port': 5432, - 'user': 'root', - # 'pass': 'password', - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - 'wronguser': { - 'type': 'postgres', - 'threads': 4, - 'host': self.database_host, - 'port': 5432, - 'user': 'notmyuser', - 'pass': 'notmypassword', - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - 'none_target': None - }) - return profile - - @pytest.fixture(autouse=True) - def capsys(self, capsys): - self.capsys = capsys - - def assertGotValue(self, linepat, result): - found = False - output = self.capsys.readouterr().out - for line in output.split('\n'): - if linepat.match(line): - found = True - self.assertIn(result, line, 'result "{}" not found in "{}" line'.format(result, linepat)) - self.assertTrue(found, 'linepat {} not found in stdout: {}'.format(linepat, output)) - - @use_profile('postgres') - def test_postgres_ok(self): - self.run_dbt(['debug']) - self.assertNotIn('ERROR', self.capsys.readouterr().out) - - @use_profile('postgres') - def test_postgres_nopass(self): - self.run_dbt(['debug', '--target', 'nopass'], expect_pass=False) - self.assertGotValue(re.compile(r'\s+profiles\.yml file'), 'ERROR invalid') - - @use_profile('postgres') - def test_postgres_wronguser(self): - self.run_dbt(['debug', '--target', 'wronguser'], expect_pass=False) - self.assertGotValue(re.compile(r'\s+Connection test'), 'ERROR') - - @use_profile('postgres') - def test_postgres_empty_target(self): - self.run_dbt(['debug', '--target', 'none_target'], expect_pass=False) - self.assertGotValue(re.compile(r"\s+output 'none_target'"), 'misconfigured') - - -class TestDebugProfileVariable(TestDebug): - @property - def project_config(self): - return { - 'config-version': 2, - 'profile': '{{ "te" ~ "st" }}' - } - - -class TestDebugInvalidProject(DBTIntegrationTest): - @property - def schema(self): - return 'dbt_debug_049' - - @staticmethod - def 
dir(value): - return os.path.normpath(value) - - @property - def models(self): - return self.dir('models') - - @pytest.fixture(autouse=True) - def capsys(self, capsys): - self.capsys = capsys - - @use_profile('postgres') - def test_postgres_empty_project(self): - with open('dbt_project.yml', 'w') as f: - pass - self.run_dbt(['debug', '--profile', 'test'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR invalid', line) - elif line.strip().startswith('profiles.yml file'): - self.assertNotIn('ERROR invalid', line) - - @use_profile('postgres') - def test_postgres_badproject(self): - # load a special project that is an error - self.use_default_project(overrides={ - 'invalid-key': 'not a valid key so this is bad project', - }) - self.run_dbt(['debug', '--profile', 'test'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR invalid', line) - elif line.strip().startswith('profiles.yml file'): - self.assertNotIn('ERROR invalid', line) - - @use_profile('postgres') - def test_postgres_not_found_project_dir(self): - self.run_dbt(['debug', '--project-dir', 'nopass'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR not found', line) - elif line.strip().startswith('profiles.yml file'): - self.assertNotIn('ERROR invalid', line) - - @use_profile('postgres') - def test_postgres_invalid_project_outside_current_dir(self): - # create a dbt_project.yml - project_config = { - 'invalid-key': 'not a valid key in this project' - } - os.makedirs('custom', exist_ok=True) - with open("custom/dbt_project.yml", 'w') as f: - yaml.safe_dump(project_config, f, default_flow_style=True) - self.run_dbt(['debug', '--project-dir', 
'custom'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR invalid', line) diff --git a/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql b/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql deleted file mode 100644 index 30f1a53ec18..00000000000 --- a/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql +++ /dev/null @@ -1 +0,0 @@ -select 1, diff --git a/test/integration/061_use_colors_tests/test_no_use_colors.py b/test/integration/061_use_colors_tests/test_no_use_colors.py deleted file mode 100644 index a923c8d855e..00000000000 --- a/test/integration/061_use_colors_tests/test_no_use_colors.py +++ /dev/null @@ -1,29 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile -import logging -import re -import sys - -class TestNoUseColors(DBTIntegrationTest): - - @property - def project_config(self): - return {'config-version': 2} - - @property - def schema(self): - return "use_colors_tests_061" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_no_use_colors(self): - # pattern to match formatted log output - pattern = re.compile(r'\[31m.*|\[33m.*') - - results, stdout = self.run_dbt_and_capture(args=['--no-use-colors', 'run'], expect_pass=False) - - stdout_contains_formatting_characters = bool(pattern.search(stdout)) - self.assertFalse(stdout_contains_formatting_characters) diff --git a/test/integration/061_use_colors_tests/test_use_colors.py b/test/integration/061_use_colors_tests/test_use_colors.py deleted file mode 100644 index 6b3dac6a1f1..00000000000 --- a/test/integration/061_use_colors_tests/test_use_colors.py +++ /dev/null @@ -1,29 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile -import logging -import re -import sys - -class TestUseColors(DBTIntegrationTest): - - @property - def 
project_config(self): - return {'config-version': 2} - - @property - def schema(self): - return "use_colors_tests_061" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_use_colors(self): - # pattern to match formatted log output - pattern = re.compile(r'\[31m.*|\[33m.*') - - results, stdout = self.run_dbt_and_capture(args=['--use-colors', 'run'], expect_pass=False) - - stdout_contains_formatting_characters = bool(pattern.search(stdout)) - self.assertTrue(stdout_contains_formatting_characters) diff --git a/test/integration/062_defer_state_tests/test_defer_state.py b/test/integration/062_defer_state_tests/test_defer_state.py index 56004a1f28c..058e43ef05f 100644 --- a/test/integration/062_defer_state_tests/test_defer_state.py +++ b/test/integration/062_defer_state_tests/test_defer_state.py @@ -89,6 +89,9 @@ def run_and_snapshot_defer(self): # defer test, it succeeds results = self.run_dbt(['snapshot', '--state', 'state', '--defer']) + # favor_state test, it succeeds + results = self.run_dbt(['snapshot', '--state', 'state', '--defer', '--favor-state']) + def run_and_defer(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -123,6 +126,40 @@ def run_and_defer(self): assert len(results) == 1 + def run_and_defer_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + assert not any(r.node.deferred for r in results) + results = self.run_dbt(['run']) + assert len(results) == 2 + assert not any(r.node.deferred for r in results) + results = self.run_dbt(['test']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + # test tests first, because run will change things + # no state, wrong schema, failure. 
+ self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False) + + # no state, run also fails + self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False) + + # defer test, it succeeds + results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + + # with state it should work though + results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + assert self.other_schema not in results[0].node.compiled_code + assert self.unique_schema() in results[0].node.compiled_code + + with open('target/manifest.json') as fp: + data = json.load(fp) + assert data['nodes']['seed.test.seed']['deferred'] + + assert len(results) == 1 + def run_switchdirs_defer(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -152,6 +189,35 @@ def run_switchdirs_defer(self): expect_pass=False, ) + def run_switchdirs_defer_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + self.use_default_project({'model-paths': ['changed_models']}) + # the sql here is just wrong, so it should fail + self.run_dbt( + ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=False, + ) + # but this should work since we just use the old happy model + self.run_dbt( + ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=True, + ) + + self.use_default_project({'model-paths': ['changed_models_bad']}) + # this should fail because the table model refs a broken ephemeral + # model, which it should see + self.run_dbt( + ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=False, + ) + def 
run_defer_iff_not_exists(self): results = self.run_dbt(['seed', '--target', 'otherschema']) assert len(results) == 1 @@ -169,6 +235,23 @@ def run_defer_iff_not_exists(self): assert self.other_schema not in results[0].node.compiled_code assert self.unique_schema() in results[0].node.compiled_code + def run_defer_iff_not_exists_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + assert len(results) == 2 + + # because the seed exists in other schema, we should defer it + assert self.other_schema not in results[0].node.compiled_code + assert self.unique_schema() in results[0].node.compiled_code + def run_defer_deleted_upstream(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -191,6 +274,27 @@ def run_defer_deleted_upstream(self): assert self.other_schema not in results[0].node.compiled_code assert self.unique_schema() in results[0].node.compiled_code + def run_defer_deleted_upstream_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + self.use_default_project({'model-paths': ['changed_models_missing']}) + + self.run_dbt( + ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=True, + ) + + # despite deferral, test should use models just created in our schema + results = self.run_dbt(['test', '--state', 'state', '--defer', '--favor-state']) + assert self.other_schema not in results[0].node.compiled_code + assert self.unique_schema() in results[0].node.compiled_code 
+ @use_profile('postgres') def test_postgres_state_changetarget(self): self.run_and_defer() @@ -199,18 +303,38 @@ def test_postgres_state_changetarget(self): with pytest.raises(SystemExit): self.run_dbt(['seed', '--defer']) + @use_profile('postgres') + def test_postgres_state_changetarget_favor_state(self): + self.run_and_defer_favor_state() + + # make sure these commands don't work with --defer + with pytest.raises(SystemExit): + self.run_dbt(['seed', '--defer']) + @use_profile('postgres') def test_postgres_state_changedir(self): self.run_switchdirs_defer() + @use_profile('postgres') + def test_postgres_state_changedir_favor_state(self): + self.run_switchdirs_defer_favor_state() + @use_profile('postgres') def test_postgres_state_defer_iffnotexists(self): self.run_defer_iff_not_exists() + @use_profile('postgres') + def test_postgres_state_defer_iffnotexists_favor_state(self): + self.run_defer_iff_not_exists_favor_state() + @use_profile('postgres') def test_postgres_state_defer_deleted_upstream(self): self.run_defer_deleted_upstream() + @use_profile('postgres') + def test_postgres_state_defer_deleted_upstream_favor_state(self): + self.run_defer_deleted_upstream_favor_state() + @use_profile('postgres') def test_postgres_state_snapshot_defer(self): self.run_and_snapshot_defer() diff --git a/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql b/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql deleted file mode 100644 index 0f6028e5306..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql +++ /dev/null @@ -1,9 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "unique_key": "col_A", - "materialized": "incremental" - }) -}} diff --git a/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql 
b/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "materialized": "table" - }) -}} diff --git a/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql b/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "materialized": "table" - }) -}} diff --git a/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql b/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "materialized": "table" - }) -}} diff --git a/test/integration/063_relation_name_tests/seeds/seed.csv b/test/integration/063_relation_name_tests/seeds/seed.csv deleted file mode 100644 index d4a1e26eed2..00000000000 --- a/test/integration/063_relation_name_tests/seeds/seed.csv +++ /dev/null @@ -1,4 +0,0 @@ -col_A,col_B -1,2 -3,4 -5,6 diff --git a/test/integration/063_relation_name_tests/test_relation_name.py b/test/integration/063_relation_name_tests/test_relation_name.py deleted file mode 100644 index df81b57f69b..00000000000 --- 
a/test/integration/063_relation_name_tests/test_relation_name.py +++ /dev/null @@ -1,74 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -from pytest import mark - - -class TestAdapterDDL(DBTIntegrationTest): - def setUp(self): - DBTIntegrationTest.setUp(self) - self.run_dbt(["seed"]) - - @property - def schema(self): - return "adapter_ddl_063" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - "seeds": { - "quote_columns": False, - }, - } - - # 63 characters is the character limit for a table name in a postgres database - # (assuming compiled without changes from source) - @use_profile("postgres") - def test_postgres_name_longer_than_63_fails(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012", - ], - expect_pass=False, - ) - - @mark.skip( - reason="Backup table generation currently adds 12 characters to the relation name, meaning the current name limit is 51." 
- ) - @use_profile("postgres") - def test_postgres_name_shorter_or_equal_to_63_passes(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0" - "my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901", - ], - expect_pass=True, - ) - - @use_profile("postgres") - def test_postgres_long_name_passes_when_temp_tables_are_generated(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_51_characters_incremental_abcdefghijklmn", - ], - expect_pass=True, - ) - - # Run again to trigger incremental materialization - self.run_dbt( - [ - "run", - "-m", - "my_name_is_51_characters_incremental_abcdefghijklmn", - ], - expect_pass=True, - ) diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql deleted file mode 100644 index 10f41526abd..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': 'column_a, column_b'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql deleted file mode 100644 index 824ca36595f..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': ['column_a'], 'type': 'non_existent_type'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql deleted file mode 100644 index ca0113272ea..00000000000 --- 
a/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': ['column_a'], 'unique': 'yes'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql b/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql deleted file mode 100644 index 9b47943e6cf..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'unique': True}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models/incremental.sql b/test/integration/065_postgres_index_tests/models/incremental.sql deleted file mode 100644 index 7cd24bdcf8c..00000000000 --- a/test/integration/065_postgres_index_tests/models/incremental.sql +++ /dev/null @@ -1,18 +0,0 @@ -{{ - config( - materialized = "incremental", - indexes=[ - {'columns': ['column_a'], 'type': 'hash'}, - {'columns': ['column_a', 'column_b'], 'unique': True}, - ] - ) -}} - -select * -from ( - select 1 as column_a, 2 as column_b -) t - -{% if is_incremental() %} - where column_a > (select max(column_a) from {{this}}) -{% endif %} diff --git a/test/integration/065_postgres_index_tests/models/table.sql b/test/integration/065_postgres_index_tests/models/table.sql deleted file mode 100644 index 39fccc14b15..00000000000 --- a/test/integration/065_postgres_index_tests/models/table.sql +++ /dev/null @@ -1,14 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': ['column_a']}, - {'columns': ['column_b']}, - {'columns': ['column_a', 'column_b']}, - {'columns': ['column_b', 'column_a'], 'type': 'btree', 'unique': True}, - {'columns': ['column_a'], 'type': 'hash'} - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git 
a/test/integration/065_postgres_index_tests/seeds/seed.csv b/test/integration/065_postgres_index_tests/seeds/seed.csv deleted file mode 100644 index e744edef675..00000000000 --- a/test/integration/065_postgres_index_tests/seeds/seed.csv +++ /dev/null @@ -1,4 +0,0 @@ -country_code,country_name -US,United States -CA,Canada -GB,United Kingdom diff --git a/test/integration/065_postgres_index_tests/snapshots/colors.sql b/test/integration/065_postgres_index_tests/snapshots/colors.sql deleted file mode 100644 index f3a901d615f..00000000000 --- a/test/integration/065_postgres_index_tests/snapshots/colors.sql +++ /dev/null @@ -1,29 +0,0 @@ -{% snapshot colors %} - - {{ - config( - target_database=database, - target_schema=schema, - unique_key='id', - strategy='check', - check_cols=['color'], - indexes=[ - {'columns': ['id'], 'type': 'hash'}, - {'columns': ['id', 'color'], 'unique': True}, - ] - ) - }} - - {% if var('version') == 1 %} - - select 1 as id, 'red' as color union all - select 2 as id, 'green' as color - - {% else %} - - select 1 as id, 'blue' as color union all - select 2 as id, 'green' as color - - {% endif %} - -{% endsnapshot %} diff --git a/test/integration/065_postgres_index_tests/test_postgres_indexes.py b/test/integration/065_postgres_index_tests/test_postgres_indexes.py deleted file mode 100644 index 56dc557d5ac..00000000000 --- a/test/integration/065_postgres_index_tests/test_postgres_indexes.py +++ /dev/null @@ -1,134 +0,0 @@ -import re - -from test.integration.base import DBTIntegrationTest, use_profile - - -INDEX_DEFINITION_PATTERN = re.compile(r'using\s+(\w+)\s+\((.+)\)\Z') - -class TestPostgresIndex(DBTIntegrationTest): - @property - def schema(self): - return "postgres_index_065" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'quote_columns': False, - 'indexes': [ - {'columns': ['country_code'], 'unique': False, 'type': 'hash'}, - {'columns': 
['country_code', 'country_name'], 'unique': True}, - ], - }, - 'vars': { - 'version': 1 - }, - } - - @use_profile('postgres') - def test__postgres__table(self): - results = self.run_dbt(['run', '--models', 'table']) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('table') - self.assertCountEqual( - indexes, - [ - {'columns': 'column_a', 'unique': False, 'type': 'btree'}, - {'columns': 'column_b', 'unique': False, 'type': 'btree'}, - {'columns': 'column_a, column_b', 'unique': False, 'type': 'btree'}, - {'columns': 'column_b, column_a', 'unique': True, 'type': 'btree'}, - {'columns': 'column_a', 'unique': False, 'type': 'hash'} - ] - ) - - @use_profile('postgres') - def test__postgres__incremental(self): - for additional_argument in [[], [], ['--full-refresh']]: - results = self.run_dbt(['run', '--models', 'incremental'] + additional_argument) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('incremental') - self.assertCountEqual( - indexes, - [ - {'columns': 'column_a', 'unique': False, 'type': 'hash'}, - {'columns': 'column_a, column_b', 'unique': True, 'type': 'btree'}, - ] - ) - - @use_profile('postgres') - def test__postgres__seed(self): - for additional_argument in [[], [], ['--full-refresh']]: - results = self.run_dbt(["seed"] + additional_argument) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('seed') - self.assertCountEqual( - indexes, - [ - {'columns': 'country_code', 'unique': False, 'type': 'hash'}, - {'columns': 'country_code, country_name', 'unique': True, 'type': 'btree'}, - ] - ) - - @use_profile('postgres') - def test__postgres__snapshot(self): - for version in [1, 2]: - results = self.run_dbt(["snapshot", '--vars', 'version: {}'.format(version)]) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('colors') - self.assertCountEqual( - indexes, - [ - {'columns': 'id', 'unique': False, 'type': 'hash'}, - {'columns': 'id, color', 'unique': True, 'type': 'btree'}, - ] - ) - - def 
get_indexes(self, table_name): - sql = """ - SELECT - pg_get_indexdef(idx.indexrelid) as index_definition - FROM pg_index idx - JOIN pg_class tab ON tab.oid = idx.indrelid - WHERE - tab.relname = '{table}' - AND tab.relnamespace = ( - SELECT oid FROM pg_namespace WHERE nspname = '{schema}' - ); - """ - - sql = sql.format(table=table_name, schema=self.unique_schema()) - results = self.run_sql(sql, fetch='all') - return [self.parse_index_definition(row[0]) for row in results] - - def parse_index_definition(self, index_definition): - index_definition = index_definition.lower() - is_unique = 'unique' in index_definition - m = INDEX_DEFINITION_PATTERN.search(index_definition) - return {'columns': m.group(2), 'unique': is_unique, 'type': m.group(1)} - -class TestPostgresInvalidIndex(DBTIntegrationTest): - @property - def schema(self): - return "postgres_index_065" - - @property - def models(self): - return "models-invalid" - - @use_profile('postgres') - def test__postgres__invalid_index_configs(self): - results, output = self.run_dbt_and_capture(expect_pass=False) - self.assertEqual(len(results), 4) - self.assertRegex(output, r'columns.*is not of type \'array\'') - self.assertRegex(output, r'unique.*is not of type \'boolean\'') - self.assertRegex(output, r'\'columns\' is a required property') - self.assertRegex(output, r'Database Error in model invalid_type') diff --git a/test/integration/067_store_test_failures_tests/models/fine_model.sql b/test/integration/067_store_test_failures_tests/models/fine_model.sql deleted file mode 100644 index 94b923a17c2..00000000000 --- a/test/integration/067_store_test_failures_tests/models/fine_model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('people') }} diff --git a/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql b/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql deleted file mode 100644 index 97536ffaf06..00000000000 
--- a/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as quite_long_column_name diff --git a/test/integration/067_store_test_failures_tests/models/problematic_model.sql b/test/integration/067_store_test_failures_tests/models/problematic_model.sql deleted file mode 100644 index e780d6b001e..00000000000 --- a/test/integration/067_store_test_failures_tests/models/problematic_model.sql +++ /dev/null @@ -1,11 +0,0 @@ -select * from {{ ref('people') }} - -union all - -select * from {{ ref('people') }} -where id in (1,2) - -union all - -select null as id, first_name, last_name, email, gender, ip_address from {{ ref('people') }} -where id in (3,4) diff --git a/test/integration/067_store_test_failures_tests/models/schema.yml b/test/integration/067_store_test_failures_tests/models/schema.yml deleted file mode 100644 index f01a9e350d8..00000000000 --- a/test/integration/067_store_test_failures_tests/models/schema.yml +++ /dev/null @@ -1,40 +0,0 @@ -version: 2 - -models: - - - name: fine_model - columns: - - name: id - tests: - - unique - - not_null - - - name: problematic_model - columns: - - name: id - tests: - - unique: - store_failures: true - - not_null - - name: first_name - tests: - # test truncation of really long test name - - accepted_values: - values: - - Jack - - Kathryn - - Gerald - - Bonnie - - Harold - - Jacqueline - - Wanda - - Craig - # - Gary - # - Rose - - - name: fine_model_but_with_a_no_good_very_long_name - columns: - - name: quite_long_column_name - tests: - # test truncation of really long test name with builtin - - unique diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv deleted file mode 100644 index 02f28435b46..00000000000 --- 
a/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv +++ /dev/null @@ -1,3 +0,0 @@ -value_field,n_records -Gary,1 -Rose,1 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv deleted file mode 100644 index d9e7257f122..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 -2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 -3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 -5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 -6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 -7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 -8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 -9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 -10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv deleted file mode 100644 index 95fef8a2594..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv +++ /dev/null @@ -1,3 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv deleted file mode 100644 index 431d54ef8d0..00000000000 
--- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv +++ /dev/null @@ -1,3 +0,0 @@ -unique_field,n_records -2,2 -1,2 \ No newline at end of file diff --git a/test/integration/067_store_test_failures_tests/seeds/people.csv b/test/integration/067_store_test_failures_tests/seeds/people.csv deleted file mode 100644 index d9e7257f122..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/people.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 -2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 -3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 -5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 -6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 -7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 -8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 -9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 -10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 diff --git a/test/integration/067_store_test_failures_tests/test_store_test_failures.py b/test/integration/067_store_test_failures_tests/test_store_test_failures.py deleted file mode 100644 index b0ba0875128..00000000000 --- a/test/integration/067_store_test_failures_tests/test_store_test_failures.py +++ /dev/null @@ -1,91 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestStoreTestFailures(DBTIntegrationTest): - @property - def schema(self): - return "test_store_test_failures_067" - - def tearDown(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - with self.adapter.connection_named('__test'): - self._drop_schema_named(self.default_database, test_audit_schema) - - super().tearDown() - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - 
"test-paths": ["tests"], - "seeds": { - "quote_columns": False, - "test": { - "expected": self.column_type_overrides() - }, - }, - } - - def column_type_overrides(self): - return {} - - def run_tests_store_one_failure(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - - self.run_dbt(["seed"]) - self.run_dbt(["run"]) - self.run_dbt(["test"], expect_pass=False) - - # one test is configured with store_failures: true, make sure it worked - self.assertTablesEqual("unique_problematic_model_id", "expected_unique_problematic_model_id", test_audit_schema) - - def run_tests_store_failures_and_assert(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - - self.run_dbt(["seed"]) - self.run_dbt(["run"]) - # make sure this works idempotently for all tests - self.run_dbt(["test", "--store-failures"], expect_pass=False) - results = self.run_dbt(["test", "--store-failures"], expect_pass=False) - - # compare test results - actual = [(r.status, r.failures) for r in results] - expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0), - ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)] - self.assertEqual(sorted(actual), sorted(expected)) - - # compare test results stored in database - self.assertTablesEqual("failing_test", "expected_failing_test", test_audit_schema) - self.assertTablesEqual("not_null_problematic_model_id", "expected_not_null_problematic_model_id", test_audit_schema) - self.assertTablesEqual("unique_problematic_model_id", "expected_unique_problematic_model_id", test_audit_schema) - self.assertTablesEqual("accepted_values_problematic_mo_c533ab4ca65c1a9dbf14f79ded49b628", "expected_accepted_values", test_audit_schema) - - -class PostgresTestStoreTestFailures(TestStoreTestFailures): - - @property - def schema(self): - return "067" # otherwise too long + truncated - - def column_type_overrides(self): - return { - "expected_unique_problematic_model_id": { - "+column_types": { - "n_records": "bigint", - }, - }, - 
"expected_accepted_values": { - "+column_types": { - "n_records": "bigint", - }, - }, - } - - @use_profile('postgres') - def test__postgres__store_and_assert(self): - self.run_tests_store_one_failure() - self.run_tests_store_failures_and_assert() diff --git a/test/integration/067_store_test_failures_tests/tests/failing_test.sql b/test/integration/067_store_test_failures_tests/tests/failing_test.sql deleted file mode 100644 index 1bb5ae5ba6e..00000000000 --- a/test/integration/067_store_test_failures_tests/tests/failing_test.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('fine_model') }} diff --git a/test/integration/067_store_test_failures_tests/tests/passing_test.sql b/test/integration/067_store_test_failures_tests/tests/passing_test.sql deleted file mode 100644 index 15c9a7a642d..00000000000 --- a/test/integration/067_store_test_failures_tests/tests/passing_test.sql +++ /dev/null @@ -1,2 +0,0 @@ -select * from {{ ref('fine_model') }} -where false diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql deleted file mode 100644 index f9eebdcb852..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql +++ /dev/null @@ -1,29 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='append_new_columns' - ) -}} - -{% set string_type = 'varchar(10)' %} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2, - cast(field3 as {{string_type}}) as field3, - cast(field4 as {{string_type}}) as field4 -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 -FROM source_data where id <= 3 - -{% 
endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql deleted file mode 100644 index dbb4962a7e5..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql +++ /dev/null @@ -1,28 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='append_new_columns' - ) -}} - -{% set string_type = 'varchar(10)' %} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field3 as {{string_type}}) as field3, - cast(field4 as {{string_type}}) as field4 -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 -FROM source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql deleted file mode 100644 index f3a279f0285..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config(materialized='table') -}} - -{% set string_type = 'varchar(10)' %} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id, - cast(field1 as {{string_type}}) as field1, - cast(CASE WHEN id > 3 THEN NULL ELSE field2 END as {{string_type}}) AS field2, - cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3, - cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 - -from source_data \ No newline 
at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql deleted file mode 100644 index 5ff759d7dab..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config(materialized='table') -}} - -{% set string_type = 'varchar(10)' %} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id - ,cast(field1 as {{string_type}}) as field1 - ,cast(field2 as {{string_type}}) as field2 - ,cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3 - ,cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_fail.sql b/test/integration/070_incremental_schema_tests/models/incremental_fail.sql deleted file mode 100644 index 590f5b56d97..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_fail.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='fail' - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, field1, field2 FROM source_data - -{% else %} - -SELECT id, field1, field3 FROm source_data - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql b/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql deleted file mode 100644 index 51dee6022fb..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='ignore' - ) -}} - -WITH source_data AS 
(SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, field1, field2, field3, field4 FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, field1, field2 FROM source_data LIMIT 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql deleted file mode 100644 index 92d4564e0e8..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql +++ /dev/null @@ -1,15 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id - ,field1 - ,field2 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql deleted file mode 100644 index b742c970419..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql +++ /dev/null @@ -1,31 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='sync_all_columns' - - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% set string_type = 'varchar(10)' %} - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field3 as {{string_type}}) as field3, -- to validate new fields - cast(field4 as {{string_type}}) AS field4 -- to validate new fields - -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -select id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 - -from source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git 
a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql deleted file mode 100644 index 6cdbaba5c0d..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql +++ /dev/null @@ -1,20 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -{% set string_type = 'varchar(10)' %} - -select id - ,cast(field1 as {{string_type}}) as field1 - --,field2 - ,cast(case when id <= 3 then null else field3 end as {{string_type}}) as field3 - ,cast(case when id <= 3 then null else field4 end as {{string_type}}) as field4 - -from source_data -order by id \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql deleted file mode 100644 index 55bae0ad17e..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql +++ /dev/null @@ -1,29 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='sync_all_columns' - - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% set string_type = 'varchar(10)' %} - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1 - -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -select id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 - -from source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql deleted file mode 100644 index 
ff88512c6f5..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql +++ /dev/null @@ -1,17 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -{% set string_type = 'varchar(10)' %} - -select id - ,cast(field1 as {{string_type}}) as field1 - -from source_data -order by id \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/model_a.sql b/test/integration/070_incremental_schema_tests/models/model_a.sql deleted file mode 100644 index 2a0b2ddaff2..00000000000 --- a/test/integration/070_incremental_schema_tests/models/model_a.sql +++ /dev/null @@ -1,22 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select 1 as id, 'aaa' as field1, 'bbb' as field2, 111 as field3, 'TTT' as field4 - union all select 2 as id, 'ccc' as field1, 'ddd' as field2, 222 as field3, 'UUU' as field4 - union all select 3 as id, 'eee' as field1, 'fff' as field2, 333 as field3, 'VVV' as field4 - union all select 4 as id, 'ggg' as field1, 'hhh' as field2, 444 as field3, 'WWW' as field4 - union all select 5 as id, 'iii' as field1, 'jjj' as field2, 555 as field3, 'XXX' as field4 - union all select 6 as id, 'kkk' as field1, 'lll' as field2, 666 as field3, 'YYY' as field4 - -) - -select id - ,field1 - ,field2 - ,field3 - ,field4 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/schema.yml b/test/integration/070_incremental_schema_tests/models/schema.yml deleted file mode 100644 index 5546314e413..00000000000 --- a/test/integration/070_incremental_schema_tests/models/schema.yml +++ /dev/null @@ -1,54 +0,0 @@ -version: 2 - -models: - - name: model_a - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore_target - 
columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns_target - columns: - - name: id - tags: [column_leveL_tag] - tests: - - unique - - - \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/test_incremental_schema.py b/test/integration/070_incremental_schema_tests/test_incremental_schema.py deleted file mode 100644 index 09a494b8952..00000000000 --- a/test/integration/070_incremental_schema_tests/test_incremental_schema.py +++ /dev/null @@ -1,88 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestIncrementalSchemaChange(DBTIntegrationTest): - @property - def schema(self): - return "test_incremental_schema_070" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - "test-paths": ["tests"] - } - - def run_twice_and_assert( - self, include, compare_source, compare_target - ): - - # dbt run (twice) - run_args = ['run'] - if include: - run_args.extend(('--models', include)) - results_one = self.run_dbt(run_args) - results_two = self.run_dbt(run_args) - - self.assertEqual(len(results_one), 3) - self.assertEqual(len(results_two), 3) - - self.assertTablesEqual(compare_source, compare_target) - - def run_incremental_ignore(self): - select = 'model_a incremental_ignore incremental_ignore_target' - compare_source = 'incremental_ignore' - compare_target = 'incremental_ignore_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_append_new_columns(self): - select = 'model_a 
incremental_append_new_columns incremental_append_new_columns_target' - compare_source = 'incremental_append_new_columns' - compare_target = 'incremental_append_new_columns_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_append_new_columns_remove_one(self): - select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target' - compare_source = 'incremental_append_new_columns_remove_one' - compare_target = 'incremental_append_new_columns_remove_one_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_sync_all_columns(self): - select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target' - compare_source = 'incremental_sync_all_columns' - compare_target = 'incremental_sync_all_columns_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_sync_remove_only(self): - select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target' - compare_source = 'incremental_sync_remove_only' - compare_target = 'incremental_sync_remove_only_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_fail_on_schema_change(self): - select = 'model_a incremental_fail' - results_one = self.run_dbt(['run', '--models', select, '--full-refresh']) - results_two = self.run_dbt(['run', '--models', select], expect_pass = False) - self.assertIn('Compilation Error', results_two[1].message) - - @use_profile('postgres') - def test__postgres__run_incremental_ignore(self): - self.run_incremental_ignore() - - @use_profile('postgres') - def test__postgres__run_incremental_append_new_columns(self): - self.run_incremental_append_new_columns() - self.run_incremental_append_new_columns_remove_one() - - @use_profile('postgres') - def test__postgres__run_incremental_sync_all_columns(self): - self.run_incremental_sync_all_columns() - 
self.run_incremental_sync_remove_only() - - @use_profile('postgres') - def test__postgres__run_incremental_fail_on_schema_change(self): - self.run_incremental_fail_on_schema_change() diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_a.sql b/test/integration/070_incremental_schema_tests/tests/select_from_a.sql deleted file mode 100644 index 3dc8f2857bd..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_a.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('model_a') }} where false diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql deleted file mode 100644 index 947e8458854..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_append_new_columns') }} where false \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql deleted file mode 100644 index 8b86eddd71d..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_append_new_columns_target') }} where false \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql deleted file mode 100644 index d565c846465..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_ignore') }} where 
false diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql deleted file mode 100644 index 35d535c5ca5..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_ignore_target') }} where false \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql deleted file mode 100644 index aedc9f80396..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_sync_all_columns') }} where false \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql deleted file mode 100644 index 4b703c988bf..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_sync_all_columns_target') }} where false \ No newline at end of file diff --git a/test/integration/base.py b/test/integration/base.py index 8b06782a334..602be18525c 100644 --- a/test/integration/base.py +++ b/test/integration/base.py @@ -1,6 +1,6 @@ +from io import StringIO import json import os -import io import random import shutil import sys @@ -26,7 +26,7 @@ from dbt.context import providers from dbt.logger import log_manager from dbt.events.functions import ( - capture_stdout_logs, fire_event, setup_event_logger, stop_capture_stdout_logs + 
capture_stdout_logs, fire_event, setup_event_logger, cleanup_event_logger, stop_capture_stdout_logs ) from dbt.events.test_types import ( IntegrationTestInfo, @@ -313,7 +313,7 @@ def setUp(self): os.chdir(self.initial_dir) # before we go anywhere, collect the initial path info self._logs_dir = os.path.join(self.initial_dir, 'logs', self.prefix) - setup_event_logger(self._logs_dir, None, False, True) + setup_event_logger(self._logs_dir, '', False, True) _really_makedirs(self._logs_dir) self.test_original_source_path = _pytest_get_test_root() self.test_root_dir = self._generate_test_root_dir() @@ -440,6 +440,8 @@ def tearDown(self): except EnvironmentError: msg = f"Could not clean up after test - {self.test_root_dir} not removable" fire_event(IntegrationTestException(msg=msg)) + + cleanup_event_logger() def _get_schema_fqn(self, database, schema): schema_fqn = self.quote_as_configured(schema, 'schema') @@ -524,7 +526,8 @@ def run_dbt(self, args=None, expect_pass=True, profiles_dir=True): def run_dbt_and_capture(self, *args, **kwargs): try: - stringbuf = capture_stdout_logs() + stringbuf = StringIO() + capture_stdout_logs(stringbuf) res = self.run_dbt(*args, **kwargs) stdout = stringbuf.getvalue() @@ -548,8 +551,8 @@ def run_dbt_and_check(self, args=None, profiles_dir=True): if profiles_dir: final_args.extend(['--profiles-dir', self.test_root_dir]) final_args.append('--log-cache-events') - msg = f"Invoking dbt with {final_args}" - fire_event(IntegrationTestInfo(msg=msg)) + # msg = f"Invoking dbt with {final_args}" + # fire_event(IntegrationTestInfo(msg=msg)) return dbt.handle_and_check(final_args) def run_sql_file(self, path, kwargs=None): diff --git a/test/unit/test_compiler.py b/test/unit/test_compiler.py index 506c427a067..649a5918f91 100644 --- a/test/unit/test_compiler.py +++ b/test/unit/test_compiler.py @@ -6,8 +6,7 @@ from dbt.adapters.postgres import Plugin from dbt.contracts.files import FileHash from dbt.contracts.graph.manifest import Manifest -from 
dbt.contracts.graph.parsed import NodeConfig, DependsOn, ParsedModelNode -from dbt.contracts.graph.compiled import CompiledModelNode, InjectedCTE +from dbt.contracts.graph.nodes import NodeConfig, DependsOn, ModelNode, InjectedCTE from dbt.node_types import NodeType from datetime import datetime @@ -86,7 +85,7 @@ def test__prepend_ctes__already_has_cte(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -95,7 +94,6 @@ def test__prepend_ctes__already_has_cte(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -103,7 +101,7 @@ def test__prepend_ctes__already_has_cte(self): raw_code='with cte as (select * from something_else) select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -112,7 +110,6 @@ def test__prepend_ctes__already_has_cte(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral.sql', original_file_path='ephemeral.sql', @@ -150,7 +147,7 @@ def test__prepend_ctes__no_ctes(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -159,7 +156,6 @@ def test__prepend_ctes__no_ctes(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -168,7 +164,7 @@ def test__prepend_ctes__no_ctes(self): 'select * from source_table'), checksum=FileHash.from_contents(''), ), - 'model.root.view_no_cte': ParsedModelNode( + 
'model.root.view_no_cte': ModelNode( name='view_no_cte', database='dbt', schema='analytics', @@ -177,7 +173,6 @@ def test__prepend_ctes__no_ctes(self): unique_id='model.root.view_no_cte', fqn=['root', 'view_no_cte'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -228,7 +223,7 @@ def test__prepend_ctes(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -237,7 +232,6 @@ def test__prepend_ctes(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -245,7 +239,7 @@ def test__prepend_ctes(self): raw_code='select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -254,7 +248,6 @@ def test__prepend_ctes(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral.sql', original_file_path='ephemeral.sql', @@ -290,7 +283,7 @@ def test__prepend_ctes(self): def test__prepend_ctes__cte_not_compiled(self): ephemeral_config = self.model_config.replace(materialized='ephemeral') - parsed_ephemeral = ParsedModelNode( + parsed_ephemeral = ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -299,7 +292,6 @@ def test__prepend_ctes__cte_not_compiled(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', refs=[], sources=[], depends_on=DependsOn(), @@ -311,7 +303,7 @@ def test__prepend_ctes__cte_not_compiled(self): raw_code='select * from source_table', checksum=FileHash.from_contents(''), ) - compiled_ephemeral = CompiledModelNode( + 
compiled_ephemeral = ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -320,7 +312,6 @@ def test__prepend_ctes__cte_not_compiled(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', refs=[], sources=[], depends_on=DependsOn(), @@ -339,7 +330,7 @@ def test__prepend_ctes__cte_not_compiled(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': CompiledModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -348,7 +339,6 @@ def test__prepend_ctes__cte_not_compiled(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', refs=[], sources=[], depends_on=DependsOn(nodes=['model.root.ephemeral']), @@ -409,7 +399,7 @@ def test__prepend_ctes__multiple_levels(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -418,7 +408,6 @@ def test__prepend_ctes__multiple_levels(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -427,7 +416,7 @@ def test__prepend_ctes__multiple_levels(self): checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -436,7 +425,6 @@ def test__prepend_ctes__multiple_levels(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral.sql', original_file_path='ephemeral.sql', @@ -444,7 +432,7 @@ def test__prepend_ctes__multiple_levels(self): raw_code='select * from {{ref("ephemeral_level_two")}}', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral_level_two': ParsedModelNode( + 
'model.root.ephemeral_level_two': ModelNode( name='ephemeral_level_two', database='dbt', schema='analytics', @@ -453,7 +441,6 @@ def test__prepend_ctes__multiple_levels(self): unique_id='model.root.ephemeral_level_two', fqn=['root', 'ephemeral_level_two'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral_level_two.sql', original_file_path='ephemeral_level_two.sql', @@ -500,7 +487,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -509,7 +496,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -517,7 +503,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): raw_code='select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 'model.root.inner_ephemeral': ParsedModelNode( + 'model.root.inner_ephemeral': ModelNode( name='inner_ephemeral', database='dbt', schema='analytics', @@ -526,7 +512,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self): unique_id='model.root.inner_ephemeral', fqn=['root', 'inner_ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='inner_ephemeral.sql', original_file_path='inner_ephemeral.sql', @@ -534,7 +519,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): raw_code='select * from source_table', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -543,7 +528,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral.sql', 
original_file_path='ephemeral.sql', diff --git a/test/unit/test_config.py b/test/unit/test_config.py index 697dc05a1bb..880a09cc7ad 100644 --- a/test/unit/test_config.py +++ b/test/unit/test_config.py @@ -1086,35 +1086,6 @@ def test_archive_not_allowed(self): with self.assertRaises(dbt.exceptions.DbtProjectError): self.get_project() - def test__no_unused_resource_config_paths(self): - self.default_project_data.update({ - 'models': model_config, - 'seeds': {}, - }) - project = self.from_parts() - - resource_fqns = {'models': model_fqns} - unused = project.get_unused_resource_config_paths(resource_fqns, []) - self.assertEqual(len(unused), 0) - - def test__unused_resource_config_paths(self): - self.default_project_data.update({ - 'models': model_config['my_package_name'], - 'seeds': {}, - }) - project = self.from_parts() - - resource_fqns = {'models': model_fqns} - unused = project.get_unused_resource_config_paths(resource_fqns, []) - self.assertEqual(len(unused), 3) - - def test__get_unused_resource_config_paths_empty(self): - project = self.from_parts() - unused = project.get_unused_resource_config_paths({'models': frozenset(( - ('my_test_project', 'foo', 'bar'), - ('my_test_project', 'foo', 'baz'), - ))}, []) - self.assertEqual(len(unused), 0) def test__warn_for_unused_resource_config_paths_empty(self): project = self.from_parts() @@ -1174,26 +1145,17 @@ def from_parts(self, exc=None): else: return err - def test__get_unused_resource_config_paths(self): - project = self.from_parts() - unused = project.get_unused_resource_config_paths(self.used, []) - self.assertEqual(len(unused), 1) - self.assertEqual(unused[0], ('models', 'my_test_project', 'baz')) - @mock.patch.object(dbt.config.runtime, 'warn_or_error') - def test__warn_for_unused_resource_config_paths(self, warn_or_error): + def test__warn_for_unused_resource_config_paths(self): project = self.from_parts() - project.warn_for_unused_resource_config_paths(self.used, []) - warn_or_error.assert_called_once() - - 
def test__warn_for_unused_resource_config_paths_disabled(self): - project = self.from_parts() - unused = project.get_unused_resource_config_paths( - self.used, - frozenset([('my_test_project', 'baz')]) - ) - - self.assertEqual(len(unused), 0) + with mock.patch('dbt.config.runtime.warn_or_error') as warn_or_error_patch: + project.warn_for_unused_resource_config_paths(self.used, []) + warn_or_error_patch.assert_called_once() + event = warn_or_error_patch.call_args[0][0] + assert event.info.name == 'UnusedResourceConfigPath' + msg = event.info.msg + expected_msg = "- models.my_test_project.baz" + assert expected_msg in msg class TestRuntimeConfigFiles(BaseFileTest): diff --git a/test/unit/test_context.py b/test/unit/test_context.py index 668d76cc525..a567e032f55 100644 --- a/test/unit/test_context.py +++ b/test/unit/test_context.py @@ -10,11 +10,11 @@ from dbt.adapters import factory from dbt.adapters.base import AdapterConfig from dbt.clients.jinja import MacroStack -from dbt.contracts.graph.parsed import ( - ParsedModelNode, +from dbt.contracts.graph.nodes import ( + ModelNode, NodeConfig, DependsOn, - ParsedMacro, + Macro, ) from dbt.config.project import VarProvider from dbt.context import base, target, configured, providers, docs, manifest, macros @@ -33,7 +33,7 @@ class TestVar(unittest.TestCase): def setUp(self): - self.model = ParsedModelNode( + self.model = ModelNode( alias="model_one", name="model_one", database="dbt", @@ -43,7 +43,6 @@ def setUp(self): fqn=["root", "model_one"], package_name="root", original_file_path="model_one.sql", - root_path="/usr/src/app", refs=[], sources=[], depends_on=DependsOn(), @@ -202,6 +201,7 @@ def assert_has_keys(required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str "flags", "print", "diff_of_two_dicts", + "local_md5" } ) @@ -273,7 +273,7 @@ def assert_has_keys(required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str def model(): - return ParsedModelNode( + return ModelNode( alias="model_one", name="model_one", 
database="dbt", @@ -283,7 +283,6 @@ def model(): fqn=["root", "model_one"], package_name="root", original_file_path="model_one.sql", - root_path="/usr/src/app", refs=[], sources=[], depends_on=DependsOn(), @@ -316,7 +315,7 @@ def test_base_context(): def mock_macro(name, package_name): macro = mock.MagicMock( - __class__=ParsedMacro, + __class__=Macro, package_name=package_name, resource_type="macro", unique_id=f"macro.{package_name}.{name}", @@ -336,7 +335,7 @@ def mock_manifest(config): def mock_model(): return mock.MagicMock( - __class__=ParsedModelNode, + __class__=ModelNode, alias="model_one", name="model_one", database="dbt", @@ -346,7 +345,6 @@ def mock_model(): fqn=["root", "model_one"], package_name="root", original_file_path="model_one.sql", - root_path="/usr/src/app", refs=[], sources=[], depends_on=DependsOn(), @@ -432,7 +430,6 @@ def test_invocation_args_to_dict_in_macro_runtime_context( ) # Comes from dbt/flags.py as they are the only values set that aren't None at default - assert ctx["invocation_args_dict"]["event_buffer_size"] == 100000 assert ctx["invocation_args_dict"]["printer_width"] == 80 # Comes from unit/utils.py config_from_parts_or_dicts method diff --git a/test/unit/test_contracts_graph_compiled.py b/test/unit/test_contracts_graph_compiled.py index aaa44857326..fe1e25d7925 100644 --- a/test/unit/test_contracts_graph_compiled.py +++ b/test/unit/test_contracts_graph_compiled.py @@ -2,10 +2,10 @@ import pytest from dbt.contracts.files import FileHash -from dbt.contracts.graph.compiled import ( - CompiledModelNode, InjectedCTE, CompiledGenericTestNode +from dbt.contracts.graph.nodes import ( + ModelNode, InjectedCTE, GenericTestNode ) -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( DependsOn, NodeConfig, TestConfig, TestMetadata, ColumnInfo ) from dbt.node_types import NodeType @@ -22,9 +22,8 @@ @pytest.fixture def basic_uncompiled_model(): - return CompiledModelNode( + return ModelNode( package_name='test', 
- root_path='/root/', path='/root/models/foo.sql', original_file_path='models/foo.sql', language='sql', @@ -55,9 +54,8 @@ def basic_uncompiled_model(): @pytest.fixture def basic_compiled_model(): - return CompiledModelNode( + return ModelNode( package_name='test', - root_path='/root/', path='/root/models/foo.sql', original_file_path='models/foo.sql', language='sql', @@ -91,7 +89,6 @@ def basic_compiled_model(): def minimal_uncompiled_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Model), 'path': '/root/models/foo.sql', @@ -114,7 +111,6 @@ def minimal_uncompiled_dict(): def basic_uncompiled_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Model), 'path': '/root/models/foo.sql', @@ -164,7 +160,6 @@ def basic_uncompiled_dict(): def basic_compiled_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Model), 'path': '/root/models/foo.sql', @@ -215,19 +210,19 @@ def basic_compiled_dict(): def test_basic_uncompiled_model(minimal_uncompiled_dict, basic_uncompiled_dict, basic_uncompiled_model): node_dict = basic_uncompiled_dict node = basic_uncompiled_model - assert_symmetric(node, node_dict, CompiledModelNode) + assert_symmetric(node, node_dict, ModelNode) assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False - assert_from_dict(node, minimal_uncompiled_dict, CompiledModelNode) + assert_from_dict(node, minimal_uncompiled_dict, ModelNode) pickle.loads(pickle.dumps(node)) def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model): node_dict = basic_compiled_dict node = basic_compiled_model - assert_symmetric(node, node_dict, CompiledModelNode) + assert_symmetric(node, node_dict, ModelNode) assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False @@ -236,13 +231,13 @@ def test_basic_compiled_model(basic_compiled_dict, 
basic_compiled_model): def test_invalid_extra_fields_model(minimal_uncompiled_dict): bad_extra = minimal_uncompiled_dict bad_extra['notvalid'] = 'nope' - assert_fails_validation(bad_extra, CompiledModelNode) + assert_fails_validation(bad_extra, ModelNode) def test_invalid_bad_type_model(minimal_uncompiled_dict): bad_type = minimal_uncompiled_dict bad_type['resource_type'] = str(NodeType.Macro) - assert_fails_validation(bad_type, CompiledModelNode) + assert_fails_validation(bad_type, ModelNode) unchanged_compiled_models = [ @@ -328,7 +323,6 @@ def test_compare_changed_model(func, basic_uncompiled_model): def minimal_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -352,9 +346,8 @@ def minimal_schema_test_dict(): @pytest.fixture def basic_uncompiled_schema_test_node(): - return CompiledGenericTestNode( + return GenericTestNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -386,9 +379,8 @@ def basic_uncompiled_schema_test_node(): @pytest.fixture def basic_compiled_schema_test_node(): - return CompiledGenericTestNode( + return GenericTestNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -426,7 +418,6 @@ def basic_compiled_schema_test_node(): def basic_uncompiled_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -477,7 +468,6 @@ def basic_uncompiled_schema_test_dict(): def basic_compiled_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -532,19 +522,19 @@ def test_basic_uncompiled_schema_test(basic_uncompiled_schema_test_node, basic_u node = basic_uncompiled_schema_test_node node_dict = 
basic_uncompiled_schema_test_dict minimum = minimal_schema_test_dict - assert_symmetric(node, node_dict, CompiledGenericTestNode) + assert_symmetric(node, node_dict, GenericTestNode) assert node.empty is False assert node.is_refable is False assert node.is_ephemeral is False - assert_from_dict(node, minimum, CompiledGenericTestNode) + assert_from_dict(node, minimum, GenericTestNode) def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compiled_schema_test_dict): node = basic_compiled_schema_test_node node_dict = basic_compiled_schema_test_dict - assert_symmetric(node, node_dict, CompiledGenericTestNode) + assert_symmetric(node, node_dict, GenericTestNode) assert node.empty is False assert node.is_refable is False assert node.is_ephemeral is False @@ -553,13 +543,13 @@ def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compi def test_invalid_extra_schema_test_fields(minimal_schema_test_dict): bad_extra = minimal_schema_test_dict bad_extra['extra'] = 'extra value' - assert_fails_validation(bad_extra, CompiledGenericTestNode) + assert_fails_validation(bad_extra, GenericTestNode) def test_invalid_resource_type_schema_test(minimal_schema_test_dict): bad_type = minimal_schema_test_dict bad_type['resource_type'] = str(NodeType.Model) - assert_fails_validation(bad_type, CompiledGenericTestNode) + assert_fails_validation(bad_type, GenericTestNode) unchanged_schema_tests = [ diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py index b5ec79a7aba..ae792cdb718 100644 --- a/test/unit/test_contracts_graph_parsed.py +++ b/test/unit/test_contracts_graph_parsed.py @@ -13,23 +13,23 @@ EmptySnapshotConfig, Hook, ) -from dbt.contracts.graph.parsed import ( - ParsedModelNode, +from dbt.contracts.graph.nodes import ( + ModelNode, DependsOn, ColumnInfo, - ParsedGenericTestNode, - ParsedSnapshotNode, + GenericTestNode, + SnapshotNode, IntermediateSnapshotNode, ParsedNodePatch, - ParsedMacro, - 
ParsedExposure, - ParsedMetric, - ParsedSeedNode, + Macro, + Exposure, + Metric, + SeedNode, Docs, MacroDependsOn, - ParsedSourceDefinition, - ParsedDocumentation, - ParsedHookNode, + SourceDefinition, + Documentation, + HookNode, ExposureOwner, TestMetadata, ) @@ -127,7 +127,6 @@ def test_config_same(unrendered_node_config_dict, func): def base_parsed_model_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Model), 'path': '/root/x/path.sql', @@ -173,9 +172,8 @@ def base_parsed_model_dict(): @pytest.fixture def basic_parsed_model_object(): - return ParsedModelNode( + return ModelNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -204,7 +202,6 @@ def basic_parsed_model_object(): def minimal_parsed_model_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Model), 'path': '/root/x/path.sql', @@ -226,7 +223,6 @@ def minimal_parsed_model_dict(): def complex_parsed_model_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Model), 'path': '/root/x/path.sql', @@ -283,9 +279,8 @@ def complex_parsed_model_dict(): @pytest.fixture def complex_parsed_model_object(): - return ParsedModelNode( + return ModelNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -347,14 +342,14 @@ def test_invalid_bad_tags(base_parsed_model_dict): # bad top-level field bad_tags = base_parsed_model_dict bad_tags['tags'] = 100 - assert_fails_validation(bad_tags, ParsedModelNode) + assert_fails_validation(bad_tags, ModelNode) def test_invalid_bad_materialized(base_parsed_model_dict): # bad nested field bad_materialized = base_parsed_model_dict bad_materialized['config']['materialized'] = None - assert_fails_validation(bad_materialized, ParsedModelNode) + 
assert_fails_validation(bad_materialized, ModelNode) unchanged_nodes = [ @@ -428,20 +423,14 @@ def test_compare_changed_model(func, basic_parsed_model_object): def basic_parsed_seed_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Seed), 'path': '/root/seeds/seed.csv', 'original_file_path': 'seeds/seed.csv', 'package_name': 'test', - 'language': 'sql', 'raw_code': '', 'unique_id': 'seed.test.foo', 'fqn': ['test', 'seeds', 'foo'], - 'refs': [], - 'sources': [], - 'metrics': [], - 'depends_on': {'macros': [], 'nodes': []}, 'database': 'test_db', 'description': '', 'schema': 'test_schema', @@ -474,21 +463,15 @@ def basic_parsed_seed_dict(): @pytest.fixture def basic_parsed_seed_object(): - return ParsedSeedNode( + return SeedNode( name='foo', - root_path='/root/', resource_type=NodeType.Seed, path='/root/seeds/seed.csv', original_file_path='seeds/seed.csv', package_name='test', - language='sql', raw_code='', unique_id='seed.test.foo', fqn=['test', 'seeds', 'foo'], - refs=[], - sources=[], - metrics=[], - depends_on=DependsOn(), database='test_db', description='', schema='test_schema', @@ -509,13 +492,11 @@ def basic_parsed_seed_object(): def minimal_parsed_seed_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Seed), 'path': '/root/seeds/seed.csv', 'original_file_path': 'seeds/seed.csv', 'package_name': 'test', - 'language': 'sql', 'raw_code': '', 'unique_id': 'seed.test.foo', 'fqn': ['test', 'seeds', 'foo'], @@ -530,20 +511,14 @@ def minimal_parsed_seed_dict(): def complex_parsed_seed_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Seed), 'path': '/root/seeds/seed.csv', 'original_file_path': 'seeds/seed.csv', 'package_name': 'test', - 'language': 'sql', 'raw_code': '', 'unique_id': 'seed.test.foo', 'fqn': ['test', 'seeds', 'foo'], - 'refs': [], - 'sources': [], - 'metrics': [], - 'depends_on': 
{'macros': [], 'nodes': []}, 'database': 'test_db', 'description': 'a description', 'schema': 'test_schema', @@ -579,21 +554,15 @@ def complex_parsed_seed_dict(): @pytest.fixture def complex_parsed_seed_object(): - return ParsedSeedNode( + return SeedNode( name='foo', - root_path='/root/', resource_type=NodeType.Seed, path='/root/seeds/seed.csv', original_file_path='seeds/seed.csv', package_name='test', - language='sql', raw_code='', unique_id='seed.test.foo', fqn=['test', 'seeds', 'foo'], - refs=[], - sources=[], - metrics=[], - depends_on=DependsOn(), database='test_db', description='a description', schema='test_schema', @@ -615,10 +584,13 @@ def complex_parsed_seed_object(): def test_seed_basic(basic_parsed_seed_dict, basic_parsed_seed_object, minimal_parsed_seed_dict): + dct = basic_parsed_seed_object.to_dict() + assert_symmetric(basic_parsed_seed_object, basic_parsed_seed_dict) + assert basic_parsed_seed_object.get_materialization() == 'seed' - assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, ParsedSeedNode) + assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, SeedNode) def test_seed_complex(complex_parsed_seed_dict, complex_parsed_seed_object): @@ -729,9 +701,8 @@ def basic_parsed_model_patch_object(): @pytest.fixture def patched_model_object(): - return ParsedModelNode( + return ModelNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -771,7 +742,6 @@ def test_patch_parsed_model(basic_parsed_model_object, basic_parsed_model_patch_ def minimal_parsed_hook_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'resource_type': str(NodeType.Operation), 'path': '/root/x/path.sql', 'original_file_path': '/root/path.sql', @@ -791,7 +761,6 @@ def minimal_parsed_hook_dict(): def base_parsed_hook_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Operation), 'path': '/root/x/path.sql', @@ -837,9 
+806,8 @@ def base_parsed_hook_dict(): @pytest.fixture def base_parsed_hook_object(): - return ParsedHookNode( + return HookNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -869,7 +837,6 @@ def base_parsed_hook_object(): def complex_parsed_hook_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Operation), 'path': '/root/x/path.sql', @@ -926,9 +893,8 @@ def complex_parsed_hook_dict(): @pytest.fixture def complex_parsed_hook_object(): - return ParsedHookNode( + return HookNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -968,11 +934,11 @@ def test_basic_parsed_hook(minimal_parsed_hook_dict, base_parsed_hook_dict, base node_dict = base_parsed_hook_dict minimum = minimal_parsed_hook_dict - assert_symmetric(node, node_dict, ParsedHookNode) + assert_symmetric(node, node_dict, HookNode) assert node.empty is False assert node.is_refable is False assert node.get_materialization() == 'view' - assert_from_dict(node, minimum, ParsedHookNode) + assert_from_dict(node, minimum, HookNode) pickle.loads(pickle.dumps(node)) @@ -989,14 +955,13 @@ def test_complex_parsed_hook(complex_parsed_hook_dict, complex_parsed_hook_objec def test_invalid_hook_index_type(base_parsed_hook_dict): bad_index = base_parsed_hook_dict bad_index['index'] = 'a string!?' 
- assert_fails_validation(bad_index, ParsedHookNode) + assert_fails_validation(bad_index, HookNode) @pytest.fixture def minimal_parsed_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -1023,7 +988,6 @@ def minimal_parsed_schema_test_dict(): def basic_parsed_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -1069,9 +1033,8 @@ def basic_parsed_schema_test_dict(): @pytest.fixture def basic_parsed_schema_test_object(): - return ParsedGenericTestNode( + return GenericTestNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1100,7 +1063,6 @@ def basic_parsed_schema_test_object(): def complex_parsed_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -1163,9 +1125,8 @@ def complex_parsed_schema_test_object(): severity='WARN' ) cfg._extra.update({'extra_key': 'extra value'}) - return ParsedGenericTestNode( + return GenericTestNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1201,20 +1162,20 @@ def test_basic_schema_test_node(minimal_parsed_schema_test_dict, basic_parsed_sc node = basic_parsed_schema_test_object node_dict = basic_parsed_schema_test_dict minimum = minimal_parsed_schema_test_dict - assert_symmetric(node, node_dict, ParsedGenericTestNode) + assert_symmetric(node, node_dict, GenericTestNode) assert node.empty is False assert node.is_ephemeral is False assert node.is_refable is False assert node.get_materialization() == 'test' - assert_from_dict(node, minimum, ParsedGenericTestNode) + assert_from_dict(node, minimum, GenericTestNode) pickle.loads(pickle.dumps(node)) def 
test_complex_schema_test_node(complex_parsed_schema_test_dict, complex_parsed_schema_test_object): # this tests for the presence of _extra keys - node = complex_parsed_schema_test_object # ParsedGenericTestNode + node = complex_parsed_schema_test_object # GenericTestNode assert(node.config._extra['extra_key']) node_dict = complex_parsed_schema_test_dict assert_symmetric(node, node_dict) @@ -1225,13 +1186,13 @@ def test_invalid_column_name_type(complex_parsed_schema_test_dict): # bad top-level field bad_column_name = complex_parsed_schema_test_dict bad_column_name['column_name'] = {} - assert_fails_validation(bad_column_name, ParsedGenericTestNode) + assert_fails_validation(bad_column_name, GenericTestNode) def test_invalid_severity(complex_parsed_schema_test_dict): invalid_config_value = complex_parsed_schema_test_dict invalid_config_value['config']['severity'] = 'WERROR' - assert_fails_validation(invalid_config_value, ParsedGenericTestNode) + assert_fails_validation(invalid_config_value, GenericTestNode) @pytest.fixture @@ -1459,7 +1420,6 @@ def test_invalid_check_value(basic_check_snapshot_config_dict): def basic_timestamp_snapshot_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Snapshot), 'path': '/root/x/path.sql', @@ -1516,9 +1476,8 @@ def basic_timestamp_snapshot_dict(): @pytest.fixture def basic_timestamp_snapshot_object(): - return ParsedSnapshotNode( + return SnapshotNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1567,7 +1526,6 @@ def basic_intermediate_timestamp_snapshot_object(): return IntermediateSnapshotNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1602,7 +1560,6 @@ def basic_intermediate_timestamp_snapshot_object(): def basic_check_snapshot_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 
'resource_type': str(NodeType.Snapshot), 'path': '/root/x/path.sql', @@ -1659,9 +1616,8 @@ def basic_check_snapshot_dict(): @pytest.fixture def basic_check_snapshot_object(): - return ParsedSnapshotNode( + return SnapshotNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1710,7 +1666,6 @@ def basic_intermediate_check_snapshot_object(): return IntermediateSnapshotNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1746,10 +1701,10 @@ def test_timestamp_snapshot_ok(basic_timestamp_snapshot_dict, basic_timestamp_sn node = basic_timestamp_snapshot_object inter = basic_intermediate_timestamp_snapshot_object - assert_symmetric(node, node_dict, ParsedSnapshotNode) -# node_from_dict = ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) + assert_symmetric(node, node_dict, SnapshotNode) +# node_from_dict = SnapshotNode.from_dict(inter.to_dict(omit_none=True)) # node_from_dict.created_at = 1 - assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node + assert SnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node assert node.is_refable is True assert node.is_ephemeral is False pickle.loads(pickle.dumps(node)) @@ -1760,8 +1715,8 @@ def test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_objec node = basic_check_snapshot_object inter = basic_intermediate_check_snapshot_object - assert_symmetric(node, node_dict, ParsedSnapshotNode) - assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node + assert_symmetric(node, node_dict, SnapshotNode) + assert SnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node assert node.is_refable is True assert node.is_ephemeral is False pickle.loads(pickle.dumps(node)) @@ -1770,7 +1725,7 @@ def test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_objec def 
test_invalid_snapshot_bad_resource_type(basic_timestamp_snapshot_dict): bad_resource_type = basic_timestamp_snapshot_dict bad_resource_type['resource_type'] = str(NodeType.Model) - assert_fails_validation(bad_resource_type, ParsedSnapshotNode) + assert_fails_validation(bad_resource_type, SnapshotNode) def test_basic_parsed_node_patch(basic_parsed_model_patch_object, basic_parsed_model_patch_dict): @@ -1819,7 +1774,7 @@ def test_populated_parsed_node_patch(populated_parsed_node_patch_dict, populated class TestParsedMacro(ContractTestCase): - ContractType = ParsedMacro + ContractType = Macro def _ok_dict(self): return { @@ -1829,10 +1784,8 @@ def _ok_dict(self): 'created_at': 1.0, 'package_name': 'test', 'macro_sql': '{% macro foo() %}select 1 as id{% endmacro %}', - 'root_path': '/root/', 'resource_type': 'macro', 'unique_id': 'macro.test.foo', - 'tags': [], 'depends_on': {'macros': []}, 'meta': {}, 'description': 'my macro description', @@ -1848,10 +1801,8 @@ def test_ok(self): original_file_path='/root/path.sql', package_name='test', macro_sql='{% macro foo() %}select 1 as id{% endmacro %}', - root_path='/root/', resource_type=NodeType.Macro, unique_id='macro.test.foo', - tags=[], depends_on=MacroDependsOn(), meta={}, description='my macro description', @@ -1872,16 +1823,16 @@ def test_invalid_extra_field(self): class TestParsedDocumentation(ContractTestCase): - ContractType = ParsedDocumentation + ContractType = Documentation def _ok_dict(self): return { 'block_contents': 'some doc contents', 'name': 'foo', + 'resource_type': 'doc', 'original_file_path': '/root/docs/doc.md', 'package_name': 'test', 'path': '/root/docs', - 'root_path': '/root', 'unique_id': 'test.foo', } @@ -1889,12 +1840,12 @@ def test_ok(self): doc_dict = self._ok_dict() doc = self.ContractType( package_name='test', - root_path='/root', path='/root/docs', original_file_path='/root/docs/doc.md', name='foo', unique_id='test.foo', - block_contents='some doc contents' + block_contents='some doc 
contents', + resource_type=NodeType.Documentation, ) self.assert_symmetric(doc, doc_dict) pickle.loads(pickle.dumps(doc)) @@ -1914,7 +1865,6 @@ def test_invalid_extra(self): def minimum_parsed_source_definition_dict(): return { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/models/sources.yml', 'original_file_path': '/root/models/sources.yml', 'created_at': 1.0, @@ -1935,7 +1885,6 @@ def minimum_parsed_source_definition_dict(): def basic_parsed_source_definition_dict(): return { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/models/sources.yml', 'original_file_path': '/root/models/sources.yml', 'created_at': 1.0, @@ -1964,7 +1913,7 @@ def basic_parsed_source_definition_dict(): @pytest.fixture def basic_parsed_source_definition_object(): - return ParsedSourceDefinition( + return SourceDefinition( columns={}, database='some_db', description='', @@ -1977,7 +1926,6 @@ def basic_parsed_source_definition_object(): path='/root/models/sources.yml', quoting=Quoting(), resource_type=NodeType.Source, - root_path='/root', schema='some_schema', source_description='my source description', source_name='my_source', @@ -1991,7 +1939,6 @@ def basic_parsed_source_definition_object(): def complex_parsed_source_definition_dict(): return { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/models/sources.yml', 'original_file_path': '/root/models/sources.yml', 'created_at': 1.0, @@ -2025,7 +1972,7 @@ def complex_parsed_source_definition_dict(): @pytest.fixture def complex_parsed_source_definition_object(): - return ParsedSourceDefinition( + return SourceDefinition( columns={}, database='some_db', description='', @@ -2038,7 +1985,6 @@ def complex_parsed_source_definition_object(): path='/root/models/sources.yml', quoting=Quoting(), resource_type=NodeType.Source, - root_path='/root', schema='some_schema', source_description='my source description', source_name='my_source', @@ -2055,32 +2001,32 @@ def 
test_basic_source_definition(minimum_parsed_source_definition_dict, basic_pa node_dict = basic_parsed_source_definition_dict minimum = minimum_parsed_source_definition_dict - assert_symmetric(node, node_dict, ParsedSourceDefinition) + assert_symmetric(node, node_dict, SourceDefinition) assert node.is_ephemeral is False assert node.is_refable is False assert node.has_freshness is False - assert_from_dict(node, minimum, ParsedSourceDefinition) + assert_from_dict(node, minimum, SourceDefinition) pickle.loads(pickle.dumps(node)) def test_invalid_missing(minimum_parsed_source_definition_dict): bad_missing_name = minimum_parsed_source_definition_dict del bad_missing_name['name'] - assert_fails_validation(bad_missing_name, ParsedSourceDefinition) + assert_fails_validation(bad_missing_name, SourceDefinition) def test_invalid_bad_resource_type(minimum_parsed_source_definition_dict): bad_resource_type = minimum_parsed_source_definition_dict bad_resource_type['resource_type'] = str(NodeType.Model) - assert_fails_validation(bad_resource_type, ParsedSourceDefinition) + assert_fails_validation(bad_resource_type, SourceDefinition) def test_complex_source_definition(complex_parsed_source_definition_dict, complex_parsed_source_definition_object): node = complex_parsed_source_definition_object node_dict = complex_parsed_source_definition_dict - assert_symmetric(node, node_dict, ParsedSourceDefinition) + assert_symmetric(node, node_dict, SourceDefinition) assert node.is_ephemeral is False assert node.is_refable is False @@ -2146,10 +2092,10 @@ def minimal_parsed_exposure_dict(): 'meta': {}, 'tags': [], 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'description': '', 'created_at': 1.0, + 'resource_type': 'exposure', } @@ -2168,11 +2114,11 @@ def basic_parsed_exposure_dict(): }, 'refs': [], 'sources': [], + 'metrics': [], 'fqn': ['test', 'exposures', 'my_exposure'], 'unique_id': 'exposure.test.my_exposure', 'package_name': 
'test', 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'description': '', 'meta': {}, @@ -2187,14 +2133,14 @@ def basic_parsed_exposure_dict(): @pytest.fixture def basic_parsed_exposure_object(): - return ParsedExposure( + return Exposure( name='my_exposure', + resource_type=NodeType.Exposure, type=ExposureType.Notebook, fqn=['test', 'exposures', 'my_exposure'], unique_id='exposure.test.my_exposure', package_name='test', path='models/something.yml', - root_path='/usr/src/app', original_file_path='models/something.yml', owner=ExposureOwner(email='test@example.com'), description='', @@ -2230,11 +2176,11 @@ def complex_parsed_exposure_dict(): }, 'refs': [], 'sources': [], + 'metrics': [], 'fqn': ['test', 'exposures', 'my_exposure'], 'unique_id': 'exposure.test.my_exposure', 'package_name': 'test', 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'config': { 'enabled': True, @@ -2245,8 +2191,9 @@ def complex_parsed_exposure_dict(): @pytest.fixture def complex_parsed_exposure_object(): - return ParsedExposure( + return Exposure( name='my_exposure', + resource_type=NodeType.Exposure, type=ExposureType.Analysis, owner=ExposureOwner(email='test@example.com', name='A Name'), maturity=MaturityType.Low, @@ -2259,7 +2206,6 @@ def complex_parsed_exposure_object(): unique_id='exposure.test.my_exposure', package_name='test', path='models/something.yml', - root_path='/usr/src/app', original_file_path='models/something.yml', config=ExposureConfig(), unrendered_config={}, @@ -2267,13 +2213,13 @@ def complex_parsed_exposure_object(): def test_basic_parsed_exposure(minimal_parsed_exposure_dict, basic_parsed_exposure_dict, basic_parsed_exposure_object): - assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, ParsedExposure) - assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, ParsedExposure) + 
assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, Exposure) + assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, Exposure) pickle.loads(pickle.dumps(basic_parsed_exposure_object)) def test_complex_parsed_exposure(complex_parsed_exposure_dict, complex_parsed_exposure_object): - assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, ParsedExposure) + assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, Exposure) unchanged_parsed_exposures = [ @@ -2318,7 +2264,6 @@ def minimal_parsed_metric_dict(): 'meta': {}, 'tags': [], 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'description': '', 'created_at': 1.0, @@ -2351,7 +2296,6 @@ def basic_parsed_metric_dict(): 'unique_id': 'metric.test.my_metric', 'package_name': 'test', 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'description': '', 'meta': {}, @@ -2366,14 +2310,14 @@ def basic_parsed_metric_dict(): @pytest.fixture def basic_parsed_metric_object(): - return ParsedMetric( + return Metric( name='my_metric', + resource_type=NodeType.Metric, calculation_method='count', fqn=['test', 'metrics', 'my_metric'], unique_id='metric.test.my_metric', package_name='test', path='models/something.yml', - root_path='/usr/src/app', original_file_path='models/something.yml', description='', meta={}, diff --git a/test/unit/test_contracts_graph_unparsed.py b/test/unit/test_contracts_graph_unparsed.py index 5c89148cd11..8821b355b71 100644 --- a/test/unit/test_contracts_graph_unparsed.py +++ b/test/unit/test_contracts_graph_unparsed.py @@ -24,7 +24,6 @@ def test_ok(self): 'package_name': 'test', 'language': 'sql', 'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}', - 'root_path': '/root/', 'resource_type': 'macro', } macro = self.ContractType( @@ -33,7 +32,6 @@ def test_ok(self): package_name='test', 
language='sql', raw_code='{% macro foo() %}select 1 as id{% endmacro %}', - root_path='/root/', resource_type=NodeType.Macro, ) self.assert_symmetric(macro, macro_dict) @@ -46,7 +44,6 @@ def test_invalid_missing_field(self): # 'package_name': 'test', 'language': 'sql', 'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}', - 'root_path': '/root/', 'resource_type': 'macro', } self.assert_fails_validation(macro_dict) @@ -58,7 +55,6 @@ def test_invalid_extra_field(self): 'package_name': 'test', 'language': 'sql', 'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}', - 'root_path': '/root/', 'extra': 'extra', 'resource_type': 'macro', } @@ -71,7 +67,6 @@ class TestUnparsedNode(ContractTestCase): def test_ok(self): node_dict = { 'name': 'foo', - 'root_path': '/root/', 'resource_type': NodeType.Model, 'path': '/root/x/path.sql', 'original_file_path': '/root/path.sql', @@ -81,7 +76,6 @@ def test_ok(self): } node = self.ContractType( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -99,7 +93,6 @@ def test_ok(self): def test_empty(self): node_dict = { 'name': 'foo', - 'root_path': '/root/', 'resource_type': NodeType.Model, 'path': '/root/x/path.sql', 'original_file_path': '/root/path.sql', @@ -109,7 +102,6 @@ def test_empty(self): } node = UnparsedNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -126,7 +118,6 @@ def test_empty(self): def test_bad_type(self): node_dict = { 'name': 'foo', - 'root_path': '/root/', 'resource_type': NodeType.Source, # not valid! 
'path': '/root/x/path.sql', 'original_file_path': '/root/path.sql', @@ -143,7 +134,6 @@ class TestUnparsedRunHook(ContractTestCase): def test_ok(self): node_dict = { 'name': 'foo', - 'root_path': 'test/dbt_project.yml', 'resource_type': NodeType.Operation, 'path': '/root/dbt_project.yml', 'original_file_path': '/root/dbt_project.yml', @@ -154,7 +144,6 @@ def test_ok(self): } node = self.ContractType( package_name='test', - root_path='test/dbt_project.yml', path='/root/dbt_project.yml', original_file_path='/root/dbt_project.yml', language='sql', @@ -170,7 +159,6 @@ def test_ok(self): def test_bad_type(self): node_dict = { 'name': 'foo', - 'root_path': 'test/dbt_project.yml', 'resource_type': NodeType.Model, # invalid 'path': '/root/dbt_project.yml', 'original_file_path': '/root/dbt_project.yml', @@ -365,14 +353,12 @@ class TestUnparsedDocumentationFile(ContractTestCase): def test_ok(self): doc = self.ContractType( package_name='test', - root_path='/root', path='/root/docs', original_file_path='/root/docs/doc.md', file_contents='blah blah blah', ) doc_dict = { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/docs', 'original_file_path': '/root/docs/doc.md', 'file_contents': 'blah blah blah', @@ -386,7 +372,6 @@ def test_extra_field(self): self.assert_fails_validation({}) doc_dict = { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/docs', 'original_file_path': '/root/docs/doc.md', 'file_contents': 'blah blah blah', diff --git a/test/unit/test_deps.py b/test/unit/test_deps.py index 30639473bff..c758e53bda9 100644 --- a/test/unit/test_deps.py +++ b/test/unit/test_deps.py @@ -7,21 +7,23 @@ import dbt.exceptions from dbt.deps.git import GitUnpinnedPackage from dbt.deps.local import LocalUnpinnedPackage +from dbt.deps.tarball import TarballUnpinnedPackage from dbt.deps.registry import RegistryUnpinnedPackage from dbt.clients.registry import is_compatible_version from dbt.deps.resolver import resolve_packages from dbt.contracts.project import ( 
LocalPackage, + TarballPackage, GitPackage, RegistryPackage, ) - from dbt.contracts.project import PackageConfig from dbt.semver import VersionSpecifier from dbt.version import get_installed_version from dbt.dataclass_schema import ValidationError + class TestLocalPackage(unittest.TestCase): def test_init(self): a_contract = LocalPackage.from_dict({'local': '/path/to/package'}) @@ -33,6 +35,45 @@ def test_init(self): self.assertEqual(str(a_pinned), '/path/to/package') +class TestTarballPackage(unittest.TestCase): + def test_TarballPackage(self): + from dbt.contracts.project import RegistryPackageMetadata + from mashumaro.exceptions import MissingField + + dict_well_formed_contract = ( + {'tarball': 'http://example.com', + 'name': 'my_cool_package'}) + + a_contract = ( + TarballPackage.from_dict(dict_well_formed_contract)) + + # check contract and resolver + self.assertEqual(a_contract.tarball, 'http://example.com') + self.assertEqual(a_contract.name, 'my_cool_package') + + a = TarballUnpinnedPackage.from_contract(a_contract) + self.assertEqual(a.tarball, 'http://example.com') + self.assertEqual(a.package, 'my_cool_package') + + a_pinned = a.resolved() + self.assertEqual(a_pinned.source_type(), 'tarball') + + # check bad contract (no name) fails + dict_missing_name_should_fail_on_contract = ( + {'tarball': 'http://example.com'}) + + with self.assertRaises(MissingField): + TarballPackage.from_dict(dict_missing_name_should_fail_on_contract) + + # check RegistryPackageMetadata - it is used in TarballUnpinnedPackage + dct = {'name' : a.package, + 'packages': [], # note: required by RegistryPackageMetadata + 'downloads' : {'tarball' : a_pinned.tarball}} + + metastore = RegistryPackageMetadata.from_dict(dct) + self.assertEqual(metastore.downloads.tarball, 'http://example.com') + + class TestGitPackage(unittest.TestCase): def test_init(self): a_contract = GitPackage.from_dict( diff --git a/test/unit/test_docs_blocks.py b/test/unit/test_docs_blocks.py index 
c6673321480..89821abfe12 100644 --- a/test/unit/test_docs_blocks.py +++ b/test/unit/test_docs_blocks.py @@ -3,7 +3,7 @@ from dbt.contracts.files import SourceFile, FileHash, FilePath from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedDocumentation +from dbt.contracts.graph.nodes import Documentation from dbt.node_types import NodeType from dbt.parser import docs from dbt.parser.search import FileBlock @@ -155,10 +155,9 @@ def test_load_file(self): docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name) self.assertEqual(len(docs_values), 2) for result in docs_values: - self.assertIsInstance(result, ParsedDocumentation) + self.assertIsInstance(result, Documentation) self.assertEqual(result.package_name, 'some_package') self.assertEqual(result.original_file_path, self.testfile_path) - self.assertEqual(result.root_path, self.subdir_path) self.assertEqual(result.resource_type, NodeType.Documentation) self.assertEqual(result.path, 'test_file.md') @@ -180,7 +179,7 @@ def test_load_file_extras(self): docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name) self.assertEqual(len(docs_values), 2) for result in docs_values: - self.assertIsInstance(result, ParsedDocumentation) + self.assertIsInstance(result, Documentation) self.assertEqual(docs_values[0].name, 'snowplow_sessions') self.assertEqual(docs_values[1].name, 'snowplow_sessions__session_id') @@ -197,10 +196,9 @@ def test_multiple_raw_blocks(self): docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name) self.assertEqual(len(docs_values), 2) for result in docs_values: - self.assertIsInstance(result, ParsedDocumentation) + self.assertIsInstance(result, Documentation) self.assertEqual(result.package_name, 'some_package') self.assertEqual(result.original_file_path, self.testfile_path) - self.assertEqual(result.root_path, self.subdir_path) self.assertEqual(result.resource_type, NodeType.Documentation) 
self.assertEqual(result.path, 'test_file.md') diff --git a/test/unit/test_flags.py b/test/unit/test_flags.py index fc4455f5d1b..4be866338a2 100644 --- a/test/unit/test_flags.py +++ b/test/unit/test_flags.py @@ -261,18 +261,3 @@ def test__flags(self): # cleanup os.environ.pop('DBT_LOG_PATH') delattr(self.args, 'log_path') - - # event_buffer_size - self.user_config.event_buffer_size = 100 - flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.EVENT_BUFFER_SIZE, 100) - os.environ['DBT_EVENT_BUFFER_SIZE'] = '80' - flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.EVENT_BUFFER_SIZE, 80) - setattr(self.args, 'event_buffer_size', '120') - flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.EVENT_BUFFER_SIZE, 120) - # cleanup - os.environ.pop('DBT_EVENT_BUFFER_SIZE') - delattr(self.args, 'event_buffer_size') - self.user_config.event_buffer_size = None diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py index e32267e2d6f..0497d5da02a 100644 --- a/test/unit/test_graph_selector_methods.py +++ b/test/unit/test_graph_selector_methods.py @@ -6,18 +6,18 @@ from pathlib import Path from dbt.contracts.files import FileHash -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( DependsOn, MacroDependsOn, NodeConfig, - ParsedMacro, - ParsedModelNode, - ParsedExposure, - ParsedMetric, - ParsedSeedNode, - ParsedSingularTestNode, - ParsedGenericTestNode, - ParsedSourceDefinition, + Macro, + ModelNode, + Exposure, + Metric, + SeedNode, + SingularTestNode, + GenericTestNode, + SourceDefinition, TestConfig, TestMetadata, ColumnInfo, @@ -42,7 +42,7 @@ MetricSelectorMethod, ) import dbt.exceptions -import dbt.contracts.graph.parsed +import dbt.contracts.graph.nodes from .utils import replace_config @@ -77,7 +77,7 @@ def make_model(pkg, name, sql, refs=None, sources=None, tags=None, path=None, al source_values.append([src.source_name, src.name]) 
depends_on_nodes.append(src.unique_id) - return ParsedModelNode( + return ModelNode( language='sql', raw_code=sql, database='dbt', @@ -87,7 +87,6 @@ def make_model(pkg, name, sql, refs=None, sources=None, tags=None, path=None, al fqn=fqn, unique_id=f'model.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'models/{path}', config=NodeConfig(**config_kwargs), @@ -118,9 +117,7 @@ def make_seed(pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extr checksum = FileHash.from_contents('') fqn = [pkg] + fqn_extras + [name] - return ParsedSeedNode( - language='sql', - raw_code='', + return SeedNode( database='dbt', schema='dbt_schema', alias=alias, @@ -128,7 +125,6 @@ def make_seed(pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extr fqn=fqn, unique_id=f'seed.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'data/{path}', tags=tags, @@ -150,13 +146,12 @@ def make_source(pkg, source_name, table_name, path=None, loader=None, identifier fqn = [pkg] + fqn_extras + [source_name, table_name] - return ParsedSourceDefinition( + return SourceDefinition( fqn=fqn, database='dbt', schema='dbt_schema', unique_id=f'source.{pkg}.{source_name}.{table_name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=path, name=table_name, @@ -177,16 +172,14 @@ def make_macro(pkg, name, macro_sql, path=None, depends_on_macros=None): if depends_on_macros is None: depends_on_macros = [] - return ParsedMacro( + return Macro( name=name, macro_sql=macro_sql, unique_id=f'macro.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=path, resource_type=NodeType.Macro, - tags=[], depends_on=MacroDependsOn(macros=depends_on_macros), ) @@ -204,7 +197,7 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No ref_values = [] source_values = [] # this doesn't really have to be 
correct - if isinstance(test_model, ParsedSourceDefinition): + if isinstance(test_model, SourceDefinition): kwargs['model'] = "{{ source('" + test_model.source_name + \ "', '" + test_model.name + "') }}" source_values.append([test_model.source_name, test_model.name]) @@ -251,7 +244,7 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No source_values.append([source.source_name, source.name]) depends_on_nodes.append(source.unique_id) - return ParsedGenericTestNode( + return GenericTestNode( language='sql', raw_code=raw_code, test_metadata=TestMetadata( @@ -266,7 +259,6 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No fqn=['minimal', 'schema_test', node_name], unique_id=f'test.{pkg}.{node_name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=f'schema_test/{node_name}.sql', original_file_path=f'models/{path}', resource_type=NodeType.Test, @@ -308,7 +300,7 @@ def make_data_test(pkg, name, sql, refs=None, sources=None, tags=None, path=None source_values.append([src.source_name, src.name]) depends_on_nodes.append(src.unique_id) - return ParsedSingularTestNode( + return SingularTestNode( language='sql', raw_code=sql, database='dbt', @@ -318,7 +310,6 @@ def make_data_test(pkg, name, sql, refs=None, sources=None, tags=None, path=None fqn=fqn, unique_id=f'test.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'tests/{path}', config=TestConfig(**config_kwargs), @@ -342,14 +333,14 @@ def make_exposure(pkg, name, path=None, fqn_extras=None, owner=None): owner = ExposureOwner(email='test@example.com') fqn = [pkg, 'exposures'] + fqn_extras + [name] - return ParsedExposure( + return Exposure( name=name, + resource_type=NodeType.Exposure, type=ExposureType.Notebook, fqn=fqn, unique_id=f'exposure.{pkg}.{name}', package_name=pkg, path=path, - root_path='/usr/src/app', original_file_path=path, owner=owner, ) @@ -359,11 +350,11 @@ def make_metric(pkg, name, 
path=None): if path is None: path = 'schema.yml' - return ParsedMetric( + return Metric( name=name, + resource_type=NodeType.Metric, path='schema.yml', package_name=pkg, - root_path='/usr/src/app', original_file_path=path, unique_id=f'metric.{pkg}.{name}', fqn=[pkg, 'metrics', name], @@ -978,12 +969,14 @@ def test_select_state_changed_seed_checksum_path_to_path(manifest, previous_stat change_node(manifest, seed.replace(checksum=FileHash( name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'modified') warn_or_error_patch.assert_called_once() - msg = warn_or_error_patch.call_args[0][0] + event = warn_or_error_patch.call_args[0][0] + assert event.info.name == 'SeedExceedsLimitSamePath' + msg = event.info.msg assert msg.startswith('Found a seed (pkg.seed) >1MB in size') - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() @@ -992,13 +985,15 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state change_node(manifest, seed.replace(checksum=FileHash( name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert search_manifest_using_method( manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_called_once() - msg = warn_or_error_patch.call_args[0][0] + event = warn_or_error_patch.call_args[0][0] + assert event.info.name == 
'SeedIncreased' + msg = event.info.msg assert msg.startswith('Found a seed (pkg.seed) >1MB in size') - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() @@ -1007,11 +1002,11 @@ def test_select_state_changed_seed_checksum_path_to_sha(manifest, previous_state change_node(previous_state.manifest, seed.replace( checksum=FileHash(name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert search_manifest_using_method( manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_not_called() - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() diff --git a/test/unit/test_macro_resolver.py b/test/unit/test_macro_resolver.py index 17e1aca6dca..3e0b7622bce 100644 --- a/test/unit/test_macro_resolver.py +++ b/test/unit/test_macro_resolver.py @@ -1,15 +1,15 @@ import unittest from unittest import mock -from dbt.contracts.graph.parsed import ( - ParsedMacro +from dbt.contracts.graph.nodes import ( + Macro ) from dbt.context.macro_resolver import MacroResolver def mock_macro(name, package_name): macro = mock.MagicMock( - __class__=ParsedMacro, + __class__=Macro, package_name=package_name, resource_type='macro', unique_id=f'macro.{package_name}.{name}', diff --git a/test/unit/test_manifest.py b/test/unit/test_manifest.py index cbce93fc052..576a525823b 100644 --- a/test/unit/test_manifest.py +++ b/test/unit/test_manifest.py 
@@ -15,14 +15,14 @@ from dbt.adapters.base.plugin import AdapterPlugin from dbt.contracts.files import FileHash from dbt.contracts.graph.manifest import Manifest, ManifestMetadata -from dbt.contracts.graph.parsed import ( - ParsedModelNode, +from dbt.contracts.graph.nodes import ( + ModelNode, DependsOn, NodeConfig, - ParsedSeedNode, - ParsedSourceDefinition, - ParsedExposure, - ParsedMetric + SeedNode, + SourceDefinition, + Exposure, + Metric ) from dbt.contracts.graph.unparsed import ( @@ -33,7 +33,6 @@ MetricTime ) -from dbt.contracts.graph.compiled import CompiledModelNode from dbt.events.functions import reset_metadata_vars from dbt.node_types import NodeType @@ -45,9 +44,9 @@ REQUIRED_PARSED_NODE_KEYS = frozenset({ 'alias', 'tags', 'config', 'unique_id', 'refs', 'sources', 'metrics', 'meta', 'depends_on', 'database', 'schema', 'name', 'resource_type', - 'package_name', 'root_path', 'path', 'original_file_path', 'raw_code', 'language', + 'package_name', 'path', 'original_file_path', 'raw_code', 'language', 'description', 'columns', 'fqn', 'build_path', 'compiled_path', 'patch_path', 'docs', - 'deferred', 'checksum', 'unrendered_config', 'created_at', 'config_call_dict', + 'deferred', 'checksum', 'unrendered_config', 'created_at', 'config_call_dict', 'relation_name', }) REQUIRED_COMPILED_NODE_KEYS = frozenset(REQUIRED_PARSED_NODE_KEYS | { @@ -81,7 +80,7 @@ def setUp(self): }) self.exposures = { - 'exposure.root.my_exposure': ParsedExposure( + 'exposure.root.my_exposure': Exposure( name='my_exposure', type=ExposureType.Dashboard, owner=ExposureOwner(email='some@email.com'), @@ -95,14 +94,13 @@ def setUp(self): fqn=['root', 'my_exposure'], unique_id='exposure.root.my_exposure', package_name='root', - root_path='', path='my_exposure.sql', original_file_path='my_exposure.sql' ) } self.metrics = { - 'metric.root.my_metric': ParsedMetric( + 'metric.root.my_metric': Metric( name='new_customers', label='New Customers', model='ref("multi")', @@ -128,14 +126,13 @@ def 
setUp(self): fqn=['root', 'my_metric'], unique_id='metric.root.my_metric', package_name='root', - root_path='', path='my_metric.yml', original_file_path='my_metric.yml' ) } self.nested_nodes = { - 'model.snowplow.events': ParsedModelNode( + 'model.snowplow.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -152,13 +149,12 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.events': ParsedModelNode( + 'model.root.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -175,13 +171,12 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.dep': ParsedModelNode( + 'model.root.dep': ModelNode( name='dep', database='dbt', schema='analytics', @@ -198,13 +193,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.nested': ParsedModelNode( + 'model.root.nested': ModelNode( name='nested', database='dbt', schema='analytics', @@ -221,13 +215,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.sibling': ParsedModelNode( + 'model.root.sibling': ModelNode( name='sibling', database='dbt', schema='analytics', @@ -244,13 +237,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.multi': ParsedModelNode( + 'model.root.multi': ModelNode( name='multi', database='dbt', schema='analytics', @@ -267,7 +259,6 @@ def setUp(self): tags=[], path='multi.sql', 
original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -276,7 +267,7 @@ def setUp(self): } self.sources = { - 'source.root.my_source.my_table': ParsedSourceDefinition( + 'source.root.my_source.my_table': SourceDefinition( database='raw', schema='analytics', resource_type=NodeType.Source, @@ -289,7 +280,6 @@ def setUp(self): unique_id='source.test.my_source.my_table', fqn=['test', 'my_source', 'my_table'], package_name='root', - root_path='', path='schema.yml', original_file_path='schema.yml', ), @@ -317,7 +307,7 @@ def test__no_nodes(self): metadata=ManifestMetadata(generated_at=datetime.utcnow()), ) - invocation_id = dbt.events.functions.invocation_id + invocation_id = dbt.events.functions.EVENT_MANAGER.invocation_id self.assertEqual( manifest.writable_manifest().to_dict(omit_none=True), { @@ -331,7 +321,7 @@ def test__no_nodes(self): 'child_map': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'env': {ENV_KEY_NAME: 'value'}, 'invocation_id': invocation_id, @@ -434,7 +424,7 @@ def test__build_flat_graph(self): @mock.patch.object(tracking, 'active_user') def test_metadata(self, mock_user): mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf' - dbt.events.functions.invocation_id = '01234567-0123-0123-0123-0123456789ab' + dbt.events.functions.EVENT_MANAGER.invocation_id = '01234567-0123-0123-0123-0123456789ab' dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False now = datetime.utcnow() self.assertEqual( @@ -457,7 +447,7 @@ def test_metadata(self, mock_user): @freezegun.freeze_time('2018-02-14T09:15:13Z') def test_no_nodes_with_metadata(self, mock_user): mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf' - dbt.events.functions.invocation_id = '01234567-0123-0123-0123-0123456789ab' + 
dbt.events.functions.EVENT_MANAGER.invocation_id = '01234567-0123-0123-0123-0123456789ab' dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False metadata = ManifestMetadata( project_id='098f6bcd4621d373cade4e832627b4f6', @@ -482,7 +472,7 @@ def test_no_nodes_with_metadata(self, mock_user): 'docs': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'project_id': '098f6bcd4621d373cade4e832627b4f6', 'user_id': 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf', @@ -502,7 +492,7 @@ def test_get_resource_fqns_empty(self): def test_get_resource_fqns(self): nodes = copy.copy(self.nested_nodes) - nodes['seed.root.seed'] = ParsedSeedNode( + nodes['seed.root.seed'] = SeedNode( name='seed', database='dbt', schema='analytics', @@ -511,16 +501,10 @@ def test_get_resource_fqns(self): unique_id='seed.root.seed', fqn=['root', 'seed'], package_name='root', - refs=[['events']], - sources=[], - depends_on=DependsOn(), config=self.model_config, tags=[], path='seed.csv', original_file_path='seed.csv', - root_path='', - language='sql', - raw_code='-- csv --', checksum=FileHash.empty(), ) manifest = Manifest(nodes=nodes, sources=self.sources, macros={}, docs={}, @@ -552,7 +536,7 @@ def test_get_resource_fqns(self): self.assertEqual(resource_fqns, expect) def test__deepcopy_copies_flat_graph(self): - test_node = ParsedModelNode( + test_node = ModelNode( name='events', database='dbt', schema='analytics', @@ -569,7 +553,6 @@ def test__deepcopy_copies_flat_graph(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -597,7 +580,7 @@ def setUp(self): }) self.nested_nodes = { - 'model.snowplow.events': CompiledModelNode( + 'model.snowplow.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -613,7 +596,6 @@ def 
setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', language='sql', raw_code='does not matter', meta={}, @@ -624,7 +606,7 @@ def setUp(self): extra_ctes=[], checksum=FileHash.empty(), ), - 'model.root.events': CompiledModelNode( + 'model.root.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -640,7 +622,6 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', raw_code='does not matter', meta={}, compiled=True, @@ -651,7 +632,7 @@ def setUp(self): extra_ctes=[], checksum=FileHash.empty(), ), - 'model.root.dep': ParsedModelNode( + 'model.root.dep': ModelNode( name='dep', database='dbt', schema='analytics', @@ -667,13 +648,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.nested': ParsedModelNode( + 'model.root.nested': ModelNode( name='nested', database='dbt', schema='analytics', @@ -689,13 +669,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.sibling': ParsedModelNode( + 'model.root.sibling': ModelNode( name='sibling', database='dbt', schema='analytics', @@ -711,13 +690,12 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.multi': ParsedModelNode( + 'model.root.multi': ModelNode( name='multi', database='dbt', schema='analytics', @@ -733,7 +711,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -763,7 +740,7 @@ def test__no_nodes(self): 'child_map': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 
'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'invocation_id': '01234567-0123-0123-0123-0123456789ab', 'env': {ENV_KEY_NAME: 'value'}, diff --git a/test/unit/test_node_types.py b/test/unit/test_node_types.py index fcfb115b9b9..06c27dba7fe 100644 --- a/test/unit/test_node_types.py +++ b/test/unit/test_node_types.py @@ -10,7 +10,7 @@ NodeType.Seed: "seeds", NodeType.RPCCall: "rpcs", NodeType.SqlOperation: "sql operations", - NodeType.Documentation: "docs blocks", + NodeType.Documentation: "docs", NodeType.Source: "sources", NodeType.Macro: "macros", NodeType.Exposure: "exposures", diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 529fbef8b94..19800b7c798 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -1,44 +1,39 @@ -import ipdb +import os import unittest +from copy import deepcopy from unittest import mock -import os import yaml -from copy import deepcopy import dbt.flags import dbt.parser from dbt import tracking from dbt.context.context_config import ContextConfig +from dbt.contracts.files import SourceFile, FileHash, FilePath, SchemaSourceFile +from dbt.contracts.graph.manifest import Manifest +from dbt.contracts.graph.model_config import ( + NodeConfig, TestConfig, SnapshotConfig +) +from dbt.contracts.graph.nodes import ( + ModelNode, Macro, DependsOn, SingularTestNode, SnapshotNode, + AnalysisNode, UnpatchedSourceDefinition +) from dbt.exceptions import CompilationException, ParsingException +from dbt.node_types import NodeType from dbt.parser import ( ModelParser, MacroParser, SingularTestParser, GenericTestParser, SchemaParser, SnapshotParser, AnalysisParser ) +from dbt.parser.generic_test_builders import YamlBlock +from dbt.parser.models import ( + _get_config_call_dict, _shift_sources, _get_exp_sample_result, _get_stable_sample_result, _get_sample_result +) from dbt.parser.schemas import ( 
TestablePatchParser, SourceParser, AnalysisPatchParser, MacroPatchParser ) from dbt.parser.search import FileBlock -from dbt.parser.generic_test_builders import YamlBlock from dbt.parser.sources import SourcePatcher - -from dbt.node_types import NodeType, ModelLanguage -from dbt.contracts.files import SourceFile, FileHash, FilePath, SchemaSourceFile -from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.model_config import ( - NodeConfig, TestConfig, SnapshotConfig -) -from dbt.contracts.graph.parsed import ( - ParsedModelNode, ParsedMacro, ParsedNodePatch, DependsOn, ColumnInfo, - ParsedSingularTestNode, ParsedGenericTestNode, ParsedSnapshotNode, - ParsedAnalysisNode, UnpatchedSourceDefinition -) -from dbt.contracts.graph.unparsed import Docs -from dbt.parser.models import ( - _get_config_call_dict, _shift_sources, _get_exp_sample_result, _get_stable_sample_result, _get_sample_result -) -import itertools -from .utils import config_from_parts_or_dicts, normalize, generate_name_macros, MockNode, MockSource, MockDocumentation +from .utils import config_from_parts_or_dicts, normalize, generate_name_macros, MockNode def get_abs_os_path(unix_path): @@ -60,13 +55,12 @@ def _generate_macros(self): name_sql[name] = sql for name, sql in name_sql.items(): - pm = ParsedMacro( + pm = Macro( name=name, resource_type=NodeType.Macro, unique_id=f'macro.root.{name}', package_name='root', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/root'), path=normalize('macros/macro.sql'), macro_sql=sql, ) @@ -162,7 +156,7 @@ def file_block_for(self, data: str, filename: str, searched: str): return FileBlock(file=source_file) def assert_has_manifest_lengths(self, manifest, macros=3, nodes=0, - sources=0, docs=0, disabled=0): + sources=0, docs=0, disabled=0): self.assertEqual(len(manifest.macros), macros) self.assertEqual(len(manifest.nodes), nodes) self.assertEqual(len(manifest.sources), sources) @@ -174,9 +168,13 @@ def 
assertEqualNodes(node_one, node_two): node_one_dict = node_one.to_dict() if 'created_at' in node_one_dict: del node_one_dict['created_at'] + if "relation_name" in node_one_dict: + del node_one_dict["relation_name"] node_two_dict = node_two.to_dict() if 'created_at' in node_two_dict: del node_two_dict['created_at'] + if "relation_name" in node_two_dict: + del node_two_dict["relation_name"] # we don't reall care the order of packages, doing this because it is hard to # make config.packages a set instead of a list if 'config' in node_one_dict and 'packages' in node_one_dict['config']: @@ -193,7 +191,6 @@ def assertEqualNodes(node_one, node_two): assert node_one_dict == node_two_dict - SINGLE_TABLE_SOURCE = ''' version: 2 sources: @@ -218,7 +215,6 @@ def assertEqualNodes(node_one, node_two): values: ['red', 'blue', 'green'] ''' - SINGLE_TABLE_MODEL_TESTS = ''' version: 2 models: @@ -236,7 +232,6 @@ def assertEqualNodes(node_one, node_two): arg: 100 ''' - SINGLE_TABLE_SOURCE_PATCH = ''' version: 2 sources: @@ -399,7 +394,7 @@ def setUp(self): patch_path=None, ) nodes = {my_model_node.unique_id: my_model_node} - macros={m.unique_id: m for m in generate_name_macros('root')} + macros = {m.unique_id: m for m in generate_name_macros('root')} self.manifest = Manifest(nodes=nodes, macros=macros) self.manifest.ref_lookup self.parser = SchemaParser( @@ -492,6 +487,138 @@ def test__parse_basic_model_tests(self): self.assertEqual(self.parser.manifest.files[file_id].node_patches, ['model.root.my_model']) +sql_model = """ +{{ config(materialized="table") }} +select 1 as id +""" + +sql_model_parse_error = "{{ SYNTAX ERROR }}" + +python_model = """ +import textblob +import text as a +from torch import b +import textblob.text +import sklearn + +def model(dbt, session): + dbt.config( + materialized='table', + packages=['sklearn==0.1.0'] + ) + df0 = dbt.ref("a_model").to_pandas() + df1 = dbt.ref("my_sql_model").task.limit(2) + df2 = dbt.ref("my_sql_model_1") + df3 = 
dbt.ref("my_sql_model_2") + df4 = dbt.source("test", 'table1').limit(max=[max(dbt.ref('something'))]) + df5 = [dbt.ref('test1')] + + a_dict = {'test2': dbt.ref('test2')} + df5 = {'test2': dbt.ref('test3')} + df6 = [dbt.ref("test4")] + + df = df0.limit(2) + return df +""" + +python_model_config = """ +def model(dbt, session): + dbt.config.get("param_1") + dbt.config.get("param_2") + return dbt.ref("some_model") +""" + +python_model_config_with_defaults = """ +def model(dbt, session): + dbt.config.get("param_None", None) + dbt.config.get("param_Str", "default") + dbt.config.get("param_List", [1, 2]) + return dbt.ref("some_model") +""" + +python_model_single_argument = """ +def model(dbt): + dbt.config(materialized="table") + return dbt.ref("some_model") +""" + +python_model_no_argument = """ +import pandas as pd + +def model(): + return pd.dataframe([1, 2]) +""" + +python_model_incorrect_argument_name = """ +def model(tbd, session): + tbd.config(materialized="table") + return tbd.ref("some_model") +""" + +python_model_multiple_models = """ +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") + +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") +""" + +python_model_incorrect_function_name = """ +def model1(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") +""" + +python_model_empty_file = """ """ + +python_model_multiple_returns = """ +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model"), dbt.ref("some_other_model") +""" + +python_model_no_return = """ +def model(dbt, session): + dbt.config(materialized='table') +""" + +python_model_single_return = """ +import pandas as pd + +def model(dbt, session): + dbt.config(materialized='table') + return pd.dataframe([1, 2]) +""" + +python_model_incorrect_ref = """ +def model(dbt, session): + model_names = ["orders", "customers"] + models = [] + + for model_name in model_names: + 
models.extend(dbt.ref(model_name)) + + return models[0] +""" + +python_model_default_materialization = """ +import pandas as pd + +def model(dbt, session): + return pd.dataframe([1, 2]) +""" + +python_model_custom_materialization = """ +import pandas as pd + +def model(dbt, session): + dbt.config(materialized="view") + return pd.dataframe([1, 2]) +""" + + class ModelParserTest(BaseParserTest): def setUp(self): super().setUp() @@ -505,13 +632,12 @@ def file_block_for(self, data, filename): return super().file_block_for(data, filename, 'models') def test_basic(self): - raw_code = '{{ config(materialized="table") }}select 1 as id' - block = self.file_block_for(raw_code, 'nested/model_1.sql') + block = self.file_block_for(sql_model, 'nested/model_1.sql') self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedModelNode( + expected = ModelNode( alias='model_1', name='model_1', database='test', @@ -521,11 +647,10 @@ def test_basic(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', - raw_code=raw_code, + raw_code=sql_model, checksum=block.file.checksum, unrendered_config={'materialized': 'table'}, config_call_dict={ @@ -536,41 +661,21 @@ def test_basic(self): file_id = 'snowplow://' + normalize('models/nested/model_1.sql') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['model.snowplow.model_1']) - - def test_parse_python_file(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - packages = ['sklearn==0.1.0'] - ) - import textblob - import text as a - from torch import b - import 
textblob.text - import sklearn - df0 = pandas(dbt.ref("a_model")) - df1 = dbt.ref("my_sql_model").task.limit(2) - df2 = dbt.ref("my_sql_model_1") - df3 = dbt.ref("my_sql_model_2") - df4 = dbt.source("test", 'table1').limit(max = [max(dbt.ref('something'))]) - df5 = [dbt.ref('test1')] - - a_dict = {'test2' : dbt.ref('test2')} - df5 = anotherfunction({'test2' : dbt.ref('test3')}) - df6 = [somethingelse.ref(dbt.ref("test4"))] - - df = df.limit(2) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_sql_model_parse_error(self): + block = self.file_block_for(sql_model_parse_error, 'nested/model_1.sql') + with self.assertRaises(CompilationException): + self.parser.parse_file(block) + + def test_python_model_parse(self): + block = self.file_block_for(python_model, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] # we decided to not detect and auto supply for now since import name doesn't always match library name python_packages = ['sklearn==0.1.0'] - expected = ParsedModelNode( + expected = ModelNode( alias='py_model', name='py_model', database='test', @@ -580,167 +685,120 @@ def model(dbt, session): fqn=['snowplow', 'nested', 'py_model'], package_name='snowplow', original_file_path=normalize('models/nested/py_model.py'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table', packages=python_packages), # config.packages = ['textblob'] path=normalize('nested/py_model.py'), language='python', - raw_code=py_code, + raw_code=python_model, checksum=block.file.checksum, - unrendered_config={'materialized': 'table', 'packages':python_packages}, - config_call_dict={'materialized': 'table', 'packages':python_packages}, - refs=[['a_model'], ['my_sql_model'], ['my_sql_model_1'], ['my_sql_model_2'], ['something'], ['test1'], 
['test2'], ['test3'], ['test4']], - sources = [['test', 'table1']], + unrendered_config={'materialized': 'table', 'packages': python_packages}, + config_call_dict={'materialized': 'table', 'packages': python_packages}, + refs=[['a_model'], ['my_sql_model'], ['my_sql_model_1'], ['my_sql_model_2'], ['something'], ['test1'], + ['test2'], ['test3'], ['test4']], + sources=[['test', 'table1']], ) assertEqualNodes(node, expected) file_id = 'snowplow://' + normalize('models/nested/py_model.py') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['model.snowplow.py_model']) - def test_python_model_config_get(self): - py_code = """ -def model(dbt, session): - dbt.config.get("param_1") - dbt.config.get("param_2") - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_config(self): + block = self.file_block_for(python_model_config, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - + self.parser.parse_file(block) node = list(self.parser.manifest.nodes.values())[0] self.assertEqual(node.config.to_dict()["config_keys_used"], ["param_1", "param_2"]) - def test_wrong_python_model_def_miss_session(self): - py_code = """ -def model(dbt): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_config_with_defaults(self): + block = self.file_block_for(python_model_config_with_defaults, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + default_values = node.config.to_dict()["config_keys_defaults"] + self.assertIsNone(default_values[0]) + self.assertEqual(default_values[1], "default") + self.assertEqual(default_values[2], [1, 2]) + + def test_python_model_single_argument(self): + block = self.file_block_for(python_model_single_argument, 
'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_wrong_python_model_def_miss_session(self): - py_code = """ -def model(): - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_no_argument(self): + block = self.file_block_for(python_model_no_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_wrong_python_model_def_wrong_arg(self): - """ First argument for python model should be dbt - """ - py_code = """ -def model(dat, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_incorrect_argument_name(self): + block = self.file_block_for(python_model_incorrect_argument_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_multipe_model(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_multiple_models(self): + block = self.file_block_for(python_model_multiple_models, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_no_model(self): - py_code = """ -def model1(dbt, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_python_model_incorrect_function_name(self): + block = 
self.file_block_for(python_model_incorrect_function_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_mutiple_return(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df1, df2 - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_python_model_empty_file(self): + block = self.file_block_for(python_model_empty_file, "nested/py_model.py") + self.parser.manifest.files[block.file.file_id] = block.file + self.assertIsNone(self.parser.parse_file(block)) + + def test_python_model_multiple_returns(self): + block = self.file_block_for(python_model_multiple_returns, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_no_return(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_python_model_no_return(self): + block = self.file_block_for(python_model_no_return, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_correct_python_model_def_return_function(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - return pandas.dataframe([1,2]) - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_single_return(self): + block = self.file_block_for(python_model_single_return, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - self.parser.parse_file(block) + self.assertIsNone(self.parser.parse_file(block)) - def test_parse_error(self): - block = self.file_block_for('{{ SYNTAX ERROR }}', 
'nested/model_1.sql') - with self.assertRaises(CompilationException): + def test_python_model_incorrect_ref(self): + block = self.file_block_for(python_model_incorrect_ref, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_parse_ref_with_non_string(self): - py_code = """ -def model(dbt, session): - - model_names = ["orders", "customers"] - models = [] - - for model_name in model_names: - models.extend(dbt.ref(model_name)) + def test_python_model_default_materialization(self): + block = self.file_block_for(python_model_default_materialization, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + self.assertEqual(node.get_materialization(), "table") - return models[0] - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_custom_materialization(self): + block = self.file_block_for(python_model_custom_materialization, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): - self.parser.parse_file(block) - + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + self.assertEqual(node.get_materialization(), "view") + def test_python_model_custom_materialization(self): + block = self.file_block_for(python_model_custom_materialization, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + self.assertEqual(node.get_materialization(), "view") class StaticModelParserTest(BaseParserTest): def setUp(self): @@ -759,20 +817,19 @@ def file_block_for(self, data, filename): # parser does not run in this case. 
That test is in integration test suite 072 def test_built_in_macro_override_detection(self): macro_unique_id = 'macro.root.ref' - self.parser.manifest.macros[macro_unique_id] = ParsedMacro( + self.parser.manifest.macros[macro_unique_id] = Macro( name='ref', resource_type=NodeType.Macro, unique_id=macro_unique_id, package_name='root', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/root'), path=normalize('macros/macro.sql'), macro_sql='{% macro ref(model_name) %}{% set x = raise("boom") %}{% endmacro %}', ) raw_code = '{{ config(materialized="table") }}select 1 as id' block = self.file_block_for(raw_code, 'nested/model_1.sql') - node = ParsedModelNode( + node = ModelNode( alias='model_1', name='model_1', database='test', @@ -782,7 +839,6 @@ def test_built_in_macro_override_detection(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', @@ -791,9 +847,10 @@ def test_built_in_macro_override_detection(self): unrendered_config={'materialized': 'table'}, ) - assert(self.parser._has_banned_macro(node)) + assert (self.parser._has_banned_macro(node)) -# TODO + +# TODO class StaticModelParserUnitTest(BaseParserTest): # _get_config_call_dict # _shift_sources @@ -808,7 +865,7 @@ def setUp(self): manifest=self.manifest, root_project=self.root_project_config, ) - self.example_node = ParsedModelNode( + self.example_node = ModelNode( alias='model_1', name='model_1', database='test', @@ -818,7 +875,6 @@ def setUp(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', @@ -969,7 +1025,8 @@ def 
file_block_for(self, data, filename): return super().file_block_for(data, filename, 'snapshots') def test_parse_error(self): - block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', 'nested/snap_1.sql') + block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', + 'nested/snap_1.sql') with self.assertRaises(CompilationException): self.parser.parse_file(block) @@ -988,7 +1045,7 @@ def test_single_block(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedSnapshotNode( + expected = SnapshotNode( alias='foo', name='foo', # the `database` entry is overrridden by the target_database config @@ -999,7 +1056,6 @@ def test_single_block(self): fqn=['snowplow', 'nested', 'snap_1', 'foo'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1020,10 +1076,10 @@ def test_single_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) @@ -1058,7 +1114,7 @@ def test_multi_block(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=2) nodes = sorted(self.parser.manifest.nodes.values(), key=lambda n: n.name) - expect_foo = ParsedSnapshotNode( + expect_foo = SnapshotNode( alias='foo', name='foo', database='dbt', @@ -1068,7 +1124,6 @@ def test_multi_block(self): fqn=['snowplow', 'nested', 'snap_1', 'foo'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - 
root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1089,14 +1144,14 @@ def test_multi_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) - expect_bar = ParsedSnapshotNode( + expect_bar = SnapshotNode( alias='bar', name='bar', database='dbt', @@ -1106,7 +1161,6 @@ def test_multi_block(self): fqn=['snowplow', 'nested', 'snap_1', 'bar'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1127,10 +1181,10 @@ def test_multi_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) @@ -1160,13 +1214,12 @@ def test_single_block(self): self.parser.parse_file(block) self.assertEqual(len(self.parser.manifest.macros), 1) macro = list(self.parser.manifest.macros.values())[0] - expected = ParsedMacro( + expected = Macro( name='foo', resource_type=NodeType.Macro, unique_id='macro.snowplow.foo', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql=raw_code, ) @@ -1183,23 +1236,21 @@ def test_multiple_blocks(self): self.parser.parse_file(block) self.assertEqual(len(self.parser.manifest.macros), 2) macros = sorted(self.parser.manifest.macros.values(), key=lambda m: m.name) - expected_bar = ParsedMacro( + 
expected_bar = Macro( name='bar', resource_type=NodeType.Macro, unique_id='macro.snowplow.bar', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql='{% macro bar(c, d) %}c + d{% endmacro %}', ) - expected_foo = ParsedMacro( + expected_foo = Macro( name='foo', resource_type=NodeType.Macro, unique_id='macro.snowplow.foo', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql='{% macro foo(a, b) %}a ~ b{% endmacro %}', ) @@ -1232,7 +1283,7 @@ def test_basic(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedSingularTestNode( + expected = SingularTestNode( alias='test_1', name='test_1', database='test', @@ -1242,7 +1293,6 @@ def test_basic(self): fqn=['snowplow', 'test_1'], package_name='snowplow', original_file_path=normalize('tests/test_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), refs=[['blah']], config=TestConfig(severity='ERROR'), tags=[], @@ -1259,7 +1309,7 @@ def test_basic(self): class GenericTestParserTest(BaseParserTest): -# generic tests in the test-paths directory currently leverage the macro parser + # generic tests in the test-paths directory currently leverage the macro parser def setUp(self): super().setUp() self.parser = GenericTestParser( @@ -1276,13 +1326,12 @@ def test_basic(self): self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) node = list(self.parser.manifest.macros.values())[0] - expected = ParsedMacro( + expected = Macro( name='test_not_null', resource_type=NodeType.Macro, unique_id='macro.snowplow.test_not_null', package_name='snowplow', original_file_path=normalize('tests/generic/test_1.sql'), - 
root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('tests/generic/test_1.sql'), macro_sql=raw_code, ) @@ -1311,7 +1360,7 @@ def test_basic(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedAnalysisNode( + expected = AnalysisNode( alias='analysis_1', name='analysis_1', database='test', @@ -1321,7 +1370,6 @@ def test_basic(self): fqn=['snowplow', 'analysis', 'nested', 'analysis_1'], package_name='snowplow', original_file_path=normalize('analyses/nested/analysis_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), depends_on=DependsOn(), config=NodeConfig(), path=normalize('analysis/nested/analysis_1.sql'), @@ -1329,10 +1377,9 @@ def test_basic(self): raw_code=raw_code, checksum=block.file.checksum, unrendered_config={}, + relation_name=None, ) assertEqualNodes(node, expected) - file_id = 'snowplow://' + normalize('analyses/nested/analysis_1.sql') + file_id = 'snowplow://' + normalize('analyses/nested/analysis_1.sql') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['analysis.snowplow.analysis_1']) - - diff --git a/test/unit/test_partial_parsing.py b/test/unit/test_partial_parsing.py index de0e230ad3c..34e85b0cef0 100644 --- a/test/unit/test_partial_parsing.py +++ b/test/unit/test_partial_parsing.py @@ -5,7 +5,7 @@ import dbt.exceptions from dbt.parser.partial import PartialParsing from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedModelNode +from dbt.contracts.graph.nodes import ModelNode from dbt.contracts.files import ParseFileType, SourceFile, SchemaSourceFile, FilePath, FileHash from dbt.node_types import NodeType from .utils import normalize @@ -88,9 +88,8 @@ def setUp(self): self.partial_parsing = PartialParsing(self.saved_manifest, self.new_files) def get_model(self, name): - return ParsedModelNode( + 
return ModelNode( package_name='my_test', - root_path='/users/root/', path=f'{name}.sql', original_file_path=f'models/{name}.sql', language='sql', @@ -107,9 +106,8 @@ def get_model(self, name): ) def get_python_model(self, name): - return ParsedModelNode( + return ModelNode( package_name='my_test', - root_path='/users/root/', path=f'{name}.py', original_file_path=f'models/{name}.py', raw_code='import something', diff --git a/test/unit/utils.py b/test/unit/utils.py index 521a83e329c..e1512abee2d 100644 --- a/test/unit/utils.py +++ b/test/unit/utils.py @@ -225,7 +225,7 @@ def assert_fails_validation(dct, cls): def generate_name_macros(package): - from dbt.contracts.graph.parsed import ParsedMacro + from dbt.contracts.graph.nodes import Macro from dbt.node_types import NodeType name_sql = {} for component in ('database', 'schema', 'alias'): @@ -238,13 +238,12 @@ def generate_name_macros(package): name_sql[name] = sql for name, sql in name_sql.items(): - pm = ParsedMacro( + pm = Macro( name=name, resource_type=NodeType.Macro, unique_id=f'macro.{package}.{name}', package_name=package, original_file_path=normalize('macros/macro.sql'), - root_path='./dbt_packages/root', path=normalize('macros/macro.sql'), macro_sql=sql, ) @@ -274,7 +273,7 @@ def _make_table_of(self, rows, column_types): def MockMacro(package, name='my_macro', **kwargs): - from dbt.contracts.graph.parsed import ParsedMacro + from dbt.contracts.graph.nodes import Macro from dbt.node_types import NodeType mock_kwargs = dict( @@ -287,7 +286,7 @@ def MockMacro(package, name='my_macro', **kwargs): mock_kwargs.update(kwargs) macro = mock.MagicMock( - spec=ParsedMacro, + spec=Macro, **mock_kwargs ) macro.name = name @@ -308,9 +307,9 @@ def MockGenerateMacro(package, component='some_component', **kwargs): def MockSource(package, source_name, name, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedSourceDefinition + from dbt.contracts.graph.nodes import SourceDefinition 
src = mock.MagicMock( - __class__=ParsedSourceDefinition, + __class__=SourceDefinition, resource_type=NodeType.Source, source_name=source_name, package_name=package, @@ -324,13 +323,13 @@ def MockSource(package, source_name, name, **kwargs): def MockNode(package, name, resource_type=None, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedModelNode, ParsedSeedNode + from dbt.contracts.graph.nodes import ModelNode, SeedNode if resource_type is None: resource_type = NodeType.Model if resource_type == NodeType.Model: - cls = ParsedModelNode + cls = ModelNode elif resource_type == NodeType.Seed: - cls = ParsedSeedNode + cls = SeedNode else: raise ValueError(f'I do not know how to handle {resource_type}') node = mock.MagicMock( @@ -347,9 +346,9 @@ def MockNode(package, name, resource_type=None, **kwargs): def MockDocumentation(package, name, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedDocumentation + from dbt.contracts.graph.nodes import Documentation doc = mock.MagicMock( - __class__=ParsedDocumentation, + __class__=Documentation, resource_type=NodeType.Documentation, package_name=package, search_name=name, diff --git a/tests/CONVERTING.md b/tests/CONVERTING.md index 89801fc74b9..44057cad05b 100644 --- a/tests/CONVERTING.md +++ b/tests/CONVERTING.md @@ -30,7 +30,7 @@ * some of the legacy tests used a 'default_project' method to change (for example) the seeds directory to load a different seed. Don't do that. Copying a file is probably a better option. - +* If there are more than 50 lines of fixture strings, they should be defined in a fixtures.py and then imported. We definitely don't do this everywhere right now but should move to this model. 
# Integration test directories that have been converted * 001\_simple\_copy\_tests => moved to 'basic' diff --git a/tests/adapter/dbt/tests/adapter/__version__.py b/tests/adapter/dbt/tests/adapter/__version__.py index 70ba273f562..27cfeecd9e8 100644 --- a/tests/adapter/dbt/tests/adapter/__version__.py +++ b/tests/adapter/dbt/tests/adapter/__version__.py @@ -1 +1 @@ -version = "1.4.0a1" +version = "1.4.0b1" diff --git a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py index d9ff6b5b28f..a9f846e2ca4 100644 --- a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py +++ b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py @@ -50,7 +50,10 @@ def models(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } def test_alias_model_name(self, project): results = run_dbt(["run"]) @@ -68,7 +71,10 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): @@ -94,7 +100,10 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): @@ -121,14 +130,19 @@ def project_config_update(self, unique_schema): "models": { "test": { "alias": "duped_alias", - "model_b": {"schema": unique_schema + "_alt"}, + "model_b": { + "schema": unique_schema + "_alt" + }, }, }, } @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": 
MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): diff --git a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py new file mode 100644 index 00000000000..b7b0ff9ac17 --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py @@ -0,0 +1,107 @@ +import pytest +import os +import re +import yaml +from dbt.tests.util import run_dbt + +MODELS__MODEL_SQL = """ +seled 1 as id +""" + + +class BaseDebug: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": MODELS__MODEL_SQL} + + @pytest.fixture(autouse=True) + def capsys(self, capsys): + self.capsys = capsys + + def assertGotValue(self, linepat, result): + found = False + output = self.capsys.readouterr().out + for line in output.split('\n'): + if linepat.match(line): + found = True + assert result in line + if not found: + with pytest.raises(Exception) as exc: + msg = f"linepat {linepat} not found in stdout: {output}" + assert msg in str(exc.value) + + def check_project(self, splitout, msg="ERROR invalid"): + for line in splitout: + if line.strip().startswith("dbt_project.yml file"): + assert msg in line + elif line.strip().startswith("profiles.yml file"): + assert "ERROR invalid" not in line + + +class BaseDebugProfileVariable(BaseDebug): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "profile": '{{ "te" ~ "st" }}' + } + + +class TestDebugPostgres(BaseDebug): + def test_ok(self, project): + run_dbt(["debug"]) + assert "ERROR" not in self.capsys.readouterr().out + + def test_nopass(self, project): + run_dbt(["debug", "--target", "nopass"], expect_pass=False) + self.assertGotValue(re.compile(r"\s+profiles\.yml file"), "ERROR invalid") + + def test_wronguser(self, project): + run_dbt(["debug", 
"--target", "wronguser"], expect_pass=False) + self.assertGotValue(re.compile(r"\s+Connection test"), "ERROR") + + def test_empty_target(self, project): + run_dbt(["debug", "--target", "none_target"], expect_pass=False) + self.assertGotValue(re.compile(r"\s+output 'none_target'"), "misconfigured") + + +class TestDebugProfileVariablePostgres(BaseDebugProfileVariable): + pass + + +class TestDebugInvalidProjectPostgres(BaseDebug): + + def test_empty_project(self, project): + with open("dbt_project.yml", "w") as f: # noqa: F841 + pass + + run_dbt(["debug", "--profile", "test"], expect_pass=False) + splitout = self.capsys.readouterr().out.split("\n") + self.check_project(splitout) + + def test_badproject(self, project): + update_project = {"invalid-key": "not a valid key so this is bad project"} + + with open("dbt_project.yml", "w") as f: + yaml.safe_dump(update_project, f) + + run_dbt(["debug", "--profile", "test"], expect_pass=False) + splitout = self.capsys.readouterr().out.split("\n") + self.check_project(splitout) + + def test_not_found_project(self, project): + run_dbt(["debug", "--project-dir", "nopass"], expect_pass=False) + splitout = self.capsys.readouterr().out.split("\n") + self.check_project(splitout, msg="ERROR not found") + + def test_invalid_project_outside_current_dir(self, project): + # create a dbt_project.yml + project_config = { + "invalid-key": "not a valid key in this project" + } + os.makedirs("custom", exist_ok=True) + with open("custom/dbt_project.yml", "w") as f: + yaml.safe_dump(project_config, f, default_flow_style=True) + run_dbt(["debug", "--project-dir", "custom"], expect_pass=False) + splitout = self.capsys.readouterr().out.split("\n") + self.check_project(splitout) diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py new file mode 100644 index 00000000000..11a4b6c0384 --- /dev/null +++ 
b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py @@ -0,0 +1,154 @@ +import pytest +from dbt.tests.util import run_dbt, check_relations_equal +from collections import namedtuple + + +models__delete_insert_incremental_predicates_sql = """ +{{ config( + materialized = 'incremental', + unique_key = 'id' +) }} + +{% if not is_incremental() %} + +select 1 as id, 'hello' as msg, 'blue' as color +union all +select 2 as id, 'goodbye' as msg, 'red' as color + +{% else %} + +-- delete will not happen on the above record where id = 2, so new record will be inserted instead +select 1 as id, 'hey' as msg, 'blue' as color +union all +select 2 as id, 'yo' as msg, 'green' as color +union all +select 3 as id, 'anyway' as msg, 'purple' as color + +{% endif %} +""" + +seeds__expected_delete_insert_incremental_predicates_csv = """id,msg,color +1,hey,blue +2,goodbye,red +2,yo,green +3,anyway,purple +""" + +ResultHolder = namedtuple( + "ResultHolder", + [ + "seed_count", + "model_count", + "seed_rows", + "inc_test_model_count", + "opt_model_count", + "relation", + ], +) + + +class BaseIncrementalPredicates: + @pytest.fixture(scope="class") + def models(self): + return { + "delete_insert_incremental_predicates.sql": models__delete_insert_incremental_predicates_sql + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "expected_delete_insert_incremental_predicates.csv": seeds__expected_delete_insert_incremental_predicates_csv + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "+incremental_predicates": [ + "id != 2" + ], + "+incremental_strategy": "delete+insert" + } + } + + def update_incremental_model(self, incremental_model): + """update incremental model after the seed table has been updated""" + model_result_set = run_dbt(["run", "--select", incremental_model]) + return len(model_result_set) + + def get_test_fields( + self, project, seed, incremental_model, update_sql_file, 
opt_model_count=None + ): + + seed_count = len(run_dbt(["seed", "--select", seed, "--full-refresh"])) + + model_count = len(run_dbt(["run", "--select", incremental_model, "--full-refresh"])) + # pass on kwarg + relation = incremental_model + # update seed in anticipation of incremental model update + row_count_query = "select * from {}.{}".format(project.test_schema, seed) + # project.run_sql_file(Path("seeds") / Path(update_sql_file + ".sql")) + seed_rows = len(project.run_sql(row_count_query, fetch="all")) + + # propagate seed state to incremental model according to unique keys + inc_test_model_count = self.update_incremental_model(incremental_model=incremental_model) + + return ResultHolder( + seed_count, model_count, seed_rows, inc_test_model_count, opt_model_count, relation + ) + + def check_scenario_correctness(self, expected_fields, test_case_fields, project): + """Invoke assertions to verify correct build functionality""" + # 1. test seed(s) should build afresh + assert expected_fields.seed_count == test_case_fields.seed_count + # 2. test model(s) should build afresh + assert expected_fields.model_count == test_case_fields.model_count + # 3. seeds should have intended row counts post update + assert expected_fields.seed_rows == test_case_fields.seed_rows + # 4. incremental test model(s) should be updated + assert expected_fields.inc_test_model_count == test_case_fields.inc_test_model_count + # 5. extra incremental model(s) should be built; optional since + # comparison may be between an incremental model and seed + if expected_fields.opt_model_count and test_case_fields.opt_model_count: + assert expected_fields.opt_model_count == test_case_fields.opt_model_count + # 6. 
result table should match intended result set (itself a relation) + check_relations_equal( + project.adapter, [expected_fields.relation, test_case_fields.relation] + ) + + def get_expected_fields(self, relation, seed_rows, opt_model_count=None): + return ResultHolder( + seed_count=1, + model_count=1, + inc_test_model_count=1, + seed_rows=seed_rows, + opt_model_count=opt_model_count, + relation=relation + ) + + # no unique_key test + def test__incremental_predicates(self, project): + """seed should match model after two incremental runs""" + + expected_fields = self.get_expected_fields(relation="expected_delete_insert_incremental_predicates", seed_rows=4) + test_case_fields = self.get_test_fields( + project, seed="expected_delete_insert_incremental_predicates", incremental_model="delete_insert_incremental_predicates", update_sql_file=None + ) + self.check_scenario_correctness(expected_fields, test_case_fields, project) + + +class TestIncrementalPredicatesDeleteInsert(BaseIncrementalPredicates): + pass + + +class TestPredicatesDeleteInsert(BaseIncrementalPredicates): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "+predicates": [ + "id != 2" + ], + "+incremental_strategy": "delete+insert" + } + } diff --git a/tests/adapter/setup.py b/tests/adapter/setup.py index ddb664d6989..f9ac627e445 100644 --- a/tests/adapter/setup.py +++ b/tests/adapter/setup.py @@ -20,7 +20,7 @@ package_name = "dbt-tests-adapter" -package_version = "1.4.0a1" +package_version = "1.4.0b1" description = """The dbt adapter tests for adapter plugins""" this_directory = os.path.abspath(os.path.dirname(__file__)) diff --git a/tests/functional/artifacts/data/state/v8/manifest.json b/tests/functional/artifacts/data/state/v8/manifest.json new file mode 100644 index 00000000000..58e3f04da3c --- /dev/null +++ b/tests/functional/artifacts/data/state/v8/manifest.json @@ -0,0 +1 @@ +{"metadata": {"dbt_schema_version": 
"https://schemas.getdbt.com/dbt/manifest/v8.json", "dbt_version": "1.4.0a1", "generated_at": "2022-12-12T13:54:37.804887Z", "invocation_id": "843eaaec-db3b-4406-87ec-a3651f124d69", "env": {}, "project_id": "098f6bcd4621d373cade4e832627b4f6", "user_id": null, "send_anonymous_usage_stats": false, "adapter_type": "postgres"}, "nodes": {"model.test.my_model": {"database": "dbt", "schema": "test16708532772964762671_test_previous_version_state", "name": "my_model", "resource_type": "model", "package_name": "test", "path": "my_model.sql", "original_file_path": "models/my_model.sql", "unique_id": "model.test.my_model", "fqn": ["test", "my_model"], "alias": "my_model", "checksum": {"name": "sha256", "checksum": "2b9123e04ab8bb798f7c565afdc3ee0e56fcd66b4bfbdb435b4891c878d947c5"}, "config": {"enabled": true, "alias": null, "schema": null, "database": null, "tags": [], "meta": {}, "materialized": "view", "incremental_strategy": null, "persist_docs": {}, "quoting": {}, "column_types": {}, "full_refresh": null, "unique_key": null, "on_schema_change": "ignore", "grants": {}, "packages": [], "docs": {"show": true, "node_color": null}, "post-hook": [], "pre-hook": []}, "tags": [], "description": "", "columns": {}, "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "build_path": null, "deferred": false, "unrendered_config": {}, "created_at": 1670853278.478401, "relation_name": "\"dbt\".\"test16708532772964762671_test_previous_version_state\".\"my_model\"", "raw_code": "select 1 as id", "language": "sql", "refs": [], "sources": [], "metrics": [], "depends_on": {"macros": [], "nodes": []}, "compiled_path": null}}, "sources": {}, "macros": {"macro.dbt_postgres.postgres__current_timestamp": {"name": "postgres__current_timestamp", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp", "macro_sql": "{% macro 
postgres__current_timestamp() -%}\n now()\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.828495, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_string_as_time": {"name": "postgres__snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_string_as_time", "macro_sql": "{% macro postgres__snapshot_string_as_time(timestamp) -%}\n {%- set result = \"'\" ~ timestamp ~ \"'::timestamp without time zone\" -%}\n {{ return(result) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829041, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_get_time": {"name": "postgres__snapshot_get_time", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_get_time", "macro_sql": "{% macro postgres__snapshot_get_time() -%}\n {{ current_timestamp() }}::timestamp without time zone\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829317, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_backcompat": {"name": "postgres__current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp_backcompat", "macro_sql": "{% macro 
postgres__current_timestamp_backcompat() %}\n current_timestamp::{{ type_timestamp() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829592, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat": {"name": "postgres__current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro postgres__current_timestamp_in_utc_backcompat() %}\n (current_timestamp at time zone 'utc')::{{ type_timestamp() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829864, "supported_languages": null}, "macro.dbt_postgres.postgres__get_catalog": {"name": "postgres__get_catalog", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/catalog.sql", "original_file_path": "macros/catalog.sql", "unique_id": "macro.dbt_postgres.postgres__get_catalog", "macro_sql": "{% macro postgres__get_catalog(information_schema, schemas) -%}\n\n {%- call statement('catalog', fetch_result=True) -%}\n {#\n If the user has multiple databases set and the first one is wrong, this will fail.\n But we won't fail in the case where there are multiple quoting-difference-only dbs, which is better.\n #}\n {% set database = information_schema.database %}\n {{ adapter.verify_database(database) }}\n\n select\n '{{ database }}' as table_database,\n sch.nspname as table_schema,\n tbl.relname as table_name,\n case tbl.relkind\n when 'v' then 'VIEW'\n else 'BASE TABLE'\n end as table_type,\n tbl_desc.description as table_comment,\n 
col.attname as column_name,\n col.attnum as column_index,\n pg_catalog.format_type(col.atttypid, col.atttypmod) as column_type,\n col_desc.description as column_comment,\n pg_get_userbyid(tbl.relowner) as table_owner\n\n from pg_catalog.pg_namespace sch\n join pg_catalog.pg_class tbl on tbl.relnamespace = sch.oid\n join pg_catalog.pg_attribute col on col.attrelid = tbl.oid\n left outer join pg_catalog.pg_description tbl_desc on (tbl_desc.objoid = tbl.oid and tbl_desc.objsubid = 0)\n left outer join pg_catalog.pg_description col_desc on (col_desc.objoid = tbl.oid and col_desc.objsubid = col.attnum)\n\n where (\n {%- for schema in schemas -%}\n upper(sch.nspname) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n )\n and not pg_is_other_temp_schema(sch.oid) -- not a temporary schema belonging to another session\n and tbl.relpersistence in ('p', 'u') -- [p]ermanent table or [u]nlogged table. Exclude [t]emporary tables\n and tbl.relkind in ('r', 'v', 'f', 'p') -- o[r]dinary table, [v]iew, [f]oreign table, [p]artitioned table. 
Other values are [i]ndex, [S]equence, [c]omposite type, [t]OAST table, [m]aterialized view\n and col.attnum > 0 -- negative numbers are used for system columns such as oid\n and not col.attisdropped -- column as not been dropped\n\n order by\n sch.nspname,\n tbl.relname,\n col.attnum\n\n {%- endcall -%}\n\n {{ return(load_result('catalog').table) }}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.832119, "supported_languages": null}, "macro.dbt_postgres.postgres_get_relations": {"name": "postgres_get_relations", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/relations.sql", "original_file_path": "macros/relations.sql", "unique_id": "macro.dbt_postgres.postgres_get_relations", "macro_sql": "{% macro postgres_get_relations () -%}\n\n {#\n -- in pg_depend, objid is the dependent, refobjid is the referenced object\n -- > a pg_depend entry indicates that the referenced object cannot be\n -- > dropped without also dropping the dependent object.\n #}\n\n {%- call statement('relations', fetch_result=True) -%}\n with relation as (\n select\n pg_rewrite.ev_class as class,\n pg_rewrite.oid as id\n from pg_rewrite\n ),\n class as (\n select\n oid as id,\n relname as name,\n relnamespace as schema,\n relkind as kind\n from pg_class\n ),\n dependency as (\n select distinct\n pg_depend.objid as id,\n pg_depend.refobjid as ref\n from pg_depend\n ),\n schema as (\n select\n pg_namespace.oid as id,\n pg_namespace.nspname as name\n from pg_namespace\n where nspname != 'information_schema' and nspname not like 'pg\\_%'\n ),\n referenced as (\n select\n relation.id AS id,\n referenced_class.name ,\n referenced_class.schema ,\n referenced_class.kind\n from relation\n join class as referenced_class on relation.class=referenced_class.id\n where referenced_class.kind in ('r', 'v')\n ),\n relationships as 
(\n select\n referenced.name as referenced_name,\n referenced.schema as referenced_schema_id,\n dependent_class.name as dependent_name,\n dependent_class.schema as dependent_schema_id,\n referenced.kind as kind\n from referenced\n join dependency on referenced.id=dependency.id\n join class as dependent_class on dependency.ref=dependent_class.id\n where\n (referenced.name != dependent_class.name or\n referenced.schema != dependent_class.schema)\n )\n\n select\n referenced_schema.name as referenced_schema,\n relationships.referenced_name as referenced_name,\n dependent_schema.name as dependent_schema,\n relationships.dependent_name as dependent_name\n from relationships\n join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id\n join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id\n group by referenced_schema, referenced_name, dependent_schema, dependent_name\n order by referenced_schema, referenced_name, dependent_schema, dependent_name;\n\n {%- endcall -%}\n\n {{ return(load_result('relations').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.833379, "supported_languages": null}, "macro.dbt_postgres.postgres__create_table_as": {"name": "postgres__create_table_as", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__create_table_as", "macro_sql": "{% macro postgres__create_table_as(temporary, relation, sql) -%}\n {%- set unlogged = config.get('unlogged', default=false) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary -%}\n temporary\n {%- elif unlogged -%}\n unlogged\n {%- endif %} table {{ relation }}\n as (\n {{ sql }}\n 
);\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.844162, "supported_languages": null}, "macro.dbt_postgres.postgres__get_create_index_sql": {"name": "postgres__get_create_index_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_create_index_sql", "macro_sql": "{% macro postgres__get_create_index_sql(relation, index_dict) -%}\n {%- set index_config = adapter.parse_index(index_dict) -%}\n {%- set comma_separated_columns = \", \".join(index_config.columns) -%}\n {%- set index_name = index_config.render(relation) -%}\n\n create {% if index_config.unique -%}\n unique\n {%- endif %} index if not exists\n \"{{ index_name }}\"\n on {{ relation }} {% if index_config.type -%}\n using {{ index_config.type }}\n {%- endif %}\n ({{ comma_separated_columns }});\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.84543, "supported_languages": null}, "macro.dbt_postgres.postgres__create_schema": {"name": "postgres__create_schema", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__create_schema", "macro_sql": "{% macro postgres__create_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier().include(database=False) }}\n {%- endcall -%}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": 
null, "arguments": [], "created_at": 1670853277.846217, "supported_languages": null}, "macro.dbt_postgres.postgres__drop_schema": {"name": "postgres__drop_schema", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__drop_schema", "macro_sql": "{% macro postgres__drop_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier().include(database=False) }} cascade\n {%- endcall -%}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.847004, "supported_languages": null}, "macro.dbt_postgres.postgres__get_columns_in_relation": {"name": "postgres__get_columns_in_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_columns_in_relation", "macro_sql": "{% macro postgres__get_columns_in_relation(relation) -%}\n {% call statement('get_columns_in_relation', fetch_result=True) %}\n select\n column_name,\n data_type,\n character_maximum_length,\n numeric_precision,\n numeric_scale\n\n from {{ relation.information_schema('columns') }}\n where table_name = '{{ relation.identifier }}'\n {% if relation.schema %}\n and table_schema = '{{ relation.schema }}'\n {% endif %}\n order by ordinal_position\n\n {% endcall %}\n {% set table = load_result('get_columns_in_relation').table %}\n {{ return(sql_convert_columns_in_relation(table)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.sql_convert_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, 
"patch_path": null, "arguments": [], "created_at": 1670853277.8481832, "supported_languages": null}, "macro.dbt_postgres.postgres__list_relations_without_caching": {"name": "postgres__list_relations_without_caching", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__list_relations_without_caching", "macro_sql": "{% macro postgres__list_relations_without_caching(schema_relation) %}\n {% call statement('list_relations_without_caching', fetch_result=True) -%}\n select\n '{{ schema_relation.database }}' as database,\n tablename as name,\n schemaname as schema,\n 'table' as type\n from pg_tables\n where schemaname ilike '{{ schema_relation.schema }}'\n union all\n select\n '{{ schema_relation.database }}' as database,\n viewname as name,\n schemaname as schema,\n 'view' as type\n from pg_views\n where schemaname ilike '{{ schema_relation.schema }}'\n {% endcall %}\n {{ return(load_result('list_relations_without_caching').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8492, "supported_languages": null}, "macro.dbt_postgres.postgres__information_schema_name": {"name": "postgres__information_schema_name", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__information_schema_name", "macro_sql": "{% macro postgres__information_schema_name(database) -%}\n {% if database_name -%}\n {{ adapter.verify_database(database_name) }}\n {%- endif -%}\n information_schema\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.849643, 
"supported_languages": null}, "macro.dbt_postgres.postgres__list_schemas": {"name": "postgres__list_schemas", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__list_schemas", "macro_sql": "{% macro postgres__list_schemas(database) %}\n {% if database -%}\n {{ adapter.verify_database(database) }}\n {%- endif -%}\n {% call statement('list_schemas', fetch_result=True, auto_begin=False) %}\n select distinct nspname from pg_namespace\n {% endcall %}\n {{ return(load_result('list_schemas').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8505101, "supported_languages": null}, "macro.dbt_postgres.postgres__check_schema_exists": {"name": "postgres__check_schema_exists", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__check_schema_exists", "macro_sql": "{% macro postgres__check_schema_exists(information_schema, schema) -%}\n {% if information_schema.database -%}\n {{ adapter.verify_database(information_schema.database) }}\n {%- endif -%}\n {% call statement('check_schema_exists', fetch_result=True, auto_begin=False) %}\n select count(*) from pg_namespace where nspname = '{{ schema }}'\n {% endcall %}\n {{ return(load_result('check_schema_exists').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.851476, "supported_languages": null}, "macro.dbt_postgres.postgres__make_relation_with_suffix": {"name": "postgres__make_relation_with_suffix", "resource_type": "macro", "package_name": "dbt_postgres", 
"path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_relation_with_suffix", "macro_sql": "{% macro postgres__make_relation_with_suffix(base_relation, suffix, dstring) %}\n {% if dstring %}\n {% set dt = modules.datetime.datetime.now() %}\n {% set dtstring = dt.strftime(\"%H%M%S%f\") %}\n {% set suffix = suffix ~ dtstring %}\n {% endif %}\n {% set suffix_length = suffix|length %}\n {% set relation_max_name_length = base_relation.relation_max_name_length() %}\n {% if suffix_length > relation_max_name_length %}\n {% do exceptions.raise_compiler_error('Relation suffix is too long (' ~ suffix_length ~ ' characters). Maximum length is ' ~ relation_max_name_length ~ ' characters.') %}\n {% endif %}\n {% set identifier = base_relation.identifier[:relation_max_name_length - suffix_length] ~ suffix %}\n\n {{ return(base_relation.incorporate(path={\"identifier\": identifier })) }}\n\n {% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.853593, "supported_languages": null}, "macro.dbt_postgres.postgres__make_intermediate_relation": {"name": "postgres__make_intermediate_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_intermediate_relation", "macro_sql": "{% macro postgres__make_intermediate_relation(base_relation, suffix) %}\n {{ return(postgres__make_relation_with_suffix(base_relation, suffix, dstring=False)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.854094, "supported_languages": null}, "macro.dbt_postgres.postgres__make_temp_relation": 
{"name": "postgres__make_temp_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_temp_relation", "macro_sql": "{% macro postgres__make_temp_relation(base_relation, suffix) %}\n {% set temp_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=True) %}\n {{ return(temp_relation.incorporate(path={\"schema\": none,\n \"database\": none})) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.855048, "supported_languages": null}, "macro.dbt_postgres.postgres__make_backup_relation": {"name": "postgres__make_backup_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_backup_relation", "macro_sql": "{% macro postgres__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {% set backup_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=False) %}\n {{ return(backup_relation.incorporate(type=backup_relation_type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.855862, "supported_languages": null}, "macro.dbt_postgres.postgres_escape_comment": {"name": "postgres_escape_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres_escape_comment", "macro_sql": "{% macro postgres_escape_comment(comment) -%}\n {% if comment is not 
string %}\n {% do exceptions.raise_compiler_error('cannot escape a non-string: ' ~ comment) %}\n {% endif %}\n {%- set magic = '$dbt_comment_literal_block$' -%}\n {%- if magic in comment -%}\n {%- do exceptions.raise_compiler_error('The string ' ~ magic ~ ' is not allowed in comments.') -%}\n {%- endif -%}\n {{ magic }}{{ comment }}{{ magic }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.856958, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_relation_comment": {"name": "postgres__alter_relation_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__alter_relation_comment", "macro_sql": "{% macro postgres__alter_relation_comment(relation, comment) %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on {{ relation.type }} {{ relation }} is {{ escaped_comment }};\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8575392, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_column_comment": {"name": "postgres__alter_column_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__alter_column_comment", "macro_sql": "{% macro postgres__alter_column_comment(relation, column_dict) %}\n {% set existing_columns = adapter.get_columns_in_relation(relation) | map(attribute=\"name\") | list %}\n {% for column_name in column_dict if (column_name in existing_columns) %}\n {% set comment = column_dict[column_name]['description'] %}\n {% set escaped_comment 
= postgres_escape_comment(comment) %}\n comment on column {{ relation }}.{{ adapter.quote(column_name) if column_dict[column_name]['quote'] else column_name }} is {{ escaped_comment }};\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8590431, "supported_languages": null}, "macro.dbt_postgres.postgres__get_show_grant_sql": {"name": "postgres__get_show_grant_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_show_grant_sql", "macro_sql": "\n\n{%- macro postgres__get_show_grant_sql(relation) -%}\n select grantee, privilege_type\n from {{ relation.information_schema('role_table_grants') }}\n where grantor = current_role\n and grantee != current_role\n and table_schema = '{{ relation.schema }}'\n and table_name = '{{ relation.identifier }}'\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8596349, "supported_languages": null}, "macro.dbt_postgres.postgres__copy_grants": {"name": "postgres__copy_grants", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__copy_grants", "macro_sql": "{% macro postgres__copy_grants() %}\n {{ return(False) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.85995, "supported_languages": null}, "macro.dbt_postgres.postgres__get_incremental_default_sql": {"name": "postgres__get_incremental_default_sql", "resource_type": 
"macro", "package_name": "dbt_postgres", "path": "macros/materializations/incremental_strategies.sql", "original_file_path": "macros/materializations/incremental_strategies.sql", "unique_id": "macro.dbt_postgres.postgres__get_incremental_default_sql", "macro_sql": "{% macro postgres__get_incremental_default_sql(arg_dict) %}\n\n {% if arg_dict[\"unique_key\"] %}\n {% do return(get_incremental_delete_insert_sql(arg_dict)) %}\n {% else %}\n {% do return(get_incremental_append_sql(arg_dict)) %}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_incremental_delete_insert_sql", "macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.860895, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_merge_sql": {"name": "postgres__snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/materializations/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshot_merge.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_merge_sql", "macro_sql": "{% macro postgres__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n update {{ target }}\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_scd_id::text = {{ target }}.dbt_scd_id::text\n and DBT_INTERNAL_SOURCE.dbt_change_type::text in ('update'::text, 'delete'::text)\n and {{ target }}.dbt_valid_to is null;\n\n insert into {{ target }} ({{ insert_cols_csv }})\n select {% for column in insert_cols -%}\n DBT_INTERNAL_SOURCE.{{ column }} {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_change_type::text = 'insert'::text;\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, 
"docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8624861, "supported_languages": null}, "macro.dbt_postgres.postgres__dateadd": {"name": "postgres__dateadd", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt_postgres.postgres__dateadd", "macro_sql": "{% macro postgres__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n {{ from_date_or_timestamp }} + ((interval '1 {{ datepart }}') * ({{ interval }}))\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.863015, "supported_languages": null}, "macro.dbt_postgres.postgres__listagg": {"name": "postgres__listagg", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt_postgres.postgres__listagg", "macro_sql": "{% macro postgres__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n (array_agg(\n {{ measure }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n ))[1:{{ limit_num }}],\n {{ delimiter_text }}\n )\n {%- else %}\n string_agg(\n {{ measure }},\n {{ delimiter_text }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n )\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.864436, "supported_languages": null}, "macro.dbt_postgres.postgres__datediff": {"name": "postgres__datediff", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": 
"macro.dbt_postgres.postgres__datediff", "macro_sql": "{% macro postgres__datediff(first_date, second_date, datepart) -%}\n\n {% if datepart == 'year' %}\n (date_part('year', ({{second_date}})::date) - date_part('year', ({{first_date}})::date))\n {% elif datepart == 'quarter' %}\n ({{ datediff(first_date, second_date, 'year') }} * 4 + date_part('quarter', ({{second_date}})::date) - date_part('quarter', ({{first_date}})::date))\n {% elif datepart == 'month' %}\n ({{ datediff(first_date, second_date, 'year') }} * 12 + date_part('month', ({{second_date}})::date) - date_part('month', ({{first_date}})::date))\n {% elif datepart == 'day' %}\n (({{second_date}})::date - ({{first_date}})::date)\n {% elif datepart == 'week' %}\n ({{ datediff(first_date, second_date, 'day') }} / 7 + case\n when date_part('dow', ({{first_date}})::timestamp) <= date_part('dow', ({{second_date}})::timestamp) then\n case when {{first_date}} <= {{second_date}} then 0 else -1 end\n else\n case when {{first_date}} <= {{second_date}} then 1 else 0 end\n end)\n {% elif datepart == 'hour' %}\n ({{ datediff(first_date, second_date, 'day') }} * 24 + date_part('hour', ({{second_date}})::timestamp) - date_part('hour', ({{first_date}})::timestamp))\n {% elif datepart == 'minute' %}\n ({{ datediff(first_date, second_date, 'hour') }} * 60 + date_part('minute', ({{second_date}})::timestamp) - date_part('minute', ({{first_date}})::timestamp))\n {% elif datepart == 'second' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60 + floor(date_part('second', ({{second_date}})::timestamp)) - floor(date_part('second', ({{first_date}})::timestamp)))\n {% elif datepart == 'millisecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000 + floor(date_part('millisecond', ({{second_date}})::timestamp)) - floor(date_part('millisecond', ({{first_date}})::timestamp)))\n {% elif datepart == 'microsecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000000 + floor(date_part('microsecond', 
({{second_date}})::timestamp)) - floor(date_part('microsecond', ({{first_date}})::timestamp)))\n {% else %}\n {{ exceptions.raise_compiler_error(\"Unsupported datepart for macro datediff in postgres: {!r}\".format(datepart)) }}\n {% endif %}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.871541, "supported_languages": null}, "macro.dbt_postgres.postgres__any_value": {"name": "postgres__any_value", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt_postgres.postgres__any_value", "macro_sql": "{% macro postgres__any_value(expression) -%}\n\n min({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8719308, "supported_languages": null}, "macro.dbt_postgres.postgres__last_day": {"name": "postgres__last_day", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt_postgres.postgres__last_day", "macro_sql": "{% macro postgres__last_day(date, datepart) -%}\n\n {%- if datepart == 'quarter' -%}\n -- postgres dateadd does not support quarter interval.\n cast(\n {{dbt.dateadd('day', '-1',\n dbt.dateadd('month', '3', dbt.date_trunc(datepart, date))\n )}}\n as date)\n {%- else -%}\n {{dbt.default_last_day(date, datepart)}}\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc", "macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8731148, "supported_languages": null}, 
"macro.dbt_postgres.postgres__split_part": {"name": "postgres__split_part", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt_postgres.postgres__split_part", "macro_sql": "{% macro postgres__split_part(string_text, delimiter_text, part_number) %}\n\n {% if part_number >= 0 %}\n {{ dbt.default__split_part(string_text, delimiter_text, part_number) }}\n {% else %}\n {{ dbt._split_part_negative(string_text, delimiter_text, part_number) }}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__split_part", "macro.dbt._split_part_negative"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8740962, "supported_languages": null}, "macro.dbt.run_hooks": {"name": "run_hooks", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.run_hooks", "macro_sql": "{% macro run_hooks(hooks, inside_transaction=True) %}\n {% for hook in hooks | selectattr('transaction', 'equalto', inside_transaction) %}\n {% if not inside_transaction and loop.first %}\n {% call statement(auto_begin=inside_transaction) %}\n commit;\n {% endcall %}\n {% endif %}\n {% set rendered = render(hook.get('sql')) | trim %}\n {% if (rendered | length) > 0 %}\n {% call statement(auto_begin=inside_transaction) %}\n {{ rendered }}\n {% endcall %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.876752, "supported_languages": null}, "macro.dbt.make_hook_config": {"name": "make_hook_config", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", 
"original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.make_hook_config", "macro_sql": "{% macro make_hook_config(sql, inside_transaction) %}\n {{ tojson({\"sql\": sql, \"transaction\": inside_transaction}) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8772988, "supported_languages": null}, "macro.dbt.before_begin": {"name": "before_begin", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.before_begin", "macro_sql": "{% macro before_begin(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.877683, "supported_languages": null}, "macro.dbt.in_transaction": {"name": "in_transaction", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.in_transaction", "macro_sql": "{% macro in_transaction(sql) %}\n {{ make_hook_config(sql, inside_transaction=True) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.878058, "supported_languages": null}, "macro.dbt.after_commit": {"name": "after_commit", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.after_commit", "macro_sql": "{% macro after_commit(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) 
}}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.878428, "supported_languages": null}, "macro.dbt.set_sql_header": {"name": "set_sql_header", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.set_sql_header", "macro_sql": "{% macro set_sql_header(config) -%}\n {{ config.set('sql_header', caller()) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8792682, "supported_languages": null}, "macro.dbt.should_full_refresh": {"name": "should_full_refresh", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.should_full_refresh", "macro_sql": "{% macro should_full_refresh() %}\n {% set config_full_refresh = config.get('full_refresh') %}\n {% if config_full_refresh is none %}\n {% set config_full_refresh = flags.FULL_REFRESH %}\n {% endif %}\n {% do return(config_full_refresh) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.880049, "supported_languages": null}, "macro.dbt.should_store_failures": {"name": "should_store_failures", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.should_store_failures", "macro_sql": "{% macro should_store_failures() %}\n {% set config_store_failures = config.get('store_failures') %}\n {% if config_store_failures is none 
%}\n {% set config_store_failures = flags.STORE_FAILURES %}\n {% endif %}\n {% do return(config_store_failures) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.880833, "supported_languages": null}, "macro.dbt.snapshot_merge_sql": {"name": "snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "unique_id": "macro.dbt.snapshot_merge_sql", "macro_sql": "{% macro snapshot_merge_sql(target, source, insert_cols) -%}\n {{ adapter.dispatch('snapshot_merge_sql', 'dbt')(target, source, insert_cols) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.881776, "supported_languages": null}, "macro.dbt.default__snapshot_merge_sql": {"name": "default__snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "unique_id": "macro.dbt.default__snapshot_merge_sql", "macro_sql": "{% macro default__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on DBT_INTERNAL_SOURCE.dbt_scd_id = DBT_INTERNAL_DEST.dbt_scd_id\n\n when matched\n and DBT_INTERNAL_DEST.dbt_valid_to is null\n and DBT_INTERNAL_SOURCE.dbt_change_type in ('update', 'delete')\n then update\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n\n when not matched\n and DBT_INTERNAL_SOURCE.dbt_change_type = 'insert'\n then insert ({{ insert_cols_csv 
}})\n values ({{ insert_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.882448, "supported_languages": null}, "macro.dbt.strategy_dispatch": {"name": "strategy_dispatch", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.strategy_dispatch", "macro_sql": "{% macro strategy_dispatch(name) -%}\n{% set original_name = name %}\n {% if '.' in name %}\n {% set package_name, name = name.split(\".\", 1) %}\n {% else %}\n {% set package_name = none %}\n {% endif %}\n\n {% if package_name is none %}\n {% set package_context = context %}\n {% elif package_name in context %}\n {% set package_context = context[package_name] %}\n {% else %}\n {% set error_msg %}\n Could not find package '{{package_name}}', called with '{{original_name}}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n\n {%- set search_name = 'snapshot_' ~ name ~ '_strategy' -%}\n\n {% if search_name not in package_context %}\n {% set error_msg %}\n The specified strategy macro '{{name}}' was not found in package '{{ package_name }}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n {{ return(package_context[search_name]) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.88957, "supported_languages": null}, "macro.dbt.snapshot_hash_arguments": {"name": "snapshot_hash_arguments", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": 
"macro.dbt.snapshot_hash_arguments", "macro_sql": "{% macro snapshot_hash_arguments(args) -%}\n {{ adapter.dispatch('snapshot_hash_arguments', 'dbt')(args) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.89017, "supported_languages": null}, "macro.dbt.default__snapshot_hash_arguments": {"name": "default__snapshot_hash_arguments", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.default__snapshot_hash_arguments", "macro_sql": "{% macro default__snapshot_hash_arguments(args) -%}\n md5({%- for arg in args -%}\n coalesce(cast({{ arg }} as varchar ), '')\n {% if not loop.last %} || '|' || {% endif %}\n {%- endfor -%})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.890751, "supported_languages": null}, "macro.dbt.snapshot_timestamp_strategy": {"name": "snapshot_timestamp_strategy", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_timestamp_strategy", "macro_sql": "{% macro snapshot_timestamp_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set primary_key = config['unique_key'] %}\n {% set updated_at = config['updated_at'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n\n {#/*\n The snapshot relation might not have an {{ updated_at }} value if the\n snapshot strategy is changed from `check` to `timestamp`. 
We\n should use a dbt-created column for the comparison in the snapshot\n table instead of assuming that the user-supplied {{ updated_at }}\n will be present in the historical data.\n\n See https://github.com/dbt-labs/dbt-core/issues/2350\n */ #}\n {% set row_changed_expr -%}\n ({{ snapshotted_rel }}.dbt_valid_from < {{ current_rel }}.{{ updated_at }})\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.892682, "supported_languages": null}, "macro.dbt.snapshot_string_as_time": {"name": "snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_string_as_time", "macro_sql": "{% macro snapshot_string_as_time(timestamp) -%}\n {{ adapter.dispatch('snapshot_string_as_time', 'dbt')(timestamp) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_string_as_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.893109, "supported_languages": null}, "macro.dbt.default__snapshot_string_as_time": {"name": "default__snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.default__snapshot_string_as_time", "macro_sql": "{% macro 
default__snapshot_string_as_time(timestamp) %}\n {% do exceptions.raise_not_implemented(\n 'snapshot_string_as_time macro not implemented for adapter '+adapter.type()\n ) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.893554, "supported_languages": null}, "macro.dbt.snapshot_check_all_get_existing_columns": {"name": "snapshot_check_all_get_existing_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_check_all_get_existing_columns", "macro_sql": "{% macro snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) -%}\n {%- if not target_exists -%}\n {#-- no table yet -> return whatever the query does --#}\n {{ return((false, query_columns)) }}\n {%- endif -%}\n\n {#-- handle any schema changes --#}\n {%- set target_relation = adapter.get_relation(database=node.database, schema=node.schema, identifier=node.alias) -%}\n\n {% if check_cols_config == 'all' %}\n {%- set query_columns = get_columns_in_query(node['compiled_code']) -%}\n\n {% elif check_cols_config is iterable and (check_cols_config | length) > 0 %}\n {#-- query for proper casing/quoting, to support comparison below --#}\n {%- set select_check_cols_from_target -%}\n select {{ check_cols_config | join(', ') }} from ({{ node['compiled_code'] }}) subq\n {%- endset -%}\n {% set query_columns = get_columns_in_query(select_check_cols_from_target) %}\n\n {% else %}\n {% do exceptions.raise_compiler_error(\"Invalid value for 'check_cols': \" ~ check_cols_config) %}\n {% endif %}\n\n {%- set existing_cols = adapter.get_columns_in_relation(target_relation) | map(attribute = 'name') | list -%}\n {%- set ns = namespace() -%} {#-- handle for-loop scoping with a namespace --#}\n {%- set 
ns.column_added = false -%}\n\n {%- set intersection = [] -%}\n {%- for col in query_columns -%}\n {%- if col in existing_cols -%}\n {%- do intersection.append(adapter.quote(col)) -%}\n {%- else -%}\n {% set ns.column_added = true %}\n {%- endif -%}\n {%- endfor -%}\n {{ return((ns.column_added, intersection)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.897105, "supported_languages": null}, "macro.dbt.snapshot_check_strategy": {"name": "snapshot_check_strategy", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_check_strategy", "macro_sql": "{% macro snapshot_check_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set check_cols_config = config['check_cols'] %}\n {% set primary_key = config['unique_key'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n {% set updated_at = config.get('updated_at', snapshot_get_time()) %}\n\n {% set column_added = false %}\n\n {% set column_added, check_cols = snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) %}\n\n {%- set row_changed_expr -%}\n (\n {%- if column_added -%}\n {{ get_true_sql() }}\n {%- else -%}\n {%- for col in check_cols -%}\n {{ snapshotted_rel }}.{{ col }} != {{ current_rel }}.{{ col }}\n or\n (\n (({{ snapshotted_rel }}.{{ col }} is null) and not ({{ current_rel }}.{{ col }} is null))\n or\n ((not {{ snapshotted_rel }}.{{ col }} is null) and ({{ current_rel }}.{{ col }} is null))\n )\n {%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n {%- endif -%}\n )\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n 
\"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.snapshot_get_time", "macro.dbt.snapshot_check_all_get_existing_columns", "macro.dbt.get_true_sql", "macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.90045, "supported_languages": null}, "macro.dbt.create_columns": {"name": "create_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.create_columns", "macro_sql": "{% macro create_columns(relation, columns) %}\n {{ adapter.dispatch('create_columns', 'dbt')(relation, columns) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9079978, "supported_languages": null}, "macro.dbt.default__create_columns": {"name": "default__create_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__create_columns", "macro_sql": "{% macro default__create_columns(relation, columns) %}\n {% for column in columns %}\n {% call statement() %}\n alter table {{ relation }} add column \"{{ column.name }}\" {{ column.data_type }};\n {% endcall %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.908781, "supported_languages": null}, 
"macro.dbt.post_snapshot": {"name": "post_snapshot", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.post_snapshot", "macro_sql": "{% macro post_snapshot(staging_relation) %}\n {{ adapter.dispatch('post_snapshot', 'dbt')(staging_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.909224, "supported_languages": null}, "macro.dbt.default__post_snapshot": {"name": "default__post_snapshot", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__post_snapshot", "macro_sql": "{% macro default__post_snapshot(staging_relation) %}\n {# no-op #}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.909463, "supported_languages": null}, "macro.dbt.get_true_sql": {"name": "get_true_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.get_true_sql", "macro_sql": "{% macro get_true_sql() %}\n {{ adapter.dispatch('get_true_sql', 'dbt')() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_true_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9098508, "supported_languages": null}, "macro.dbt.default__get_true_sql": {"name": "default__get_true_sql", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__get_true_sql", "macro_sql": "{% macro default__get_true_sql() %}\n {{ return('TRUE') }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9101608, "supported_languages": null}, "macro.dbt.snapshot_staging_table": {"name": "snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.snapshot_staging_table", "macro_sql": "{% macro snapshot_staging_table(strategy, source_sql, target_relation) -%}\n {{ adapter.dispatch('snapshot_staging_table', 'dbt')(strategy, source_sql, target_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__snapshot_staging_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.910683, "supported_languages": null}, "macro.dbt.default__snapshot_staging_table": {"name": "default__snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__snapshot_staging_table", "macro_sql": "{% macro default__snapshot_staging_table(strategy, source_sql, target_relation) -%}\n\n with snapshot_query as (\n\n {{ source_sql }}\n\n ),\n\n snapshotted_data as (\n\n select *,\n {{ strategy.unique_key }} as dbt_unique_key\n\n from {{ target_relation }}\n where dbt_valid_to is null\n\n ),\n\n insertions_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ 
strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to,\n {{ strategy.scd_id }} as dbt_scd_id\n\n from snapshot_query\n ),\n\n updates_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n {{ strategy.updated_at }} as dbt_valid_to\n\n from snapshot_query\n ),\n\n {%- if strategy.invalidate_hard_deletes %}\n\n deletes_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key\n from snapshot_query\n ),\n {% endif %}\n\n insertions as (\n\n select\n 'insert' as dbt_change_type,\n source_data.*\n\n from insertions_source_data as source_data\n left outer join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where snapshotted_data.dbt_unique_key is null\n or (\n snapshotted_data.dbt_unique_key is not null\n and (\n {{ strategy.row_changed }}\n )\n )\n\n ),\n\n updates as (\n\n select\n 'update' as dbt_change_type,\n source_data.*,\n snapshotted_data.dbt_scd_id\n\n from updates_source_data as source_data\n join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where (\n {{ strategy.row_changed }}\n )\n )\n\n {%- if strategy.invalidate_hard_deletes -%}\n ,\n\n deletes as (\n\n select\n 'delete' as dbt_change_type,\n source_data.*,\n {{ snapshot_get_time() }} as dbt_valid_from,\n {{ snapshot_get_time() }} as dbt_updated_at,\n {{ snapshot_get_time() }} as dbt_valid_to,\n snapshotted_data.dbt_scd_id\n\n from snapshotted_data\n left join deletes_source_data as source_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where source_data.dbt_unique_key is null\n )\n {%- endif %}\n\n select * from insertions\n union all\n select * from updates\n {%- if strategy.invalidate_hard_deletes %}\n union all\n select * from deletes\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": 
["macro.dbt.snapshot_get_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.912821, "supported_languages": null}, "macro.dbt.build_snapshot_table": {"name": "build_snapshot_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.build_snapshot_table", "macro_sql": "{% macro build_snapshot_table(strategy, sql) -%}\n {{ adapter.dispatch('build_snapshot_table', 'dbt')(strategy, sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__build_snapshot_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.913307, "supported_languages": null}, "macro.dbt.default__build_snapshot_table": {"name": "default__build_snapshot_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__build_snapshot_table", "macro_sql": "{% macro default__build_snapshot_table(strategy, sql) %}\n\n select *,\n {{ strategy.scd_id }} as dbt_scd_id,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to\n from (\n {{ sql }}\n ) sbq\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.913965, "supported_languages": null}, "macro.dbt.build_snapshot_staging_table": {"name": "build_snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": 
"macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.build_snapshot_staging_table", "macro_sql": "{% macro build_snapshot_staging_table(strategy, sql, target_relation) %}\n {% set temp_relation = make_temp_relation(target_relation) %}\n\n {% set select = snapshot_staging_table(strategy, sql, target_relation) %}\n\n {% call statement('build_snapshot_staging_relation') %}\n {{ create_table_as(True, temp_relation, select) }}\n {% endcall %}\n\n {% do return(temp_relation) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_temp_relation", "macro.dbt.snapshot_staging_table", "macro.dbt.statement", "macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.915048, "supported_languages": null}, "macro.dbt.materialization_snapshot_default": {"name": "materialization_snapshot_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot.sql", "original_file_path": "macros/materializations/snapshots/snapshot.sql", "unique_id": "macro.dbt.materialization_snapshot_default", "macro_sql": "{% materialization snapshot, default %}\n {%- set config = model['config'] -%}\n\n {%- set target_table = model.get('alias', model.get('name')) -%}\n\n {%- set strategy_name = config.get('strategy') -%}\n {%- set unique_key = config.get('unique_key') %}\n -- grab current tables grants config for comparision later on\n {%- set grant_config = config.get('grants') -%}\n\n {% set target_relation_exists, target_relation = get_or_create_relation(\n database=model.database,\n schema=model.schema,\n identifier=target_table,\n type='table') -%}\n\n {%- if not target_relation.is_table -%}\n {% do exceptions.relation_wrong_type(target_relation, 'table') %}\n {%- endif -%}\n\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set strategy_macro = 
strategy_dispatch(strategy_name) %}\n {% set strategy = strategy_macro(model, \"snapshotted_data\", \"source_data\", config, target_relation_exists) %}\n\n {% if not target_relation_exists %}\n\n {% set build_sql = build_snapshot_table(strategy, model['compiled_code']) %}\n {% set final_sql = create_table_as(False, target_relation, build_sql) %}\n\n {% else %}\n\n {{ adapter.valid_snapshot_target(target_relation) }}\n\n {% set staging_table = build_snapshot_staging_table(strategy, sql, target_relation) %}\n\n -- this may no-op if the database does not require column expansion\n {% do adapter.expand_target_column_types(from_relation=staging_table,\n to_relation=target_relation) %}\n\n {% set missing_columns = adapter.get_missing_columns(staging_table, target_relation)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% do create_columns(target_relation, missing_columns) %}\n\n {% set source_columns = adapter.get_columns_in_relation(staging_table)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% set quoted_source_columns = [] %}\n {% for column in source_columns %}\n {% do quoted_source_columns.append(adapter.quote(column.name)) %}\n {% endfor %}\n\n {% set final_sql = snapshot_merge_sql(\n target = target_relation,\n source = staging_table,\n insert_cols = quoted_source_columns\n )\n %}\n\n {% endif %}\n\n {% call statement('main') %}\n {{ final_sql }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(target_relation_exists, full_refresh_mode=False) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if not 
target_relation_exists %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {% if staging_table is defined %}\n {% do post_snapshot(staging_table) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.get_or_create_relation", "macro.dbt.run_hooks", "macro.dbt.strategy_dispatch", "macro.dbt.build_snapshot_table", "macro.dbt.create_table_as", "macro.dbt.build_snapshot_staging_table", "macro.dbt.create_columns", "macro.dbt.snapshot_merge_sql", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes", "macro.dbt.post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.928437, "supported_languages": ["sql"]}, "macro.dbt.materialization_test_default": {"name": "materialization_test_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/test.sql", "original_file_path": "macros/materializations/tests/test.sql", "unique_id": "macro.dbt.materialization_test_default", "macro_sql": "{%- materialization test, default -%}\n\n {% set relations = [] %}\n\n {% if should_store_failures() %}\n\n {% set identifier = model['alias'] %}\n {% set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n {% set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database, type='table') -%} %}\n\n {% if old_relation %}\n {% do adapter.drop_relation(old_relation) %}\n {% endif %}\n\n {% call statement(auto_begin=True) %}\n {{ create_table_as(False, target_relation, sql) }}\n {% endcall %}\n\n {% do relations.append(target_relation) %}\n\n {% set main_sql %}\n select *\n from {{ 
target_relation }}\n {% endset %}\n\n {{ adapter.commit() }}\n\n {% else %}\n\n {% set main_sql = sql %}\n\n {% endif %}\n\n {% set limit = config.get('limit') %}\n {% set fail_calc = config.get('fail_calc') %}\n {% set warn_if = config.get('warn_if') %}\n {% set error_if = config.get('error_if') %}\n\n {% call statement('main', fetch_result=True) -%}\n\n {{ get_test_sql(main_sql, fail_calc, warn_if, error_if, limit)}}\n\n {%- endcall %}\n\n {{ return({'relations': relations}) }}\n\n{%- endmaterialization -%}", "depends_on": {"macros": ["macro.dbt.should_store_failures", "macro.dbt.statement", "macro.dbt.create_table_as", "macro.dbt.get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.933413, "supported_languages": ["sql"]}, "macro.dbt.get_test_sql": {"name": "get_test_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "unique_id": "macro.dbt.get_test_sql", "macro_sql": "{% macro get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n {{ adapter.dispatch('get_test_sql', 'dbt')(main_sql, fail_calc, warn_if, error_if, limit) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.934442, "supported_languages": null}, "macro.dbt.default__get_test_sql": {"name": "default__get_test_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "unique_id": "macro.dbt.default__get_test_sql", "macro_sql": "{% macro default__get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n select\n {{ fail_calc }} as failures,\n {{ fail_calc }} {{ warn_if }} as 
should_warn,\n {{ fail_calc }} {{ error_if }} as should_error\n from (\n {{ main_sql }}\n {{ \"limit \" ~ limit if limit != none }}\n ) dbt_internal_test\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.93521, "supported_languages": null}, "macro.dbt.get_where_subquery": {"name": "get_where_subquery", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "unique_id": "macro.dbt.get_where_subquery", "macro_sql": "{% macro get_where_subquery(relation) -%}\n {% do return(adapter.dispatch('get_where_subquery', 'dbt')(relation)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_where_subquery"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9360561, "supported_languages": null}, "macro.dbt.default__get_where_subquery": {"name": "default__get_where_subquery", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "unique_id": "macro.dbt.default__get_where_subquery", "macro_sql": "{% macro default__get_where_subquery(relation) -%}\n {% set where = config.get('where', '') %}\n {% if where %}\n {%- set filtered -%}\n (select * from {{ relation }} where {{ where }}) dbt_subquery\n {%- endset -%}\n {% do return(filtered) %}\n {%- else -%}\n {% do return(relation) %}\n {%- endif -%}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9370618, "supported_languages": null}, "macro.dbt.get_quoted_csv": {"name": "get_quoted_csv", "resource_type": 
"macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.get_quoted_csv", "macro_sql": "{% macro get_quoted_csv(column_names) %}\n\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote(col)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.940309, "supported_languages": null}, "macro.dbt.diff_columns": {"name": "diff_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.diff_columns", "macro_sql": "{% macro diff_columns(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% set source_names = source_columns | map(attribute = 'column') | list %}\n {% set target_names = target_columns | map(attribute = 'column') | list %}\n\n {# --check whether the name attribute exists in the target - this does not perform a data type check #}\n {% for sc in source_columns %}\n {% if sc.name not in target_names %}\n {{ result.append(sc) }}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9417028, "supported_languages": null}, "macro.dbt.diff_column_data_types": {"name": "diff_column_data_types", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": 
"macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.diff_column_data_types", "macro_sql": "{% macro diff_column_data_types(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% for sc in source_columns %}\n {% set tc = target_columns | selectattr(\"name\", \"equalto\", sc.name) | list | first %}\n {% if tc %}\n {% if sc.data_type != tc.data_type and not sc.can_expand_to(other_column=tc) %}\n {{ result.append( { 'column_name': tc.name, 'new_type': sc.data_type } ) }}\n {% endif %}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.943423, "supported_languages": null}, "macro.dbt.get_merge_update_columns": {"name": "get_merge_update_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.get_merge_update_columns", "macro_sql": "{% macro get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {{ return(adapter.dispatch('get_merge_update_columns', 'dbt')(merge_update_columns, merge_exclude_columns, dest_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.944038, "supported_languages": null}, "macro.dbt.default__get_merge_update_columns": {"name": "default__get_merge_update_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": 
"macro.dbt.default__get_merge_update_columns", "macro_sql": "{% macro default__get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {%- set default_cols = dest_columns | map(attribute=\"quoted\") | list -%}\n\n {%- if merge_update_columns and merge_exclude_columns -%}\n {{ exceptions.raise_compiler_error(\n 'Model cannot specify merge_update_columns and merge_exclude_columns. Please update model to use only one config'\n )}}\n {%- elif merge_update_columns -%}\n {%- set update_columns = merge_update_columns -%}\n {%- elif merge_exclude_columns -%}\n {%- set update_columns = [] -%}\n {%- for column in dest_columns -%}\n {% if column.column | lower not in merge_exclude_columns | map(\"lower\") | list %}\n {%- do update_columns.append(column.quoted) -%}\n {% endif %}\n {%- endfor -%}\n {%- else -%}\n {%- set update_columns = default_cols -%}\n {%- endif -%}\n\n {{ return(update_columns) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9458601, "supported_languages": null}, "macro.dbt.get_merge_sql": {"name": "get_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_merge_sql", "macro_sql": "{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%}\n {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.954162, "supported_languages": null}, "macro.dbt.default__get_merge_sql": {"name": "default__get_merge_sql", "resource_type": 
"macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.default__get_merge_sql", "macro_sql": "{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set merge_update_columns = config.get('merge_update_columns') -%}\n {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}\n {%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not mapping and unique_key is not string %}\n {% for key in unique_key %}\n {% set this_key_match %}\n DBT_INTERNAL_SOURCE.{{ key }} = DBT_INTERNAL_DEST.{{ key }}\n {% endset %}\n {% do predicates.append(this_key_match) %}\n {% endfor %}\n {% else %}\n {% set unique_key_match %}\n DBT_INTERNAL_SOURCE.{{ unique_key }} = DBT_INTERNAL_DEST.{{ unique_key }}\n {% endset %}\n {% do predicates.append(unique_key_match) %}\n {% endif %}\n {% else %}\n {% do predicates.append('FALSE') %}\n {% endif %}\n\n {{ sql_header if sql_header is not none }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on {{ predicates | join(' and ') }}\n\n {% if unique_key %}\n when matched then update set\n {% for column_name in update_columns -%}\n {{ column_name }} = DBT_INTERNAL_SOURCE.{{ column_name }}\n {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n {% endif %}\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv", "macro.dbt.get_merge_update_columns"]}, "description": 
"", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.958357, "supported_languages": null}, "macro.dbt.get_delete_insert_merge_sql": {"name": "get_delete_insert_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_delete_insert_merge_sql", "macro_sql": "{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.959069, "supported_languages": null}, "macro.dbt.default__get_delete_insert_merge_sql": {"name": "default__get_delete_insert_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.default__get_delete_insert_merge_sql", "macro_sql": "{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not string %}\n delete from {{target }}\n using {{ source }}\n where (\n {% for key in unique_key %}\n {{ source }}.{{ key }} = {{ target }}.{{ key }}\n {{ \"and \" if not loop.last }}\n {% endfor %}\n );\n {% else %}\n delete from {{ target }}\n where (\n {{ unique_key }}) in (\n select ({{ unique_key }})\n from {{ source }}\n );\n\n {% endif %}\n {% endif %}\n\n insert into {{ target }} ({{ dest_cols_csv }})\n (\n 
select {{ dest_cols_csv }}\n from {{ source }}\n )\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9617019, "supported_languages": null}, "macro.dbt.get_insert_overwrite_merge_sql": {"name": "get_insert_overwrite_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_insert_overwrite_merge_sql", "macro_sql": "{% macro get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header=false) -%}\n {{ adapter.dispatch('get_insert_overwrite_merge_sql', 'dbt')(target, source, dest_columns, predicates, include_sql_header) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9624152, "supported_languages": null}, "macro.dbt.default__get_insert_overwrite_merge_sql": {"name": "default__get_insert_overwrite_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.default__get_insert_overwrite_merge_sql", "macro_sql": "{% macro default__get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header) -%}\n {#-- The only time include_sql_header is True: --#}\n {#-- BigQuery + insert_overwrite strategy + \"static\" partitions config --#}\n {#-- We should consider including the sql header at the materialization level instead --#}\n\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = 
get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none and include_sql_header }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on FALSE\n\n when not matched by source\n {% if predicates %} and {{ predicates | join(' and ') }} {% endif %}\n then delete\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.964027, "supported_languages": null}, "macro.dbt.is_incremental": {"name": "is_incremental", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/is_incremental.sql", "original_file_path": "macros/materializations/models/incremental/is_incremental.sql", "unique_id": "macro.dbt.is_incremental", "macro_sql": "{% macro is_incremental() %}\n {#-- do not run introspective queries in parsing #}\n {% if not execute %}\n {{ return(False) }}\n {% else %}\n {% set relation = adapter.get_relation(this.database, this.schema, this.table) %}\n {{ return(relation is not none\n and relation.type == 'table'\n and model.config.materialized == 'incremental'\n and not should_full_refresh()) }}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.should_full_refresh"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9655309, "supported_languages": null}, "macro.dbt.get_incremental_append_sql": {"name": "get_incremental_append_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", 
"unique_id": "macro.dbt.get_incremental_append_sql", "macro_sql": "{% macro get_incremental_append_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_append_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.967351, "supported_languages": null}, "macro.dbt.default__get_incremental_append_sql": {"name": "default__get_incremental_append_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_append_sql", "macro_sql": "{% macro default__get_incremental_append_sql(arg_dict) %}\n\n {% do return(get_insert_into_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_insert_into_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.967958, "supported_languages": null}, "macro.dbt.get_incremental_delete_insert_sql": {"name": "get_incremental_delete_insert_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_delete_insert_sql", "macro_sql": "{% macro get_incremental_delete_insert_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_delete_insert_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_delete_insert_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853277.9684448, "supported_languages": null}, "macro.dbt.default__get_incremental_delete_insert_sql": {"name": "default__get_incremental_delete_insert_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_delete_insert_sql", "macro_sql": "{% macro default__get_incremental_delete_insert_sql(arg_dict) %}\n\n {% do return(get_delete_insert_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.969121, "supported_languages": null}, "macro.dbt.get_incremental_merge_sql": {"name": "get_incremental_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_merge_sql", "macro_sql": "{% macro get_incremental_merge_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_merge_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9696, "supported_languages": null}, "macro.dbt.default__get_incremental_merge_sql": {"name": "default__get_incremental_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": 
"macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_merge_sql", "macro_sql": "{% macro default__get_incremental_merge_sql(arg_dict) %}\n\n {% do return(get_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.970359, "supported_languages": null}, "macro.dbt.get_incremental_insert_overwrite_sql": {"name": "get_incremental_insert_overwrite_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_insert_overwrite_sql", "macro_sql": "{% macro get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_insert_overwrite_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_insert_overwrite_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.970861, "supported_languages": null}, "macro.dbt.default__get_incremental_insert_overwrite_sql": {"name": "default__get_incremental_insert_overwrite_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_insert_overwrite_sql", "macro_sql": "{% macro default__get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {% do return(get_insert_overwrite_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], 
arg_dict[\"dest_columns\"], arg_dict[\"predicates\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.971532, "supported_languages": null}, "macro.dbt.get_incremental_default_sql": {"name": "get_incremental_default_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_default_sql", "macro_sql": "{% macro get_incremental_default_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_default_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_incremental_default_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9720068, "supported_languages": null}, "macro.dbt.default__get_incremental_default_sql": {"name": "default__get_incremental_default_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_default_sql", "macro_sql": "{% macro default__get_incremental_default_sql(arg_dict) %}\n\n {% do return(get_incremental_append_sql(arg_dict)) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.972397, "supported_languages": null}, "macro.dbt.get_insert_into_sql": {"name": "get_insert_into_sql", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_insert_into_sql", "macro_sql": "{% macro get_insert_into_sql(target_relation, temp_relation, dest_columns) %}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n insert into {{ target_relation }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ temp_relation }}\n )\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9731112, "supported_languages": null}, "macro.dbt.materialization_incremental_default": {"name": "materialization_incremental_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/incremental.sql", "original_file_path": "macros/materializations/models/incremental/incremental.sql", "unique_id": "macro.dbt.materialization_incremental_default", "macro_sql": "{% materialization incremental, default -%}\n\n -- relations\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') -%}\n {%- set temp_relation = make_temp_relation(target_relation)-%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation)-%}\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n\n -- configs\n {%- set unique_key = config.get('unique_key') -%}\n {%- set full_refresh_mode = (should_full_refresh() or existing_relation.is_view) -%}\n {%- set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') -%}\n\n -- the temp_ and backup_ relations should not already exist in the 
database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation. This has to happen before\n -- BEGIN, in a separate transaction\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation)-%}\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set to_drop = [] %}\n\n {% if existing_relation is none %}\n {% set build_sql = get_create_table_as_sql(False, target_relation, sql) %}\n {% elif full_refresh_mode %}\n {% set build_sql = get_create_table_as_sql(False, intermediate_relation, sql) %}\n {% set need_swap = true %}\n {% else %}\n {% do run_query(get_create_table_as_sql(True, temp_relation, sql)) %}\n {% do adapter.expand_target_column_types(\n from_relation=temp_relation,\n to_relation=target_relation) %}\n {#-- Process schema changes. Returns dict of changes if successful. 
Use source columns for upserting/merging --#}\n {% set dest_columns = process_schema_changes(on_schema_change, temp_relation, existing_relation) %}\n {% if not dest_columns %}\n {% set dest_columns = adapter.get_columns_in_relation(existing_relation) %}\n {% endif %}\n\n {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#}\n {% set incremental_strategy = config.get('incremental_strategy') or 'default' %}\n {% set incremental_predicates = config.get('incremental_predicates', none) %}\n {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %}\n {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %}\n {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %}\n\n {% endif %}\n\n {% call statement(\"main\") %}\n {{ build_sql }}\n {% endcall %}\n\n {% if need_swap %}\n {% do adapter.rename_relation(target_relation, backup_relation) %}\n {% do adapter.rename_relation(intermediate_relation, target_relation) %}\n {% do to_drop.append(backup_relation) %}\n {% endif %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if existing_relation is none or existing_relation.is_view or should_full_refresh() %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {% do adapter.commit() %}\n\n {% for rel in to_drop %}\n {% do adapter.drop_relation(rel) %}\n {% endfor %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", 
"macro.dbt.make_temp_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.should_full_refresh", "macro.dbt.incremental_validate_on_schema_change", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.get_create_table_as_sql", "macro.dbt.run_query", "macro.dbt.process_schema_changes", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.984826, "supported_languages": ["sql"]}, "macro.dbt.incremental_validate_on_schema_change": {"name": "incremental_validate_on_schema_change", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.incremental_validate_on_schema_change", "macro_sql": "{% macro incremental_validate_on_schema_change(on_schema_change, default='ignore') %}\n\n {% if on_schema_change not in ['sync_all_columns', 'append_new_columns', 'fail', 'ignore'] %}\n\n {% set log_message = 'Invalid value for on_schema_change (%s) specified. Setting default value of %s.' 
% (on_schema_change, default) %}\n {% do log(log_message) %}\n\n {{ return(default) }}\n\n {% else %}\n\n {{ return(on_schema_change) }}\n\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.995018, "supported_languages": null}, "macro.dbt.check_for_schema_changes": {"name": "check_for_schema_changes", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.check_for_schema_changes", "macro_sql": "{% macro check_for_schema_changes(source_relation, target_relation) %}\n\n {% set schema_changed = False %}\n\n {%- set source_columns = adapter.get_columns_in_relation(source_relation) -%}\n {%- set target_columns = adapter.get_columns_in_relation(target_relation) -%}\n {%- set source_not_in_target = diff_columns(source_columns, target_columns) -%}\n {%- set target_not_in_source = diff_columns(target_columns, source_columns) -%}\n\n {% set new_target_types = diff_column_data_types(source_columns, target_columns) %}\n\n {% if source_not_in_target != [] %}\n {% set schema_changed = True %}\n {% elif target_not_in_source != [] or new_target_types != [] %}\n {% set schema_changed = True %}\n {% elif new_target_types != [] %}\n {% set schema_changed = True %}\n {% endif %}\n\n {% set changes_dict = {\n 'schema_changed': schema_changed,\n 'source_not_in_target': source_not_in_target,\n 'target_not_in_source': target_not_in_source,\n 'source_columns': source_columns,\n 'target_columns': target_columns,\n 'new_target_types': new_target_types\n } %}\n\n {% set msg %}\n In {{ target_relation }}:\n Schema changed: {{ schema_changed }}\n Source columns not in target: {{ source_not_in_target }}\n Target columns not in source: {{ target_not_in_source }}\n New 
column types: {{ new_target_types }}\n {% endset %}\n\n {% do log(msg) %}\n\n {{ return(changes_dict) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.diff_columns", "macro.dbt.diff_column_data_types"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9980521, "supported_languages": null}, "macro.dbt.sync_column_schemas": {"name": "sync_column_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.sync_column_schemas", "macro_sql": "{% macro sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {%- set add_to_target_arr = schema_changes_dict['source_not_in_target'] -%}\n\n {%- if on_schema_change == 'append_new_columns'-%}\n {%- if add_to_target_arr | length > 0 -%}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, none) -%}\n {%- endif -%}\n\n {% elif on_schema_change == 'sync_all_columns' %}\n {%- set remove_from_target_arr = schema_changes_dict['target_not_in_source'] -%}\n {%- set new_target_types = schema_changes_dict['new_target_types'] -%}\n\n {% if add_to_target_arr | length > 0 or remove_from_target_arr | length > 0 %}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, remove_from_target_arr) -%}\n {% endif %}\n\n {% if new_target_types != [] %}\n {% for ntt in new_target_types %}\n {% set column_name = ntt['column_name'] %}\n {% set new_type = ntt['new_type'] %}\n {% do alter_column_type(target_relation, column_name, new_type) %}\n {% endfor %}\n {% endif %}\n\n {% endif %}\n\n {% set schema_change_message %}\n In {{ target_relation }}:\n Schema change approach: {{ on_schema_change }}\n Columns added: {{ add_to_target_arr }}\n Columns removed: {{ remove_from_target_arr }}\n Data types 
changed: {{ new_target_types }}\n {% endset %}\n\n {% do log(schema_change_message) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.alter_relation_add_remove_columns", "macro.dbt.alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.00105, "supported_languages": null}, "macro.dbt.process_schema_changes": {"name": "process_schema_changes", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.process_schema_changes", "macro_sql": "{% macro process_schema_changes(on_schema_change, source_relation, target_relation) %}\n\n {% if on_schema_change == 'ignore' %}\n\n {{ return({}) }}\n\n {% else %}\n\n {% set schema_changes_dict = check_for_schema_changes(source_relation, target_relation) %}\n\n {% if schema_changes_dict['schema_changed'] %}\n\n {% if on_schema_change == 'fail' %}\n\n {% set fail_msg %}\n The source and target schemas on this incremental model are out of sync!\n They can be reconciled in several ways:\n - set the `on_schema_change` config to either append_new_columns or sync_all_columns, depending on your situation.\n - Re-run the incremental model with `full_refresh: True` to update the target schema.\n - update the schema manually and re-run the process.\n\n Additional troubleshooting context:\n Source columns not in target: {{ schema_changes_dict['source_not_in_target'] }}\n Target columns not in source: {{ schema_changes_dict['target_not_in_source'] }}\n New column types: {{ schema_changes_dict['new_target_types'] }}\n {% endset %}\n\n {% do exceptions.raise_compiler_error(fail_msg) %}\n\n {# -- unless we ignore, run the sync operation per the config #}\n {% else %}\n\n {% do sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n 
{% endif %}\n\n {% endif %}\n\n {{ return(schema_changes_dict['source_columns']) }}\n\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.check_for_schema_changes", "macro.dbt.sync_column_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.003159, "supported_languages": null}, "macro.dbt.materialization_table_default": {"name": "materialization_table_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/table.sql", "original_file_path": "macros/materializations/models/table/table.sql", "unique_id": "macro.dbt.materialization_table_default", "macro_sql": "{% materialization table, default %}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') %}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. 
Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n See ../view/view.sql for more information about this relation.\n */\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n -- as above, the backup_relation should not already exist\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_table_as_sql(False, intermediate_relation, sql) }}\n {%- endcall %}\n\n -- cleanup\n {% if existing_relation is not none %}\n {{ adapter.rename_relation(existing_relation, backup_relation) }}\n {% endif %}\n\n {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n {% do create_indexes(target_relation) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n -- `COMMIT` happens here\n {{ adapter.commit() }}\n\n -- finally, drop the existing/backup relation after the commit\n {{ drop_relation_if_exists(backup_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': 
[target_relation]}) }}\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.statement", "macro.dbt.get_create_table_as_sql", "macro.dbt.create_indexes", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.009005, "supported_languages": ["sql"]}, "macro.dbt.get_create_table_as_sql": {"name": "get_create_table_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.get_create_table_as_sql", "macro_sql": "{% macro get_create_table_as_sql(temporary, relation, sql) -%}\n {{ adapter.dispatch('get_create_table_as_sql', 'dbt')(temporary, relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_create_table_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.010164, "supported_languages": null}, "macro.dbt.default__get_create_table_as_sql": {"name": "default__get_create_table_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.default__get_create_table_as_sql", "macro_sql": "{% macro default__get_create_table_as_sql(temporary, relation, sql) -%}\n {{ return(create_table_as(temporary, relation, sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, 
"patch_path": null, "arguments": [], "created_at": 1670853278.010643, "supported_languages": null}, "macro.dbt.create_table_as": {"name": "create_table_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.create_table_as", "macro_sql": "{% macro create_table_as(temporary, relation, compiled_code, language='sql') -%}\n {# backward compatibility for create_table_as that does not support language #}\n {% if language == \"sql\" %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code)}}\n {% else %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code, language) }}\n {% endif %}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.01173, "supported_languages": null}, "macro.dbt.default__create_table_as": {"name": "default__create_table_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.default__create_table_as", "macro_sql": "{% macro default__create_table_as(temporary, relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary: -%}temporary{%- endif %} table\n {{ relation.include(database=(not temporary), schema=(not temporary)) }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.012759, "supported_languages": null}, 
"macro.dbt.materialization_view_default": {"name": "materialization_view_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/view.sql", "original_file_path": "macros/materializations/models/view/view.sql", "unique_id": "macro.dbt.materialization_view_default", "macro_sql": "{%- materialization view, default -%}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='view') -%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n This relation (probably) doesn't exist yet. If it does exist, it's a leftover from\n a previous run, and we're going to try to drop it immediately. At the end of this\n materialization, we're going to rename the \"existing_relation\" to this identifier,\n and then we're going to drop it. In order to make sure we run the correct one of:\n - drop view ...\n - drop table ...\n\n We need to set the type of this relation to be the type of the existing_relation, if it exists,\n or else \"view\" as a sane default if it does not. Note that if the existing_relation does not\n exist, then there is nothing to move out of the way and subsequentally drop. 
In that case,\n this relation will be effectively unused.\n */\n {%- set backup_relation_type = 'view' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n -- as above, the backup_relation should not already exist\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_view_as_sql(intermediate_relation, sql) }}\n {%- endcall %}\n\n -- cleanup\n -- move the existing view out of the way\n {% if existing_relation is not none %}\n {{ adapter.rename_relation(existing_relation, backup_relation) }}\n {% endif %}\n {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {{ drop_relation_if_exists(backup_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization -%}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.run_hooks", "macro.dbt.drop_relation_if_exists", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", 
"macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0184052, "supported_languages": ["sql"]}, "macro.dbt.handle_existing_table": {"name": "handle_existing_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "unique_id": "macro.dbt.handle_existing_table", "macro_sql": "{% macro handle_existing_table(full_refresh, old_relation) %}\n {{ adapter.dispatch('handle_existing_table', 'dbt')(full_refresh, old_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__handle_existing_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.019115, "supported_languages": null}, "macro.dbt.default__handle_existing_table": {"name": "default__handle_existing_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "unique_id": "macro.dbt.default__handle_existing_table", "macro_sql": "{% macro default__handle_existing_table(full_refresh, old_relation) %}\n {{ log(\"Dropping relation \" ~ old_relation ~ \" because it is of type \" ~ old_relation.type) }}\n {{ adapter.drop_relation(old_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.019679, "supported_languages": null}, "macro.dbt.create_or_replace_view": {"name": "create_or_replace_view", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_or_replace_view.sql", "original_file_path": 
"macros/materializations/models/view/create_or_replace_view.sql", "unique_id": "macro.dbt.create_or_replace_view", "macro_sql": "{% macro create_or_replace_view() %}\n {%- set identifier = model['alias'] -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database,\n type='view') -%}\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks) }}\n\n -- If there's a table with the same name and we weren't told to full refresh,\n -- that's an error. If we were told to full refresh, drop it. This behavior differs\n -- for Snowflake and BigQuery, so multiple dispatch is used.\n {%- if old_relation is not none and old_relation.is_table -%}\n {{ handle_existing_table(should_full_refresh(), old_relation) }}\n {%- endif -%}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_view_as_sql(target_relation, sql) }}\n {%- endcall %}\n\n {% set should_revoke = should_revoke(exists_as_view, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=True) %}\n\n {{ run_hooks(post_hooks) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_hooks", "macro.dbt.handle_existing_table", "macro.dbt.should_full_refresh", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0234811, "supported_languages": null}, "macro.dbt.get_create_view_as_sql": {"name": "get_create_view_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": 
"macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.get_create_view_as_sql", "macro_sql": "{% macro get_create_view_as_sql(relation, sql) -%}\n {{ adapter.dispatch('get_create_view_as_sql', 'dbt')(relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_create_view_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0243561, "supported_languages": null}, "macro.dbt.default__get_create_view_as_sql": {"name": "default__get_create_view_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.default__get_create_view_as_sql", "macro_sql": "{% macro default__get_create_view_as_sql(relation, sql) -%}\n {{ return(create_view_as(relation, sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0248141, "supported_languages": null}, "macro.dbt.create_view_as": {"name": "create_view_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.create_view_as", "macro_sql": "{% macro create_view_as(relation, sql) -%}\n {{ adapter.dispatch('create_view_as', 'dbt')(relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.025273, "supported_languages": null}, "macro.dbt.default__create_view_as": {"name": "default__create_view_as", "resource_type": "macro", 
"package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.default__create_view_as", "macro_sql": "{% macro default__create_view_as(relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n create view {{ relation }} as (\n {{ sql }}\n );\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.025913, "supported_languages": null}, "macro.dbt.materialization_seed_default": {"name": "materialization_seed_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/seed.sql", "original_file_path": "macros/materializations/seeds/seed.sql", "unique_id": "macro.dbt.materialization_seed_default", "macro_sql": "{% materialization seed, default %}\n\n {%- set identifier = model['alias'] -%}\n {%- set full_refresh_mode = (should_full_refresh()) -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n\n {%- set exists_as_table = (old_relation is not none and old_relation.is_table) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set grant_config = config.get('grants') -%}\n {%- set agate_table = load_agate_table() -%}\n -- grab current tables grants config for comparision later on\n\n {%- do store_result('agate_table', response='OK', agate_table=agate_table) -%}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% set create_table_sql = \"\" %}\n {% if exists_as_view %}\n {{ exceptions.raise_compiler_error(\"Cannot seed to '{}', it is a view\".format(old_relation)) }}\n {% elif exists_as_table %}\n {% 
set create_table_sql = reset_csv_table(model, full_refresh_mode, old_relation, agate_table) %}\n {% else %}\n {% set create_table_sql = create_csv_table(model, agate_table) %}\n {% endif %}\n\n {% set code = 'CREATE' if full_refresh_mode else 'INSERT' %}\n {% set rows_affected = (agate_table.rows | length) %}\n {% set sql = load_csv_rows(model, agate_table) %}\n\n {% call noop_statement('main', code ~ ' ' ~ rows_affected, code, rows_affected) %}\n {{ get_csv_sql(create_table_sql, sql) }};\n {% endcall %}\n\n {% set target_relation = this.incorporate(type='table') %}\n\n {% set should_revoke = should_revoke(old_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if full_refresh_mode or not exists_as_table %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {{ adapter.commit() }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.should_full_refresh", "macro.dbt.run_hooks", "macro.dbt.reset_csv_table", "macro.dbt.create_csv_table", "macro.dbt.load_csv_rows", "macro.dbt.noop_statement", "macro.dbt.get_csv_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.033872, "supported_languages": ["sql"]}, "macro.dbt.create_csv_table": {"name": "create_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.create_csv_table", "macro_sql": "{% macro create_csv_table(model, agate_table) -%}\n {{ 
adapter.dispatch('create_csv_table', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0429308, "supported_languages": null}, "macro.dbt.default__create_csv_table": {"name": "default__create_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__create_csv_table", "macro_sql": "{% macro default__create_csv_table(model, agate_table) %}\n {%- set column_override = model['config'].get('column_types', {}) -%}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n\n {% set sql %}\n create table {{ this.render() }} (\n {%- for col_name in agate_table.column_names -%}\n {%- set inferred_type = adapter.convert_type(agate_table, loop.index0) -%}\n {%- set type = column_override.get(col_name, inferred_type) -%}\n {%- set column_name = (col_name | string) -%}\n {{ adapter.quote_seed_column(column_name, quote_seed_column) }} {{ type }} {%- if not loop.last -%}, {%- endif -%}\n {%- endfor -%}\n )\n {% endset %}\n\n {% call statement('_') -%}\n {{ sql }}\n {%- endcall %}\n\n {{ return(sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0452118, "supported_languages": null}, "macro.dbt.reset_csv_table": {"name": "reset_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.reset_csv_table", "macro_sql": "{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}\n {{ 
adapter.dispatch('reset_csv_table', 'dbt')(model, full_refresh, old_relation, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__reset_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0458, "supported_languages": null}, "macro.dbt.default__reset_csv_table": {"name": "default__reset_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__reset_csv_table", "macro_sql": "{% macro default__reset_csv_table(model, full_refresh, old_relation, agate_table) %}\n {% set sql = \"\" %}\n {% if full_refresh %}\n {{ adapter.drop_relation(old_relation) }}\n {% set sql = create_csv_table(model, agate_table) %}\n {% else %}\n {{ adapter.truncate_relation(old_relation) }}\n {% set sql = \"truncate table \" ~ old_relation %}\n {% endif %}\n\n {{ return(sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047039, "supported_languages": null}, "macro.dbt.get_csv_sql": {"name": "get_csv_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_csv_sql", "macro_sql": "{% macro get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ adapter.dispatch('get_csv_sql', 'dbt')(create_or_truncate_sql, insert_sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_csv_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047529, "supported_languages": null}, "macro.dbt.default__get_csv_sql": 
{"name": "default__get_csv_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_csv_sql", "macro_sql": "{% macro default__get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ create_or_truncate_sql }};\n -- dbt seed --\n {{ insert_sql }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047877, "supported_languages": null}, "macro.dbt.get_binding_char": {"name": "get_binding_char", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_binding_char", "macro_sql": "{% macro get_binding_char() -%}\n {{ adapter.dispatch('get_binding_char', 'dbt')() }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0482402, "supported_languages": null}, "macro.dbt.default__get_binding_char": {"name": "default__get_binding_char", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_binding_char", "macro_sql": "{% macro default__get_binding_char() %}\n {{ return('%s') }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.048544, "supported_languages": null}, "macro.dbt.get_batch_size": {"name": "get_batch_size", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_batch_size", "macro_sql": "{% macro get_batch_size() -%}\n {{ return(adapter.dispatch('get_batch_size', 'dbt')()) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_batch_size"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.048952, "supported_languages": null}, "macro.dbt.default__get_batch_size": {"name": "default__get_batch_size", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_batch_size", "macro_sql": "{% macro default__get_batch_size() %}\n {{ return(10000) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0492558, "supported_languages": null}, "macro.dbt.get_seed_column_quoted_csv": {"name": "get_seed_column_quoted_csv", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_seed_column_quoted_csv", "macro_sql": "{% macro get_seed_column_quoted_csv(model, column_names) %}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote_seed_column(col, quote_seed_column)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0504608, 
"supported_languages": null}, "macro.dbt.load_csv_rows": {"name": "load_csv_rows", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.load_csv_rows", "macro_sql": "{% macro load_csv_rows(model, agate_table) -%}\n {{ adapter.dispatch('load_csv_rows', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__load_csv_rows"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.050931, "supported_languages": null}, "macro.dbt.default__load_csv_rows": {"name": "default__load_csv_rows", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__load_csv_rows", "macro_sql": "{% macro default__load_csv_rows(model, agate_table) %}\n\n {% set batch_size = get_batch_size() %}\n\n {% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}\n {% set bindings = [] %}\n\n {% set statements = [] %}\n\n {% for chunk in agate_table.rows | batch(batch_size) %}\n {% set bindings = [] %}\n\n {% for row in chunk %}\n {% do bindings.extend(row) %}\n {% endfor %}\n\n {% set sql %}\n insert into {{ this.render() }} ({{ cols_sql }}) values\n {% for row in chunk -%}\n ({%- for column in agate_table.column_names -%}\n {{ get_binding_char() }}\n {%- if not loop.last%},{%- endif %}\n {%- endfor -%})\n {%- if not loop.last%},{%- endif %}\n {%- endfor %}\n {% endset %}\n\n {% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}\n\n {% if loop.index0 == 0 %}\n {% do statements.append(sql) %}\n {% endif %}\n {% endfor %}\n\n {# Return SQL so we can render it out into the compiled files #}\n {{ return(statements[0]) }}\n{% endmacro %}", "depends_on": {"macros": 
["macro.dbt.get_batch_size", "macro.dbt.get_seed_column_quoted_csv", "macro.dbt.get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0541089, "supported_languages": null}, "macro.dbt.generate_alias_name": {"name": "generate_alias_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "unique_id": "macro.dbt.generate_alias_name", "macro_sql": "{% macro generate_alias_name(custom_alias_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_alias_name', 'dbt')(custom_alias_name, node)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_alias_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0551748, "supported_languages": null}, "macro.dbt.default__generate_alias_name": {"name": "default__generate_alias_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "unique_id": "macro.dbt.default__generate_alias_name", "macro_sql": "{% macro default__generate_alias_name(custom_alias_name=none, node=none) -%}\n\n {%- if custom_alias_name is none -%}\n\n {{ node.name }}\n\n {%- else -%}\n\n {{ custom_alias_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.055804, "supported_languages": null}, "macro.dbt.generate_schema_name": {"name": "generate_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", 
"unique_id": "macro.dbt.generate_schema_name", "macro_sql": "{% macro generate_schema_name(custom_schema_name=none, node=none) -%}\n {{ return(adapter.dispatch('generate_schema_name', 'dbt')(custom_schema_name, node)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0569532, "supported_languages": null}, "macro.dbt.default__generate_schema_name": {"name": "default__generate_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "unique_id": "macro.dbt.default__generate_schema_name", "macro_sql": "{% macro default__generate_schema_name(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if custom_schema_name is none -%}\n\n {{ default_schema }}\n\n {%- else -%}\n\n {{ default_schema }}_{{ custom_schema_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.05763, "supported_languages": null}, "macro.dbt.generate_schema_name_for_env": {"name": "generate_schema_name_for_env", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "unique_id": "macro.dbt.generate_schema_name_for_env", "macro_sql": "{% macro generate_schema_name_for_env(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if target.name == 'prod' and custom_schema_name is not none -%}\n\n {{ custom_schema_name | trim }}\n\n {%- else -%}\n\n {{ default_schema }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": 
{}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0583591, "supported_languages": null}, "macro.dbt.generate_database_name": {"name": "generate_database_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "unique_id": "macro.dbt.generate_database_name", "macro_sql": "{% macro generate_database_name(custom_database_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_database_name', 'dbt')(custom_database_name, node)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_database_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.059273, "supported_languages": null}, "macro.dbt.default__generate_database_name": {"name": "default__generate_database_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "unique_id": "macro.dbt.default__generate_database_name", "macro_sql": "{% macro default__generate_database_name(custom_database_name=none, node=none) -%}\n {%- set default_database = target.database -%}\n {%- if custom_database_name is none -%}\n\n {{ default_database }}\n\n {%- else -%}\n\n {{ custom_database_name }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0599208, "supported_languages": null}, "macro.dbt.default__test_relationships": {"name": "default__test_relationships", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/relationships.sql", "original_file_path": "macros/generic_test_sql/relationships.sql", "unique_id": 
"macro.dbt.default__test_relationships", "macro_sql": "{% macro default__test_relationships(model, column_name, to, field) %}\n\nwith child as (\n select {{ column_name }} as from_field\n from {{ model }}\n where {{ column_name }} is not null\n),\n\nparent as (\n select {{ field }} as to_field\n from {{ to }}\n)\n\nselect\n from_field\n\nfrom child\nleft join parent\n on child.from_field = parent.to_field\n\nwhere parent.to_field is null\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.060704, "supported_languages": null}, "macro.dbt.default__test_not_null": {"name": "default__test_not_null", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/not_null.sql", "original_file_path": "macros/generic_test_sql/not_null.sql", "unique_id": "macro.dbt.default__test_not_null", "macro_sql": "{% macro default__test_not_null(model, column_name) %}\n\n{% set column_list = '*' if should_store_failures() else column_name %}\n\nselect {{ column_list }}\nfrom {{ model }}\nwhere {{ column_name }} is null\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.should_store_failures"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.061424, "supported_languages": null}, "macro.dbt.default__test_unique": {"name": "default__test_unique", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/unique.sql", "original_file_path": "macros/generic_test_sql/unique.sql", "unique_id": "macro.dbt.default__test_unique", "macro_sql": "{% macro default__test_unique(model, column_name) %}\n\nselect\n {{ column_name }} as unique_field,\n count(*) as n_records\n\nfrom {{ model }}\nwhere {{ column_name }} is not null\ngroup by {{ column_name }}\nhaving count(*) > 1\n\n{% endmacro %}", "depends_on": {"macros": []}, 
"description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.062015, "supported_languages": null}, "macro.dbt.default__test_accepted_values": {"name": "default__test_accepted_values", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/accepted_values.sql", "original_file_path": "macros/generic_test_sql/accepted_values.sql", "unique_id": "macro.dbt.default__test_accepted_values", "macro_sql": "{% macro default__test_accepted_values(model, column_name, values, quote=True) %}\n\nwith all_values as (\n\n select\n {{ column_name }} as value_field,\n count(*) as n_records\n\n from {{ model }}\n group by {{ column_name }}\n\n)\n\nselect *\nfrom all_values\nwhere value_field not in (\n {% for value in values -%}\n {% if quote -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif -%}\n {%- if not loop.last -%},{%- endif %}\n {%- endfor %}\n)\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.063346, "supported_languages": null}, "macro.dbt.statement": {"name": "statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.statement", "macro_sql": "\n{%- macro statement(name=None, fetch_result=False, auto_begin=True, language='sql') -%}\n {%- if execute: -%}\n {%- set compiled_code = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime {} for node \"{}\"'.format(language, model['unique_id'])) }}\n {{ write(compiled_code) }}\n {%- endif -%}\n {%- if language == 'sql'-%}\n {%- set res, table = adapter.execute(compiled_code, auto_begin=auto_begin, fetch=fetch_result) -%}\n {%- elif language == 'python' -%}\n {%- set res = submit_python_job(model, compiled_code) -%}\n {#-- TODO: What should table be for python 
models? --#}\n {%- set table = None -%}\n {%- else -%}\n {% do exceptions.raise_compiler_error(\"statement macro didn't get supported language\") %}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_result(name, response=res, agate_table=table) }}\n {%- endif -%}\n\n {%- endif -%}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.066719, "supported_languages": null}, "macro.dbt.noop_statement": {"name": "noop_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.noop_statement", "macro_sql": "{% macro noop_statement(name=None, message=None, code=None, rows_affected=None, res=None) -%}\n {%- set sql = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime SQL for node \"{}\"'.format(model['unique_id'])) }}\n {{ write(sql) }}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_raw_result(name, message=message, code=code, rows_affected=rows_affected, agate_table=res) }}\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.068168, "supported_languages": null}, "macro.dbt.run_query": {"name": "run_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.run_query", "macro_sql": "{% macro run_query(sql) %}\n {% call statement(\"run_query_statement\", fetch_result=true, auto_begin=false) %}\n {{ sql }}\n {% endcall %}\n\n {% do return(load_result(\"run_query_statement\").table) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": 
null, "arguments": [], "created_at": 1670853278.068917, "supported_languages": null}, "macro.dbt.convert_datetime": {"name": "convert_datetime", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.convert_datetime", "macro_sql": "{% macro convert_datetime(date_str, date_fmt) %}\n\n {% set error_msg -%}\n The provided partition date '{{ date_str }}' does not match the expected format '{{ date_fmt }}'\n {%- endset %}\n\n {% set res = try_or_compiler_error(error_msg, modules.datetime.datetime.strptime, date_str.strip(), date_fmt) %}\n {{ return(res) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.072692, "supported_languages": null}, "macro.dbt.dates_in_range": {"name": "dates_in_range", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.dates_in_range", "macro_sql": "{% macro dates_in_range(start_date_str, end_date_str=none, in_fmt=\"%Y%m%d\", out_fmt=\"%Y%m%d\") %}\n {% set end_date_str = start_date_str if end_date_str is none else end_date_str %}\n\n {% set start_date = convert_datetime(start_date_str, in_fmt) %}\n {% set end_date = convert_datetime(end_date_str, in_fmt) %}\n\n {% set day_count = (end_date - start_date).days %}\n {% if day_count < 0 %}\n {% set msg -%}\n Partiton start date is after the end date ({{ start_date }}, {{ end_date }})\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg, model) }}\n {% endif %}\n\n {% set date_list = [] %}\n {% for i in range(0, day_count + 1) %}\n {% set the_date = (modules.datetime.timedelta(days=i) + start_date) %}\n {% if not out_fmt %}\n {% set _ = date_list.append(the_date) %}\n {% else %}\n {% set _ = date_list.append(the_date.strftime(out_fmt)) %}\n {% endif 
%}\n {% endfor %}\n\n {{ return(date_list) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.convert_datetime"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0760698, "supported_languages": null}, "macro.dbt.partition_range": {"name": "partition_range", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.partition_range", "macro_sql": "{% macro partition_range(raw_partition_date, date_fmt='%Y%m%d') %}\n {% set partition_range = (raw_partition_date | string).split(\",\") %}\n\n {% if (partition_range | length) == 1 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = none %}\n {% elif (partition_range | length) == 2 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = partition_range[1] %}\n {% else %}\n {{ exceptions.raise_compiler_error(\"Invalid partition time. Expected format: {Start Date}[,{End Date}]. 
Got: \" ~ raw_partition_date) }}\n {% endif %}\n\n {{ return(dates_in_range(start_date, end_date, in_fmt=date_fmt)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.dates_in_range"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.077999, "supported_languages": null}, "macro.dbt.py_current_timestring": {"name": "py_current_timestring", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.py_current_timestring", "macro_sql": "{% macro py_current_timestring() %}\n {% set dt = modules.datetime.datetime.now() %}\n {% do return(dt.strftime(\"%Y%m%d%H%M%S%f\")) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.078628, "supported_languages": null}, "macro.dbt.except": {"name": "except", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "unique_id": "macro.dbt.except", "macro_sql": "{% macro except() %}\n {{ return(adapter.dispatch('except', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__except"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.079215, "supported_languages": null}, "macro.dbt.default__except": {"name": "default__except", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "unique_id": "macro.dbt.default__except", "macro_sql": "{% macro default__except() %}\n\n except\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 
1670853278.079419, "supported_languages": null}, "macro.dbt.replace": {"name": "replace", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "unique_id": "macro.dbt.replace", "macro_sql": "{% macro replace(field, old_chars, new_chars) -%}\n {{ return(adapter.dispatch('replace', 'dbt') (field, old_chars, new_chars)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__replace"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.080247, "supported_languages": null}, "macro.dbt.default__replace": {"name": "default__replace", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "unique_id": "macro.dbt.default__replace", "macro_sql": "{% macro default__replace(field, old_chars, new_chars) %}\n\n replace(\n {{ field }},\n {{ old_chars }},\n {{ new_chars }}\n )\n\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.080682, "supported_languages": null}, "macro.dbt.concat": {"name": "concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "unique_id": "macro.dbt.concat", "macro_sql": "{% macro concat(fields) -%}\n {{ return(adapter.dispatch('concat', 'dbt')(fields)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.081283, "supported_languages": null}, "macro.dbt.default__concat": {"name": "default__concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": 
"macros/utils/concat.sql", "unique_id": "macro.dbt.default__concat", "macro_sql": "{% macro default__concat(fields) -%}\n {{ fields|join(' || ') }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0816069, "supported_languages": null}, "macro.dbt.length": {"name": "length", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "unique_id": "macro.dbt.length", "macro_sql": "{% macro length(expression) -%}\n {{ return(adapter.dispatch('length', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__length"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.082229, "supported_languages": null}, "macro.dbt.default__length": {"name": "default__length", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "unique_id": "macro.dbt.default__length", "macro_sql": "{% macro default__length(expression) %}\n\n length(\n {{ expression }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.082507, "supported_languages": null}, "macro.dbt.dateadd": {"name": "dateadd", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt.dateadd", "macro_sql": "{% macro dateadd(datepart, interval, from_date_or_timestamp) %}\n {{ return(adapter.dispatch('dateadd', 'dbt')(datepart, interval, from_date_or_timestamp)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__dateadd"]}, "description": "", "meta": {}, "docs": 
{"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.083297, "supported_languages": null}, "macro.dbt.default__dateadd": {"name": "default__dateadd", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt.default__dateadd", "macro_sql": "{% macro default__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n dateadd(\n {{ datepart }},\n {{ interval }},\n {{ from_date_or_timestamp }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0837162, "supported_languages": null}, "macro.dbt.intersect": {"name": "intersect", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "unique_id": "macro.dbt.intersect", "macro_sql": "{% macro intersect() %}\n {{ return(adapter.dispatch('intersect', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__intersect"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.084266, "supported_languages": null}, "macro.dbt.default__intersect": {"name": "default__intersect", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "unique_id": "macro.dbt.default__intersect", "macro_sql": "{% macro default__intersect() %}\n\n intersect\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0844731, "supported_languages": null}, "macro.dbt.escape_single_quotes": {"name": "escape_single_quotes", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "unique_id": "macro.dbt.escape_single_quotes", "macro_sql": "{% macro escape_single_quotes(expression) %}\n {{ return(adapter.dispatch('escape_single_quotes', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__escape_single_quotes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.08511, "supported_languages": null}, "macro.dbt.default__escape_single_quotes": {"name": "default__escape_single_quotes", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "unique_id": "macro.dbt.default__escape_single_quotes", "macro_sql": "{% macro default__escape_single_quotes(expression) -%}\n{{ expression | replace(\"'\",\"''\") }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.085469, "supported_languages": null}, "macro.dbt.right": {"name": "right", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "unique_id": "macro.dbt.right", "macro_sql": "{% macro right(string_text, length_expression) -%}\n {{ return(adapter.dispatch('right', 'dbt') (string_text, length_expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__right"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.086175, "supported_languages": null}, "macro.dbt.default__right": {"name": "default__right", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "unique_id": 
"macro.dbt.default__right", "macro_sql": "{% macro default__right(string_text, length_expression) %}\n\n right(\n {{ string_text }},\n {{ length_expression }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0865312, "supported_languages": null}, "macro.dbt.listagg": {"name": "listagg", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt.listagg", "macro_sql": "{% macro listagg(measure, delimiter_text=\"','\", order_by_clause=none, limit_num=none) -%}\n {{ return(adapter.dispatch('listagg', 'dbt') (measure, delimiter_text, order_by_clause, limit_num)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__listagg"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.087887, "supported_languages": null}, "macro.dbt.default__listagg": {"name": "default__listagg", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt.default__listagg", "macro_sql": "{% macro default__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n array_slice(\n array_agg(\n {{ measure }}\n ){% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n ,0\n ,{{ limit_num }}\n ),\n {{ delimiter_text }}\n )\n {%- else %}\n listagg(\n {{ measure }},\n {{ delimiter_text }}\n )\n {% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.089178, 
"supported_languages": null}, "macro.dbt.datediff": {"name": "datediff", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": "macro.dbt.datediff", "macro_sql": "{% macro datediff(first_date, second_date, datepart) %}\n {{ return(adapter.dispatch('datediff', 'dbt')(first_date, second_date, datepart)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.090015, "supported_languages": null}, "macro.dbt.default__datediff": {"name": "default__datediff", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": "macro.dbt.default__datediff", "macro_sql": "{% macro default__datediff(first_date, second_date, datepart) -%}\n\n datediff(\n {{ datepart }},\n {{ first_date }},\n {{ second_date }}\n )\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0904791, "supported_languages": null}, "macro.dbt.safe_cast": {"name": "safe_cast", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "unique_id": "macro.dbt.safe_cast", "macro_sql": "{% macro safe_cast(field, type) %}\n {{ return(adapter.dispatch('safe_cast', 'dbt') (field, type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__safe_cast"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.091284, "supported_languages": null}, "macro.dbt.default__safe_cast": {"name": "default__safe_cast", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "unique_id": "macro.dbt.default__safe_cast", "macro_sql": "{% macro default__safe_cast(field, type) %}\n {# most databases don't support this function yet\n so we just need to use cast #}\n cast({{field}} as {{type}})\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0916579, "supported_languages": null}, "macro.dbt.hash": {"name": "hash", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "unique_id": "macro.dbt.hash", "macro_sql": "{% macro hash(field) -%}\n {{ return(adapter.dispatch('hash', 'dbt') (field)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__hash"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0922902, "supported_languages": null}, "macro.dbt.default__hash": {"name": "default__hash", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "unique_id": "macro.dbt.default__hash", "macro_sql": "{% macro default__hash(field) -%}\n md5(cast({{ field }} as {{ api.Column.translate_type('string') }}))\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0927832, "supported_languages": null}, "macro.dbt.cast_bool_to_text": {"name": "cast_bool_to_text", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "unique_id": "macro.dbt.cast_bool_to_text", "macro_sql": "{% macro cast_bool_to_text(field) %}\n {{ adapter.dispatch('cast_bool_to_text', 'dbt') (field) 
}}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__cast_bool_to_text"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.093413, "supported_languages": null}, "macro.dbt.default__cast_bool_to_text": {"name": "default__cast_bool_to_text", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "unique_id": "macro.dbt.default__cast_bool_to_text", "macro_sql": "{% macro default__cast_bool_to_text(field) %}\n cast({{ field }} as {{ api.Column.translate_type('string') }})\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0938308, "supported_languages": null}, "macro.dbt.any_value": {"name": "any_value", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt.any_value", "macro_sql": "{% macro any_value(expression) -%}\n {{ return(adapter.dispatch('any_value', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__any_value"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.094443, "supported_languages": null}, "macro.dbt.default__any_value": {"name": "default__any_value", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt.default__any_value", "macro_sql": "{% macro default__any_value(expression) -%}\n\n any_value({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], 
"created_at": 1670853278.094721, "supported_languages": null}, "macro.dbt.position": {"name": "position", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "unique_id": "macro.dbt.position", "macro_sql": "{% macro position(substring_text, string_text) -%}\n {{ return(adapter.dispatch('position', 'dbt') (substring_text, string_text)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__position"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.095426, "supported_languages": null}, "macro.dbt.default__position": {"name": "default__position", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "unique_id": "macro.dbt.default__position", "macro_sql": "{% macro default__position(substring_text, string_text) %}\n\n position(\n {{ substring_text }} in {{ string_text }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.095783, "supported_languages": null}, "macro.dbt.string_literal": {"name": "string_literal", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "unique_id": "macro.dbt.string_literal", "macro_sql": "{%- macro string_literal(value) -%}\n {{ return(adapter.dispatch('string_literal', 'dbt') (value)) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__string_literal"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.096468, "supported_languages": null}, "macro.dbt.default__string_literal": {"name": "default__string_literal", "resource_type": "macro", 
"package_name": "dbt", "path": "macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "unique_id": "macro.dbt.default__string_literal", "macro_sql": "{% macro default__string_literal(value) -%}\n '{{ value }}'\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.096813, "supported_languages": null}, "macro.dbt.type_string": {"name": "type_string", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_string", "macro_sql": "\n\n{%- macro type_string() -%}\n {{ return(adapter.dispatch('type_string', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_string"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.098714, "supported_languages": null}, "macro.dbt.default__type_string": {"name": "default__type_string", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_string", "macro_sql": "{% macro default__type_string() %}\n {{ return(api.Column.translate_type(\"string\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.099203, "supported_languages": null}, "macro.dbt.type_timestamp": {"name": "type_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_timestamp", "macro_sql": "\n\n{%- macro type_timestamp() -%}\n {{ return(adapter.dispatch('type_timestamp', 'dbt')()) }}\n{%- endmacro -%}\n\n", 
"depends_on": {"macros": ["macro.dbt.default__type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0996249, "supported_languages": null}, "macro.dbt.default__type_timestamp": {"name": "default__type_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_timestamp", "macro_sql": "{% macro default__type_timestamp() %}\n {{ return(api.Column.translate_type(\"timestamp\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1000152, "supported_languages": null}, "macro.dbt.type_float": {"name": "type_float", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_float", "macro_sql": "\n\n{%- macro type_float() -%}\n {{ return(adapter.dispatch('type_float', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_float"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.100458, "supported_languages": null}, "macro.dbt.default__type_float": {"name": "default__type_float", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_float", "macro_sql": "{% macro default__type_float() %}\n {{ return(api.Column.translate_type(\"float\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.10098, "supported_languages": null}, 
"macro.dbt.type_numeric": {"name": "type_numeric", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_numeric", "macro_sql": "\n\n{%- macro type_numeric() -%}\n {{ return(adapter.dispatch('type_numeric', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_numeric"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.101395, "supported_languages": null}, "macro.dbt.default__type_numeric": {"name": "default__type_numeric", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_numeric", "macro_sql": "{% macro default__type_numeric() %}\n {{ return(api.Column.numeric_type(\"numeric\", 28, 6)) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.101847, "supported_languages": null}, "macro.dbt.type_bigint": {"name": "type_bigint", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_bigint", "macro_sql": "\n\n{%- macro type_bigint() -%}\n {{ return(adapter.dispatch('type_bigint', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_bigint"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1022651, "supported_languages": null}, "macro.dbt.default__type_bigint": {"name": "default__type_bigint", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", 
"unique_id": "macro.dbt.default__type_bigint", "macro_sql": "{% macro default__type_bigint() %}\n {{ return(api.Column.translate_type(\"bigint\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.102656, "supported_languages": null}, "macro.dbt.type_int": {"name": "type_int", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_int", "macro_sql": "\n\n{%- macro type_int() -%}\n {{ return(adapter.dispatch('type_int', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_int"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103071, "supported_languages": null}, "macro.dbt.default__type_int": {"name": "default__type_int", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_int", "macro_sql": "{%- macro default__type_int() -%}\n {{ return(api.Column.translate_type(\"integer\")) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103501, "supported_languages": null}, "macro.dbt.type_boolean": {"name": "type_boolean", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_boolean", "macro_sql": "\n\n{%- macro type_boolean() -%}\n {{ return(adapter.dispatch('type_boolean', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_boolean"]}, "description": "", "meta": {}, "docs": {"show": 
true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103938, "supported_languages": null}, "macro.dbt.default__type_boolean": {"name": "default__type_boolean", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_boolean", "macro_sql": "{%- macro default__type_boolean() -%}\n {{ return(api.Column.translate_type(\"boolean\")) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.104328, "supported_languages": null}, "macro.dbt.array_concat": {"name": "array_concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "unique_id": "macro.dbt.array_concat", "macro_sql": "{% macro array_concat(array_1, array_2) -%}\n {{ return(adapter.dispatch('array_concat', 'dbt')(array_1, array_2)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__array_concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.105006, "supported_languages": null}, "macro.dbt.default__array_concat": {"name": "default__array_concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "unique_id": "macro.dbt.default__array_concat", "macro_sql": "{% macro default__array_concat(array_1, array_2) -%}\n array_cat({{ array_1 }}, {{ array_2 }})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.105355, "supported_languages": null}, "macro.dbt.bool_or": {"name": "bool_or", "resource_type": 
"macro", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "unique_id": "macro.dbt.bool_or", "macro_sql": "{% macro bool_or(expression) -%}\n {{ return(adapter.dispatch('bool_or', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__bool_or"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1059608, "supported_languages": null}, "macro.dbt.default__bool_or": {"name": "default__bool_or", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "unique_id": "macro.dbt.default__bool_or", "macro_sql": "{% macro default__bool_or(expression) -%}\n\n bool_or({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1062348, "supported_languages": null}, "macro.dbt.last_day": {"name": "last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.last_day", "macro_sql": "{% macro last_day(date, datepart) %}\n {{ return(adapter.dispatch('last_day', 'dbt') (date, datepart)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.107027, "supported_languages": null}, "macro.dbt.default_last_day": {"name": "default_last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.default_last_day", "macro_sql": "\n\n{%- macro default_last_day(date, datepart) -%}\n cast(\n {{dbt.dateadd('day', 
'-1',\n dbt.dateadd(datepart, '1', dbt.date_trunc(datepart, date))\n )}}\n as date)\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1076999, "supported_languages": null}, "macro.dbt.default__last_day": {"name": "default__last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.default__last_day", "macro_sql": "{% macro default__last_day(date, datepart) -%}\n {{dbt.default_last_day(date, datepart)}}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.108077, "supported_languages": null}, "macro.dbt.split_part": {"name": "split_part", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt.split_part", "macro_sql": "{% macro split_part(string_text, delimiter_text, part_number) %}\n {{ return(adapter.dispatch('split_part', 'dbt') (string_text, delimiter_text, part_number)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__split_part"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1092212, "supported_languages": null}, "macro.dbt.default__split_part": {"name": "default__split_part", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt.default__split_part", "macro_sql": "{% macro default__split_part(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n 
{{ delimiter_text }},\n {{ part_number }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.109689, "supported_languages": null}, "macro.dbt._split_part_negative": {"name": "_split_part_negative", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt._split_part_negative", "macro_sql": "{% macro _split_part_negative(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n length({{ string_text }})\n - length(\n replace({{ string_text }}, {{ delimiter_text }}, '')\n ) + 2 {{ part_number }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.11041, "supported_languages": null}, "macro.dbt.date_trunc": {"name": "date_trunc", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "unique_id": "macro.dbt.date_trunc", "macro_sql": "{% macro date_trunc(datepart, date) -%}\n {{ return(adapter.dispatch('date_trunc', 'dbt') (datepart, date)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.111156, "supported_languages": null}, "macro.dbt.default__date_trunc": {"name": "default__date_trunc", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "unique_id": "macro.dbt.default__date_trunc", "macro_sql": "{% macro default__date_trunc(datepart, date) -%}\n date_trunc('{{datepart}}', {{date}})\n{%- endmacro 
%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1115131, "supported_languages": null}, "macro.dbt.array_construct": {"name": "array_construct", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "unique_id": "macro.dbt.array_construct", "macro_sql": "{% macro array_construct(inputs=[], data_type=api.Column.translate_type('integer')) -%}\n {{ return(adapter.dispatch('array_construct', 'dbt')(inputs, data_type)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__array_construct"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1124191, "supported_languages": null}, "macro.dbt.default__array_construct": {"name": "default__array_construct", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "unique_id": "macro.dbt.default__array_construct", "macro_sql": "{% macro default__array_construct(inputs, data_type) -%}\n {% if inputs|length > 0 %}\n array[ {{ inputs|join(' , ') }} ]\n {% else %}\n array[]::{{data_type}}[]\n {% endif %}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.113195, "supported_languages": null}, "macro.dbt.array_append": {"name": "array_append", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "unique_id": "macro.dbt.array_append", "macro_sql": "{% macro array_append(array, new_element) -%}\n {{ return(adapter.dispatch('array_append', 'dbt')(array, new_element)) }}\n{%- endmacro %}", "depends_on": 
{"macros": ["macro.dbt.default__array_append"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.113871, "supported_languages": null}, "macro.dbt.default__array_append": {"name": "default__array_append", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "unique_id": "macro.dbt.default__array_append", "macro_sql": "{% macro default__array_append(array, new_element) -%}\n array_append({{ array }}, {{ new_element }})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.114215, "supported_languages": null}, "macro.dbt.create_schema": {"name": "create_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.create_schema", "macro_sql": "{% macro create_schema(relation) -%}\n {{ adapter.dispatch('create_schema', 'dbt')(relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115037, "supported_languages": null}, "macro.dbt.default__create_schema": {"name": "default__create_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.default__create_schema", "macro_sql": "{% macro default__create_schema(relation) -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier() }}\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": 
true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115519, "supported_languages": null}, "macro.dbt.drop_schema": {"name": "drop_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.drop_schema", "macro_sql": "{% macro drop_schema(relation) -%}\n {{ adapter.dispatch('drop_schema', 'dbt')(relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__drop_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115935, "supported_languages": null}, "macro.dbt.default__drop_schema": {"name": "default__drop_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.default__drop_schema", "macro_sql": "{% macro default__drop_schema(relation) -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier() }} cascade\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.116411, "supported_languages": null}, "macro.dbt.current_timestamp": {"name": "current_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp", "macro_sql": "{%- macro current_timestamp() -%}\n {{ adapter.dispatch('current_timestamp', 'dbt')() }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 
1670853278.117439, "supported_languages": null}, "macro.dbt.default__current_timestamp": {"name": "default__current_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp", "macro_sql": "{% macro default__current_timestamp() -%}\n {{ exceptions.raise_not_implemented(\n 'current_timestamp macro not implemented for adapter ' + adapter.type()) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.117823, "supported_languages": null}, "macro.dbt.snapshot_get_time": {"name": "snapshot_get_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.snapshot_get_time", "macro_sql": "\n\n{%- macro snapshot_get_time() -%}\n {{ adapter.dispatch('snapshot_get_time', 'dbt')() }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_get_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118193, "supported_languages": null}, "macro.dbt.default__snapshot_get_time": {"name": "default__snapshot_get_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__snapshot_get_time", "macro_sql": "{% macro default__snapshot_get_time() %}\n {{ current_timestamp() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118471, "supported_languages": null}, 
"macro.dbt.current_timestamp_backcompat": {"name": "current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp_backcompat", "macro_sql": "{% macro current_timestamp_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118895, "supported_languages": null}, "macro.dbt.default__current_timestamp_backcompat": {"name": "default__current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp_backcompat", "macro_sql": "{% macro default__current_timestamp_backcompat() %}\n current_timestamp::timestamp\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.119096, "supported_languages": null}, "macro.dbt.current_timestamp_in_utc_backcompat": {"name": "current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp_in_utc_backcompat", "macro_sql": "{% macro current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_in_utc_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853278.1195142, "supported_languages": null}, "macro.dbt.default__current_timestamp_in_utc_backcompat": {"name": "default__current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro default__current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp_backcompat", "macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.119937, "supported_languages": null}, "macro.dbt.get_create_index_sql": {"name": "get_create_index_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.get_create_index_sql", "macro_sql": "{% macro get_create_index_sql(relation, index_dict) -%}\n {{ return(adapter.dispatch('get_create_index_sql', 'dbt')(relation, index_dict)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_create_index_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.120949, "supported_languages": null}, "macro.dbt.default__get_create_index_sql": {"name": "default__get_create_index_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.default__get_create_index_sql", "macro_sql": "{% macro default__get_create_index_sql(relation, index_dict) -%}\n {% do return(None) %}\n{% endmacro %}", "depends_on": {"macros": 
[]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.121309, "supported_languages": null}, "macro.dbt.create_indexes": {"name": "create_indexes", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.create_indexes", "macro_sql": "{% macro create_indexes(relation) -%}\n {{ adapter.dispatch('create_indexes', 'dbt')(relation) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.12189, "supported_languages": null}, "macro.dbt.default__create_indexes": {"name": "default__create_indexes", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.default__create_indexes", "macro_sql": "{% macro default__create_indexes(relation) -%}\n {%- set _indexes = config.get('indexes', default=[]) -%}\n\n {% for _index_dict in _indexes %}\n {% set create_index_sql = get_create_index_sql(relation, _index_dict) %}\n {% if create_index_sql %}\n {% do run_query(create_index_sql) %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_create_index_sql", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1229818, "supported_languages": null}, "macro.dbt.make_intermediate_relation": {"name": "make_intermediate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_intermediate_relation", "macro_sql": "{% macro make_intermediate_relation(base_relation, 
suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_intermediate_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_intermediate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1292732, "supported_languages": null}, "macro.dbt.default__make_intermediate_relation": {"name": "default__make_intermediate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_intermediate_relation", "macro_sql": "{% macro default__make_intermediate_relation(base_relation, suffix) %}\n {{ return(default__make_temp_relation(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.129865, "supported_languages": null}, "macro.dbt.make_temp_relation": {"name": "make_temp_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_temp_relation", "macro_sql": "{% macro make_temp_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_temp_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.130422, "supported_languages": null}, "macro.dbt.default__make_temp_relation": {"name": "default__make_temp_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": 
"macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_temp_relation", "macro_sql": "{% macro default__make_temp_relation(base_relation, suffix) %}\n {%- set temp_identifier = base_relation.identifier ~ suffix -%}\n {%- set temp_relation = base_relation.incorporate(\n path={\"identifier\": temp_identifier}) -%}\n\n {{ return(temp_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.131156, "supported_languages": null}, "macro.dbt.make_backup_relation": {"name": "make_backup_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_backup_relation", "macro_sql": "{% macro make_backup_relation(base_relation, backup_relation_type, suffix='__dbt_backup') %}\n {{ return(adapter.dispatch('make_backup_relation', 'dbt')(base_relation, backup_relation_type, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_backup_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.131776, "supported_languages": null}, "macro.dbt.default__make_backup_relation": {"name": "default__make_backup_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_backup_relation", "macro_sql": "{% macro default__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {%- set backup_identifier = base_relation.identifier ~ suffix -%}\n {%- set backup_relation = base_relation.incorporate(\n path={\"identifier\": backup_identifier},\n type=backup_relation_type\n ) -%}\n {{ return(backup_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, 
"description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1325812, "supported_languages": null}, "macro.dbt.drop_relation": {"name": "drop_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.drop_relation", "macro_sql": "{% macro drop_relation(relation) -%}\n {{ return(adapter.dispatch('drop_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__drop_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.133047, "supported_languages": null}, "macro.dbt.default__drop_relation": {"name": "default__drop_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__drop_relation", "macro_sql": "{% macro default__drop_relation(relation) -%}\n {% call statement('drop_relation', auto_begin=False) -%}\n drop {{ relation.type }} if exists {{ relation }} cascade\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.133596, "supported_languages": null}, "macro.dbt.truncate_relation": {"name": "truncate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.truncate_relation", "macro_sql": "{% macro truncate_relation(relation) -%}\n {{ return(adapter.dispatch('truncate_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__truncate_relation"]}, "description": "", "meta": {}, "docs": {"show": 
true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.134069, "supported_languages": null}, "macro.dbt.default__truncate_relation": {"name": "default__truncate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__truncate_relation", "macro_sql": "{% macro default__truncate_relation(relation) -%}\n {% call statement('truncate_relation') -%}\n truncate table {{ relation }}\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1344929, "supported_languages": null}, "macro.dbt.rename_relation": {"name": "rename_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.rename_relation", "macro_sql": "{% macro rename_relation(from_relation, to_relation) -%}\n {{ return(adapter.dispatch('rename_relation', 'dbt')(from_relation, to_relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__rename_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.13501, "supported_languages": null}, "macro.dbt.default__rename_relation": {"name": "default__rename_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__rename_relation", "macro_sql": "{% macro default__rename_relation(from_relation, to_relation) -%}\n {% set target_name = adapter.quote_as_configured(to_relation.identifier, 'identifier') %}\n {% call statement('rename_relation') -%}\n alter table {{ from_relation }} rename to {{ 
target_name }}\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.135726, "supported_languages": null}, "macro.dbt.get_or_create_relation": {"name": "get_or_create_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.get_or_create_relation", "macro_sql": "{% macro get_or_create_relation(database, schema, identifier, type) -%}\n {{ return(adapter.dispatch('get_or_create_relation', 'dbt')(database, schema, identifier, type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_or_create_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.136352, "supported_languages": null}, "macro.dbt.default__get_or_create_relation": {"name": "default__get_or_create_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__get_or_create_relation", "macro_sql": "{% macro default__get_or_create_relation(database, schema, identifier, type) %}\n {%- set target_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n\n {% if target_relation %}\n {% do return([true, target_relation]) %}\n {% endif %}\n\n {%- set new_relation = api.Relation.create(\n database=database,\n schema=schema,\n identifier=identifier,\n type=type\n ) -%}\n {% do return([false, new_relation]) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1378748, "supported_languages": null}, 
"macro.dbt.load_cached_relation": {"name": "load_cached_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.load_cached_relation", "macro_sql": "{% macro load_cached_relation(relation) %}\n {% do return(adapter.get_relation(\n database=relation.database,\n schema=relation.schema,\n identifier=relation.identifier\n )) -%}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1384661, "supported_languages": null}, "macro.dbt.load_relation": {"name": "load_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.load_relation", "macro_sql": "{% macro load_relation(relation) %}\n {{ return(load_cached_relation(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.138835, "supported_languages": null}, "macro.dbt.drop_relation_if_exists": {"name": "drop_relation_if_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.drop_relation_if_exists", "macro_sql": "{% macro drop_relation_if_exists(relation) %}\n {% if relation is not none %}\n {{ adapter.drop_relation(relation) }}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.139338, "supported_languages": null}, "macro.dbt.collect_freshness": {"name": "collect_freshness", "resource_type": "macro", "package_name": 
"dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "unique_id": "macro.dbt.collect_freshness", "macro_sql": "{% macro collect_freshness(source, loaded_at_field, filter) %}\n {{ return(adapter.dispatch('collect_freshness', 'dbt')(source, loaded_at_field, filter))}}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__collect_freshness"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.140299, "supported_languages": null}, "macro.dbt.default__collect_freshness": {"name": "default__collect_freshness", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "unique_id": "macro.dbt.default__collect_freshness", "macro_sql": "{% macro default__collect_freshness(source, loaded_at_field, filter) %}\n {% call statement('collect_freshness', fetch_result=True, auto_begin=False) -%}\n select\n max({{ loaded_at_field }}) as max_loaded_at,\n {{ current_timestamp() }} as snapshotted_at\n from {{ source }}\n {% if filter %}\n where {{ filter }}\n {% endif %}\n {% endcall %}\n {{ return(load_result('collect_freshness').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.141366, "supported_languages": null}, "macro.dbt.copy_grants": {"name": "copy_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.copy_grants", "macro_sql": "{% macro copy_grants() %}\n {{ return(adapter.dispatch('copy_grants', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__copy_grants"]}, "description": "", "meta": {}, 
"docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.144733, "supported_languages": null}, "macro.dbt.default__copy_grants": {"name": "default__copy_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__copy_grants", "macro_sql": "{% macro default__copy_grants() %}\n {{ return(True) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.14506, "supported_languages": null}, "macro.dbt.support_multiple_grantees_per_dcl_statement": {"name": "support_multiple_grantees_per_dcl_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.support_multiple_grantees_per_dcl_statement", "macro_sql": "{% macro support_multiple_grantees_per_dcl_statement() %}\n {{ return(adapter.dispatch('support_multiple_grantees_per_dcl_statement', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.145495, "supported_languages": null}, "macro.dbt.default__support_multiple_grantees_per_dcl_statement": {"name": "default__support_multiple_grantees_per_dcl_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__support_multiple_grantees_per_dcl_statement", "macro_sql": "\n\n{%- macro default__support_multiple_grantees_per_dcl_statement() -%}\n {{ return(True) }}\n{%- endmacro -%}\n\n\n", "depends_on": 
{"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1457942, "supported_languages": null}, "macro.dbt.should_revoke": {"name": "should_revoke", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.should_revoke", "macro_sql": "{% macro should_revoke(existing_relation, full_refresh_mode=True) %}\n\n {% if not existing_relation %}\n {#-- The table doesn't already exist, so no grants to copy over --#}\n {{ return(False) }}\n {% elif full_refresh_mode %}\n {#-- The object is being REPLACED -- whether grants are copied over depends on the value of user config --#}\n {{ return(copy_grants()) }}\n {% else %}\n {#-- The table is being merged/upserted/inserted -- grants will be carried over --#}\n {{ return(True) }}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.146819, "supported_languages": null}, "macro.dbt.get_show_grant_sql": {"name": "get_show_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_show_grant_sql", "macro_sql": "{% macro get_show_grant_sql(relation) %}\n {{ return(adapter.dispatch(\"get_show_grant_sql\", \"dbt\")(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_show_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.147292, "supported_languages": null}, "macro.dbt.default__get_show_grant_sql": {"name": "default__get_show_grant_sql", "resource_type": "macro", "package_name": "dbt", 
"path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_show_grant_sql", "macro_sql": "{% macro default__get_show_grant_sql(relation) %}\n show grants on {{ relation }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.147562, "supported_languages": null}, "macro.dbt.get_grant_sql": {"name": "get_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_grant_sql", "macro_sql": "{% macro get_grant_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_grant_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1481369, "supported_languages": null}, "macro.dbt.default__get_grant_sql": {"name": "default__get_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_grant_sql", "macro_sql": "\n\n{%- macro default__get_grant_sql(relation, privilege, grantees) -%}\n grant {{ privilege }} on {{ relation }} to {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.148619, "supported_languages": null}, "macro.dbt.get_revoke_sql": {"name": "get_revoke_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": 
"macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_revoke_sql", "macro_sql": "{% macro get_revoke_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_revoke_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_revoke_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1491919, "supported_languages": null}, "macro.dbt.default__get_revoke_sql": {"name": "default__get_revoke_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_revoke_sql", "macro_sql": "\n\n{%- macro default__get_revoke_sql(relation, privilege, grantees) -%}\n revoke {{ privilege }} on {{ relation }} from {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.149673, "supported_languages": null}, "macro.dbt.get_dcl_statement_list": {"name": "get_dcl_statement_list", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_dcl_statement_list", "macro_sql": "{% macro get_dcl_statement_list(relation, grant_config, get_dcl_macro) %}\n {{ return(adapter.dispatch('get_dcl_statement_list', 'dbt')(relation, grant_config, get_dcl_macro)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_dcl_statement_list"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1502562, "supported_languages": null}, "macro.dbt.default__get_dcl_statement_list": {"name": "default__get_dcl_statement_list", 
"resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_dcl_statement_list", "macro_sql": "\n\n{%- macro default__get_dcl_statement_list(relation, grant_config, get_dcl_macro) -%}\n {#\n -- Unpack grant_config into specific privileges and the set of users who need them granted/revoked.\n -- Depending on whether this database supports multiple grantees per statement, pass in the list of\n -- all grantees per privilege, or (if not) template one statement per privilege-grantee pair.\n -- `get_dcl_macro` will be either `get_grant_sql` or `get_revoke_sql`\n #}\n {%- set dcl_statements = [] -%}\n {%- for privilege, grantees in grant_config.items() %}\n {%- if support_multiple_grantees_per_dcl_statement() and grantees -%}\n {%- set dcl = get_dcl_macro(relation, privilege, grantees) -%}\n {%- do dcl_statements.append(dcl) -%}\n {%- else -%}\n {%- for grantee in grantees -%}\n {% set dcl = get_dcl_macro(relation, privilege, [grantee]) %}\n {%- do dcl_statements.append(dcl) -%}\n {% endfor -%}\n {%- endif -%}\n {%- endfor -%}\n {{ return(dcl_statements) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.151925, "supported_languages": null}, "macro.dbt.call_dcl_statements": {"name": "call_dcl_statements", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.call_dcl_statements", "macro_sql": "{% macro call_dcl_statements(dcl_statement_list) %}\n {{ return(adapter.dispatch(\"call_dcl_statements\", \"dbt\")(dcl_statement_list)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__call_dcl_statements"]}, "description": "", 
"meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.152409, "supported_languages": null}, "macro.dbt.default__call_dcl_statements": {"name": "default__call_dcl_statements", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__call_dcl_statements", "macro_sql": "{% macro default__call_dcl_statements(dcl_statement_list) %}\n {#\n -- By default, supply all grant + revoke statements in a single semicolon-separated block,\n -- so that they're all processed together.\n\n -- Some databases do not support this. Those adapters will need to override this macro\n -- to run each statement individually.\n #}\n {% call statement('grants') %}\n {% for dcl_statement in dcl_statement_list %}\n {{ dcl_statement }};\n {% endfor %}\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.153051, "supported_languages": null}, "macro.dbt.apply_grants": {"name": "apply_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.apply_grants", "macro_sql": "{% macro apply_grants(relation, grant_config, should_revoke) %}\n {{ return(adapter.dispatch(\"apply_grants\", \"dbt\")(relation, grant_config, should_revoke)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.153632, "supported_languages": null}, "macro.dbt.default__apply_grants": {"name": "default__apply_grants", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__apply_grants", "macro_sql": "{% macro default__apply_grants(relation, grant_config, should_revoke=True) %}\n {#-- If grant_config is {} or None, this is a no-op --#}\n {% if grant_config %}\n {% if should_revoke %}\n {#-- We think previous grants may have carried over --#}\n {#-- Show current grants and calculate diffs --#}\n {% set current_grants_table = run_query(get_show_grant_sql(relation)) %}\n {% set current_grants_dict = adapter.standardize_grants_dict(current_grants_table) %}\n {% set needs_granting = diff_of_two_dicts(grant_config, current_grants_dict) %}\n {% set needs_revoking = diff_of_two_dicts(current_grants_dict, grant_config) %}\n {% if not (needs_granting or needs_revoking) %}\n {{ log('On ' ~ relation ~': All grants are in place, no revocation or granting needed.')}}\n {% endif %}\n {% else %}\n {#-- We don't think there's any chance of previous grants having carried over. --#}\n {#-- Jump straight to granting what the user has configured. 
--#}\n {% set needs_revoking = {} %}\n {% set needs_granting = grant_config %}\n {% endif %}\n {% if needs_granting or needs_revoking %}\n {% set revoke_statement_list = get_dcl_statement_list(relation, needs_revoking, get_revoke_sql) %}\n {% set grant_statement_list = get_dcl_statement_list(relation, needs_granting, get_grant_sql) %}\n {% set dcl_statement_list = revoke_statement_list + grant_statement_list %}\n {% if dcl_statement_list %}\n {{ call_dcl_statements(dcl_statement_list) }}\n {% endif %}\n {% endif %}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.get_show_grant_sql", "macro.dbt.get_dcl_statement_list", "macro.dbt.call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.156651, "supported_languages": null}, "macro.dbt.alter_column_comment": {"name": "alter_column_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.alter_column_comment", "macro_sql": "{% macro alter_column_comment(relation, column_dict) -%}\n {{ return(adapter.dispatch('alter_column_comment', 'dbt')(relation, column_dict)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.158199, "supported_languages": null}, "macro.dbt.default__alter_column_comment": {"name": "default__alter_column_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__alter_column_comment", "macro_sql": "{% macro default__alter_column_comment(relation, column_dict) -%}\n {{ exceptions.raise_not_implemented(\n 
'alter_column_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1586392, "supported_languages": null}, "macro.dbt.alter_relation_comment": {"name": "alter_relation_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.alter_relation_comment", "macro_sql": "{% macro alter_relation_comment(relation, relation_comment) -%}\n {{ return(adapter.dispatch('alter_relation_comment', 'dbt')(relation, relation_comment)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_relation_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.159168, "supported_languages": null}, "macro.dbt.default__alter_relation_comment": {"name": "default__alter_relation_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__alter_relation_comment", "macro_sql": "{% macro default__alter_relation_comment(relation, relation_comment) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_relation_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.159607, "supported_languages": null}, "macro.dbt.persist_docs": {"name": "persist_docs", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.persist_docs", 
"macro_sql": "{% macro persist_docs(relation, model, for_relation=true, for_columns=true) -%}\n {{ return(adapter.dispatch('persist_docs', 'dbt')(relation, model, for_relation, for_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.160279, "supported_languages": null}, "macro.dbt.default__persist_docs": {"name": "default__persist_docs", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__persist_docs", "macro_sql": "{% macro default__persist_docs(relation, model, for_relation, for_columns) -%}\n {% if for_relation and config.persist_relation_docs() and model.description %}\n {% do run_query(alter_relation_comment(relation, model.description)) %}\n {% endif %}\n\n {% if for_columns and config.persist_column_docs() and model.columns %}\n {% do run_query(alter_column_comment(relation, model.columns)) %}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.alter_relation_comment", "macro.dbt.alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.161473, "supported_languages": null}, "macro.dbt.get_catalog": {"name": "get_catalog", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.get_catalog", "macro_sql": "{% macro get_catalog(information_schema, schemas) -%}\n {{ return(adapter.dispatch('get_catalog', 'dbt')(information_schema, schemas)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_catalog"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, 
"patch_path": null, "arguments": [], "created_at": 1670853278.1643069, "supported_languages": null}, "macro.dbt.default__get_catalog": {"name": "default__get_catalog", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__get_catalog", "macro_sql": "{% macro default__get_catalog(information_schema, schemas) -%}\n\n {% set typename = adapter.type() %}\n {% set msg -%}\n get_catalog not implemented for {{ typename }}\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.16497, "supported_languages": null}, "macro.dbt.information_schema_name": {"name": "information_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.information_schema_name", "macro_sql": "{% macro information_schema_name(database) %}\n {{ return(adapter.dispatch('information_schema_name', 'dbt')(database)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__information_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.165441, "supported_languages": null}, "macro.dbt.default__information_schema_name": {"name": "default__information_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__information_schema_name", "macro_sql": "{% macro default__information_schema_name(database) -%}\n {%- if database -%}\n {{ database }}.INFORMATION_SCHEMA\n {%- else -%}\n INFORMATION_SCHEMA\n {%- endif -%}\n{%- endmacro %}", 
"depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.166005, "supported_languages": null}, "macro.dbt.list_schemas": {"name": "list_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.list_schemas", "macro_sql": "{% macro list_schemas(database) -%}\n {{ return(adapter.dispatch('list_schemas', 'dbt')(database)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.166465, "supported_languages": null}, "macro.dbt.default__list_schemas": {"name": "default__list_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__list_schemas", "macro_sql": "{% macro default__list_schemas(database) -%}\n {% set sql %}\n select distinct schema_name\n from {{ information_schema_name(database) }}.SCHEMATA\n where catalog_name ilike '{{ database }}'\n {% endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.information_schema_name", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.167077, "supported_languages": null}, "macro.dbt.check_schema_exists": {"name": "check_schema_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.check_schema_exists", "macro_sql": "{% macro check_schema_exists(information_schema, schema) -%}\n {{ return(adapter.dispatch('check_schema_exists', 
'dbt')(information_schema, schema)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__check_schema_exists"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.167588, "supported_languages": null}, "macro.dbt.default__check_schema_exists": {"name": "default__check_schema_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__check_schema_exists", "macro_sql": "{% macro default__check_schema_exists(information_schema, schema) -%}\n {% set sql -%}\n select count(*)\n from {{ information_schema.replace(information_schema_view='SCHEMATA') }}\n where catalog_name='{{ information_schema.database }}'\n and schema_name='{{ schema }}'\n {%- endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.replace", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.168344, "supported_languages": null}, "macro.dbt.list_relations_without_caching": {"name": "list_relations_without_caching", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.list_relations_without_caching", "macro_sql": "{% macro list_relations_without_caching(schema_relation) %}\n {{ return(adapter.dispatch('list_relations_without_caching', 'dbt')(schema_relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_relations_without_caching"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.168813, "supported_languages": null}, "macro.dbt.default__list_relations_without_caching": {"name": 
"default__list_relations_without_caching", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__list_relations_without_caching", "macro_sql": "{% macro default__list_relations_without_caching(schema_relation) %}\n {{ exceptions.raise_not_implemented(\n 'list_relations_without_caching macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.169232, "supported_languages": null}, "macro.dbt.get_columns_in_relation": {"name": "get_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.get_columns_in_relation", "macro_sql": "{% macro get_columns_in_relation(relation) -%}\n {{ return(adapter.dispatch('get_columns_in_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.172499, "supported_languages": null}, "macro.dbt.default__get_columns_in_relation": {"name": "default__get_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__get_columns_in_relation", "macro_sql": "{% macro default__get_columns_in_relation(relation) -%}\n {{ exceptions.raise_not_implemented(\n 'get_columns_in_relation macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853278.172908, "supported_languages": null}, "macro.dbt.sql_convert_columns_in_relation": {"name": "sql_convert_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.sql_convert_columns_in_relation", "macro_sql": "{% macro sql_convert_columns_in_relation(table) -%}\n {% set columns = [] %}\n {% for row in table %}\n {% do columns.append(api.Column(*row)) %}\n {% endfor %}\n {{ return(columns) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.173693, "supported_languages": null}, "macro.dbt.get_columns_in_query": {"name": "get_columns_in_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.get_columns_in_query", "macro_sql": "{% macro get_columns_in_query(select_sql) -%}\n {{ return(adapter.dispatch('get_columns_in_query', 'dbt')(select_sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.174159, "supported_languages": null}, "macro.dbt.default__get_columns_in_query": {"name": "default__get_columns_in_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__get_columns_in_query", "macro_sql": "{% macro default__get_columns_in_query(select_sql) %}\n {% call statement('get_columns_in_query', fetch_result=True, auto_begin=False) -%}\n select * from (\n {{ select_sql }}\n ) as __dbt_sbq\n where false\n limit 0\n {% endcall %}\n\n {{ 
return(load_result('get_columns_in_query').table.columns | map(attribute='name') | list) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.175012, "supported_languages": null}, "macro.dbt.alter_column_type": {"name": "alter_column_type", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.alter_column_type", "macro_sql": "{% macro alter_column_type(relation, column_name, new_column_type) -%}\n {{ return(adapter.dispatch('alter_column_type', 'dbt')(relation, column_name, new_column_type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1755981, "supported_languages": null}, "macro.dbt.default__alter_column_type": {"name": "default__alter_column_type", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__alter_column_type", "macro_sql": "{% macro default__alter_column_type(relation, column_name, new_column_type) -%}\n {#\n 1. Create a new column (w/ temp name and correct type)\n 2. Copy data over to it\n 3. Drop the existing column (cascade!)\n 4. 
Rename the new column to existing column\n #}\n {%- set tmp_column = column_name + \"__dbt_alter\" -%}\n\n {% call statement('alter_column_type') %}\n alter table {{ relation }} add column {{ adapter.quote(tmp_column) }} {{ new_column_type }};\n update {{ relation }} set {{ adapter.quote(tmp_column) }} = {{ adapter.quote(column_name) }};\n alter table {{ relation }} drop column {{ adapter.quote(column_name) }} cascade;\n alter table {{ relation }} rename column {{ adapter.quote(tmp_column) }} to {{ adapter.quote(column_name) }}\n {% endcall %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.177139, "supported_languages": null}, "macro.dbt.alter_relation_add_remove_columns": {"name": "alter_relation_add_remove_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.alter_relation_add_remove_columns", "macro_sql": "{% macro alter_relation_add_remove_columns(relation, add_columns = none, remove_columns = none) -%}\n {{ return(adapter.dispatch('alter_relation_add_remove_columns', 'dbt')(relation, add_columns, remove_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__alter_relation_add_remove_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.177797, "supported_languages": null}, "macro.dbt.default__alter_relation_add_remove_columns": {"name": "default__alter_relation_add_remove_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__alter_relation_add_remove_columns", "macro_sql": "{% macro default__alter_relation_add_remove_columns(relation, 
add_columns, remove_columns) %}\n\n {% if add_columns is none %}\n {% set add_columns = [] %}\n {% endif %}\n {% if remove_columns is none %}\n {% set remove_columns = [] %}\n {% endif %}\n\n {% set sql -%}\n\n alter {{ relation.type }} {{ relation }}\n\n {% for column in add_columns %}\n add column {{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}\n {% endfor %}{{ ',' if add_columns and remove_columns }}\n\n {% for column in remove_columns %}\n drop column {{ column.name }}{{ ',' if not loop.last }}\n {% endfor %}\n\n {%- endset -%}\n\n {% do run_query(sql) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.179789, "supported_languages": null}, "macro.dbt.build_ref_function": {"name": "build_ref_function", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.build_ref_function", "macro_sql": "{% macro build_ref_function(model) %}\n\n {%- set ref_dict = {} -%}\n {%- for _ref in model.refs -%}\n {%- set resolved = ref(*_ref) -%}\n {%- do ref_dict.update({_ref | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef ref(*args,dbt_load_df_function):\n refs = {{ ref_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(refs[key])\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.182821, "supported_languages": null}, "macro.dbt.build_source_function": {"name": "build_source_function", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": 
"macro.dbt.build_source_function", "macro_sql": "{% macro build_source_function(model) %}\n\n {%- set source_dict = {} -%}\n {%- for _source in model.sources -%}\n {%- set resolved = source(*_source) -%}\n {%- do source_dict.update({_source | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef source(*args, dbt_load_df_function):\n sources = {{ source_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(sources[key])\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.183993, "supported_languages": null}, "macro.dbt.build_config_dict": {"name": "build_config_dict", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.build_config_dict", "macro_sql": "{% macro build_config_dict(model) %}\n {%- set config_dict = {} -%}\n {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %}\n {%- for key, default in config_dbt_used -%}\n {# weird type testing with enum, would be much easier to write this logic in Python! 
#}\n {%- if key == 'language' -%}\n {%- set value = 'python' -%}\n {%- endif -%}\n {%- set value = model.config.get(key, default) -%}\n {%- do config_dict.update({key: value}) -%}\n {%- endfor -%}\nconfig_dict = {{ config_dict }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.185586, "supported_languages": null}, "macro.dbt.py_script_postfix": {"name": "py_script_postfix", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.py_script_postfix", "macro_sql": "{% macro py_script_postfix(model) %}\n# This part is user provided model code\n# you will need to copy the next section to run the code\n# COMMAND ----------\n# this part is dbt logic for get ref work, do not modify\n\n{{ build_ref_function(model ) }}\n{{ build_source_function(model ) }}\n{{ build_config_dict(model) }}\n\nclass config:\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def get(key, default=None):\n return config_dict.get(key, default)\n\nclass this:\n \"\"\"dbt.this() or dbt.this.identifier\"\"\"\n database = '{{ this.database }}'\n schema = '{{ this.schema }}'\n identifier = '{{ this.identifier }}'\n def __repr__(self):\n return '{{ this }}'\n\n\nclass dbtObj:\n def __init__(self, load_df_function) -> None:\n self.source = lambda *args: source(*args, dbt_load_df_function=load_df_function)\n self.ref = lambda *args: ref(*args, dbt_load_df_function=load_df_function)\n self.config = config\n self.this = this()\n self.is_incremental = {{ is_incremental() }}\n\n# COMMAND ----------\n{{py_script_comment()}}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.build_ref_function", "macro.dbt.build_source_function", "macro.dbt.build_config_dict", "macro.dbt.is_incremental", "macro.dbt.py_script_comment"]}, "description": "", "meta": 
{}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.186578, "supported_languages": null}, "macro.dbt.py_script_comment": {"name": "py_script_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.py_script_comment", "macro_sql": "{%macro py_script_comment()%}\n{%endmacro%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1867762, "supported_languages": null}, "macro.dbt.test_unique": {"name": "test_unique", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_unique", "macro_sql": "{% test unique(model, column_name) %}\n {% set macro = adapter.dispatch('test_unique', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_unique"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1879969, "supported_languages": null}, "macro.dbt.test_not_null": {"name": "test_not_null", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_not_null", "macro_sql": "{% test not_null(model, column_name) %}\n {% set macro = adapter.dispatch('test_not_null', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_not_null"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.188813, "supported_languages": null}, "macro.dbt.test_accepted_values": {"name": "test_accepted_values", 
"resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_accepted_values", "macro_sql": "{% test accepted_values(model, column_name, values, quote=True) %}\n {% set macro = adapter.dispatch('test_accepted_values', 'dbt') %}\n {{ macro(model, column_name, values, quote) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_accepted_values"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.189566, "supported_languages": null}, "macro.dbt.test_relationships": {"name": "test_relationships", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_relationships", "macro_sql": "{% test relationships(model, column_name, to, field) %}\n {% set macro = adapter.dispatch('test_relationships', 'dbt') %}\n {{ macro(model, column_name, to, field) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_relationships"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1902661, "supported_languages": null}}, "docs": {"doc.dbt.__overview__": {"name": "__overview__", "resource_type": "doc", "package_name": "dbt", "path": "overview.md", "original_file_path": "docs/overview.md", "unique_id": "doc.dbt.__overview__", "block_contents": "### Welcome!\n\nWelcome to the auto-generated documentation for your dbt project!\n\n### Navigation\n\nYou can use the `Project` and `Database` navigation tabs on the left side of the window to explore the models\nin your project.\n\n#### Project Tab\nThe `Project` tab mirrors the directory structure of your dbt project. 
In this tab, you can see all of the\nmodels defined in your dbt project, as well as models imported from dbt packages.\n\n#### Database Tab\nThe `Database` tab also exposes your models, but in a format that looks more like a database explorer. This view\nshows relations (tables and views) grouped into database schemas. Note that ephemeral models are _not_ shown\nin this interface, as they do not exist in the database.\n\n### Graph Exploration\nYou can click the blue icon on the bottom-right corner of the page to view the lineage graph of your models.\n\nOn model pages, you'll see the immediate parents and children of the model you're exploring. By clicking the `Expand`\nbutton at the top-right of this lineage pane, you'll be able to see all of the models that are used to build,\nor are built from, the model you're exploring.\n\nOnce expanded, you'll be able to use the `--select` and `--exclude` model selection syntax to filter the\nmodels in the graph. For more information on model selection, check out the [dbt docs](https://docs.getdbt.com/docs/model-selection-syntax).\n\nNote that you can also right-click on models to interactively filter and explore the graph.\n\n---\n\n### More information\n\n- [What is dbt](https://docs.getdbt.com/docs/introduction)?\n- Read the [dbt viewpoint](https://docs.getdbt.com/docs/viewpoint)\n- [Installation](https://docs.getdbt.com/docs/installation)\n- Join the [dbt Community](https://www.getdbt.com/community/) for questions and discussion"}}, "exposures": {}, "metrics": {"metric.test.my_metric": {"name": "my_metric", "resource_type": "metric", "package_name": "test", "path": "metric.yml", "original_file_path": "models/metric.yml", "unique_id": "metric.test.my_metric", "fqn": ["test", "my_metric"], "description": "", "label": "Count records", "calculation_method": "count", "timestamp": "updated_at", "expression": "*", "filters": [], "time_grains": ["day"], "dimensions": [], "window": null, "model": "ref('my_model')", 
"model_unique_id": null, "meta": {}, "tags": [], "config": {"enabled": true}, "unrendered_config": {}, "sources": [], "depends_on": {"macros": [], "nodes": ["model.test.my_model"]}, "refs": [["my_model"]], "metrics": [], "created_at": 1670853278.56334}}, "selectors": {}, "disabled": {}, "parent_map": {"model.test.my_model": [], "metric.test.my_metric": ["model.test.my_model"]}, "child_map": {"model.test.my_model": ["metric.test.my_metric"], "metric.test.my_metric": []}} diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 32c9dcfbfa1..51a6b633e40 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -94,7 +94,9 @@ def get_rendered_snapshot_config(**updates): "strategy": "check", "check_cols": "all", "unique_key": "id", + "target_database": None, "target_schema": None, + "updated_at": None, "meta": {}, "grants": {}, "packages": [], @@ -241,7 +243,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "build_path": None, "created_at": ANY, "name": "model", - "root_path": project.project_root, "relation_name": relation_name_node_format.format( model_database, my_schema_name, "model" ), @@ -321,7 +322,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "build_path": None, "created_at": ANY, "name": "second_model", - "root_path": project.project_root, "relation_name": relation_name_node_format.format( project.database, alternate_schema, "second_model" ), @@ -399,30 +399,19 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "unrendered_config": unrendered_second_config, }, "seed.test.seed": { - "compiled_path": None, "build_path": None, "created_at": ANY, - "compiled": True, - "compiled_code": "", "config": seed_config, "patch_path": "test://" + seed_schema_yml_path, "path": "seed.csv", "name": "seed", "root_path": project.project_root, - "relation_name": 
relation_name_node_format.format( - project.database, my_schema_name, "seed" - ), "resource_type": "seed", "raw_code": "", - "language": "sql", "package_name": "test", "original_file_path": seed_path, - "refs": [], - "sources": [], - "depends_on": {"nodes": [], "macros": []}, "unique_id": "seed.test.seed", "fqn": ["test", "seed"], - "metrics": [], "tags": [], "meta": {}, "schema": my_schema_name, @@ -473,12 +462,11 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): }, }, "docs": {"node_color": None, "show": True}, - "compiled": True, - "compiled_code": "", - "extra_ctes_injected": True, - "extra_ctes": [], "checksum": checksum_file(seed_path), "unrendered_config": unrendered_seed_config, + "relation_name": relation_name_node_format.format( + project.database, my_schema_name, "seed" + ), }, "test.test.not_null_model_id.d01cc630e6": { "alias": "not_null_model_id", @@ -510,7 +498,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, "tags": [], @@ -571,7 +558,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): project.database, alternate_schema, "snapshot_seed" ), "resource_type": "snapshot", - "root_path": project.project_root, "schema": alternate_schema, "sources": [], "tags": [], @@ -608,7 +594,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, "tags": [], @@ -659,7 +644,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, 
"tags": [], @@ -725,7 +709,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): project.database, my_schema_name, "seed" ), "resource_type": "source", - "root_path": project.project_root, "schema": my_schema_name, "source_description": "My source", "source_name": "my_source", @@ -751,6 +734,7 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "fqn": ["test", "notebook_exposure"], "maturity": "medium", "meta": {"tool": "my_tool", "languages": ["python"]}, + "metrics": [], "tags": ["my_department"], "name": "notebook_exposure", "original_file_path": os.path.join("models", "schema.yml"), @@ -759,7 +743,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "path": "schema.yml", "refs": [["model"], ["second_model"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [], "type": "notebook", "unique_id": "exposure.test.notebook_exposure", @@ -778,6 +761,7 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "enabled": True, }, "fqn": ["test", "simple_exposure"], + "metrics": [], "name": "simple_exposure", "original_file_path": os.path.join("models", "schema.yml"), "owner": { @@ -788,7 +772,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "path": "schema.yml", "refs": [["model"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [["my_source", "my_table"]], "type": "dashboard", "unique_id": "exposure.test.simple_exposure", @@ -839,9 +822,9 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "test.test.unique_model_id.67b76558ff": [], }, "docs": { - "dbt.__overview__": ANY, - "test.macro_info": ANY, - "test.macro_arg_info": ANY, + "doc.dbt.__overview__": ANY, + "doc.test.macro_info": ANY, + "doc.test.macro_arg_info": ANY, }, "disabled": {}, } @@ -892,7 +875,6 @@ def expected_references_manifest(project): "refs": [], "relation_name": 
None, "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "database": project.database, "tags": [], @@ -948,7 +930,6 @@ def expected_references_manifest(project): model_database, my_schema_name ), "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "database": project.database, "tags": [], @@ -1002,7 +983,6 @@ def expected_references_manifest(project): "refs": [["ephemeral_summary"]], "relation_name": '"{0}"."{1}".view_summary'.format(model_database, my_schema_name), "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "sources": [], "tags": [], @@ -1017,7 +997,6 @@ def expected_references_manifest(project): }, "seed.test.seed": { "alias": "seed", - "compiled_path": None, "build_path": None, "created_at": ANY, "columns": { @@ -1063,22 +1042,16 @@ def expected_references_manifest(project): }, }, "config": get_rendered_seed_config(), - "sources": [], - "depends_on": {"macros": [], "nodes": []}, "deferred": False, "description": "The test seed", "docs": {"node_color": None, "show": True}, "fqn": ["test", "seed"], - "metrics": [], "name": "seed", "original_file_path": seed_path, "package_name": "test", "patch_path": "test://" + os.path.join("seeds", "schema.yml"), "path": "seed.csv", "raw_code": "", - "language": "sql", - "refs": [], - "relation_name": '"{0}"."{1}".seed'.format(model_database, my_schema_name), "resource_type": "seed", "root_path": project.project_root, "schema": my_schema_name, @@ -1086,12 +1059,11 @@ def expected_references_manifest(project): "tags": [], "meta": {}, "unique_id": "seed.test.seed", - "compiled": True, - "compiled_code": "", - "extra_ctes_injected": True, - "extra_ctes": [], "checksum": checksum_file(seed_path), "unrendered_config": get_unrendered_seed_config(), + "relation_name": '"{0}"."{1}".seed'.format( + project.database, my_schema_name + ), }, "snapshot.test.snapshot_seed": { "alias": "snapshot_seed", @@ -1125,7 +1097,6 @@ 
def expected_references_manifest(project): model_database, alternate_schema ), "resource_type": "snapshot", - "root_path": project.project_root, "schema": alternate_schema, "sources": [], "tags": [], @@ -1176,7 +1147,6 @@ def expected_references_manifest(project): "patch_path": None, "relation_name": '{0}."{1}"."seed"'.format(project.database, my_schema_name), "resource_type": "source", - "root_path": project.project_root, "schema": my_schema_name, "source_description": "My source", "source_name": "my_source", @@ -1199,6 +1169,7 @@ def expected_references_manifest(project): "fqn": ["test", "notebook_exposure"], "maturity": "medium", "meta": {"tool": "my_tool", "languages": ["python"]}, + "metrics": [], "tags": ["my_department"], "name": "notebook_exposure", "original_file_path": os.path.join("models", "schema.yml"), @@ -1207,7 +1178,6 @@ def expected_references_manifest(project): "path": "schema.yml", "refs": [["view_summary"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [], "type": "notebook", "unique_id": "exposure.test.notebook_exposure", @@ -1218,98 +1188,98 @@ def expected_references_manifest(project): "metrics": {}, "selectors": {}, "docs": { - "dbt.__overview__": ANY, - "test.column_info": { + "doc.dbt.__overview__": ANY, + "doc.test.column_info": { "block_contents": "An ID field", + "resource_type": "doc", "name": "column_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.column_info", + "unique_id": "doc.test.column_info", }, - "test.ephemeral_summary": { + "doc.test.ephemeral_summary": { "block_contents": ("A summmary table of the ephemeral copy of the seed data"), + "resource_type": "doc", "name": "ephemeral_summary", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.ephemeral_summary", + "unique_id": "doc.test.ephemeral_summary", }, - 
"test.source_info": { + "doc.test.source_info": { "block_contents": "My source", + "resource_type": "doc", "name": "source_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.source_info", + "unique_id": "doc.test.source_info", }, - "test.summary_count": { + "doc.test.summary_count": { "block_contents": "The number of instances of the first name", + "resource_type": "doc", "name": "summary_count", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.summary_count", + "unique_id": "doc.test.summary_count", }, - "test.summary_first_name": { + "doc.test.summary_first_name": { "block_contents": "The first name being summarized", + "resource_type": "doc", "name": "summary_first_name", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.summary_first_name", + "unique_id": "doc.test.summary_first_name", }, - "test.table_info": { + "doc.test.table_info": { "block_contents": "My table", + "resource_type": "doc", "name": "table_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.table_info", + "unique_id": "doc.test.table_info", }, - "test.view_summary": { + "doc.test.view_summary": { "block_contents": ( "A view of the summary of the ephemeral copy of the seed data" ), + "resource_type": "doc", "name": "view_summary", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.view_summary", + "unique_id": "doc.test.view_summary", }, - "test.macro_info": { + "doc.test.macro_info": { "block_contents": "My custom test that I wrote that does nothing", + "resource_type": "doc", "name": "macro_info", "original_file_path": os.path.join("macros", "macro.md"), "package_name": 
"test", "path": "macro.md", - "root_path": project.project_root, - "unique_id": "test.macro_info", + "unique_id": "doc.test.macro_info", }, - "test.notebook_info": { + "doc.test.notebook_info": { "block_contents": "A description of the complex exposure", + "resource_type": "doc", "name": "notebook_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, - "unique_id": "test.notebook_info", + "unique_id": "doc.test.notebook_info", }, - "test.macro_arg_info": { + "doc.test.macro_arg_info": { "block_contents": "The model for my custom test", + "resource_type": "doc", "name": "macro_arg_info", "original_file_path": os.path.join("macros", "macro.md"), "package_name": "test", "path": "macro.md", - "root_path": project.project_root, - "unique_id": "test.macro_arg_info", + "unique_id": "doc.test.macro_arg_info", }, }, "child_map": { @@ -1348,8 +1318,6 @@ def expected_references_manifest(project): "patch_path": "test://" + os.path.join("macros", "schema.yml"), "resource_type": "macro", "unique_id": "macro.test.test_nothing", - "tags": [], - "root_path": project.project_root, "supported_languages": None, "arguments": [ { diff --git a/tests/functional/artifacts/test_previous_version_state.py b/tests/functional/artifacts/test_previous_version_state.py index c835e5a001c..a7a7ed5417c 100644 --- a/tests/functional/artifacts/test_previous_version_state.py +++ b/tests/functional/artifacts/test_previous_version_state.py @@ -42,7 +42,7 @@ class TestPreviousVersionState: - CURRENT_EXPECTED_MANIFEST_VERSION = 7 + CURRENT_EXPECTED_MANIFEST_VERSION = 8 @pytest.fixture(scope="class") def models(self): diff --git a/tests/functional/colors/test_colors.py b/tests/functional/colors/test_colors.py new file mode 100644 index 00000000000..7e92e039506 --- /dev/null +++ b/tests/functional/colors/test_colors.py @@ -0,0 +1,43 @@ +import pytest +import re +from dbt.tests.util import run_dbt_and_capture + + 
+models__do_nothing_then_fail_sql = """ +select 1, + +""" + + +@pytest.fixture(scope="class") +def models(): + return {"do_nothing_then_fail.sql": models__do_nothing_then_fail_sql} + + +@pytest.fixture(scope="class") +def project_config_update(): + return {'config-version': 2} + + +class TestColors: + def test_use_colors(self, project): + self.assert_colors_used( + "--use-colors", + expect_colors=True, + ) + + def test_no_use_colors(self, project): + self.assert_colors_used( + "--no-use-colors", + expect_colors=False, + ) + + def assert_colors_used(self, flag, expect_colors): + _, stdout = run_dbt_and_capture(args=[flag, "run"], expect_pass=False) + # pattern to match formatted log output + pattern = re.compile(r"\[31m.*|\[33m.*") + stdout_contains_formatting_characters = bool(pattern.search(stdout)) + if expect_colors: + assert stdout_contains_formatting_characters + else: + assert not stdout_contains_formatting_characters diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py index 68501c146f9..e2f416d2fb4 100644 --- a/tests/functional/context_methods/test_builtin_functions.py +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -112,15 +112,15 @@ def test_builtin_invocation_args_dict_function(self, project): expected = "invocation_result: {'debug': True, 'log_format': 'json', 'write_json': True, 'use_colors': True, 'printer_width': 80, 'version_check': True, 'partial_parse': True, 'static_parser': True, 'profiles_dir': " assert expected in str(result) - expected = "'send_anonymous_usage_stats': False, 'event_buffer_size': 100000, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'anonymous_usage_stats': True, 'indirect_selection': 'eager'}" + expected = "'send_anonymous_usage_stats': False, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 
'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'anonymous_usage_stats': True, 'indirect_selection': 'eager'}" assert expected in str(result) def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch): envs = { - "DBT_ENV_CUSTOM_ENV_RUN_ID": 1234, - "DBT_ENV_CUSTOM_ENV_JOB_ID": 5678, - "DBT_ENV_RUN_ID": 91011, - "RANDOM_ENV": 121314, + "DBT_ENV_CUSTOM_ENV_RUN_ID": "1234", + "DBT_ENV_CUSTOM_ENV_JOB_ID": "5678", + "DBT_ENV_RUN_ID": "91011", + "RANDOM_ENV": "121314", } monkeypatch.setattr(os, "environ", envs) @@ -133,7 +133,7 @@ def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch): assert result - expected = "dbt_metadata_envs_result:{'RUN_ID': 1234, 'JOB_ID': 5678}" + expected = "dbt_metadata_envs_result:{'RUN_ID': '1234', 'JOB_ID': '5678'}" assert expected in str(result) diff --git a/tests/functional/duplicates/test_duplicate_model.py b/tests/functional/duplicates/test_duplicate_model.py index 031ba6236c0..fbcd1b79671 100644 --- a/tests/functional/duplicates/test_duplicate_model.py +++ b/tests/functional/duplicates/test_duplicate_model.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationException, DuplicateResourceName from dbt.tests.fixtures.project import write_project_files from dbt.tests.util import run_dbt, get_manifest @@ -108,7 +108,7 @@ def packages(self): def test_duplicate_model_enabled_across_packages(self, project): run_dbt(["deps"]) message = "dbt found two models with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(DuplicateResourceName) as exc: run_dbt(["run"]) assert message in str(exc.value) diff --git a/tests/functional/exit_codes/fixtures.py b/tests/functional/exit_codes/fixtures.py new file mode 100644 index 00000000000..23a0bef3897 --- /dev/null +++ b/tests/functional/exit_codes/fixtures.py @@ -0,0 +1,78 @@ +import pytest + +bad_sql = """ +select bad sql here +""" 
+ +dupe_sql = """ +select 1 as id, current_date as updated_at +union all +select 2 as id, current_date as updated_at +union all +select 3 as id, current_date as updated_at +union all +select 4 as id, current_date as updated_at +""" + +good_sql = """ +select 1 as id, current_date as updated_at +union all +select 2 as id, current_date as updated_at +union all +select 3 as id, current_date as updated_at +union all +select 4 as id, current_date as updated_at +""" + +snapshots_good_sql = """ +{% snapshot good_snapshot %} + {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}} + select * from {{ schema }}.good +{% endsnapshot %} +""" + +snapshots_bad_sql = """ +{% snapshot good_snapshot %} + {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at_not_real')}} + select * from {{ schema }}.good +{% endsnapshot %} +""" + +schema_yml = """ +version: 2 +models: +- name: good + columns: + - name: updated_at + tests: + - not_null +- name: bad + columns: + - name: updated_at + tests: + - not_null +- name: dupe + columns: + - name: updated_at + tests: + - unique +""" + +data_seed_good_csv = """a,b,c +1,2,3 +""" + +data_seed_bad_csv = """a,b,c +1,\2,3,a,a,a +""" + + +class BaseConfigProject: + @pytest.fixture(scope="class") + def models(self): + return { + "bad.sql": bad_sql, + "dupe.sql": dupe_sql, + "good.sql": good_sql, + "schema.yml": schema_yml + } diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py new file mode 100644 index 00000000000..54b5cb6865e --- /dev/null +++ b/tests/functional/exit_codes/test_exit_codes.py @@ -0,0 +1,124 @@ +import pytest + +import dbt.exceptions +from dbt.tests.util import ( + check_table_does_exist, + check_table_does_not_exist, + run_dbt +) +from tests.functional.exit_codes.fixtures import ( + BaseConfigProject, + snapshots_bad_sql, + snapshots_good_sql, + 
data_seed_bad_csv, + data_seed_good_csv +) + + +class TestExitCodes(BaseConfigProject): + @pytest.fixture(scope="class") + def snapshots(self): + return {"g.sql": snapshots_good_sql} + + def test_exit_code_run_succeed(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + check_table_does_exist(project.adapter, 'good') + + def test_exit_code_run_fail(self, project): + results = run_dbt(['run', '--model', 'bad'], expect_pass=False) + assert len(results) == 1 + check_table_does_not_exist(project.adapter, 'bad') + + def test_schema_test_pass(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + + results = run_dbt(['test', '--model', 'good']) + assert len(results) == 1 + + def test_schema_test_fail(self, project): + results = run_dbt(['run', '--model', 'dupe']) + assert len(results) == 1 + + results = run_dbt(['test', '--model', 'dupe'], expect_pass=False) + assert len(results) == 1 + + def test_compile(self, project): + results = run_dbt(['compile']) + assert len(results) == 7 + + def test_snapshot_pass(self, project): + run_dbt(["run", "--model", "good"]) + results = run_dbt(['snapshot']) + assert len(results) == 1 + check_table_does_exist(project.adapter, 'good_snapshot') + + +class TestExitCodesSnapshotFail(BaseConfigProject): + @pytest.fixture(scope="class") + def snapshots(self): + return {"b.sql": snapshots_bad_sql} + + def test_snapshot_fail(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + + results = run_dbt(['snapshot'], expect_pass=False) + assert len(results) == 1 + check_table_does_not_exist(project.adapter, 'good_snapshot') + + +class TestExitCodesDeps: + @pytest.fixture(scope="class") + def packages(self): + return { + "packages": [ + { + 'git': 'https://github.com/dbt-labs/dbt-integration-project', + 'revision': 'dbt/1.0.0', + } + ] + } + + def test_deps(self, project): + results = run_dbt(['deps']) + assert results is None + + 
+class TestExitCodesDepsFail: + @pytest.fixture(scope="class") + def packages(self): + return { + "packages": [ + { + 'git': 'https://github.com/dbt-labs/dbt-integration-project', + 'revision': 'bad-branch', + }, + ] + } + + def test_deps_fail(self, project): + with pytest.raises(dbt.exceptions.GitCheckoutError) as exc: + run_dbt(['deps']) + expected_msg = "Error checking out spec='bad-branch'" + assert expected_msg in str(exc.value) + + +class TestExitCodesSeed: + @pytest.fixture(scope="class") + def seeds(self): + return {"good.csv": data_seed_good_csv} + + def test_seed(self, project): + results = run_dbt(['seed']) + assert len(results) == 1 + + +class TestExitCodesSeedFail: + @pytest.fixture(scope="class") + def seeds(self): + return {"bad.csv": data_seed_bad_csv} + + def test_seed(self, project): + run_dbt(['seed'], expect_pass=False) diff --git a/tests/functional/exposures/fixtures.py b/tests/functional/exposures/fixtures.py index 847a3cf5f73..1d573b1a7b6 100644 --- a/tests/functional/exposures/fixtures.py +++ b/tests/functional/exposures/fixtures.py @@ -7,6 +7,29 @@ select 1 as id """ + +source_schema_yml = """version: 2 + +sources: + - name: test_source + tables: + - name: test_table +""" + +metrics_schema_yml = """version: 2 + +metrics: + - name: metric + model: ref('model') + label: "label" + + calculation_method: count_distinct + expression: id + + timestamp: first_order + time_grains: [day] +""" + simple_exposure_yml = """ version: 2 @@ -16,6 +39,8 @@ type: dashboard depends_on: - ref('model') + - source('test_source', 'test_table') + - metric('metric') owner: email: something@example.com - name: notebook_exposure diff --git a/tests/functional/exposures/test_exposure_configs.py b/tests/functional/exposures/test_exposure_configs.py index ed49f565ec7..a7018204952 100644 --- a/tests/functional/exposures/test_exposure_configs.py +++ b/tests/functional/exposures/test_exposure_configs.py @@ -10,7 +10,9 @@ simple_exposure_yml, disabled_models_exposure_yml, 
enabled_yaml_level_exposure_yml, - invalid_config_exposure_yml + invalid_config_exposure_yml, + source_schema_yml, + metrics_schema_yml ) @@ -29,7 +31,9 @@ def models(self): return { "model.sql": models_sql, "second_model.sql": second_model_sql, - "schema.yml": simple_exposure_yml, + "exposure.yml": simple_exposure_yml, + "schema.yml": source_schema_yml, + "metrics.yml": metrics_schema_yml, } @pytest.fixture(scope="class") diff --git a/tests/functional/exposures/test_exposures.py b/tests/functional/exposures/test_exposures.py index 52ff74d4b0c..777a8e161c4 100644 --- a/tests/functional/exposures/test_exposures.py +++ b/tests/functional/exposures/test_exposures.py @@ -5,6 +5,8 @@ models_sql, second_model_sql, simple_exposure_yml, + source_schema_yml, + metrics_schema_yml ) @@ -15,6 +17,8 @@ def models(self): "exposure.yml": simple_exposure_yml, "model.sql": models_sql, "second_model.sql": second_model_sql, + "schema.yml": source_schema_yml, + "metrics.yml": metrics_schema_yml, } def test_names_with_spaces(self, project): @@ -27,3 +31,14 @@ def test_names_with_spaces(self, project): ] assert exposure_ids == expected_exposure_ids assert manifest.exposures["exposure.test.simple_exposure"].label == "simple exposure label" + + def test_depends_on(self, project): + run_dbt(["run"]) + manifest = get_manifest(project.project_root) + exposure_depends_on = manifest.exposures["exposure.test.simple_exposure"].depends_on.nodes + expected_exposure_depends_on = [ + 'source.test.test_source.test_table', + 'model.test.model', + 'metric.test.metric' + ] + assert sorted(exposure_depends_on) == sorted(expected_exposure_depends_on) diff --git a/tests/functional/incremental_schema_tests/fixtures.py b/tests/functional/incremental_schema_tests/fixtures.py new file mode 100644 index 00000000000..c6eebc5e183 --- /dev/null +++ b/tests/functional/incremental_schema_tests/fixtures.py @@ -0,0 +1,395 @@ + +# +# Properties +# +_PROPERTIES__SCHEMA = """ +version: 2 + +models: + - name: model_a + 
columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_ignore + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_ignore_target + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_append_new_columns + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_append_new_columns_target + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_sync_all_columns + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_sync_all_columns_target + columns: + - name: id + tags: [column_leveL_tag] + tests: + - unique +""" + +# +# Models +# +_MODELS__INCREMENTAL_SYNC_REMOVE_ONLY = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='sync_all_columns' + + ) +}} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% set string_type = 'varchar(10)' %} + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1 + +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +select id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 + +from source_data where id <= 3 + +{% endif %} +""" + +_MODELS__INCREMENTAL_IGNORE = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='ignore' + ) +}} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, field1, field2, field3, field4 FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +SELECT id, field1, field2 FROM source_data LIMIT 3 + +{% endif %} +""" + +_MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +{% set string_type = 'varchar(10)' %} + +select id + 
,cast(field1 as {{string_type}}) as field1 + +from source_data +order by id +""" + +_MODELS__INCREMENTAL_IGNORE_TARGET = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +select id + ,field1 + ,field2 + +from source_data +""" + +_MODELS__INCREMENTAL_FAIL = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='fail' + ) +}} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, field1, field2 FROM source_data + +{% else %} + +SELECT id, field1, field3 FROm source_data + +{% endif %} +""" + +_MODELS__INCREMENTAL_SYNC_ALL_COLUMNS = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='sync_all_columns' + + ) +}} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% set string_type = 'varchar(10)' %} + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field3 as {{string_type}}) as field3, -- to validate new fields + cast(field4 as {{string_type}}) AS field4 -- to validate new fields + +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +select id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 + +from source_data where id <= 3 + +{% endif %} +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='append_new_columns' + ) +}} + +{% set string_type = 'varchar(10)' %} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field3 as {{string_type}}) as field3, + cast(field4 as {{string_type}}) as field4 +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 +FROM 
source_data where id <= 3 + +{% endif %} +""" + +_MODELS__A = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select 1 as id, 'aaa' as field1, 'bbb' as field2, 111 as field3, 'TTT' as field4 + union all select 2 as id, 'ccc' as field1, 'ddd' as field2, 222 as field3, 'UUU' as field4 + union all select 3 as id, 'eee' as field1, 'fff' as field2, 333 as field3, 'VVV' as field4 + union all select 4 as id, 'ggg' as field1, 'hhh' as field2, 444 as field3, 'WWW' as field4 + union all select 5 as id, 'iii' as field1, 'jjj' as field2, 555 as field3, 'XXX' as field4 + union all select 6 as id, 'kkk' as field1, 'lll' as field2, 666 as field3, 'YYY' as field4 + +) + +select id + ,field1 + ,field2 + ,field3 + ,field4 + +from source_data +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """ +{{ + config(materialized='table') +}} + +{% set string_type = 'varchar(10)' %} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +select id + ,cast(field1 as {{string_type}}) as field1 + ,cast(field2 as {{string_type}}) as field2 + ,cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3 + ,cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 + +from source_data +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='append_new_columns' + ) +}} + +{% set string_type = 'varchar(10)' %} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2, + cast(field3 as {{string_type}}) as field3, + cast(field4 as {{string_type}}) as field4 +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 +FROM source_data where id <= 3 + +{% endif %} +""" + 
+_MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +{% set string_type = 'varchar(10)' %} + +select id + ,cast(field1 as {{string_type}}) as field1 + --,field2 + ,cast(case when id <= 3 then null else field3 end as {{string_type}}) as field3 + ,cast(case when id <= 3 then null else field4 end as {{string_type}}) as field4 + +from source_data +order by id +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET = """ +{{ + config(materialized='table') +}} + +{% set string_type = 'varchar(10)' %} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +select id, + cast(field1 as {{string_type}}) as field1, + cast(CASE WHEN id > 3 THEN NULL ELSE field2 END as {{string_type}}) AS field2, + cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3, + cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 + +from source_data +""" + +# +# Tests +# + +_TESTS__SELECT_FROM_INCREMENTAL_IGNORE = """ +select * from {{ ref('incremental_ignore') }} where false +""" + +_TESTS__SELECT_FROM_A = """ +select * from {{ ref('model_a') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """ +select * from {{ ref('incremental_append_new_columns_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS = """ +select * from {{ ref('incremental_sync_all_columns') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """ +select * from {{ ref('incremental_sync_all_columns_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET = """ +select * from {{ ref('incremental_ignore_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS = """ +select * from {{ ref('incremental_append_new_columns') }} where false +""" diff --git 
a/tests/functional/incremental_schema_tests/test_incremental_schema.py b/tests/functional/incremental_schema_tests/test_incremental_schema.py new file mode 100644 index 00000000000..3ee9e6477e4 --- /dev/null +++ b/tests/functional/incremental_schema_tests/test_incremental_schema.py @@ -0,0 +1,136 @@ +import pytest + +from dbt.tests.util import ( + check_relations_equal, + run_dbt, +) + +from tests.functional.incremental_schema_tests.fixtures import ( + _PROPERTIES__SCHEMA, + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, + _MODELS__INCREMENTAL_IGNORE, + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + _MODELS__INCREMENTAL_IGNORE_TARGET, + _MODELS__INCREMENTAL_FAIL, + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + _MODELS__A, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, + _TESTS__SELECT_FROM_A, + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, +) + + +class TestIncrementalSchemaChange: + @pytest.fixture(scope="class") + def properties(self): + return { + "schema.yml": _PROPERTIES__SCHEMA, + } + + @pytest.fixture(scope="class") + def models(self): + return { + "incremental_sync_remove_only.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, + "incremental_ignore.sql": _MODELS__INCREMENTAL_IGNORE, + "incremental_sync_remove_only_target.sql": + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + "incremental_ignore_target.sql": _MODELS__INCREMENTAL_IGNORE_TARGET, + "incremental_fail.sql": _MODELS__INCREMENTAL_FAIL, + "incremental_sync_all_columns.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, + 
"incremental_append_new_columns_remove_one.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + "model_a.sql": _MODELS__A, + "incremental_append_new_columns_target.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "incremental_append_new_columns.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, + "incremental_sync_all_columns_target.sql": + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "incremental_append_new_columns_remove_one_target.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, + } + + @pytest.fixture(scope="class") + def tests(self): + return { + "select_from_incremental.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, + "select_from_a.sql": _TESTS__SELECT_FROM_A, + "select_from_incremental_append_new_columns_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "select_from_incremental_sync_all_columns.sql": + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, + "select_from_incremental_sync_all_columns_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "select_from_incremental_ignore_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, + "select_from_incremental_append_new_columns.sql": + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, + } + + def run_twice_and_assert( + self, include, compare_source, compare_target, project + ): + + # dbt run (twice) + run_args = ['run'] + if include: + run_args.extend(('--select', include)) + results_one = run_dbt(run_args) + assert len(results_one) == 3 + + results_two = run_dbt(run_args) + assert len(results_two) == 3 + + check_relations_equal(project.adapter, [compare_source, compare_target]) + + def run_incremental_append_new_columns(self, project): + select = 'model_a incremental_append_new_columns incremental_append_new_columns_target' + compare_source = 'incremental_append_new_columns' + compare_target = 'incremental_append_new_columns_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def 
run_incremental_append_new_columns_remove_one(self, project): + select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target' + compare_source = 'incremental_append_new_columns_remove_one' + compare_target = 'incremental_append_new_columns_remove_one_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_sync_all_columns(self, project): + select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target' + compare_source = 'incremental_sync_all_columns' + compare_target = 'incremental_sync_all_columns_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_sync_remove_only(self, project): + select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target' + compare_source = 'incremental_sync_remove_only' + compare_target = 'incremental_sync_remove_only_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def test_run_incremental_ignore(self, project): + select = 'model_a incremental_ignore incremental_ignore_target' + compare_source = 'incremental_ignore' + compare_target = 'incremental_ignore_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def test_run_incremental_append_new_columns(self, project): + self.run_incremental_append_new_columns(project) + self.run_incremental_append_new_columns_remove_one(project) + + def test_run_incremental_sync_all_columns(self, project): + self.run_incremental_sync_all_columns(project) + self.run_incremental_sync_remove_only(project) + + def test_run_incremental_fail_on_schema_change(self, project): + select = 'model_a incremental_fail' + run_dbt(['run', '--models', select, '--full-refresh']) + results_two = run_dbt(['run', '--models', select], expect_pass=False) + assert 'Compilation Error' in results_two[1].message diff --git a/tests/functional/list/test_list.py 
b/tests/functional/list/test_list.py index 78fca376d7d..cf0d3d89add 100644 --- a/tests/functional/list/test_list.py +++ b/tests/functional/list/test_list.py @@ -357,7 +357,6 @@ def expect_seed_output(self): "json": { "name": "seed", "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, "tags": [], "config": { "enabled": True, diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py new file mode 100644 index 00000000000..b0feea50809 --- /dev/null +++ b/tests/functional/logging/test_logging.py @@ -0,0 +1,51 @@ +import pytest +from dbt.tests.util import run_dbt, get_manifest, read_file +import json + + +my_model_sql = """ + select 1 as fun +""" + + +@pytest.fixture(scope="class") +def models(): + return {"my_model.sql": my_model_sql} + + +# This test checks that various events contain node_info, +# which is supplied by the log_contextvars context manager +def test_basic(project, logs_dir): + results = run_dbt(["--log-format=json", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + assert "model.test.my_model" in manifest.nodes + + # get log file + log_file = read_file(logs_dir, "dbt.log") + assert log_file + node_start = False + node_finished = False + for log_line in log_file.split('\n'): + # skip empty lines + if len(log_line) == 0: + continue + # The adapter logging also shows up, so skip non-json lines + if "[debug]" in log_line: + continue + log_dct = json.loads(log_line) + log_event = log_dct['info']['name'] + if log_event == "NodeStart": + node_start = True + if log_event == "NodeFinished": + node_finished = True + if node_start and not node_finished: + if log_event == 'NodeExecuting': + assert "node_info" in log_dct + if log_event == "JinjaLogDebug": + assert "node_info" in log_dct + if log_event == "SQLQuery": + assert "node_info" in log_dct + if log_event == "TimingInfoCollected": + assert "node_info" in log_dct + assert "timing_info" in log_dct diff --git 
a/tests/functional/partial_parsing/test_pp_docs.py b/tests/functional/partial_parsing/test_pp_docs.py index f9ab5e3a2d7..b3c7d52212d 100644 --- a/tests/functional/partial_parsing/test_pp_docs.py +++ b/tests/functional/partial_parsing/test_pp_docs.py @@ -129,7 +129,7 @@ def test_pp_docs(self, project): results = run_dbt(["--partial-parse", "run"]) manifest = get_manifest(project.project_root) assert len(manifest.docs) == 2 - doc_id = "test.customer_table" + doc_id = "doc.test.customer_table" assert doc_id in manifest.docs doc = manifest.docs[doc_id] doc_file_id = doc.file_id @@ -225,7 +225,7 @@ def models(self): def test_remove_replace(self, project): run_dbt(["parse", "--write-manifest"]) manifest = get_manifest(project.project_root) - doc_id = "test.whatever" + doc_id = "doc.test.whatever" assert doc_id in manifest.docs doc = manifest.docs[doc_id] doc_file = manifest.files[doc.file_id] diff --git a/tests/functional/postgres/fixtures.py b/tests/functional/postgres/fixtures.py new file mode 100644 index 00000000000..93b26b4f31b --- /dev/null +++ b/tests/functional/postgres/fixtures.py @@ -0,0 +1,134 @@ +models__incremental_sql = """ +{{ + config( + materialized = "incremental", + indexes=[ + {'columns': ['column_a'], 'type': 'hash'}, + {'columns': ['column_a', 'column_b'], 'unique': True}, + ] + ) +}} + +select * +from ( + select 1 as column_a, 2 as column_b +) t + +{% if is_incremental() %} + where column_a > (select max(column_a) from {{this}}) +{% endif %} + +""" + +models__table_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': ['column_a']}, + {'columns': ['column_b']}, + {'columns': ['column_a', 'column_b']}, + {'columns': ['column_b', 'column_a'], 'type': 'btree', 'unique': True}, + {'columns': ['column_a'], 'type': 'hash'} + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__invalid_columns_type_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': 'column_a, column_b'}, + ] + ) +}} + 
+select 1 as column_a, 2 as column_b + +""" + +models_invalid__invalid_type_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': ['column_a'], 'type': 'non_existent_type'}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__invalid_unique_config_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': ['column_a'], 'unique': 'yes'}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__missing_columns_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'unique': True}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +snapshots__colors_sql = """ +{% snapshot colors %} + + {{ + config( + target_database=database, + target_schema=schema, + unique_key='id', + strategy='check', + check_cols=['color'], + indexes=[ + {'columns': ['id'], 'type': 'hash'}, + {'columns': ['id', 'color'], 'unique': True}, + ] + ) + }} + + {% if var('version') == 1 %} + + select 1 as id, 'red' as color union all + select 2 as id, 'green' as color + + {% else %} + + select 1 as id, 'blue' as color union all + select 2 as id, 'green' as color + + {% endif %} + +{% endsnapshot %} + +""" + +seeds__seed_csv = """country_code,country_name +US,United States +CA,Canada +GB,United Kingdom +""" diff --git a/tests/functional/postgres/test_postgres_indexes.py b/tests/functional/postgres/test_postgres_indexes.py new file mode 100644 index 00000000000..64d61d2df87 --- /dev/null +++ b/tests/functional/postgres/test_postgres_indexes.py @@ -0,0 +1,149 @@ +import pytest +import re +from dbt.tests.util import ( + run_dbt, + run_dbt_and_capture, +) +from tests.functional.postgres.fixtures import ( + models__incremental_sql, + models__table_sql, + models_invalid__missing_columns_sql, + models_invalid__invalid_columns_type_sql, + models_invalid__invalid_type_sql, + models_invalid__invalid_unique_config_sql, + seeds__seed_csv, + snapshots__colors_sql, +) + + +INDEX_DEFINITION_PATTERN = 
re.compile(r"using\s+(\w+)\s+\((.+)\)\Z") + + +class TestPostgresIndex: + @pytest.fixture(scope="class") + def models(self): + return { + "table.sql": models__table_sql, + "incremental.sql": models__incremental_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": seeds__seed_csv} + + @pytest.fixture(scope="class") + def snapshots(self): + return {"colors.sql": snapshots__colors_sql} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "seeds": { + "quote_columns": False, + "indexes": [ + {"columns": ["country_code"], "unique": False, "type": "hash"}, + {"columns": ["country_code", "country_name"], "unique": True}, + ], + }, + "vars": { + "version": 1, + }, + } + + def test_table(self, project, unique_schema): + results = run_dbt(["run", "--models", "table"]) + assert len(results) == 1 + + indexes = self.get_indexes("table", project, unique_schema) + expected = [ + {"columns": "column_a", "unique": False, "type": "btree"}, + {"columns": "column_b", "unique": False, "type": "btree"}, + {"columns": "column_a, column_b", "unique": False, "type": "btree"}, + {"columns": "column_b, column_a", "unique": True, "type": "btree"}, + {"columns": "column_a", "unique": False, "type": "hash"}, + ] + assert len(indexes) == len(expected) + + def test_incremental(self, project, unique_schema): + for additional_argument in [[], [], ["--full-refresh"]]: + results = run_dbt(["run", "--models", "incremental"] + additional_argument) + assert len(results) == 1 + + indexes = self.get_indexes('incremental', project, unique_schema) + expected = [ + {"columns": "column_a", "unique": False, "type": "hash"}, + {"columns": "column_a, column_b", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def test_seed(self, project, unique_schema): + for additional_argument in [[], [], ['--full-refresh']]: + results = run_dbt(["seed"] + additional_argument) + assert len(results) == 1 + + 
indexes = self.get_indexes('seed', project, unique_schema) + expected = [ + {"columns": "country_code", "unique": False, "type": "hash"}, + {"columns": "country_code, country_name", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def test_snapshot(self, project, unique_schema): + for version in [1, 2]: + results = run_dbt(["snapshot", "--vars", f"version: {version}"]) + assert len(results) == 1 + + indexes = self.get_indexes('colors', project, unique_schema) + expected = [ + {"columns": "id", "unique": False, "type": "hash"}, + {"columns": "id, color", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def get_indexes(self, table_name, project, unique_schema): + sql = f""" + SELECT + pg_get_indexdef(idx.indexrelid) as index_definition + FROM pg_index idx + JOIN pg_class tab ON tab.oid = idx.indrelid + WHERE + tab.relname = '{table_name}' + AND tab.relnamespace = ( + SELECT oid FROM pg_namespace WHERE nspname = '{unique_schema}' + ); + """ + results = project.run_sql(sql, fetch="all") + return [self.parse_index_definition(row[0]) for row in results] + + def parse_index_definition(self, index_definition): + index_definition = index_definition.lower() + is_unique = "unique" in index_definition + m = INDEX_DEFINITION_PATTERN.search(index_definition) + return { + "columns": m.group(2), + "unique": is_unique, + "type": m.group(1), + } + + def assertCountEqual(self, a, b): + assert len(a) == len(b) + + +class TestPostgresInvalidIndex(): + @pytest.fixture(scope="class") + def models(self): + return { + "invalid_unique_config.sql": models_invalid__invalid_unique_config_sql, + "invalid_type.sql": models_invalid__invalid_type_sql, + "invalid_columns_type.sql": models_invalid__invalid_columns_type_sql, + "missing_columns.sql": models_invalid__missing_columns_sql, + } + + def test_invalid_index_configs(self, project): + results, output = run_dbt_and_capture(expect_pass=False) + assert len(results) == 4 + assert 
re.search(r"columns.*is not of type 'array'", output) + assert re.search(r"unique.*is not of type 'boolean'", output) + assert re.search(r"'columns' is a required property", output) + assert re.search(r"Database Error in model invalid_type", output) diff --git a/tests/functional/relation_names/test_relation_name.py b/tests/functional/relation_names/test_relation_name.py new file mode 100644 index 00000000000..5d941d96da5 --- /dev/null +++ b/tests/functional/relation_names/test_relation_name.py @@ -0,0 +1,124 @@ +import pytest + +from dbt.contracts.results import RunStatus +from dbt.tests.util import run_dbt + +# Test coverage: A relation is a name for a database entity, i.e. a table or view. Every relation has +# a name. These tests verify the default Postgres rules for relation names are followed. Adapters +# may override connection rules and thus may have their own tests. + +seeds__seed = """col_A,col_B +1,2 +3,4 +5,6 +""" + +models__basic_incremental = """ +select * from {{ this.schema }}.seed + +{{ + config({ + "unique_key": "col_A", + "materialized": "incremental" + }) +}} +""" + +models__basic_table = """ +select * from {{ this.schema }}.seed + +{{ + config({ + "materialized": "table" + }) +}} +""" + + +class TestGeneratedDDLNameRules: + @classmethod + def setup_class(self): + self.incremental_filename = "my_name_is_51_characters_incremental_abcdefghijklmn" + # length is 63 + self.max_length_filename = "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789" + # length is 64 + self.over_max_length_filename = "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1" + + self.filename_for_backup_file = "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0" + + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project): + run_dbt(["seed"]) + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": seeds__seed} + + @pytest.fixture(scope="class") + def models(self): + return { + f"{self.incremental_filename}.sql": + 
models__basic_incremental, + f"{self.filename_for_backup_file}.sql": + models__basic_table, + f"{self.max_length_filename}.sql": + models__basic_table, + f"{self.over_max_length_filename}.sql": + models__basic_table, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + } + + # Backup table name generation: + # 1. for len(relation name) <= 51, backfills + # 2. for len(relation name) > 51 characters, overwrites + # the last 12 characters with __dbt_backup + def test_name_shorter_or_equal_to_63_passes(self, project): + run_dbt( + [ + "run", + "-s", + f"{self.max_length_filename}", + f"{self.filename_for_backup_file}", + ], + ) + + def test_long_name_passes_when_temp_tables_are_generated(self): + run_dbt( + [ + "run", + "-s", + f"{self.incremental_filename}", + ], + ) + + # Run again to trigger incremental materialization + run_dbt( + [ + "run", + "-s", + f"{self.incremental_filename}", + ], + ) + + # 63 characters is the character limit for a table name in a postgres database + # (assuming compiled without changes from source) + def test_name_longer_than_63_does_not_build(self): + err_msg = "Relation name 'my_name_is_one_over_max"\ + "_length_chats_abcdefghijklmnopqrstuvwxyz1' is longer than 63 characters" + res = run_dbt( + [ + "run", + "-s", + self.over_max_length_filename, + ], + expect_pass=False + ) + assert res[0].status == RunStatus.Error + assert err_msg in res[0].message diff --git a/test/integration/044_run_operations_tests/macros/happy_macros.sql b/tests/functional/run_operations/fixtures.py similarity index 82% rename from test/integration/044_run_operations_tests/macros/happy_macros.sql rename to tests/functional/run_operations/fixtures.py index c5c6df4dc8a..f6ed82e20ec 100644 --- a/test/integration/044_run_operations_tests/macros/happy_macros.sql +++ b/tests/functional/run_operations/fixtures.py @@ -1,3 +1,4 @@ +happy_macros_sql = """ {% macro no_args() %} {% if execute %} {% 
call statement(auto_begin=True) %} @@ -53,4 +54,19 @@ {% macro print_something() %} {{ print("You're doing awesome!") }} -{% endmacro %} \ No newline at end of file +{% endmacro %} +""" + +sad_macros_sql = """ +{% macro syntax_error() %} + {% if execute %} + {% call statement() %} + select NOPE NOT A VALID QUERY + {% endcall %} + {% endif %} +{% endmacro %} +""" + +model_sql = """ +select 1 as id +""" diff --git a/tests/functional/run_operations/test_run_operations.py b/tests/functional/run_operations/test_run_operations.py new file mode 100644 index 00000000000..f91ef2d8359 --- /dev/null +++ b/tests/functional/run_operations/test_run_operations.py @@ -0,0 +1,104 @@ +import os +import pytest +import yaml + +from dbt.tests.util import ( + check_table_does_exist, + run_dbt +) +from tests.functional.run_operations.fixtures import ( + happy_macros_sql, + sad_macros_sql, + model_sql +) + + +class TestOperations: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": model_sql} + + @pytest.fixture(scope="class") + def macros(self): + return { + "happy_macros.sql": happy_macros_sql, + "sad_macros.sql": sad_macros_sql + } + + @pytest.fixture(scope="class") + def dbt_profile_data(self, unique_schema): + return { + "config": {"send_anonymous_usage_stats": False}, + "test": { + "outputs": { + "default": { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": os.getenv("POSTGRES_TEST_USER", "root"), + "pass": os.getenv("POSTGRES_TEST_PASS", "password"), + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + "schema": unique_schema, + }, + "noaccess": { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": 'noaccess', + "pass": 'password', + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + 'schema': unique_schema + } + }, + "target": "default", + }, + } + + def run_operation(self, macro, expect_pass=True, 
extra_args=None, **kwargs): + args = ['run-operation', macro] + if kwargs: + args.extend(('--args', yaml.safe_dump(kwargs))) + if extra_args: + args.extend(extra_args) + return run_dbt(args, expect_pass=expect_pass) + + def test_macro_noargs(self, project): + self.run_operation('no_args') + check_table_does_exist(project.adapter, 'no_args') + + def test_macro_args(self, project): + self.run_operation('table_name_args', table_name='my_fancy_table') + check_table_does_exist(project.adapter, 'my_fancy_table') + + def test_macro_exception(self, project): + self.run_operation('syntax_error', False) + + def test_macro_missing(self, project): + self.run_operation('this_macro_does_not_exist', False) + + def test_cannot_connect(self, project): + self.run_operation('no_args', + extra_args=['--target', 'noaccess'], + expect_pass=False) + + def test_vacuum(self, project): + run_dbt(['run']) + # this should succeed + self.run_operation('vacuum', table_name='model') + + def test_vacuum_ref(self, project): + run_dbt(['run']) + # this should succeed + self.run_operation('vacuum_ref', ref_target='model') + + def test_select(self, project): + self.run_operation('select_something', name='world') + + def test_access_graph(self, project): + self.run_operation('log_graph') + + def test_print(self, project): + # Tests that calling the `print()` macro does not cause an exception + self.run_operation('print_something') diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py index 00c14cd711b..44a6696931b 100644 --- a/tests/functional/schema_tests/test_schema_v2_tests.py +++ b/tests/functional/schema_tests/test_schema_v2_tests.py @@ -95,7 +95,7 @@ alt_local_utils__macros__type_timestamp_sql, all_quotes_schema__schema_yml, ) -from dbt.exceptions import ParsingException, CompilationException +from dbt.exceptions import ParsingException, CompilationException, DuplicateResourceName from dbt.contracts.results import TestStatus @@ 
-904,9 +904,9 @@ def test_generic_test_collision( project, ): """These tests collide, since only the configs differ""" - with pytest.raises(CompilationException) as exc: + with pytest.raises(DuplicateResourceName) as exc: run_dbt() - assert "dbt found two tests with the name" in str(exc) + assert "dbt found two tests with the name" in str(exc.value) class TestGenericTestsConfigCustomMacros: diff --git a/test/integration/030_statement_tests/seed/seed.csv b/tests/functional/statements/fixtures.py similarity index 89% rename from test/integration/030_statement_tests/seed/seed.csv rename to tests/functional/statements/fixtures.py index 640af6c4ee6..e05f697644a 100644 --- a/test/integration/030_statement_tests/seed/seed.csv +++ b/tests/functional/statements/fixtures.py @@ -1,4 +1,12 @@ -id,first_name,last_name,email,gender,ip_address +# +# Seeds +# +seeds__statement_expected = """source,value +matrix,100 +table,100 +""" + +seeds__statement_actual = """id,first_name,last_name,email,gender,ip_address 1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 @@ -99,3 +107,32 @@ 98,Angela,Brooks,abrooks2p@mtv.com,Female,10.63.249.126 99,Harold,Foster,hfoster2q@privacy.gov.au,Male,139.214.40.244 100,Carl,Meyer,cmeyer2r@disqus.com,Male,204.117.7.88 +""" + +# +# Models +# +models__statement_actual = """ +-- {{ ref('seed') }} + +{%- call statement('test_statement', fetch_result=True) -%} + + select + count(*) as "num_records" + + from {{ ref('seed') }} + +{%- endcall -%} + +{% set result = load_result('test_statement') %} + +{% set res_table = result['table'] %} +{% set res_matrix = result['data'] %} + +{% set matrix_value = res_matrix[0][0] %} +{% set table_value = res_table[0]['num_records'] %} + +select 'matrix' as source, {{ matrix_value }} as value +union all +select 'table' as source, {{ table_value }} as value +""" diff --git 
a/tests/functional/statements/test_statements.py b/tests/functional/statements/test_statements.py new file mode 100644 index 00000000000..4b8640b8066 --- /dev/null +++ b/tests/functional/statements/test_statements.py @@ -0,0 +1,43 @@ +import pathlib +import pytest + +from dbt.tests.util import ( + run_dbt, + check_relations_equal, + write_file +) +from tests.functional.statements.fixtures import ( + models__statement_actual, + seeds__statement_actual, + seeds__statement_expected, +) + + +class TestStatements: + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project): + # put seeds in 'seed' not 'seeds' directory + (pathlib.Path(project.project_root) / "seed").mkdir(parents=True, exist_ok=True) + write_file(seeds__statement_actual, project.project_root, "seed", "seed.csv") + write_file(seeds__statement_expected, project.project_root, "seed", "statement_expected.csv") + + @pytest.fixture(scope="class") + def models(self): + return {"statement_actual.sql": models__statement_actual} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + "seed-paths": ["seed"], + } + + def test_postgres_statements(self, project): + results = run_dbt(["seed"]) + assert len(results) == 2 + results = run_dbt() + assert len(results) == 1 + + check_relations_equal(project.adapter, ["statement_actual", "statement_expected"]) diff --git a/tests/functional/store_test_failures_tests/fixtures.py b/tests/functional/store_test_failures_tests/fixtures.py new file mode 100644 index 00000000000..dae8530135e --- /dev/null +++ b/tests/functional/store_test_failures_tests/fixtures.py @@ -0,0 +1,126 @@ +# +# Seeds +# +seeds__people = """id,first_name,last_name,email,gender,ip_address +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 
+5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 +8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 +""" + +seeds__expected_accepted_values = """value_field,n_records +Gary,1 +Rose,1 +""" + +seeds__expected_failing_test = """id,first_name,last_name,email,gender,ip_address +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 +8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 +""" + +seeds__expected_not_null_problematic_model_id = """id,first_name,last_name,email,gender,ip_address +,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +""" + +seeds__expected_unique_problematic_model_id = """unique_field,n_records +2,2 +1,2 +""" + +# +# Schema +# +properties__schema_yml = """ +version: 2 + +models: + + - name: fine_model + columns: + - name: id + tests: + - unique + - not_null + + - name: problematic_model + columns: + - name: id + tests: + - unique: + store_failures: true + - not_null + - name: first_name + tests: + # test truncation of really long test name + - accepted_values: + values: + - Jack + - Kathryn + - Gerald + - Bonnie + - Harold + - Jacqueline + - Wanda + - Craig + # - Gary + # - Rose + + - name: fine_model_but_with_a_no_good_very_long_name + columns: + - name: quite_long_column_name + tests: + # test truncation of 
really long test name with builtin + - unique +""" + +# +# Models +# +models__fine_model = """ +select * from {{ ref('people') }} +""" + +models__file_model_but_with_a_no_good_very_long_name = """ +select 1 as quite_long_column_name +""" + +models__problematic_model = """ +select * from {{ ref('people') }} + +union all + +select * from {{ ref('people') }} +where id in (1,2) + +union all + +select null as id, first_name, last_name, email, gender, ip_address from {{ ref('people') }} +where id in (3,4) +""" + +# +# Tests +# +tests__failing_test = """ +select * from {{ ref('fine_model') }} +""" + +tests__passing_test = """ +select * from {{ ref('fine_model') }} +where false +""" diff --git a/tests/functional/store_test_failures_tests/test_store_test_failures.py b/tests/functional/store_test_failures_tests/test_store_test_failures.py new file mode 100644 index 00000000000..ff26d7d97d3 --- /dev/null +++ b/tests/functional/store_test_failures_tests/test_store_test_failures.py @@ -0,0 +1,152 @@ +import pytest + +from dbt.tests.util import ( + check_relations_equal, + run_dbt, +) + +from tests.functional.store_test_failures_tests.fixtures import ( + seeds__people, + seeds__expected_accepted_values, + seeds__expected_failing_test, + seeds__expected_not_null_problematic_model_id, + seeds__expected_unique_problematic_model_id, + properties__schema_yml, + models__problematic_model, + models__fine_model, + models__file_model_but_with_a_no_good_very_long_name, + tests__failing_test, + tests__passing_test, +) + +# used to rename test audit schema to help test schema meet max char limit +# the default is _dbt_test__audit but this runs over the postgres 63 schema name char limit +# without which idempotency conditions will not hold (i.e. 
dbt can't drop the schema properly) +TEST_AUDIT_SCHEMA_SUFFIX = "dbt_test__aud" + + +class StoreTestFailuresBase: + @pytest.fixture(scope="function", autouse=True) + def setUp(self, project): + self.test_audit_schema = f"{project.test_schema}_{TEST_AUDIT_SCHEMA_SUFFIX}" + run_dbt(["seed"]) + run_dbt(["run"]) + + @pytest.fixture(scope="class") + def seeds(self): + return { + "people.csv": seeds__people, + "expected_accepted_values.csv": seeds__expected_accepted_values, + "expected_failing_test.csv": seeds__expected_failing_test, + "expected_not_null_problematic_model_id.csv": + seeds__expected_not_null_problematic_model_id, + "expected_unique_problematic_model_id.csv": + seeds__expected_unique_problematic_model_id, + } + + @pytest.fixture(scope="class") + def tests(self): + return { + "failing_test.sql": tests__failing_test, + "passing_test.sql": tests__passing_test, + } + + @pytest.fixture(scope="class") + def properties(self): + return {"schema.yml": properties__schema_yml} + + @pytest.fixture(scope="class") + def models(self): + return { + "fine_model.sql": models__fine_model, + "fine_model_but_with_a_no_good_very_long_name.sql": + models__file_model_but_with_a_no_good_very_long_name, + "problematic_model.sql": models__problematic_model, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + "test": self.column_type_overrides(), + }, + "tests": { + "+schema": TEST_AUDIT_SCHEMA_SUFFIX + } + } + + def column_type_overrides(self): + return {} + + def run_tests_store_one_failure(self, project): + run_dbt(["test"], expect_pass=False) + + # one test is configured with store_failures: true, make sure it worked + check_relations_equal( + project.adapter, + [ + f"{self.test_audit_schema}.unique_problematic_model_id", + "expected_unique_problematic_model_id" + ] + ) + + def run_tests_store_failures_and_assert(self, project): + # make sure this works idempotently for all tests + run_dbt(["test", 
"--store-failures"], expect_pass=False) + results = run_dbt(["test", "--store-failures"], expect_pass=False) + + # compare test results + actual = [(r.status, r.failures) for r in results] + expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0), + ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)] + assert sorted(actual) == sorted(expected) + + # compare test results stored in database + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.failing_test", + "expected_failing_test" + ]) + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.not_null_problematic_model_id", + "expected_not_null_problematic_model_id" + ]) + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.unique_problematic_model_id", + "expected_unique_problematic_model_id" + ]) + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.accepted_values_problemat" + "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628", + "expected_accepted_values" + ]) + + +class TestStoreTestFailures(StoreTestFailuresBase): + @pytest.fixture(scope="function") + def clean_up(self, project): + yield + with project.adapter.connection_named('__test'): + relation = project.adapter.Relation.create(database=project.database, schema=self.test_audit_schema) + project.adapter.drop_schema(relation) + + relation = project.adapter.Relation.create(database=project.database, schema=project.test_schema) + project.adapter.drop_schema(relation) + + def column_type_overrides(self): + return { + "expected_unique_problematic_model_id": { + "+column_types": { + "n_records": "bigint", + }, + }, + "expected_accepted_values": { + "+column_types": { + "n_records": "bigint", + }, + }, + } + + def test__store_and_assert(self, project, clean_up): + self.run_tests_store_one_failure(project) + self.run_tests_store_failures_and_assert(project) diff --git a/tests/functional/threading/test_thread_count.py b/tests/functional/threading/test_thread_count.py new file mode 100644 index 
00000000000..c31f5ed6312 --- /dev/null +++ b/tests/functional/threading/test_thread_count.py @@ -0,0 +1,46 @@ +import pytest +from dbt.tests.util import run_dbt + + +models__do_nothing__sql = """ +with x as (select pg_sleep(1)) select 1 +""" + + +class TestThreadCount: + @pytest.fixture(scope="class") + def models(self): + return { + "do_nothing_1.sql": models__do_nothing__sql, + "do_nothing_2.sql": models__do_nothing__sql, + "do_nothing_3.sql": models__do_nothing__sql, + "do_nothing_4.sql": models__do_nothing__sql, + "do_nothing_5.sql": models__do_nothing__sql, + "do_nothing_6.sql": models__do_nothing__sql, + "do_nothing_7.sql": models__do_nothing__sql, + "do_nothing_8.sql": models__do_nothing__sql, + "do_nothing_9.sql": models__do_nothing__sql, + "do_nothing_10.sql": models__do_nothing__sql, + "do_nothing_11.sql": models__do_nothing__sql, + "do_nothing_12.sql": models__do_nothing__sql, + "do_nothing_13.sql": models__do_nothing__sql, + "do_nothing_14.sql": models__do_nothing__sql, + "do_nothing_15.sql": models__do_nothing__sql, + "do_nothing_16.sql": models__do_nothing__sql, + "do_nothing_17.sql": models__do_nothing__sql, + "do_nothing_18.sql": models__do_nothing__sql, + "do_nothing_19.sql": models__do_nothing__sql, + "do_nothing_20.sql": models__do_nothing__sql, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return {"config-version": 2} + + @pytest.fixture(scope="class") + def profiles_config_update(self): + return {"threads": 2} + + def test_threading_8x(self, project): + results = run_dbt(args=["run", "--threads", "16"]) + assert len(results), 20 diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index c2064b84c1a..3dbff04c303 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -1,7 +1,7 @@ # flake8: noqa from dbt.events.test_types import UnitTestInfo from dbt.events import AdapterLogger -from dbt.events.functions import event_to_json, LOG_VERSION, reset_event_history +from dbt.events.functions 
import event_to_json, LOG_VERSION, event_to_dict from dbt.events.types import * from dbt.events.test_types import * @@ -13,13 +13,13 @@ ErrorLevel, TestLevel, ) -from dbt.events.proto_types import NodeInfo, RunResultMsg, ReferenceKeyMsg +from dbt.events.proto_types import ListOfStrings, NodeInfo, RunResultMsg, ReferenceKeyMsg from importlib import reload import dbt.events.functions as event_funcs import dbt.flags as flags import inspect import json -from dbt.contracts.graph.parsed import ParsedModelNode, NodeConfig, DependsOn +from dbt.contracts.graph.nodes import ModelNode, NodeConfig, DependsOn from dbt.contracts.files import FileHash from mashumaro.types import SerializableType from typing import Generic, TypeVar, Dict @@ -29,10 +29,8 @@ def get_all_subclasses(cls): all_subclasses = [] for subclass in cls.__subclasses__(): - # If the test breaks because of abcs this list might have to be updated. - if subclass in [TestLevel, DebugLevel, WarnLevel, InfoLevel, ErrorLevel]: - continue - all_subclasses.append(subclass) + if subclass not in [TestLevel, DebugLevel, WarnLevel, InfoLevel, ErrorLevel, DynamicLevel]: + all_subclasses.append(subclass) all_subclasses.extend(get_all_subclasses(subclass)) return set(all_subclasses) @@ -81,7 +79,7 @@ def test_formatting(self): event = AdapterEventDebug(name="dbt_tests", base_msg=[1,2,3], args=(3,)) assert isinstance(event.base_msg, str) - event = MacroEventDebug(msg=[1,2,3]) + event = JinjaLogDebug(msg=[1,2,3]) assert isinstance(event.msg, str) @@ -93,41 +91,19 @@ def test_event_codes(self): all_concrete = get_all_subclasses(BaseEvent) all_codes = set() - for event in all_concrete: - if not inspect.isabstract(event): - # must be in the form 1 capital letter, 3 digits - assert re.match("^[A-Z][0-9]{3}", event.code) - # cannot have been used already - assert ( - event.info.code not in all_codes - ), f"{event.code} is assigned more than once. Check types.py for duplicates." 
- all_codes.add(event.info.code) - - -class TestEventBuffer: - def setUp(self) -> None: - flags.EVENT_BUFFER_SIZE = 10 - reload(event_funcs) - - # ensure events are populated to the buffer exactly once - def test_buffer_populates(self): - self.setUp() - event_funcs.fire_event(UnitTestInfo(msg="Test Event 1")) - event_funcs.fire_event(UnitTestInfo(msg="Test Event 2")) - event1 = event_funcs.EVENT_HISTORY[-2] - assert event_funcs.EVENT_HISTORY.count(event1) == 1 - - # ensure events drop from the front of the buffer when buffer maxsize is reached - def test_buffer_FIFOs(self): - reset_event_history() - event_funcs.EVENT_HISTORY.clear() - for n in range(1, (flags.EVENT_BUFFER_SIZE + 2)): - event_funcs.fire_event(UnitTestInfo(msg=f"Test Event {n}")) - assert event_funcs.EVENT_HISTORY.count(UnitTestInfo(msg="Test Event 1")) == 0 + for event_cls in all_concrete: + code = event_cls.code(event_cls) + # must be in the form 1 capital letter, 3 digits + assert re.match("^[A-Z][0-9]{3}", code) + # cannot have been used already + assert ( + code not in all_codes + ), f"{code} is assigned more than once. Check types.py for duplicates." 
+ all_codes.add(code) def MockNode(): - return ParsedModelNode( + return ModelNode( alias="model_one", name="model_one", database="dbt", @@ -164,56 +140,62 @@ def MockNode(): sample_values = [ - MainReportVersion(version="", log_version=LOG_VERSION), - MainKeyboardInterrupt(), - MainEncounteredError(exc=""), - MainStackTrace(stack_trace=""), + # A - pre-project loading + MainReportVersion(version=""), + MainReportArgs(args={}), MainTrackingUserState(user_state=""), - ParseCmdStart(), - ParseCmdCompiling(), - ParseCmdWritingManifest(), - ParseCmdDone(), - ManifestDependenciesLoaded(), - ManifestLoaderCreated(), - ManifestLoaded(), - ManifestChecked(), - ManifestFlatGraphBuilt(), - ParseCmdPerfInfoPath(path=""), - GitSparseCheckoutSubdirectory(subdir=""), - GitProgressCheckoutRevision(revision=""), - GitProgressUpdatingExistingDependency(dir=""), - GitProgressPullingNewDependency(dir=""), - GitNothingToDo(sha=""), - GitProgressUpdatedCheckoutRange(start_sha="", end_sha=""), - GitProgressCheckedOutAt(end_sha=""), - SystemErrorRetrievingModTime(path=""), - SystemCouldNotWrite(path="", reason="", exc=""), - SystemExecutingCmd(cmd=[""]), - SystemStdOutMsg(bmsg=b""), - SystemStdErrMsg(bmsg=b""), - SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""), - MacroEventInfo(msg=""), - MacroEventDebug(msg=""), + MergedFromState(num_merged=0, sample=[]), + MissingProfileTarget(profile_name="", target_name=""), + InvalidVarsYAML(), + DbtProjectError(), + DbtProjectErrorException(exc=""), + DbtProfileError(), + DbtProfileErrorException(exc=""), + ProfileListTitle(), + ListSingleProfile(profile=""), + NoDefinedProfiles(), + ProfileHelpMessage(), + StarterProjectPath(dir=""), + ConfigFolderDirectory(dir=""), + NoSampleProfileFound(adapter=""), + ProfileWrittenWithSample(name="", path=""), + ProfileWrittenWithTargetTemplateYAML(name="", path=""), + ProfileWrittenWithProjectTemplateYAML(name="", path=""), + SettingUpProfile(), + InvalidProfileTemplateYAML(), + 
ProjectNameAlreadyExists(name=""), + ProjectCreated(project_name=""), + + # D - Deprecations ====================== + PackageRedirectDeprecation(old_name="", new_name=""), + PackageInstallPathDeprecation(), + ConfigSourcePathDeprecation(deprecated_path="", exp_path=""), + ConfigDataPathDeprecation(deprecated_path="", exp_path=""), + AdapterDeprecationWarning(old_name="", new_name=""), + MetricAttributesRenamed(metric_name=""), + ExposureNameDeprecation(exposure=""), + + # E - DB Adapter ====================== + AdapterEventDebug(), + AdapterEventInfo(), + AdapterEventWarning(), + AdapterEventError(), NewConnection(conn_type="", conn_name=""), ConnectionReused(conn_name=""), - ConnectionLeftOpen(conn_name=""), - ConnectionClosed(conn_name=""), + ConnectionLeftOpenInCleanup(conn_name=""), + ConnectionClosedInCleanup(conn_name=""), RollbackFailed(conn_name=""), - ConnectionClosed2(conn_name=""), - ConnectionLeftOpen2(conn_name=""), + ConnectionClosed(conn_name=""), + ConnectionLeftOpen(conn_name=""), Rollback(conn_name=""), CacheMiss(conn_name="", database="", schema=""), - ListRelations(database="", schema="", relations=[]), + ListRelations(database="", schema=""), ConnectionUsed(conn_type="", conn_name=""), SQLQuery(conn_name="", sql=""), SQLQueryStatus(status="", elapsed=0.1), - CodeExecution(conn_name="", code_content=""), - CodeExecutionStatus(status="", elapsed=0.1), SQLCommit(conn_name=""), ColTypeChange( - orig_type="", - new_type="", - table=ReferenceKeyMsg(database="", schema="", identifier=""), + orig_type="", new_type="", table=ReferenceKeyMsg(database="", schema="", identifier="") ), SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")), @@ -231,6 +213,7 @@ def MockNode(): dropped=ReferenceKeyMsg(database="", schema="", identifier=""), consequences=[ReferenceKeyMsg(database="", schema="", identifier="")], ), + DropRelation(dropped=ReferenceKeyMsg()), 
UpdateReference( old_key=ReferenceKeyMsg(database="", schema="", identifier=""), new_key=ReferenceKeyMsg(database="", schema="", identifier=""), @@ -246,29 +229,49 @@ def MockNode(): DumpBeforeRenameSchema(dump=dict()), DumpAfterRenameSchema(dump=dict()), AdapterImportError(exc=""), - PluginLoadError(), - SystemReportReturnCode(returncode=0), + PluginLoadError(exc_info=""), NewConnectionOpening(connection_state=""), - TimingInfoCollected(), - MergedFromState(num_merged=0, sample=[]), - MissingProfileTarget(profile_name="", target_name=""), - InvalidVarsYAML(), + CodeExecution(conn_name="", code_content=""), + CodeExecutionStatus(status="", elapsed=0.1), + CatalogGenerationError(exc=""), + WriteCatalogFailure(num_exceptions=0), + CatalogWritten(path=""), + CannotGenerateDocs(), + BuildingCatalog(), + DatabaseErrorRunningHook(hook_type=""), + HooksRunning(num_hooks=0, hook_type=""), + HookFinished(stat_line="", execution="", execution_time=0), + + # I - Project parsing ====================== + ParseCmdStart(), + ParseCmdCompiling(), + ParseCmdWritingManifest(), + ParseCmdDone(), + ManifestDependenciesLoaded(), + ManifestLoaderCreated(), + ManifestLoaded(), + ManifestChecked(), + ManifestFlatGraphBuilt(), + ParseCmdPerfInfoPath(path=""), GenericTestFileParse(path=""), MacroFileParse(path=""), PartialParsingFullReparseBecauseOfError(), - PartialParsingFile(file_id=""), PartialParsingExceptionFile(file=""), + PartialParsingFile(file_id=""), PartialParsingException(exc_info={}), PartialParsingSkipParsing(), PartialParsingMacroChangeStartFullParse(), + PartialParsingProjectEnvVarsChanged(), + PartialParsingProfileEnvVarsChanged(), + PartialParsingDeletedMetric(unique_id=""), ManifestWrongMetadataVersion(version=""), PartialParsingVersionMismatch(saved_version="", current_version=""), PartialParsingFailedBecauseConfigChange(), PartialParsingFailedBecauseProfileChange(), PartialParsingFailedBecauseNewProjectDependency(), PartialParsingFailedBecauseHashChanged(), - 
PartialParsingDeletedMetric(unique_id=""), - ParsedFileLoadFailed(path="", exc=""), + PartialParsingNotEnabled(), + ParsedFileLoadFailed(path="", exc="", exc_info=""), PartialParseSaveFileNotFound(), StaticParserCausedJinjaRendering(path=""), UsingExperimentalParser(path=""), @@ -289,51 +292,179 @@ def MockNode(): PartialParsingUpdateSchemaFile(file_id=""), PartialParsingDeletedSource(unique_id=""), PartialParsingDeletedExposure(unique_id=""), - InvalidDisabledSourceInTestNode(msg=""), - InvalidRefInTestNode(msg=""), + InvalidDisabledTargetInTestNode( + resource_type_title="", + unique_id="", + original_file_path="", + target_kind="", + target_name="", + target_package="", + ), + UnusedResourceConfigPath(unused_config_paths=[]), + SeedIncreased(package_name="", name=""), + SeedExceedsLimitSamePath(package_name="", name=""), + SeedExceedsLimitAndPathChanged(package_name="", name=""), + SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name=""), + UnusedTables(unused_tables=[]), + WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type=""), + NoNodeForYamlKey(patch_name="", yaml_key="", file_path=""), + MacroPatchNotFound(patch_name=""), + NodeNotFoundOrDisabled( + original_file_path="", + unique_id="", + resource_type_title="", + target_name="", + target_kind="", + target_package="", + disabled="", + ), + JinjaLogWarning(), + + # M - Deps generation ====================== + + GitSparseCheckoutSubdirectory(subdir=""), + GitProgressCheckoutRevision(revision=""), + GitProgressUpdatingExistingDependency(dir=""), + GitProgressPullingNewDependency(dir=""), + GitNothingToDo(sha=""), + GitProgressUpdatedCheckoutRange(start_sha="", end_sha=""), + GitProgressCheckedOutAt(end_sha=""), + RegistryProgressGETRequest(url=""), + RegistryProgressGETResponse(url="", resp_code=1234), + SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""), + JinjaLogInfo(msg=""), + JinjaLogDebug(msg=""), + 
DepsNoPackagesFound(), + DepsStartPackageInstall(package_name=""), + DepsInstallInfo(version_name=""), + DepsUpdateAvailable(version_latest=""), + DepsUpToDate(), + DepsListSubdirectory(subdirectory=""), + DepsNotifyUpdatesAvailable(packages=ListOfStrings()), + RetryExternalCall(attempt=0, max=0), + RecordRetryException(exc=""), + RegistryIndexProgressGETRequest(url=""), + RegistryIndexProgressGETResponse(url="", resp_code=1234), + RegistryResponseUnexpectedType(response=""), + RegistryResponseMissingTopKeys(response=""), + RegistryResponseMissingNestedKeys(response=""), + RegistryResponseExtraNestedKeys(response=""), + DepsSetDownloadDirectory(path=""), + + # Q - Node execution ====================== + RunningOperationCaughtError(exc=""), + CompileComplete(), + FreshnessCheckComplete(), + SeedHeader(header=""), + SeedHeaderSeparator(len_header=0), + SQLRunnerException(exc=""), + LogTestResult( + name="", + index=0, + num_models=0, + execution_time=0, + num_failures=0, + ), + LogStartLine(description="", index=0, total=0, node_info=NodeInfo()), + LogModelResult( + description="", + status="", + index=0, + total=0, + execution_time=0, + ), + LogSnapshotResult( + status="", + description="", + cfg={}, + index=0, + total=0, + execution_time=0, + ), + LogSeedResult( + status="", + index=0, + total=0, + execution_time=0, + schema="", + relation="", + ), + LogFreshnessResult( + source_name="", + table_name="", + index=0, + total=0, + execution_time=0, + ), + LogCancelLine(conn_name=""), + DefaultSelector(name=""), + NodeStart(node_info=NodeInfo()), + NodeFinished(node_info=NodeInfo()), + QueryCancelationUnsupported(type=""), + ConcurrencyLine(num_threads=0, target_name=""), + WritingInjectedSQLForNode(node_info=NodeInfo()), + NodeCompiling(node_info=NodeInfo()), + NodeExecuting(node_info=NodeInfo()), + LogHookStartLine( + statement="", + index=0, + total=0, + ), + LogHookEndLine( + statement="", + status="", + index=0, + total=0, + execution_time=0, + ), + 
SkippingDetails( + resource_type="", + schema="", + node_name="", + index=0, + total=0, + ), + NothingToDo(), RunningOperationUncaughtError(exc=""), - DbtProjectError(), - DbtProjectErrorException(exc=""), - DbtProfileError(), - DbtProfileErrorException(exc=""), - ProfileListTitle(), - ListSingleProfile(profile=""), - NoDefinedProfiles(), - ProfileHelpMessage(), + EndRunResult(), + NoNodesSelected(), + DepsUnpinned(revision="", git=""), + NoNodesForSelectionCriteria(spec_raw=""), + + # W - Node testing ====================== + CatchableExceptionOnRun(exc=""), InternalExceptionOnRun(build_path="", exc=""), GenericExceptionOnRun(build_path="", unique_id="", exc=""), NodeConnectionReleaseError(node_name="", exc=""), + FoundStats(stat_line=""), + + # Z - misc ====================== + + MainKeyboardInterrupt(), + MainEncounteredError(exc=""), + MainStackTrace(stack_trace=""), + SystemErrorRetrievingModTime(path=""), + SystemCouldNotWrite(path="", reason="", exc=""), + SystemExecutingCmd(cmd=[""]), + SystemStdOutMsg(bmsg=b""), + SystemStdErrMsg(bmsg=b""), + SystemReportReturnCode(returncode=0), + TimingInfoCollected(), + LogDebugStackTrace(), CheckCleanPath(path=""), ConfirmCleanPath(path=""), ProtectedCleanPath(path=""), FinishedCleanPaths(), OpenCommand(open_cmd="", profiles_dir=""), - DepsNoPackagesFound(), - DepsStartPackageInstall(package_name=""), - DepsInstallInfo(version_name=""), - DepsUpdateAvailable(version_latest=""), - DepsListSubdirectory(subdirectory=""), - DepsNotifyUpdatesAvailable(packages=[]), - DatabaseErrorRunningHook(hook_type=""), EmptyLine(), - HooksRunning(num_hooks=0, hook_type=""), - HookFinished(stat_line="", execution="", execution_time=0), - WriteCatalogFailure(num_exceptions=0), - CatalogWritten(path=""), - CannotGenerateDocs(), - BuildingCatalog(), - CompileComplete(), - FreshnessCheckComplete(), ServingDocsPort(address="", port=0), ServingDocsAccessInfo(port=""), ServingDocsExitInfo(), - SeedHeader(header=""), - 
SeedHeaderSeparator(len_header=0), RunResultWarning(resource_type="", node_name="", path=""), RunResultFailure(resource_type="", node_name="", path=""), - StatsLine(stats={"pass": 0, "warn": 0, "error": 0, "skip": 0, "total": 0}), + StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0,"total": 0}), RunResultError(msg=""), RunResultErrorNoMessage(status=""), SQLCompiledPath(path=""), @@ -341,131 +472,39 @@ def MockNode(): FirstRunResultError(msg=""), AfterFirstRunResultError(msg=""), EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False), - PrintStartLine(description="", index=0, total=0, node_info=NodeInfo()), - PrintHookStartLine(statement="", index=0, total=0, node_info=NodeInfo()), - PrintHookEndLine( - statement="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - SkippingDetails( - resource_type="", schema="", node_name="", index=0, total=0, node_info=NodeInfo() - ), - PrintErrorTestResult(name="", index=0, num_models=0, execution_time=0, node_info=NodeInfo()), - PrintPassTestResult(name="", index=0, num_models=0, execution_time=0, node_info=NodeInfo()), - PrintWarnTestResult( - name="", index=0, num_models=0, execution_time=0, num_failures=0, node_info=NodeInfo() - ), - PrintFailureTestResult( - name="", index=0, num_models=0, execution_time=0, num_failures=0, node_info=NodeInfo() - ), - PrintSkipBecauseError(schema="", relation="", index=0, total=0), - PrintModelErrorResultLine( - description="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintModelResultLine( - description="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintSnapshotErrorResultLine( - status="", description="", cfg={}, index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintSnapshotResultLine( - status="", description="", cfg={}, index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintSeedErrorResultLine( - status="", index=0, total=0, execution_time=0, 
schema="", relation="", node_info=NodeInfo() - ), - PrintSeedResultLine( - status="", index=0, total=0, execution_time=0, schema="", relation="", node_info=NodeInfo() - ), - PrintFreshnessErrorLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessErrorStaleLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessWarnLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessPassLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintCancelLine(conn_name=""), - DefaultSelector(name=""), - NodeStart(unique_id="", node_info=NodeInfo()), - NodeCompiling(unique_id="", node_info=NodeInfo()), - NodeExecuting(unique_id="", node_info=NodeInfo()), - NodeFinished(unique_id="", node_info=NodeInfo(), run_result=RunResultMsg()), - QueryCancelationUnsupported(type=""), - ConcurrencyLine(num_threads=0, target_name=""), - StarterProjectPath(dir=""), - ConfigFolderDirectory(dir=""), - NoSampleProfileFound(adapter=""), - ProfileWrittenWithSample(name="", path=""), - ProfileWrittenWithTargetTemplateYAML(name="", path=""), - ProfileWrittenWithProjectTemplateYAML(name="", path=""), - SettingUpProfile(), - InvalidProfileTemplateYAML(), - ProjectNameAlreadyExists(name=""), - ProjectCreated(project_name="", docs_url="", slack_url=""), - DepsSetDownloadDirectory(path=""), + LogSkipBecauseError(schema="", relation="", index=0, total=0), EnsureGitInstalled(), DepsCreatingLocalSymlink(), DepsSymlinkNotAvailable(), - FoundStats(stat_line=""), - CompilingNode(unique_id=""), - WritingInjectedSQLForNode(unique_id=""), DisableTracking(), SendingEvent(kwargs=""), SendEventFailure(), FlushEvents(), FlushEventsFailure(), TrackingInitializeFailure(), - RetryExternalCall(attempt=0, max=0), - GeneralWarningMsg(msg="", log_fmt=""), - GeneralWarningException(exc="", 
log_fmt=""), - PartialParsingProfileEnvVarsChanged(), - AdapterEventDebug(name="", base_msg="", args=()), - AdapterEventInfo(name="", base_msg="", args=()), - AdapterEventWarning(name="", base_msg="", args=()), - AdapterEventError(name="", base_msg="", args=()), - PrintDebugStackTrace(), - MainReportArgs(args={}), - RegistryProgressGETRequest(url=""), - RegistryIndexProgressGETRequest(url=""), - RegistryIndexProgressGETResponse(url="", resp_code=1), - RegistryResponseUnexpectedType(response=""), - RegistryResponseMissingTopKeys(response=""), - RegistryResponseMissingNestedKeys(response=""), - RegistryResponseExtraNestedKeys(response=""), - DepsUpToDate(), - PartialParsingNotEnabled(), - SQLRunnerException(exc=""), - DropRelation(dropped=ReferenceKeyMsg(database="", schema="", identifier="")), - PartialParsingProjectEnvVarsChanged(), - RegistryProgressGETResponse(url="", resp_code=1), - IntegrationTestDebug(msg=""), - IntegrationTestInfo(msg=""), - IntegrationTestWarn(msg=""), - IntegrationTestError(msg=""), - IntegrationTestException(msg=""), - EventBufferFull(), - RecordRetryException(exc=""), - UnitTestInfo(msg=""), + RunResultWarningMessage(), + + # T - tests ====================== + IntegrationTestInfo(), + IntegrationTestDebug(), + IntegrationTestWarn(), + IntegrationTestError(), + IntegrationTestException(), + UnitTestInfo(), + ] + + class TestEventJSONSerialization: # attempts to test that every event is serializable to json. # event types that take `Any` are not possible to test in this way since some will serialize # just fine and others won't. 
def test_all_serializable(self): - no_test = [DummyCacheEvent] - all_non_abstract_events = set( - filter( - lambda x: not inspect.isabstract(x) and x not in no_test, - get_all_subclasses(BaseEvent), - ) + get_all_subclasses(BaseEvent), ) all_event_values_list = list(map(lambda x: x.__class__, sample_values)) diff = all_non_abstract_events.difference(set(all_event_values_list)) @@ -479,7 +518,7 @@ def test_all_serializable(self): # if we have everything we need to test, try to serialize everything for event in sample_values: - event_dict = event.to_dict() + event_dict = event_to_dict(event) try: event_json = event_to_json(event) except Exception as e: @@ -487,30 +526,3 @@ def test_all_serializable(self): T = TypeVar("T") - - -@dataclass -class Counter(Generic[T], SerializableType): - dummy_val: T - count: int = 0 - - def next(self) -> T: - self.count = self.count + 1 - return self.dummy_val - - # mashumaro serializer - def _serialize() -> Dict[str, int]: - return {"count": count} - - -@dataclass -class DummyCacheEvent(InfoLevel, Cache, SerializableType): - code = "X999" - counter: Counter - - def message(self) -> str: - return f"state: {self.counter.next()}" - - # mashumaro serializer - def _serialize() -> str: - return "DummyCacheEvent" diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py index 46e9479ef39..d5b070c41e2 100644 --- a/tests/unit/test_proto_events.py +++ b/tests/unit/test_proto_events.py @@ -5,14 +5,15 @@ RollbackFailed, MainEncounteredError, PluginLoadError, - PrintStartLine, + LogStartLine, + LogTestResult, ) -from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars +from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars, info from dbt.events import proto_types as pl from dbt.version import installed -info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", "ts", "extra"} +info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", 
"ts", "extra", "category"} def test_events(): @@ -89,7 +90,7 @@ def test_node_info_events(): "node_started_at": "some_time", "node_finished_at": "another_time", } - event = PrintStartLine( + event = LogStartLine( description="some description", index=123, total=111, @@ -121,3 +122,16 @@ def test_extra_dict_on_event(monkeypatch): # clean up reset_metadata_vars() + + +def test_dynamic_level_events(): + event = LogTestResult( + name="model_name", + info=info(level=LogTestResult.status_to_level("pass")), + status="pass", + index=1, + num_models=3, + num_failures=0 + ) + assert event + assert event.info.level == "info" diff --git a/tests/unit/test_version.py b/tests/unit/test_version.py index 6545891fc54..217988ba5e2 100644 --- a/tests/unit/test_version.py +++ b/tests/unit/test_version.py @@ -673,10 +673,16 @@ def mock_import(*args, **kwargs): def mock_versions(mocker, installed="1.0.0", latest=None, plugins={}): mocker.patch("dbt.version.__version__", installed) - mock_plugins(mocker, plugins) mock_latest_versions(mocker, latest, plugins) + # mock_plugins must be called last to avoid erronously raising an ImportError. + mock_plugins(mocker, plugins) +# NOTE: mock_plugins patches importlib.import_module, and should always be the last +# patch to be mocked in order to avoid erronously raising an ImportError. +# Explanation: As of Python 3.11, mock.patch indirectly uses importlib.import_module +# and thus uses the mocked object (in this case, mock_import) instead of the real +# implementation in subsequent mock.patch calls. 
Issue: https://github.com/python/cpython/issues/98771 def mock_plugins(mocker, plugins): mock_find_spec = mocker.patch("importlib.util.find_spec") path = "/tmp/dbt/adapters" diff --git a/tox.ini b/tox.ini index c77b9f92272..53187161c7f 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,7 @@ skipsdist = True envlist = unit,integration -[testenv:{unit,py37,py38,py39,py310,py}] +[testenv:{unit,py37,py38,py39,py310,py311,py}] description = unit testing download = true skip_install = true @@ -16,8 +16,8 @@ deps = -rdev-requirements.txt -reditable-requirements.txt -[testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py-integration}] -description = adapter plugin integration testing +[testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py311-integration,py-integration}] +description = functional testing download = true skip_install = true passenv = @@ -25,10 +25,9 @@ passenv = POSTGRES_TEST_* PYTEST_ADDOPTS commands = + {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration {envpython} -m pytest --cov=core {posargs} tests/functional {envpython} -m pytest --cov=core {posargs} tests/adapter - {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration - deps = -rdev-requirements.txt