From 78b089e2e52d344e64731f876712ccc4cf3292fd Mon Sep 17 00:00:00 2001
From: ciklista <29396800+ciklista@users.noreply.github.com>
Date: Thu, 20 Jul 2023 08:06:09 +0200
Subject: [PATCH] dbt-core v1.5.x support

---
 .github/workflows/test.yml                    |  24 +-
 .gitignore                                    |   5 +-
 dbt.duckdb                                    | Bin 0 -> 2371584 bytes
 dbt_invoke/internal/_utils.py                 | 178 ++++++------
 dbt_invoke/internal/_version.py               |   2 +-
 dbt_invoke/properties.py                      | 270 +++++++++---------
 requirements/requirements_dbt_0.18.x.txt      |  11 -
 requirements/requirements_dbt_0.19.x.txt      |   9 -
 requirements/requirements_dbt_1.1.x.txt       |   8 -
 ...t_1.4.x.txt => requirements_dbt_1.5.x.txt} |   4 +-
 setup.py                                      |  24 +-
 tests/data_files/customers.parquet            | Bin 0 -> 320 bytes
 tests/data_files/items.parquet                | Bin 0 -> 410 bytes
 tests/data_files/orders.parquet               | Bin 0 -> 700 bytes
 tests/dbt.duckdb                              | Bin 0 -> 2371584 bytes
 tests/dbt_project_files/dbt_project.yml       |   2 +-
 tests/test.py                                 |  66 +++--
 tests/test_config.yml                         |   4 +
 tests/test_dbt_project/dbt_project.yml        |   2 +-
 .../models/marts/core/customers.sql           |   2 +-
 .../models/marts/core/orders.sql              |   2 +-
 tests/test_dbt_project/models/sources.yml     |   8 +
 tests/test_dbt_project/profiles.yml           |  30 +-
 .../snapshots/items_snapshot.sql              |   2 +-
 tests/test_dbt_project/test.db                | Bin 28672 -> 0 bytes
 tests/test_properties.py                      | 120 ++++----
 tests/test_utils.py                           |  13 +-
 27 files changed, 370 insertions(+), 416 deletions(-)
 create mode 100644 dbt.duckdb
 delete mode 100644 requirements/requirements_dbt_0.18.x.txt
 delete mode 100644 requirements/requirements_dbt_0.19.x.txt
 delete mode 100644 requirements/requirements_dbt_1.1.x.txt
 rename requirements/{requirements_dbt_1.4.x.txt => requirements_dbt_1.5.x.txt} (67%)
 create mode 100644 tests/data_files/customers.parquet
 create mode 100644 tests/data_files/items.parquet
 create mode 100644 tests/data_files/orders.parquet
 create mode 100644 tests/dbt.duckdb
 create mode 100644 tests/test_dbt_project/models/sources.yml
 delete mode 100644 tests/test_dbt_project/test.db

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index d017675..c5fe28d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -30,17 +30,9 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest, macos-latest, windows-latest]
-        python-version: ['3.7', '3.10']
-        dbt-version: [0.18.x, 0.19.x, 1.1.x, 1.4.x]
-        exclude:
-          - python-version: '3.10'
-            dbt-version: 0.18.x
-          - python-version: '3.10'
-            dbt-version: 0.19.x
-          - python-version: '3.7'
-            dbt-version: 1.1.x
-          - python-version: '3.7'
-            dbt-version: 1.4.x
+        python-version: ['3.8', '3.10']
+        dbt-version: [1.5.x]
+
     steps:
       - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
@@ -51,17 +43,7 @@ jobs:
         run: |
          python -m pip install --upgrade pip
          pip install -r requirements/requirements_dbt_${{ matrix.dbt-version }}.txt
-      # There is more than one target in profiles.yml because
-      # dbt-sqlite for dbt~=0.18.x uses a different format than other
-      # versions.
-      - name: Run tests for dbt~=0.18.x
-        if: ${{ matrix.dbt-version == '0.18.x' }}
-        env:
-          TARGET: ${{ matrix.dbt-version }}
-        run: |
-          python tests/test.py
       - name: Run tests for other dbt versions
-        if: ${{ matrix.dbt-version != '0.18.x' }}
         env:
           TARGET: default
         run: |
diff --git a/.gitignore b/.gitignore
index 5391d87..c1859fe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -135,4 +135,7 @@ dmypy.json
 .pytype/
 
 # Cython debug symbols
-cython_debug/
\ No newline at end of file
+cython_debug/
+
+# testing stuff
+dbt_invoke/test_run.py
\ No newline at end of file
diff --git a/dbt.duckdb b/dbt.duckdb
new file mode 100644
index 0000000000000000000000000000000000000000..293b5da29a0251a75de7862d9cee8084c785ccda
GIT binary patch
literal 2371584
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/dbt_invoke/internal/_utils.py b/dbt_invoke/internal/_utils.py
index 05178e9..1a322eb 100644
--- a/dbt_invoke/internal/_utils.py
+++ b/dbt_invoke/internal/_utils.py
@@ -9,17 +9,17 @@
 try:
     from importlib.metadata import version
 
-    DBT_VERSION = version('dbt-core')
+    DBT_VERSION = version("dbt-core")
 except ImportError:
     import pkg_resources
 
-    DBT_VERSION = pkg_resources.get_distribution('dbt-core').version
+    DBT_VERSION = pkg_resources.get_distribution("dbt-core").version
 
 from ruamel.yaml import YAML, YAMLError
 
 MACROS = {
-    '_log_columns_list': (
+    "_log_columns_list": (
         "\n{# This macro is intended for use by dbt-invoke #}"
         "\n{% macro _log_columns_list(sql=none, resource_name=none) %}"
         "\n    {% if sql is none %}"
@@ -32,28 +32,28 @@
     )
 }
 DBT_GLOBAL_ARGS = {
-    'log-format': 'json',
+    "log-format": "json",
 }
 DBT_LS_ARG_HELP = (
     'An argument for listing dbt resources (run "dbt ls --help" for details)'
 )
 DBT_LS_ARGS = {
-    'resource_type': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},
-    'select': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},
-    'models': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},
-    'exclude': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},
-    'selector': {'help': DBT_LS_ARG_HELP, 'resource_selector': True},
-    'project_dir': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},
-    'profiles_dir': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},
-    'profile': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},
-    'target': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},
-    'vars': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},
-    'bypass_cache': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},
-    'state': {'help': DBT_LS_ARG_HELP, 'resource_selector': False},
+    "resource_type": {"help": DBT_LS_ARG_HELP, "resource_selector": True},
+    "select": {"help": DBT_LS_ARG_HELP, "resource_selector": True},
+    "models": {"help": DBT_LS_ARG_HELP, "resource_selector": True},
+    "exclude": {"help": DBT_LS_ARG_HELP, "resource_selector": True},
+    "selector": {"help": DBT_LS_ARG_HELP, "resource_selector": True},
+    "project_dir": {"help": DBT_LS_ARG_HELP, "resource_selector": False},
+    "profiles_dir": {"help": DBT_LS_ARG_HELP, "resource_selector": False},
+    "profile": {"help": DBT_LS_ARG_HELP, "resource_selector": False},
+    "target": {"help": DBT_LS_ARG_HELP, "resource_selector": False},
+    "vars": {"help": DBT_LS_ARG_HELP, "resource_selector": False},
+    "bypass_cache": {"help": DBT_LS_ARG_HELP, "resource_selector": False},
+    "state": {"help": DBT_LS_ARG_HELP, "resource_selector": False},
 }
 
 
-def get_logger(name, level='INFO'):
+def get_logger(name, level="INFO"):
     """
     Create a logger
 
@@ -67,7 +67,7 @@ def get_logger(name, level='INFO'):
     logger.handlers.clear()
     handler = logging.StreamHandler(stream=sys.stdout)
     formatter = logging.Formatter(
-        '{name} | {levelname:^8} | {message}', style='{'
+        "{name} | {levelname:^8} | {message}", style="{"
     )
     handler.setFormatter(formatter)
     logger.addHandler(handler)
@@ -84,7 +84,7 @@ def parse_yaml(location):
     """
     yaml = YAML(typ="rt")
     yaml.preserve_quotes = True
-    with open(location, 'r') as stream:
+    with open(location, "r") as stream:
         try:
             parsed_yaml = yaml.load(stream)
             return parsed_yaml
@@ -92,7 +92,7 @@ def parse_yaml(location):
             sys.exit(exc)
 
 
-def write_yaml(location, data, mode='w'):
+def write_yaml(location, data, mode="w"):
     """
     Write a yaml file
 
@@ -120,34 +120,31 @@ def get_project_info(ctx, project_dir=None):
     :return: None
     """
     project = Project(project_dir)
-    if DBT_VERSION < '1.5.0':
-        project_path = get_nearest_project_dir(project)
-    else:
-        project_path = get_nearest_project_dir(project.project_dir)
-    project_yml_path = Path(project_path, 'dbt_project.yml')
+    project_path = get_nearest_project_dir(project.project_dir)
+    project_yml_path = Path(project_path, "dbt_project.yml")
     # Get project configuration values from dbt_project.yml
     # (or use dbt defaults)
     project_yml = parse_yaml(project_yml_path)
-    project_name = project_yml.get('name')
-    target_path = Path(project_path, project_yml.get('target-path', 'target'))
-    compiled_path = Path(target_path, 'compiled', project_name)
+    project_name = project_yml.get("name")
+    target_path = Path(project_path, project_yml.get("target-path", "target"))
+    compiled_path = Path(target_path, "compiled", project_name)
     macro_paths = [
         Path(project_path, macro_path)
-        for macro_path in project_yml.get('macro-paths', ['macros'])
+        for macro_path in project_yml.get("macro-paths", ["macros"])
     ]
     # Set context config key-value pairs
-    ctx.config['project_path'] = project_path
-    ctx.config['project_name'] = project_name
-    ctx.config['target_path'] = target_path
-    ctx.config['compiled_path'] = compiled_path
-    ctx.config['macro_paths'] = macro_paths
+    ctx.config["project_path"] = project_path
+    ctx.config["project_name"] = project_name
+    ctx.config["target_path"] = target_path
+    ctx.config["compiled_path"] = compiled_path
+    ctx.config["macro_paths"] = macro_paths
 
 
 def dbt_ls(
     ctx,
     supported_resource_types=None,
     hide=True,
-    output='json',
+    output="json",
     logger=None,
     **kwargs,
 ):
@@ -167,11 +164,11 @@
     :return: A list of lines from stdout
     """
     if not logger:
-        logger = get_logger('')
+        logger = get_logger("")
     resource_selection_arguments = {
         arg: kwargs.get(arg)
         for arg, details in DBT_LS_ARGS.items()
-        if details['resource_selector']
+        if details["resource_selector"]
     }
     # Use default arguments if no resource selection arguments are given
     default_arguments = list()
@@ -179,16 +176,16 @@
         default_arguments.append(f'--select {ctx.config["project_name"]}')
     # Use all supported_resource_types unless a resource_type or models
     # kwarg is given
-    if not kwargs.get('resource_type') and not kwargs.get('models'):
+    if not kwargs.get("resource_type") and not kwargs.get("models"):
         if supported_resource_types:
             for rt in supported_resource_types:
-                default_arguments.append(f'{get_cli_kwargs(resource_type=rt)}')
-    default_arguments = ' '.join(default_arguments)
+                default_arguments.append(f"{get_cli_kwargs(resource_type=rt)}")
+    default_arguments = " ".join(default_arguments)
     arguments = get_cli_kwargs(**kwargs)
-    dbt_command_cli_args = f'{default_arguments} {arguments} --output {output}'
+    dbt_command_cli_args = f"{default_arguments} {arguments} --output {output}"
     dbt_global_cli_args = get_cli_kwargs(**DBT_GLOBAL_ARGS)
     command = f"dbt {dbt_global_cli_args} ls {dbt_command_cli_args}"
-    logger.debug(f'Running command: {command}')
+    logger.debug(f"Running command: {command}")
     result = ctx.run(command, hide=hide)
     result_stdout = escape_ansi(result.stdout)
     result_lines = result_stdout.splitlines()
@@ -197,19 +194,14 @@
         # Because we set the dbt global arg "--log-format json", if
         # line is valid json then it may be an actual result or it
         # may be some other output from dbt, like a warning.
-        try:
-            line_dict = json.loads(line)
-        # If line is not valid json, then it should be an actual
-        # result. This is because even when the "dbt ls" command
-        # arg "--output" is not set to json, non-result logs will
-        # still be in json format (due to the dbt global arg
-        # "--log-format json").
-        except ValueError:
-            result_lines_filtered.append(line)
+        data = json.loads(line).get("data")
+        if data and "msg" in data:
+            line_dict = json.loads(data["msg"])
+        else:
             continue
         # If 'resource_type' is in line_dict, then this is likely
         # an actual result and not something else like a warning.
-        if 'resource_type' in line_dict:
+        if "resource_type" in line_dict:
             result_lines_filtered.append(line_dict)
         # Else, if 'resource_type' is not in line_dict, this may be
        # a warning from dbt, so log it.
@@ -225,7 +217,7 @@ def get_cli_kwargs(**kwargs):
     :param kwargs: Keyword arguments
     :return: CLI keyword arguments
     """
-    return ' '.join(
+    return " ".join(
        [
            f'--{k.replace("_", "-")} {str(v).replace(",", " ")}'
            for k, v in kwargs.items()
@@ -271,24 +263,24 @@ def dbt_run_operation(
     :return: stdout in list where each item is one line of output
     """
     if not logger:
-        logger = get_logger('')
+        logger = get_logger("")
     dbt_command_args = {
-        'project_dir': project_dir or ctx.config['project_path'],
-        'profiles_dir': profiles_dir,
-        'profile': profile,
-        'target': target,
-        'vars': vars,
-        'bypass_cache': bypass_cache,
+        "project_dir": project_dir or ctx.config["project_path"],
+        "profiles_dir": profiles_dir,
+        "profile": profile,
+        "target": target,
+        "vars": vars,
+        "bypass_cache": bypass_cache,
     }
     dbt_command_cli_args = get_cli_kwargs(**dbt_command_args)
     dbt_global_cli_args = get_cli_kwargs(**DBT_GLOBAL_ARGS)
     macro_kwargs = json.dumps(kwargs, sort_keys=False)
-    if platform.system().lower().startswith('win'):
+    if platform.system().lower().startswith("win"):
         # Format YAML string for Windows Command Prompt
         macro_kwargs = macro_kwargs.replace('"', '\\"')
         macro_kwargs = macro_kwargs.replace('\\\\"', '"')
-        macro_kwargs = macro_kwargs.replace('>', '^>')
-        macro_kwargs = macro_kwargs.replace('<', '^<')
+        macro_kwargs = macro_kwargs.replace(">", "^>")
+        macro_kwargs = macro_kwargs.replace("<", "^<")
         macro_kwargs = f'"{macro_kwargs}"'
     else:
         # Format YAML string for Mac/Linux (bash)
@@ -298,7 +290,7 @@ def dbt_run_operation(
         f"dbt {dbt_global_cli_args} run-operation {dbt_command_cli_args}"
         f" {macro_name} --args {macro_kwargs}"
     )
-    logger.debug(f'Running command: {command}')
+    logger.debug(f"Running command: {command}")
     result = ctx.run(command, hide=hide)
     result_stdout = escape_ansi(result.stdout)
     result_lines = [json.loads(data) for data in result_stdout.splitlines()]
@@ -326,20 +318,20 @@ def macro_exists(ctx, macro_name, logger=None, **kwargs):
     :return: True if the macro exists, else False
     """
     if not logger:
-        logger = get_logger('')
+        logger = get_logger("")
     try:
         dbt_run_operation(
             ctx,
             macro_name,
             logger=logger,
-            sql=f'SELECT 1 AS __dbt_invoke_check_macro_{macro_name} LIMIT 0',
+            sql=f"SELECT 1 AS __dbt_invoke_check_macro_{macro_name} LIMIT 0",
             **kwargs,
         )
     except Exception as exc:
         if all(
             [
                 s in str(exc).lower()
-                for s in ['runtime error', 'not', 'find', macro_name]
+                for s in ["runtime error", "not", "find", macro_name]
             ]
         ):
             return False
@@ -358,60 +350,60 @@ def add_macro(ctx, macro_name, logger=None):
     :return: None
     """
     if not logger:
-        logger = get_logger('')
-    location = Path(ctx.config['macro_paths'][0], f'{macro_name}.sql')
+        logger = get_logger("")
+    location = Path(ctx.config["macro_paths"][0], f"{macro_name}.sql")
     logger.warning(
-        f'This command requires the following macro:'
-        f'\n{get_macro(macro_name)}'
+        f"This command requires the following macro:"
+        f"\n{get_macro(macro_name)}"
     )
     question = (
         f'Would you like to add the macro "{macro_name}"'
-        f' to the following location?:\n{location}'
+        f" to the following location?:\n{location}"
    )
     prompt = (
         'Please enter "y" to confirm macro addition,'
         ' "n" to abort,'
         ' or "a" to provide an alternate location.'
     )
-    add_confirmation = input(f'{question}\n{prompt}\n')
-    while add_confirmation.lower() not in ['y', 'n', 'a']:
-        add_confirmation = input(f'{prompt}\n')
-    if add_confirmation.lower() == 'n':
-        logger.info('Macro addition aborted.')
+    add_confirmation = input(f"{question}\n{prompt}\n")
+    while add_confirmation.lower() not in ["y", "n", "a"]:
+        add_confirmation = input(f"{prompt}\n")
+    if add_confirmation.lower() == "n":
+        logger.info("Macro addition aborted.")
         sys.exit()
-    elif add_confirmation.lower() == 'a':
+    elif add_confirmation.lower() == "a":
         alternate_prompt = (
             'Please enter a path (ending in ".sql")'
-            ' to a new or existing macro file'
-            ' in one of your existing dbt macro-paths.\n'
+            " to a new or existing macro file"
+            " in one of your existing dbt macro-paths.\n"
         )
         location = Path(input(alternate_prompt))
         absolute_macro_paths = [
-            mp.resolve() for mp in ctx.config['macro_paths']
+            mp.resolve() for mp in ctx.config["macro_paths"]
         ]
         while (
             location.parent.resolve() not in absolute_macro_paths
-            or location.suffix.lower() != '.sql'
+            or location.suffix.lower() != ".sql"
         ):
             if location.parent.resolve() not in absolute_macro_paths:
                 not_a_macro_path = (
-                    f'{location.parent.resolve()}'
-                    f' is not an existing macro path.'
+                    f"{location.parent.resolve()}"
+                    f" is not an existing macro path."
                 )
-                existing_macro_paths_are = 'Your existing macro paths are:'
+                existing_macro_paths_are = "Your existing macro paths are:"
                 existing_macro_paths = "\n".join(
                     [str(mp) for mp in absolute_macro_paths]
                 )
                 logger.warning(
-                    f'{not_a_macro_path}'
-                    f'\n{existing_macro_paths_are}'
-                    f'\n{existing_macro_paths}'
+                    f"{not_a_macro_path}"
+                    f"\n{existing_macro_paths_are}"
+                    f"\n{existing_macro_paths}"
                 )
-            if location.suffix.lower() != '.sql':
+            if location.suffix.lower() != ".sql":
                 logger.warning('File suffix must be ".sql".')
             location = Path(input(alternate_prompt))
-    with location.open('a') as f:
-        f.write(f'{get_macro(macro_name)}')
+    with location.open("a") as f:
+        f.write(f"{get_macro(macro_name)}")
     logger.info(f'Macro "{macro_name}" added to {location.resolve()}')
 
 
@@ -419,8 +411,8 @@ def escape_ansi(line):
     # Windows can sometime emit Control Sequences in command line outputs
     # (see https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences)
     # The regex filters those out (see https://stackoverflow.com/a/14693789/15202709)
-    ansi_escape = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
-    return ansi_escape.sub('', line)
+    ansi_escape = re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]")
+    return ansi_escape.sub("", line)
 
 
 class Project:
diff --git a/dbt_invoke/internal/_version.py b/dbt_invoke/internal/_version.py
index d93b5b2..493f741 100644
--- a/dbt_invoke/internal/_version.py
+++ b/dbt_invoke/internal/_version.py
@@ -1 +1 @@
-__version__ = '0.2.3'
+__version__ = "0.3.0"
diff --git a/dbt_invoke/properties.py b/dbt_invoke/properties.py
index d8440c8..443140e 100644
--- a/dbt_invoke/properties.py
+++ b/dbt_invoke/properties.py
@@ -10,21 +10,32 @@
 
 from dbt_invoke.internal import _utils
 
-_LOGGER = _utils.get_logger('dbt-invoke')
-_MACRO_NAME = '_log_columns_list'
+try:
+    from importlib.metadata import version
+
+    DBT_VERSION = version("dbt-core")
+
+except ImportError:
+    import pkg_resources
+
+    DBT_VERSION = pkg_resources.get_distribution("dbt-core").version
+
+
+_LOGGER = _utils.get_logger("dbt-invoke")
+_MACRO_NAME = "_log_columns_list"
 _SUPPORTED_RESOURCE_TYPES = {
-    'model': 'models',
-    'seed': 'seeds',
-    'snapshot': 'snapshots',
-    'analysis': 'analyses',
+    "model": "models",
+    "seed": "seeds",
+    "snapshot": "snapshots",
+    "analysis": "analyses",
 }
 _PROGRESS_PADDING = 9  # Character padding to align progress logs
 _update_and_delete_help = {
-    arg.replace('_', '-'): details['help']
+    arg.replace("_", "-"): details["help"]
     for arg, details in _utils.DBT_LS_ARGS.items()
 }
-_update_and_delete_help['log-level'] = (
+_update_and_delete_help["log-level"] = (
     "One of Python's standard logging levels"
     " (DEBUG, INFO, WARNING, ERROR, CRITICAL)"
 )
@@ -34,7 +45,7 @@
     default=True,
     help={
         **_update_and_delete_help,
-        'threads': (
+        "threads": (
             "Maximum number of concurrent threads to use in"
             " collecting resources' column information from the data warehouse"
             " and in creating/updating the corresponding property files. Each"
@@ -203,8 +214,8 @@ def echo_macro(ctx):
     :return: None
     """
     _LOGGER.info(
-        f'Copy and paste the following macro into your dbt project:'
-        f'\n{_utils.get_macro(_MACRO_NAME)}'
+        f"Copy and paste the following macro into your dbt project:"
+        f"\n{_utils.get_macro(_MACRO_NAME)}"
     )
 
 
@@ -217,9 +228,9 @@ def _read_manifest(target_path):
     https://docs.getdbt.com/reference/artifacts/manifest-json
     """
     with open(
-        Path(target_path, 'manifest').with_suffix('.json'),
+        Path(target_path, "manifest").with_suffix(".json"),
         "r",
-        encoding='utf-8',
+        encoding="utf-8",
     ) as manifest_json:
         return json.loads(manifest_json.read())
 
@@ -300,7 +311,7 @@ def migrate(
         log_level=log_level,
     )
     # Parse nodes from the manifest file
-    nodes = _read_manifest(ctx['target_path'])['nodes']
+    nodes = _read_manifest(ctx["target_path"])["nodes"]
     # Create a migration_map dict to keep track of existing property
     # files from which properties for one or more resources will be
     # migrated. Structure of migration_map:
@@ -334,29 +345,29 @@ def migrate(
         # node comes from the correct dbt project
         # (transformed_ls_results should already only contain nodes from
         # the correct dbt project).
-        if metadata['original_file_path'] not in transformed_ls_results:
+        if metadata["original_file_path"] not in transformed_ls_results:
             continue
         # Skip if node is not present in any existing property file
-        elif not metadata.get('patch_path'):
+        elif not metadata.get("patch_path"):
             continue
         existing_property_path = Path(
-            ctx.config['project_path'],
-            metadata.get('patch_path').split('//')[-1],
+            ctx.config["project_path"],
+            metadata.get("patch_path").split("//")[-1],
         )
         resource_path = Path(
-            ctx.config['project_path'],
-            metadata['original_file_path'],
+            ctx.config["project_path"],
+            metadata["original_file_path"],
         )
         # Add data for to-be-created property files to the migration_map
         migration_map[existing_property_path].append(
             {
-                'name': metadata['name'],
-                'resource_type': metadata['resource_type'],
-                'resource_type_plural': _SUPPORTED_RESOURCE_TYPES.get(
-                    metadata['resource_type']
+                "name": metadata["name"],
+                "resource_type": metadata["resource_type"],
+                "resource_type_plural": _SUPPORTED_RESOURCE_TYPES.get(
+                    metadata["resource_type"]
                 ),
-                'resource_path': resource_path,
-                'property_path': resource_path.with_suffix('.yml'),
+                "resource_path": resource_path,
+                "property_path": resource_path.with_suffix(".yml"),
             }
         )
     # Loop through the migration_map to perform the migration
@@ -366,7 +377,7 @@ def migrate(
         # Create a set of the resource types for which at least one
         # resource will be migrated from the existing property file
         relevant_resource_types_plural = set(
-            [resource['resource_type_plural'] for resource in resource_list]
+            [resource["resource_type_plural"] for resource in resource_list]
         )
         # Read each relevant property file once to keep things speedy
         existing_property_file_dict = _utils.parse_yaml(existing_property_path)
@@ -374,10 +385,10 @@ def migrate(
         # the properties and index position of all resources within the
         # existing property file
         existing_properties = {
-            properties['name']: {
-                'resource_type_plural': k,
-                'index': i,
-                'properties': properties,
+            properties["name"]: {
+                "resource_type_plural": k,
+                "index": i,
+                "properties": properties,
             }
             for k, v in existing_property_file_dict.items()
             if k in relevant_resource_types_plural
@@ -388,7 +399,7 @@ def migrate(
         # destination
         for resource in resource_list:
             # Skip if the properties are already in the correct location
-            if existing_property_path == resource['property_path']:
+            if existing_property_path == resource["property_path"]:
                 continue
             _LOGGER.info(
                 f"""Moving "{resource['name']}" definition from"""
@@ -400,17 +411,17 @@ def migrate(
             # property file.
             try:
                 property_file_dict = _get_property_header(
-                    resource['name'],
-                    resource['resource_type'],
-                    existing_properties[resource['name']]['properties'],
+                    resource["name"],
+                    resource["resource_type"],
+                    existing_properties[resource["name"]]["properties"],
                 )
                 _utils.write_yaml(
-                    resource['property_path'],
+                    resource["property_path"],
                     property_file_dict,
-                    mode='x',
+                    mode="x",
                 )
-                indices_to_remove[resource['resource_type_plural']].append(
-                    existing_properties[resource['name']]['index']
+                indices_to_remove[resource["resource_type_plural"]].append(
+                    existing_properties[resource["name"]]["index"]
                 )
                 _LOGGER.info(f"Created {resource['property_path']}")
             except Exception:
@@ -435,8 +446,8 @@ def migrate(
                 existing_property_file_dict,
             )
             _LOGGER.info(
-                f'Removed {str(removed_counter)} migrated resources from'
-                f' {str(existing_property_path.resolve())}'
+                f"Removed {str(removed_counter)} migrated resources from"
+                f" {str(existing_property_path.resolve())}"
             )
         except Exception:
             _LOGGER.exception(
@@ -453,7 +464,7 @@ def migrate(
             _LOGGER.exception(
                 f"Failed to delete {str(existing_property_path.resolve())}"
             )
-    _LOGGER.info('Migration successful')
+    _LOGGER.info("Migration successful")
 
 
 def _initiate_alterations(ctx, **kwargs):
@@ -468,30 +479,30 @@ def _initiate_alterations(ctx, **kwargs):
         commands
         2. The transformed results of the "dbt ls" command
     """
-    if kwargs.get('log_level'):
-        _LOGGER.setLevel(kwargs.get('log_level').upper())
-    resource_type = kwargs.get('resource_type')
+    if kwargs.get("log_level"):
+        _LOGGER.setLevel(kwargs.get("log_level").upper())
+    resource_type = kwargs.get("resource_type")
     _assert_supported_resource_type(resource_type)
-    project_dir = kwargs.get('project_dir')
+    project_dir = kwargs.get("project_dir")
     _utils.get_project_info(ctx, project_dir=project_dir)
     common_dbt_kwargs = {
-        'project_dir': project_dir or ctx.config['project_path'],
-        'profiles_dir': kwargs.get('profiles_dir'),
-        'profile': kwargs.get('profile'),
-        'target': kwargs.get('target'),
-        'vars': kwargs.get('vars'),
-        'bypass_cache': kwargs.get('bypass_cache'),
+        "project_dir": project_dir or ctx.config["project_path"],
+        "profiles_dir": kwargs.get("profiles_dir"),
+        "profile": kwargs.get("profile"),
+        "target": kwargs.get("target"),
+        "vars": kwargs.get("vars"),
+        "bypass_cache": kwargs.get("bypass_cache"),
     }
     # Get the paths and resource types of the
     # resources for which to create property files
     transformed_ls_results = _transform_ls_results(
         ctx,
         resource_type=resource_type,
-        select=kwargs.get('select'),
-        models=kwargs.get('models'),
-        exclude=kwargs.get('exclude'),
-        selector=kwargs.get('selector'),
-        state=kwargs.get('state'),
+        select=kwargs.get("select"),
+        models=kwargs.get("models"),
+        exclude=kwargs.get("exclude"),
+        selector=kwargs.get("selector"),
+        state=kwargs.get("state"),
         **common_dbt_kwargs,
     )
     return common_dbt_kwargs, transformed_ls_results
@@ -510,19 +521,19 @@ def _transform_ls_results(ctx, **kwargs):
         and the value is dictionary form of the resource's json
     """
     # Run dbt ls to retrieve resource path and json information
-    _LOGGER.info('Searching for matching resources...')
+    _LOGGER.info("Searching for matching resources...")
     potential_results = _utils.dbt_ls(
         ctx,
         supported_resource_types=_SUPPORTED_RESOURCE_TYPES,
         logger=_LOGGER,
-        output='json',
+        output="json",
         **kwargs,
     )
     potential_result_paths = None
     results = dict()
     for i, potential_result in enumerate(potential_results):
-        if 'original_file_path' in potential_result:
-            potential_result_path = potential_result['original_file_path']
+        if "original_file_path" in potential_result:
+            potential_result_path = potential_result["original_file_path"]
         # Before dbt version 0.20.0, original_file_path was not
         # included in the json response of "dbt ls". For older
         # versions of dbt, we need to run "dbt ls" with the
@@ -533,14 +544,14 @@ def _transform_ls_results(ctx, **kwargs):
                     ctx,
                     supported_resource_types=_SUPPORTED_RESOURCE_TYPES,
                     logger=_LOGGER,
-                    output='path',
+                    output="path",
                     **kwargs,
                 )
                 assert len(potential_result_paths) == len(
                     potential_results
-                ), 'Length of results differs from length of result details'
+                ), "Length of results differs from length of result details"
             potential_result_path = potential_result_paths[i]
-        if Path(ctx.config['project_path'], potential_result_path).exists():
+        if Path(ctx.config["project_path"], potential_result_path).exists():
             results[potential_result_path] = potential_result
     _LOGGER.info(
         f"Found {len(results)} matching resources in dbt project"
@@ -589,18 +600,18 @@ def _create_all_property_files(
                 i + 1,
                 transformed_ls_results_length,
                 **kwargs,
-            ): {'index': i + 1, 'resource_location': k}
+            ): {"index": i + 1, "resource_location": k}
             for i, (k, v) in enumerate(transformed_ls_results.items())
         }
         # Log success or failure for each thread
         successes = 0
         failures = 0
         for future in as_completed(futures):
-            index = futures[future]['index']
-            resource_location = futures[future]['resource_location']
+            index = futures[future]["index"]
+            resource_location = futures[future]["resource_location"]
             progress = (
-                f'Resource {index} of {transformed_ls_results_length},'
-                f' {resource_location}'
+                f"Resource {index} of {transformed_ls_results_length},"
+                f" {resource_location}"
             )
             if future.exception() is not None:
                 _LOGGER.error(f'{"[FAILURE]":>{_PROGRESS_PADDING}} {progress}')
@@ -612,7 +623,7 @@ def _create_all_property_files(
                     type(e), e, e.__traceback__
                 )
                 futures[future][
-                    'exception_message'
+                    "exception_message"
                 ] = f'{progress}\n{"".join(exception_lines)}'
             else:
                 _LOGGER.info(f'{"[SUCCESS]":>{_PROGRESS_PADDING}} {progress}')
@@ -624,20 +635,20 @@ def _create_all_property_files(
     # that the failed futures are displayed in order of submission,
     # rather than completion
     for future in futures:
-        exception_message = futures[future].get('exception_message')
+        exception_message = futures[future].get("exception_message")
         if exception_message:
             exception_messages.append(exception_message)
     if exception_messages:
-        exception_messages = '\n'.join(exception_messages)
+        exception_messages = "\n".join(exception_messages)
         _LOGGER.error(
-            f'Tracebacks for all failures:\n\n{exception_messages}'
+            f"Tracebacks for all failures:\n\n{exception_messages}"
         )
     # Log result summary
     _LOGGER.info(
         f'{"[DONE]":>{_PROGRESS_PADDING}}'
-        f' Total: {successes + failures},'
-        f' Successes: {successes},'
-        f' Failures: {failures}'
+        f" Total: {successes + failures},"
+        f" Successes: {successes},"
+        f" Failures: {failures}"
     )
 
 
@@ -653,47 +664,47 @@ def _delete_all_property_files(ctx, transformed_ls_results):
     :return: None
     """
     resource_paths = [
-        Path(ctx.config['project_path'], resource_location)
+        Path(ctx.config["project_path"], resource_location)
         for resource_location in transformed_ls_results
     ]
     property_paths = [
-        rp.with_suffix('.yml')
+        rp.with_suffix(".yml")
         for rp in resource_paths
-        if rp.with_suffix('.yml').exists()
+        if rp.with_suffix(".yml").exists()
     ]
     _LOGGER.info(
-        f'{len(property_paths)} of {len(resource_paths)}'
-        f' have existing property files'
+        f"{len(property_paths)} of {len(resource_paths)}"
+        f" have existing property files"
     )
     # Delete the selected property paths
     if len(property_paths) > 0:
-        deletion_message_yml_paths = '\n'.join(
+        deletion_message_yml_paths = "\n".join(
             [str(property_path) for property_path in property_paths]
         )
-        deletion_message_prefix = '\nThe following files will be deleted:\n\n'
+        deletion_message_prefix = "\nThe following files will be deleted:\n\n"
         deletion_message_suffix = (
-            f'\n\nAre you sure you want to delete these'
-            f' {len(property_paths)} file(s) (answer: y/n)?\n'
+            f"\n\nAre you sure you want to delete these"
+            f" {len(property_paths)} file(s) (answer: y/n)?\n"
        )
         deletion_confirmation = input(
-            f'{deletion_message_prefix}'
-            f'{deletion_message_yml_paths}'
-            f'{deletion_message_suffix}'
+            f"{deletion_message_prefix}"
+            f"{deletion_message_yml_paths}"
+            f"{deletion_message_suffix}"
         )
         # User confirmation
-        while deletion_confirmation.lower() not in ['y', 'n']:
+        while deletion_confirmation.lower() not in ["y", "n"]:
             deletion_confirmation = input(
                 '\nPlease enter "y" to confirm deletion'
                 ' or "n" to abort deletion.\n'
             )
-        if deletion_confirmation.lower() == 'y':
+        if deletion_confirmation.lower() == "y":
             for file in property_paths:
                 os.remove(file)
-            _LOGGER.info('Deletion confirmed.')
+            _LOGGER.info("Deletion confirmed.")
         else:
-            _LOGGER.info('Deletion aborted.')
+            _LOGGER.info("Deletion aborted.")
     else:
-        _LOGGER.info('There are no files to delete.')
+        _LOGGER.info("There are no files to delete.")
 
 
 def _create_property_file(
@@ -722,13 +733,13 @@ def _create_property_file(
     """
     _LOGGER.info(
         f'{"[START]":>{_PROGRESS_PADDING}}'
-        f' Resource {counter} of {total},'
-        f' {resource_location}'
+        f" Resource {counter} of {total},"
+        f" {resource_location}"
     )
     columns = _get_columns(ctx, resource_location, resource_dict, **kwargs)
     property_path = Path(
-        ctx.config['project_path'], resource_location
-    ).with_suffix('.yml')
+        ctx.config["project_path"], resource_location
+    ).with_suffix(".yml")
     property_file_dict = _structure_property_file_dict(
         property_path,
         resource_dict,
@@ -754,10 +765,10 @@ def _get_columns(ctx, resource_location, resource_dict, **kwargs):
     :return: A list of the column names in the resource
     """
     resource_path = Path(resource_location)
-    materialized = resource_dict['config']['materialized']
-    resource_type = resource_dict['resource_type']
-    resource_name = resource_dict['name']
-    if materialized != 'ephemeral' and resource_type != 'analysis':
+    materialized = resource_dict["config"]["materialized"]
+    resource_type = resource_dict["resource_type"]
+    resource_name = resource_dict["name"]
+    if materialized != "ephemeral" and resource_type != "analysis":
         result_lines = _utils.dbt_run_operation(
             ctx,
             _MACRO_NAME,
@@ -770,8 +781,8 @@ def _get_columns(ctx, resource_location, resource_dict, **kwargs):
     # data warehouse, so the compiled versions of their SQL statements
     # are used instead
     else:
-        resource_path = Path(ctx.config['compiled_path'], resource_path)
-        with open(resource_path, 'r') as f:
+        resource_path = Path(ctx.config["compiled_path"], resource_path)
+        with open(resource_path, "r") as f:
             lines = f.readlines()
         # Get and clean the SQL code
         lines = [line.strip() for line in lines if line.strip()]
@@ -782,45 +793,28 @@
 
     relevant_lines = list(
         filter(
-            lambda x: x.get(
-                # dbt-core>=1.0,<1.4
-                # run-operation logs contain structure
-                # {
-                #     'code': 'M011',
-                #     'msg': ['column1', 'column2', ...]
-                # }
-                'code',
-                # dbt-core>=1.4
-                # run-operation logs contain structure
-                # {
-                #     'info': {
-                #         'code': 'M011',
-                #         'msg': "['column1', 'column2', ...]"  # string value
-                #     }
-                # }
-                x.get('info', dict()).get('code'),
-            )
-            == 'M011',
+            lambda x: x["info"].get("code") == "I062",
             result_lines,
         )
     )
+
     if len(relevant_lines) >= 1:
         relevant_line = relevant_lines[-1]
         columns = relevant_line.get(
-            'msg',
-            relevant_line.get('info', dict()).get('msg'),
+            "msg",
+            relevant_line.get("info", dict()).get("msg"),
         )
     else:
         # for older dbt-core versions, we need to cross fingers a little harder
         relevant_lines = result_lines[1:]
         # also, the message key is different
-        columns = relevant_lines[-1].get('message')
+        columns = relevant_lines[-1].get("message")
     # In some version of dbt columns are not passed as valid json but as
     # a string representation of a list
     is_string_list = (
         isinstance(columns, str)
-        and columns.startswith('[')
-        and columns.endswith(']')
+        and columns.startswith("[")
+        and columns.endswith("]")
     )
     if is_string_list:
         columns = ast.literal_eval(columns)
@@ -838,8 +832,8 @@ def _structure_property_file_dict(location, resource_dict, columns_list):
         property file
     :return: None
     """
-    resource_type = resource_dict['resource_type']
-    resource_name = resource_dict['name']
+    resource_type = resource_dict["resource_type"]
+    resource_name = resource_dict["name"]
     # If the property file already exists, read it into a dictionary.
     if location.exists():
         property_file_dict = _utils.parse_yaml(location)
@@ -855,18 +849,18 @@ def _structure_property_file_dict(location, resource_dict, columns_list):
     # Get the sub-dictionaries of each existing column
     resource_type_plural = _SUPPORTED_RESOURCE_TYPES[resource_type]
     existing_columns_dict = {
-        item['name']: item
-        for item in property_file_dict[resource_type_plural][0]['columns']
+        item["name"]: item
+        for item in property_file_dict[resource_type_plural][0]["columns"]
     }
     # For each column we want in the property file,
     # reuse the sub-dictionary if it exists
     # or else create a new sub-dictionary
-    property_file_dict[resource_type_plural][0]['columns'] = list()
+    property_file_dict[resource_type_plural][0]["columns"] = list()
     for column in columns_list:
         column_dict = existing_columns_dict.get(
             column, _get_property_column(column)
         )
-        property_file_dict[resource_type_plural][0]['columns'].append(
+        property_file_dict[resource_type_plural][0]["columns"].append(
             column_dict
         )
     return property_file_dict
@@ -884,9 +878,9 @@ def _get_property_header(resource, resource_type, properties=None):
     :return: A dictionary representing resource properties
     """
     if not properties:
-        properties = {'name': resource, 'description': "", 'columns': []}
+        properties = {"name": resource, "description": "", "columns": []}
     header_dict = {
-        'version': 2,
+        "version": 2,
         _SUPPORTED_RESOURCE_TYPES[resource_type]: [properties],
     }
     return header_dict
@@ -899,7 +893,7 @@ def _get_property_column(column_name):
     :param column_name: Name of column
     :return: A dictionary representing column properties
     """
-    column_dict = {'name': column_name, 'description': ""}
+    column_dict = {"name": column_name, "description": ""}
     return column_dict
 
 
@@ -918,8 +912,8 @@ def _assert_supported_resource_type(resource_type):
         )
     except AssertionError:
         msg = (
-            f'Sorry, this tool only supports the following resource types:'
-            f' {list(_SUPPORTED_RESOURCE_TYPES.keys())}'
+            f"Sorry, this tool only supports the following resource types:"
+            f" {list(_SUPPORTED_RESOURCE_TYPES.keys())}"
         )
         _LOGGER.exception(msg)
         raise
diff --git a/requirements/requirements_dbt_0.18.x.txt b/requirements/requirements_dbt_0.18.x.txt
deleted file mode 100644
index f41c790..0000000
--- a/requirements/requirements_dbt_0.18.x.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-agate<1.6.2
-cryptography<3
-dbt-core~=0.18.0
-dbt-sqlite~=0.0.4
-invoke>=1.4.1
-MarkupSafe==2.0.1
-PyYAML>=5.1
-pyopenssl<20.0.0
-pytz<2021.0
-ruamel.yaml>=0.17.12
--e .
\ No newline at end of file
diff --git a/requirements/requirements_dbt_0.19.x.txt b/requirements/requirements_dbt_0.19.x.txt
deleted file mode 100644
index ef6276a..0000000
--- a/requirements/requirements_dbt_0.19.x.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-agate<1.6.2
-dbt-core~=0.19.0
-dbt-sqlite~=0.1.0
-invoke>=1.4.1
-MarkupSafe==2.0.1
-pytz<2021.0
-PyYAML>=5.1
-ruamel.yaml>=0.17.12
--e .
\ No newline at end of file
diff --git a/requirements/requirements_dbt_1.1.x.txt b/requirements/requirements_dbt_1.1.x.txt
deleted file mode 100644
index 7cfc2a3..0000000
--- a/requirements/requirements_dbt_1.1.x.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-agate<1.6.4
-dbt-core~=1.1.0
-dbt-sqlite~=1.1.0
-invoke>=1.4.1
-pytz<2021.0
-PyYAML>=5.1
-ruamel.yaml>=0.17.12
--e .
diff --git a/requirements/requirements_dbt_1.4.x.txt b/requirements/requirements_dbt_1.5.x.txt
similarity index 67%
rename from requirements/requirements_dbt_1.4.x.txt
rename to requirements/requirements_dbt_1.5.x.txt
index e0a1a04..47753b4 100644
--- a/requirements/requirements_dbt_1.4.x.txt
+++ b/requirements/requirements_dbt_1.5.x.txt
@@ -1,6 +1,6 @@
 agate>=1.6,<1.7.1
-dbt-core~=1.4.0
-dbt-sqlite~=1.4.0
+dbt-core~=1.5.0
+dbt-duckdb~=1.5.0
 invoke>=1.4.1
 PyYAML>=5.1
 ruamel.yaml>=0.17.12
diff --git a/setup.py b/setup.py
index aaaf1b2..9ee5c5f 100644
--- a/setup.py
+++ b/setup.py
@@ -5,25 +5,25 @@
 from dbt_invoke.internal import _version
 
 PARENT_DIR = Path(__file__).parent
-README = Path(PARENT_DIR, 'README.md').read_text()
+README = Path(PARENT_DIR, "README.md").read_text()
 
 setup(
-    name='dbt-invoke',
+    name="dbt-invoke",
     version=_version.__version__,
-    author='Robert Astel, Jennifer Zhan, Vincent Dragonette',
-    author_email='rob.astel@gmail.com',
-    license='Apache License 2.0',
+    author="Robert Astel, Jennifer Zhan, Vincent Dragonette",
+    author_email="rob.astel@gmail.com",
+    license="Apache License 2.0",
     description=(
-        'dbt-invoke is a CLI for creating, updating, and deleting'
-        ' dbt property files.'
+        "dbt-invoke is a CLI for creating, updating, and deleting"
+        " dbt property files."
     ),
     long_description=README,
-    long_description_content_type='text/markdown',
-    url='https://github.com/Dashlane/dbt-invoke',
+    long_description_content_type="text/markdown",
+    url="https://github.com/Dashlane/dbt-invoke",
     packages=find_packages(),
-    install_requires=['invoke>=1.4.1', 'PyYAML>=5.1', 'ruamel.yaml>=0.17.12'],
-    python_requires='>=3.6.0',
+    install_requires=["invoke>=1.4.1", "PyYAML>=5.1", "ruamel.yaml>=0.17.12"],
+    python_requires=">=3.6.0",
     entry_points={
-        'console_scripts': ["dbt-invoke = dbt_invoke.main:program.run"]
+        "console_scripts": ["dbt-invoke = dbt_invoke.main:program.run"]
     },
 )
diff --git a/tests/data_files/customers.parquet b/tests/data_files/customers.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..be92acfb80e6f26a015a5a70e30955f423dd522a
GIT binary patch
literal 320
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/tests/data_files/items.parquet b/tests/data_files/items.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ee69374d8dd82ac87571170ca91646f523997e4f
GIT binary patch
literal 410
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/tests/data_files/orders.parquet b/tests/data_files/orders.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b9515f82233d72b28b1541fef7b03f3b52f9757f
GIT binary patch
literal 700
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/tests/dbt.duckdb b/tests/dbt.duckdb
new file mode 100644
index 0000000000000000000000000000000000000000..888c87739a90c860f0748afb2c088740f965daf2
GIT binary patch
literal 2371584
[... base85-encoded binary patch data omitted ...]
zTdk8NUmd)@Z(!~n|6u6;ra1lz--vzT{KCCs7n=7Ke*NgHAA0^*JC=P#P3r6_%;RUt zVTE}VzFGVBo8J#&#~yy=x#O>mUWvz=)bYfoZy!$UOL068zI`|#2n04*p#NUT+F*b% z+_ER&jh})MuLptbmzL{cDb_%M0D*T+VBHr_t}NcQb95O4#|z?X9Q-PKje&JPumb7z zxq)4Pi4!0|fB=Cj6jei5FkK+z@-FkoVTKGoRwbcd@Jh% zf%Jj0xFw(HdC4t(*eQL&?XUl+J>iu8-OnyfpUcvz&$PTwdgwAk>Of3@009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5)W zBY_9De&f;aJ^jWD`?A&-o6Y8MINlU)_x#IS-G7C`&QR3c+Ab=b3*{HiFWftJp?Tlk zzj^(`U%Th+XNJ}ap?zpJTpzw~bA9Am-j9S~+MzMjj(_wA|NP;<`oCs#Z4~rY`?Ej(^C#bkEQP{} zkN@=NNB`vTfBfrjTFJt7(tLY+HSh5LMgd#w$*JSo7!Y}84`-9&$voO-L)}buD zDT}veampX&%cuN+i{L!BBEGZ&DfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs v0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXDSm6HwdFWMB literal 0 HcmV?d00001 diff --git a/tests/dbt_project_files/dbt_project.yml b/tests/dbt_project_files/dbt_project.yml index 1e02d43..3afa9ee 100644 --- a/tests/dbt_project_files/dbt_project.yml +++ b/tests/dbt_project_files/dbt_project.yml @@ -2,7 +2,7 
@@ name: test_dbt_project version: 1.0.0 config-version: 2 -profile: dbt-sqlite +profile: dbt-duckdb seed-paths: ["data"] diff --git a/tests/test.py b/tests/test.py index b97b620..f4da7df 100644 --- a/tests/test.py +++ b/tests/test.py @@ -6,6 +6,7 @@ import pkg_resources import shutil import itertools +import duckdb import invoke @@ -23,53 +24,74 @@ def setUpClass(cls): :return: None """ - cls.logger = _utils.get_logger('dbt-invoke', level='DEBUG') - cls.config_path = Path(PARENT_DIR, 'test_config.yml') + cls.logger = _utils.get_logger("dbt-invoke", level="DEBUG") + cls.config_path = Path(PARENT_DIR, "test_config.yml") cls.config = _utils.parse_yaml(cls.config_path) # for backward compatibility, select the correct dbt_project.yml file - if pkg_resources.get_distribution("dbt-core").version >= '1.0.0': + if pkg_resources.get_distribution("dbt-core").version >= "1.0.0": shutil.copy( - Path(PARENT_DIR, 'dbt_project_files/dbt_project.yml'), + Path(PARENT_DIR, "dbt_project_files/dbt_project.yml"), Path( - PARENT_DIR, cls.config['project_name'], 'dbt_project.yml' + PARENT_DIR, cls.config["project_name"], "dbt_project.yml" ), ) else: shutil.copy( Path( - PARENT_DIR, 'dbt_project_files/dbt_project_pre_dbt_v1.yml' + PARENT_DIR, "dbt_project_files/dbt_project_pre_dbt_v1.yml" ), Path( - PARENT_DIR, cls.config['project_name'], 'dbt_project.yml' + PARENT_DIR, cls.config["project_name"], "dbt_project.yml" ), ) - cls.project_dir = Path(PARENT_DIR, cls.config['project_name']) - cls.profiles_dir = Path(PARENT_DIR, cls.config['project_name']) + cls.project_dir = Path(PARENT_DIR, cls.config["project_name"]) + cls.profiles_dir = Path(PARENT_DIR, cls.config["project_name"]) cls.test_base_dir = PARENT_DIR - cls.expected_properties = cls.config['expected_properties'] - cls.expected_dbt_ls_results = cls.config['expected_dbt_ls_results'] + cls.expected_properties = cls.config["expected_properties"] + cls.expected_dbt_ls_results = cls.config["expected_dbt_ls_results"] cls.ctx = invoke.Context() _utils.get_project_info(cls.ctx, project_dir=cls.project_dir) - cls.macro_name = '_log_columns_list' + cls.macro_name = "_log_columns_list" cls.macro_value = _utils.MACROS[cls.macro_name] cls.macro_path = Path( - cls.ctx.config['macro_paths'][0], - f'{cls.macro_name}.sql', + cls.ctx.config["macro_paths"][0], + f"{cls.macro_name}.sql", + ) + cls.dbt_seed = ( + "dbt seed" + f" --project-dir {cls.project_dir}" + f" --profiles-dir {cls.project_dir}" + f" --target-path {cls.project_dir}/target" ) cls.dbt_clean = ( - 'dbt clean' - f' --project-dir {cls.project_dir}' - f' --profiles-dir {cls.project_dir}' + "dbt clean" + f" --project-dir {cls.project_dir}" + f" --profiles-dir {cls.project_dir}" + ) + cls.dbt_run = ( + "dbt run " + f" --project-dir {cls.project_dir}" + f" --profiles-dir {cls.project_dir}" + f" --target-path {cls.project_dir}/target" + ) + cls.dbt_snapshot = ( + "dbt snapshot" + f" --project-dir {cls.project_dir}" + f" --profiles-dir {cls.project_dir}" ) cls.dbt_compile = ( - 'dbt compile' - f' --project-dir {cls.project_dir}' - f' --profiles-dir {cls.project_dir}' + "dbt compile" + f" --project-dir {cls.project_dir}" + f" --profiles-dir {cls.project_dir}" + f" --target-path {cls.project_dir}/target" ) + invoke.run(cls.dbt_seed) invoke.run(cls.dbt_clean) invoke.run(cls.dbt_compile) + invoke.run(cls.dbt_run) + invoke.run(cls.dbt_snapshot) def setUp(self): """ @@ -79,7 +101,7 @@ def setUp(self): """ if self.macro_path.exists(): os.remove(self.macro_path) - with patch('builtins.input', return_value='y'): + with 
patch("builtins.input", return_value="y"): properties.delete( self.ctx, project_dir=self.project_dir, @@ -119,7 +141,7 @@ def compare_files(self, path1, path2): return True -if __name__ == '__main__': +if __name__ == "__main__": loader = unittest.TestLoader() suite = loader.discover(PARENT_DIR) runner = unittest.TextTestRunner(verbosity=2) diff --git a/tests/test_config.yml b/tests/test_config.yml index 72a7530..7841393 100644 --- a/tests/test_config.yml +++ b/tests/test_config.yml @@ -61,6 +61,10 @@ expected_properties: description: '' - name: updated_at description: '' + - name: dbt_scd_id + description: '' + - name: dbt_updated_at + description: '' - name: dbt_valid_from description: '' - name: dbt_valid_to diff --git a/tests/test_dbt_project/dbt_project.yml b/tests/test_dbt_project/dbt_project.yml index 1e02d43..3afa9ee 100644 --- a/tests/test_dbt_project/dbt_project.yml +++ b/tests/test_dbt_project/dbt_project.yml @@ -2,7 +2,7 @@ name: test_dbt_project version: 1.0.0 config-version: 2 -profile: dbt-sqlite +profile: dbt-duckdb seed-paths: ["data"] diff --git a/tests/test_dbt_project/models/marts/core/customers.sql b/tests/test_dbt_project/models/marts/core/customers.sql index 2a7ecaa..51acf02 100644 --- a/tests/test_dbt_project/models/marts/core/customers.sql +++ b/tests/test_dbt_project/models/marts/core/customers.sql @@ -2,4 +2,4 @@ SELECT customer_id , created_at FROM - customers \ No newline at end of file + {{ source('external_source', 'customers') }} \ No newline at end of file diff --git a/tests/test_dbt_project/models/marts/core/orders.sql b/tests/test_dbt_project/models/marts/core/orders.sql index fd56b90..d0158cb 100644 --- a/tests/test_dbt_project/models/marts/core/orders.sql +++ b/tests/test_dbt_project/models/marts/core/orders.sql @@ -5,4 +5,4 @@ SELECT , quantity , order_at FROM - orders \ No newline at end of file + {{ source('external_source', 'orders') }} \ No newline at end of file diff --git a/tests/test_dbt_project/models/sources.yml b/tests/test_dbt_project/models/sources.yml new file mode 100644 index 0000000..d78d895 --- /dev/null +++ b/tests/test_dbt_project/models/sources.yml @@ -0,0 +1,8 @@ +sources: + - name: external_source + meta: + external_location: "data_files/{name}.parquet" + tables: + - name: customers + - name: orders + - name: items diff --git a/tests/test_dbt_project/profiles.yml b/tests/test_dbt_project/profiles.yml index 02a5eac..3d273a0 100644 --- a/tests/test_dbt_project/profiles.yml +++ b/tests/test_dbt_project/profiles.yml @@ -1,33 +1,9 @@ config: send_anonymous_usage_stats: False -# Credit to https://github.com/codeforkjeff/dbt-sqlite -dbt-sqlite: +dbt-duckdb: target: "{{ env_var('TARGET', 'default') }}" outputs: - # There is more than one target because dbt-sqlite for dbt~=0.18.x - # uses a different format for schemas_and_paths - 0.18.x: - type: &type sqlite - # sqlite locks the whole db on writes so anything > 1 won't help - threads: &threads 1 - # Value is arbitrary - database: &database database - # Value of 'schema' must be defined in schema_paths below. - # In most cases, this should be 'main' - schema: &schema main - # Connect schemas to paths: at least one of these must be 'main' - schemas_and_paths: main=test.db - # Directory where all *.db files are attached as schema, using - # base filename as schema name, and where new schema are created. - # This can overlap with the dirs of files in schemas_and_paths as - # long as there are no conflicts. - schema_directory: &schema_directory . 
default: - type: *type - threads: *threads - database: *database - schema: *schema - schemas_and_paths: - main: test.db - schema_directory: *schema_directory + type: duckdb + path: ./dbt.duckdb diff --git a/tests/test_dbt_project/snapshots/items_snapshot.sql b/tests/test_dbt_project/snapshots/items_snapshot.sql index b6c5187..1fc9778 100644 --- a/tests/test_dbt_project/snapshots/items_snapshot.sql +++ b/tests/test_dbt_project/snapshots/items_snapshot.sql @@ -6,5 +6,5 @@ updated_at='updated_at' ) }} - select * from {{ ref('items') }} + select * from {{ source('external_source', 'items') }} {% endsnapshot %} \ No newline at end of file diff --git a/tests/test_dbt_project/test.db b/tests/test_dbt_project/test.db deleted file mode 100644 index bfa2676070a1c285bd2904f50a2fc97099769a64..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 28672 zcmeI)J#U*p7{GBzY}2@Lnhj;jWs9XMYL%r@hSY6oWyN+{mkyZ_Hbk*%g9A8?NAjim zS^90t;TKF2s?O+-1U~M#<9U7$%Ps!?tuu;TaqR~aI})eLzM`tidm$7>sV1kEoU23L zS5`O4ue#ALYF3q#^FNKoRH^NH%Bx`S_ufh4e%EUp6-}TY0R#|0009ILKmY**$|4ZI zscQP!nHv9!?7`R#+&kBsxz=EAIreBgw}$?WA4CP+wU2$HWg5b4edrjX;308b-x&t3 z9l4I>d*YLv(7QB6??fa0v-!<>vsP{dOI6bk z57pRBLtni)TsO9I39P%*FZ*ej`81Ya{=Y(5EtsU5MAMKTz7!2Un8poIVe2q1s}0tg_000Iag zfIwLV*#DQcvgCyT0tg_000IagfB*srAb>z7!2Un8poIVe2q1s}0tg_000IagfIwLV g>M}9@|F5j&B`*XJKmY**5I_I{1Q0*~0R&QkzhwC;eE
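
Note for reviewers (not part of the patch itself): the sqlite-to-duckdb switch above relies on a dbt-duckdb feature in which sources that set `meta.external_location` are resolved by templating the table name into that path. With the new sources.yml, `{{ source('external_source', 'customers') }}` compiles to a direct read of data_files/customers.parquet, so the models and the snapshot read the parquet fixtures instead of seeded tables. Below is a minimal sketch of that resolution using plain duckdb, assuming it is run from the tests/ directory; the database path and parquet location come from this patch, while the script itself is illustrative only and ships nowhere in the diff.

    # sketch.py - reproduces what dbt-duckdb does for the customers model
    import duckdb

    # Same database file the new profiles.yml points at (path: ./dbt.duckdb).
    con = duckdb.connect("dbt.duckdb")

    # external_location "data_files/{name}.parquet" with name=customers means
    # the compiled FROM clause reads the parquet file directly:
    rows = con.execute(
        "SELECT customer_id, created_at"
        " FROM read_parquet('data_files/customers.parquet')"
    ).fetchall()
    print(rows)

    con.close()

This also accounts for the two other moving parts of the test changes: setUpClass now runs dbt seed, run, and snapshot so the duckdb database is populated before any assertions execute, and the dbt_scd_id / dbt_updated_at entries added to test_config.yml are simply the metadata columns dbt appends to every snapshot table alongside dbt_valid_from and dbt_valid_to.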