From fffa4de97a2f1f57c605c68a7571e2c77ec7d294 Mon Sep 17 00:00:00 2001
From: Gerda Shank
Date: Thu, 21 Oct 2021 09:22:55 -0400
Subject: [PATCH] [#3885] Partially parse when environment variables in schema
 files change

---
 core/dbt/clients/jinja.py                     |   4 +-
 core/dbt/config/renderer.py                   |  68 +------
 core/dbt/context/base.py                      |  41 ++++-
 core/dbt/context/configured.py                |  36 +++-
 core/dbt/context/docs.py                      |   2 +-
 core/dbt/context/providers.py                 |  28 ++-
 core/dbt/contracts/files.py                   |  16 ++
 core/dbt/contracts/graph/parsed.py            |   1 +
 core/dbt/exceptions.py                        |  13 ++
 core/dbt/parser/partial.py                    | 166 +++++++++++++-----
 core/dbt/parser/schema_renderer.py            |  68 +++++++
 core/dbt/parser/schemas.py                    | 113 ++++++++----
 core/dbt/parser/sources.py                    |   3 +-
 core/dbt/utils.py                             |  22 ++-
 .../test_schema_v2_tests.py                   |   4 +-
 .../test_configs_in_schema_files.py           |   4 +-
 .../test-files/env_var_schema.yml             |   6 +
 .../068_partial_parsing_tests/test_pp_vars.py |  25 +--
 test/unit/test_utils.py                       |  14 +-
 test/unit/test_yaml_renderer.py               | 103 +++++++++++
 20 files changed, 548 insertions(+), 189 deletions(-)
 create mode 100644 core/dbt/parser/schema_renderer.py
 create mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml
 create mode 100644 test/unit/test_yaml_renderer.py

diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py
index f6f0e8246fc..db8e0dda1ec 100644
--- a/core/dbt/clients/jinja.py
+++ b/core/dbt/clients/jinja.py
@@ -21,7 +21,7 @@
 from dbt.utils import (
     get_dbt_macro_name, get_docs_macro_name, get_materialization_macro_name,
-    get_test_macro_name, deep_map
+    get_test_macro_name, deep_map_render
 )
 from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag
@@ -661,5 +661,5 @@ def _convert_function(
             return value
 
-    kwargs = deep_map(_convert_function, node.test_metadata.kwargs)
+    kwargs = deep_map_render(_convert_function, node.test_metadata.kwargs)
     context[GENERIC_TEST_KWARGS_NAME] = kwargs
diff --git a/core/dbt/config/renderer.py b/core/dbt/config/renderer.py
index 866ec8c49bf..5c27439eb1a 100644
--- a/core/dbt/config/renderer.py
+++ b/core/dbt/config/renderer.py
@@ -5,8 +5,7 @@
 from dbt.exceptions import (
     DbtProjectError, CompilationException, RecursionException
 )
-from dbt.node_types import NodeType
-from dbt.utils import deep_map
+from dbt.utils import deep_map_render
 
 Keypath = Tuple[Union[str, int], ...]
@@ -47,7 +46,7 @@ def render_data(
         self, data: Dict[str, Any]
     ) -> Dict[str, Any]:
         try:
-            return deep_map(self.render_entry, data)
+            return deep_map_render(self.render_entry, data)
         except RecursionException:
             raise DbtProjectError(
                 f'Cycle detected: {self.name} input has a reference to itself',
@@ -163,69 +162,6 @@ def name(self):
         'Profile'
 
 
-class SchemaYamlRenderer(BaseRenderer):
-    DOCUMENTABLE_NODES = frozenset(
-        n.pluralize() for n in NodeType.documentable()
-    )
-
-    @property
-    def name(self):
-        return 'Rendering yaml'
-
-    def _is_norender_key(self, keypath: Keypath) -> bool:
-        """
-        models:
-            - name: blah
-              description: blah
-              tests: ...
-              columns:
-                - name:
-                  description: blah
-                  tests: ...
-
-        Return True if it's tests or description - those aren't rendered
-        """
-        if len(keypath) >= 2 and keypath[1] in ('tests', 'description'):
-            return True
-
-        if (
-            len(keypath) >= 4 and
-            keypath[1] == 'columns' and
-            keypath[3] in ('tests', 'description')
-        ):
-            return True
-
-        return False
-
-    # don't render descriptions or test keyword arguments
-    def should_render_keypath(self, keypath: Keypath) -> bool:
-        if len(keypath) < 2:
-            return True
-
-        if keypath[0] not in self.DOCUMENTABLE_NODES:
-            return True
-
-        if len(keypath) < 3:
-            return True
-
-        if keypath[0] == NodeType.Source.pluralize():
-            if keypath[2] == 'description':
-                return False
-            if keypath[2] == 'tables':
-                if self._is_norender_key(keypath[3:]):
-                    return False
-        elif keypath[0] == NodeType.Macro.pluralize():
-            if keypath[2] == 'arguments':
-                if self._is_norender_key(keypath[3:]):
-                    return False
-            elif self._is_norender_key(keypath[1:]):
-                return False
-        else:  # keypath[0] in self.DOCUMENTABLE_NODES:
-            if self._is_norender_key(keypath[1:]):
-                return False
-        return True
-
-
 class PackageRenderer(BaseRenderer):
     @property
     def name(self):
diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py
index 2d1dbca1c88..c731ce8ec0d 100644
--- a/core/dbt/context/base.py
+++ b/core/dbt/context/base.py
@@ -6,12 +6,12 @@
 from dbt import flags
 from dbt import tracking
-from dbt.clients.jinja import undefined_error, get_rendered
+from dbt.clients.jinja import get_rendered
 from dbt.clients.yaml_helper import (  # noqa: F401
     yaml, safe_load, SafeLoader, Loader, Dumper
 )
 from dbt.contracts.graph.compiled import CompiledResource
-from dbt.exceptions import raise_compiler_error, MacroReturn
+from dbt.exceptions import raise_compiler_error, MacroReturn, raise_parsing_error
 from dbt.logger import GLOBAL_LOGGER as logger
 from dbt.version import __version__ as dbt_version
@@ -21,6 +21,38 @@
 import datetime
 import re
 
+# Contexts in dbt Core
+# Contexts are used for Jinja rendering. They include context methods,
+# executable macros, and various settings that are available in Jinja.
+#
+# Different contexts are used in different places because we allow access
+# to different methods and data in different places. Executable SQL, for
+# example, includes the available macros and the model, while Jinja in
+# yaml files is more limited.
+#
+# The context that is passed to Jinja is always in a dictionary format,
+# not an actual class, so a 'to_dict()' is executed on a context class
+# before it is used for rendering.
+#
+# Each context has a generate_<name>_context function to create the context.
+# ProviderContext subclasses have different generate functions for
+# parsing and for execution.
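To make the comment block above concrete, here is a minimal sketch of a dict-shaped context driving Jinja rendering. This is illustrative only, not dbt's actual rendering path; the `env_var` helper is a hand-rolled stand-in for the context method this patch adds:

```python
import os
from jinja2 import Template


def env_var(var, default=None):
    # simplified stand-in for BaseContext.env_var
    if var in os.environ:
        return os.environ[var]
    if default is not None:
        return default
    raise Exception(f"Env var required but not provided: '{var}'")


# a context reaches Jinja as a plain dict, like a context class's to_dict()
context = {"env_var": env_var}
os.environ["MY_VAR"] = "view"
assert Template("{{ env_var('MY_VAR') }}").render(context) == "view"
```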
+#
+# Context class hierarchy
+#
+# BaseContext -- core/dbt/context/base.py
+#   TargetContext -- core/dbt/context/target.py
+#     ConfiguredContext -- core/dbt/context/configured.py
+#       SchemaYamlContext -- core/dbt/context/configured.py
+#         DocsRuntimeContext -- core/dbt/context/configured.py
+#       MacroResolvingContext -- core/dbt/context/configured.py
+#       ManifestContext -- core/dbt/context/manifest.py
+#         QueryHeaderContext -- core/dbt/context/manifest.py
+#         ProviderContext -- core/dbt/context/providers.py
+#           MacroContext -- core/dbt/context/providers.py
+#           ModelContext -- core/dbt/context/providers.py
+#           TestContext -- core/dbt/context/providers.py
+
 
 def get_pytz_module_context() -> Dict[str, Any]:
     context_exports = pytz.__all__  # type: ignore
@@ -164,8 +196,6 @@ class BaseContext(metaclass=ContextMeta):
     def __init__(self, cli_vars):
         self._ctx = {}
         self.cli_vars = cli_vars
-        # Save the env_vars encountered using this
-        self.env_vars = {}
 
     def generate_builtins(self):
         builtins: Dict[str, Any] = {}
@@ -287,11 +317,10 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
             return_value = default
 
         if return_value is not None:
-            self.env_vars[var] = return_value
             return return_value
         else:
             msg = f"Env var required but not provided: '{var}'"
-            undefined_error(msg)
+            raise_parsing_error(msg)
 
     if os.environ.get('DBT_MACRO_DEBUGGING'):
         @contextmember
diff --git a/core/dbt/context/configured.py b/core/dbt/context/configured.py
index 21095f786c9..de7f702eadf 100644
--- a/core/dbt/context/configured.py
+++ b/core/dbt/context/configured.py
@@ -1,11 +1,14 @@
-from typing import Any, Dict
+import os
+from typing import Any, Dict, Optional
 
 from dbt.contracts.connection import AdapterRequiredConfig
+from dbt.logger import SECRET_ENV_PREFIX
 from dbt.node_types import NodeType
 from dbt.utils import MultiDict
 
-from dbt.context.base import contextproperty, Var
+from dbt.context.base import contextproperty, contextmember, Var
 from dbt.context.target import TargetContext
+from dbt.exceptions import raise_parsing_error
 
 
 class ConfiguredContext(TargetContext):
@@ -64,11 +67,18 @@ def __call__(self, var_name, default=Var._VAR_NOTSET):
         return self.get_missing_var(var_name)
 
 
+class SchemaYamlVars():
+    def __init__(self):
+        self.env_vars = {}
+        self.vars = {}
+
+
 class SchemaYamlContext(ConfiguredContext):
     # subclass is DocsRuntimeContext
-    def __init__(self, config, project_name: str):
+    def __init__(self, config, project_name: str, schema_yaml_vars: Optional[SchemaYamlVars]):
         super().__init__(config)
         self._project_name = project_name
+        self.schema_yaml_vars = schema_yaml_vars
 
     @contextproperty
     def var(self) -> ConfiguredVar:
         return ConfiguredVar(
             self._ctx, self.config, self._project_name
         )
 
+    @contextmember
+    def env_var(self, var: str, default: Optional[str] = None) -> str:
+        return_value = None
+        if var in os.environ:
+            return_value = os.environ[var]
+        elif default is not None:
+            return_value = default
+
+        if return_value is not None:
+            if not var.startswith(SECRET_ENV_PREFIX) and self.schema_yaml_vars:
+                self.schema_yaml_vars.env_vars[var] = return_value
+            return return_value
+        else:
+            msg = f"Env var required but not provided: '{var}'"
+            raise_parsing_error(msg)
+
 
 class MacroResolvingContext(ConfiguredContext):
     def __init__(self, config):
@@ -89,9 +115,9 @@ def var(self) -> ConfiguredVar:
 
 
 def generate_schema_yml_context(
-    config: AdapterRequiredConfig, project_name: str
+    config: AdapterRequiredConfig, project_name: str,
+    schema_yaml_vars: SchemaYamlVars = None
 ) -> Dict[str, Any]:
-    ctx = SchemaYamlContext(config, project_name)
+    ctx = SchemaYamlContext(config, project_name, schema_yaml_vars)
     return ctx.to_dict()
 
diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py
index 5d6092f7a05..16e19f6ee88 100644
--- a/core/dbt/context/docs.py
+++ b/core/dbt/context/docs.py
@@ -23,7 +23,7 @@ def __init__(
         manifest: Manifest,
         current_project: str,
     ) -> None:
-        super().__init__(config, current_project)
+        super().__init__(config, current_project, None)
         self.node = node
         self.manifest = manifest
 
diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py
index a117f2aefb5..6bc266c33e0 100644
--- a/core/dbt/context/providers.py
+++ b/core/dbt/context/providers.py
@@ -11,11 +11,12 @@
     get_adapter, get_adapter_package_names, get_adapter_type_names
 )
 from dbt.clients import agate_helper
-from dbt.clients.jinja import get_rendered, MacroGenerator, MacroStack, undefined_error
+from dbt.clients.jinja import get_rendered, MacroGenerator, MacroStack
 from dbt.config import RuntimeConfig, Project
 from .base import contextmember, contextproperty, Var
 from .configured import FQNLookup
 from .context_config import ContextConfig
+from dbt.logger import SECRET_ENV_PREFIX
 from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace
 from .macros import MacroNamespaceBuilder, MacroNamespace
 from .manifest import ManifestContext
@@ -47,6 +48,7 @@
     ref_bad_context,
     source_target_not_found,
     wrapped_exports,
+    raise_parsing_error,
 )
 from dbt.config import IsFQNResource
 from dbt.logger import GLOBAL_LOGGER as logger, SECRET_ENV_PREFIX  # noqa
@@ -1184,7 +1186,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
             return return_value
         else:
             msg = f"Env var required but not provided: '{var}'"
-            undefined_error(msg)
+            raise_parsing_error(msg)
 
 
 class MacroContext(ProviderContext):
@@ -1428,6 +1430,28 @@ def _build_test_namespace(self):
         )
         self.namespace = macro_namespace
 
+    @contextmember
+    def env_var(self, var: str, default: Optional[str] = None) -> str:
+        return_value = None
+        if var in os.environ:
+            return_value = os.environ[var]
+        elif default is not None:
+            return_value = default
+
+        if return_value is not None:
+            # Save the env_var value in the manifest and the var name in the source_file
+            if not var.startswith(SECRET_ENV_PREFIX) and self.model:
+                self.manifest.env_vars[var] = return_value
+                # the "model" should only be test nodes, but just in case, check
+                if self.model.resource_type == NodeType.Test and self.model.file_key_name:
+                    source_file = self.manifest.files[self.model.file_id]
+                    (yaml_key, name) = self.model.file_key_name.split('.')
+                    source_file.add_env_var(var, yaml_key, name)
+            return return_value
+        else:
+            msg = f"Env var required but not provided: '{var}'"
+            raise_parsing_error(msg)
+
 
 def generate_test_context(
     model: ManifestNode,
diff --git a/core/dbt/contracts/files.py b/core/dbt/contracts/files.py
index 64134c4cecf..f5889cd603b 100644
--- a/core/dbt/contracts/files.py
+++ b/core/dbt/contracts/files.py
@@ -301,5 +301,21 @@ def get_all_test_ids(self):
             test_ids.extend(self.tests[key][name])
         return test_ids
 
+    def add_env_var(self, var, yaml_key, name):
+        if yaml_key not in self.env_vars:
+            self.env_vars[yaml_key] = {}
+        if name not in self.env_vars[yaml_key]:
+            self.env_vars[yaml_key][name] = []
+        if var not in self.env_vars[yaml_key][name]:
+            self.env_vars[yaml_key][name].append(var)
+
+    def delete_from_env_vars(self, yaml_key, name):
+        # We delete all vars for this yaml_key/name because the
+        # entry has been scheduled for reparsing.
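+        # Illustration only (hypothetical values): after add_env_var calls
+        # above, self.env_vars looks like
+        #     {'models': {'model_one': ['TEST_SCHEMA_VAR', 'OTHER_VAR']}}
+        # so removing the yaml_key/name entry below drops every var recorded
+        # for that schema entry at once.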
+        if yaml_key in self.env_vars and name in self.env_vars[yaml_key]:
+            del self.env_vars[yaml_key][name]
+            if not self.env_vars[yaml_key]:
+                del self.env_vars[yaml_key]
+
 
 AnySourceFile = Union[SchemaSourceFile, SourceFile]
diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/parsed.py
index 8afea16775a..46158957bd1 100644
--- a/core/dbt/contracts/graph/parsed.py
+++ b/core/dbt/contracts/graph/parsed.py
@@ -424,6 +424,7 @@ class ParsedGenericTestNode(ParsedNode, HasTestMetadata):
     # keep this in sync with CompiledGenericTestNode!
     resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
     column_name: Optional[str] = None
+    file_key_name: Optional[str] = None
     # Was not able to make mypy happy and keep the code working. We need to
     # refactor the various configs.
     config: TestConfig = field(default_factory=TestConfig)  # type: ignore
diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py
index 5bc388fbb71..c0cf54ef8bd 100644
--- a/core/dbt/exceptions.py
+++ b/core/dbt/exceptions.py
@@ -242,6 +242,15 @@ class ValidationException(RuntimeException):
     MESSAGE = "Validation Error"
 
 
+class ParsingException(RuntimeException):
+    CODE = 10015
+    MESSAGE = "Parsing Error"
+
+    @property
+    def type(self):
+        return 'Parsing'
+
+
 class JSONValidationException(ValidationException):
     def __init__(self, typename, errors):
         self.typename = typename
@@ -444,6 +453,10 @@ def raise_compiler_error(msg, node=None) -> NoReturn:
     raise CompilationException(msg, node)
 
 
+def raise_parsing_error(msg) -> NoReturn:
+    raise ParsingException(msg)
+
+
 def raise_database_error(msg, node=None) -> NoReturn:
     raise DatabaseException(msg, node)
 
diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py
index 5e833e40efb..c51d432c874 100644
--- a/core/dbt/parser/partial.py
+++ b/core/dbt/parser/partial.py
@@ -64,7 +64,8 @@ def __init__(self, saved_manifest: Manifest, new_files: MutableMapping[str, AnySourceFile]):
         self.project_parser_files = {}
         self.deleted_manifest = Manifest()
         self.macro_child_map: Dict[str, List[str]] = {}
-        self.env_vars_to_source_files = self.build_env_vars_to_source_files()
+        (self.env_vars_changed_source_files, self.env_vars_changed_schema_files) = \
+            self.build_env_vars_to_files()
         self.build_file_diff()
         self.processing_file = None
         self.deleted_special_override_macro = False
@@ -117,11 +118,19 @@ def build_file_diff(self):
             if self.saved_files[file_id].parse_file_type in mg_files:
                 changed_or_deleted_macro_file = True
             changed.append(file_id)
-        # handled changed env_vars for non-schema-files
-        for file_id in list(chain.from_iterable(self.env_vars_to_source_files.values())):
+
+        # handle changed env_vars for non-schema-files
+        for file_id in self.env_vars_changed_source_files:
             if file_id in deleted or file_id in changed:
                 continue
             changed.append(file_id)
+
+        # handle changed env_vars for schema files
+        for file_id in self.env_vars_changed_schema_files.keys():
+            if file_id in deleted_schema_files or file_id in changed_schema_files:
+                continue
+            changed_schema_files.append(file_id)
+
         file_diff = {
             "deleted": deleted,
             "deleted_schema_files": deleted_schema_files,
@@ -564,6 +573,10 @@ def handle_schema_file_changes(self, schema_file, saved_yaml_dict, new_yaml_dict):
         # loop through comparing previous dict_from_yaml with current dict_from_yaml
         # Need to do the deleted/added/changed thing, just like the files lists
 
+        env_var_changes = {}
+        if schema_file.file_id in self.env_vars_changed_schema_files:
+            env_var_changes = self.env_vars_changed_schema_files[schema_file.file_id]
+
         # models, seeds, snapshots, analyses
         for dict_key in ['models', 'seeds', 'snapshots', 'analyses']:
             key_diff = self.get_diff_for(dict_key, saved_yaml_dict, new_yaml_dict)
@@ -577,53 +590,95 @@ def handle_schema_file_changes(self, schema_file, saved_yaml_dict, new_yaml_dict):
             if key_diff['added']:
                 for elem in key_diff['added']:
                     self.merge_patch(schema_file, dict_key, elem)
+            # Handle schema file updates due to env_var changes
+            if dict_key in env_var_changes and dict_key in new_yaml_dict:
+                for name in env_var_changes[dict_key]:
+                    if name in key_diff['changed_or_deleted_names']:
+                        continue
+                    elem = self.get_schema_element(new_yaml_dict[dict_key], name)
+                    if elem:
+                        self.delete_schema_mssa_links(schema_file, dict_key, elem)
+                        self.merge_patch(schema_file, dict_key, elem)
 
         # sources
-        source_diff = self.get_diff_for('sources', saved_yaml_dict, new_yaml_dict)
+        dict_key = 'sources'
+        source_diff = self.get_diff_for(dict_key, saved_yaml_dict, new_yaml_dict)
         if source_diff['changed']:
             for source in source_diff['changed']:
                 if 'overrides' in source:  # This is a source patch; need to re-parse orig source
                     self.remove_source_override_target(source)
                 self.delete_schema_source(schema_file, source)
-                self.remove_tests(schema_file, 'sources', source['name'])
-                self.merge_patch(schema_file, 'sources', source)
+                self.remove_tests(schema_file, dict_key, source['name'])
+                self.merge_patch(schema_file, dict_key, source)
         if source_diff['deleted']:
             for source in source_diff['deleted']:
                 if 'overrides' in source:  # This is a source patch; need to re-parse orig source
                     self.remove_source_override_target(source)
                 self.delete_schema_source(schema_file, source)
-                self.remove_tests(schema_file, 'sources', source['name'])
+                self.remove_tests(schema_file, dict_key, source['name'])
         if source_diff['added']:
             for source in source_diff['added']:
                 if 'overrides' in source:  # This is a source patch; need to re-parse orig source
                     self.remove_source_override_target(source)
-                self.merge_patch(schema_file, 'sources', source)
+                self.merge_patch(schema_file, dict_key, source)
+        # Handle schema file updates due to env_var changes
+        if dict_key in env_var_changes and dict_key in new_yaml_dict:
+            for name in env_var_changes[dict_key]:
+                if name in source_diff['changed_or_deleted_names']:
+                    continue
+                source = self.get_schema_element(new_yaml_dict[dict_key], name)
+                if source:
+                    if 'overrides' in source:
+                        self.remove_source_override_target(source)
+                    self.delete_schema_source(schema_file, source)
+                    self.remove_tests(schema_file, dict_key, source['name'])
+                    self.merge_patch(schema_file, dict_key, source)
 
         # macros
-        macro_diff = self.get_diff_for('macros', saved_yaml_dict, new_yaml_dict)
+        dict_key = 'macros'
+        macro_diff = self.get_diff_for(dict_key, saved_yaml_dict, new_yaml_dict)
         if macro_diff['changed']:
             for macro in macro_diff['changed']:
                 self.delete_schema_macro_patch(schema_file, macro)
-                self.merge_patch(schema_file, 'macros', macro)
+                self.merge_patch(schema_file, dict_key, macro)
         if macro_diff['deleted']:
             for macro in macro_diff['deleted']:
                 self.delete_schema_macro_patch(schema_file, macro)
         if macro_diff['added']:
             for macro in macro_diff['added']:
-                self.merge_patch(schema_file, 'macros', macro)
+                self.merge_patch(schema_file, dict_key, macro)
+        # Handle schema file updates due to env_var changes
+        if dict_key in env_var_changes and dict_key in new_yaml_dict:
+            for name in env_var_changes[dict_key]:
+                if name in macro_diff['changed_or_deleted_names']:
+                    continue
+                elem = self.get_schema_element(new_yaml_dict[dict_key], name)
+                if elem:
+                    self.delete_schema_macro_patch(schema_file, elem)
+                    self.merge_patch(schema_file, dict_key, elem)
 
         # exposures
-        exposure_diff = self.get_diff_for('exposures', saved_yaml_dict, new_yaml_dict)
+        dict_key = 'exposures'
+        exposure_diff = self.get_diff_for(dict_key, saved_yaml_dict, new_yaml_dict)
         if exposure_diff['changed']:
             for exposure in exposure_diff['changed']:
                 self.delete_schema_exposure(schema_file, exposure)
-                self.merge_patch(schema_file, 'exposures', exposure)
+                self.merge_patch(schema_file, dict_key, exposure)
        if exposure_diff['deleted']:
             for exposure in exposure_diff['deleted']:
                 self.delete_schema_exposure(schema_file, exposure)
         if exposure_diff['added']:
             for exposure in exposure_diff['added']:
-                self.merge_patch(schema_file, 'exposures', exposure)
+                self.merge_patch(schema_file, dict_key, exposure)
+        # Handle schema file updates due to env_var changes
+        if dict_key in env_var_changes and dict_key in new_yaml_dict:
+            for name in env_var_changes[dict_key]:
+                if name in exposure_diff['changed_or_deleted_names']:
+                    continue
+                elem = self.get_schema_element(new_yaml_dict[dict_key], name)
+                if elem:
+                    self.delete_schema_exposure(schema_file, elem)
+                    self.merge_patch(schema_file, dict_key, elem)
 
     # Take a "section" of the schema file yaml dictionary from saved and new schema files
     # and determine which parts have changed
@@ -662,6 +717,7 @@ def get_diff_for(self, key, saved_yaml_dict, new_yaml_dict):
             "deleted": deleted_elements,
             "added": added_elements,
             "changed": changed_elements,
+            "changed_or_deleted_names": changed + deleted,
         }
         return diff
 
@@ -680,6 +736,7 @@ def merge_patch(self, schema_file, key, patch):
                     found = True
         if not found:
             pp_dict[key].append(patch)
+        schema_file.delete_from_env_vars(key, patch['name'])
         self.add_to_pp_files(schema_file)
 
     # For model, seed, snapshot, analysis schema dictionary keys,
@@ -806,36 +863,57 @@ def remove_source_override_target(self, source_dict):
 
     # This builds a dictionary of files that need to be scheduled for parsing
     # because the env var has changed.
-    def build_env_vars_to_source_files(self):
-        env_vars_to_source_files = {}
+    # source_files
+    #   env_vars_changed_source_files: [file_id, file_id...]
+    # schema_files
+    #   env_vars_changed_schema_files: {file_id: {"yaml_key": [name, ..]}}
+    def build_env_vars_to_files(self):
+        unchanged_vars = []
+        changed_vars = []
+        delete_vars = []
+        # Check whether the env_var has changed and add it to
+        # an unchanged or changed list
+        for env_var in self.saved_manifest.env_vars:
+            prev_value = self.saved_manifest.env_vars[env_var]
+            current_value = os.getenv(env_var)
+            if current_value is None:
+                # env_var no longer set, remove from manifest
+                delete_vars.append(env_var)
+            elif prev_value == current_value:
+                unchanged_vars.append(env_var)
+            else:  # prev_value != current_value
+                changed_vars.append(env_var)
+        for env_var in delete_vars:
+            del self.saved_manifest.env_vars[env_var]
+
+        env_vars_changed_source_files = []
+        env_vars_changed_schema_files = {}
         # The SourceFiles contain a list of env_vars that were used in the file.
-        # The SchemaSourceFiles contain a dictionary of env_vars to schema file blocks.
-        # Create a combined dictionary of env_vars to files that contain them.
+        # The SchemaSourceFiles contain a dictionary of yaml_key to schema entry names to
+        # a list of vars.
+        # Create a list of file_ids for source_files that need to be reparsed, and
+        # a dictionary of file_ids to yaml_keys to names.
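+        # For example (hypothetical file ids), one changed env var might yield:
+        #     env_vars_changed_source_files = ['my_test://models/env_var_model.sql']
+        #     env_vars_changed_schema_files = {
+        #         'my_test://models/schema.yml': {'models': ['model_one']},
+        #     }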
         for source_file in self.saved_files.values():
-            if source_file.parse_file_type == ParseFileType.Schema:
+            file_id = source_file.file_id
+            if not source_file.env_vars:
                 continue
-            for env_var in source_file.env_vars:
-                if env_var not in env_vars_to_source_files:
-                    env_vars_to_source_files[env_var] = []
-                env_vars_to_source_files[env_var].append(source_file.file_id)
-
-        # Check whether the env_var has changed. If it hasn't, remove the env_var
-        # from env_vars_to_source_files so that we can use it as dictionary of
-        # which files need to be scheduled for parsing.
-        delete_unchanged_vars = []
-        for env_var in env_vars_to_source_files.keys():
-            prev_value = None
-            if env_var in self.saved_manifest.env_vars:
-                prev_value = self.saved_manifest.env_vars[env_var]
-            current_value = os.getenv(env_var)
-            if prev_value == current_value:
-                delete_unchanged_vars.append(env_var)
-                if current_value is None:
-                    # env_var no longer set, remove from manifest
-                    del self.saved_manifest.env_vars[env_var]
-
-        # Actually remove the vars that haven't changed
-        for env_var in delete_unchanged_vars:
-            del env_vars_to_source_files[env_var]
-
-        return env_vars_to_source_files
+            if source_file.parse_file_type == ParseFileType.Schema:
+                for yaml_key in source_file.env_vars.keys():
+                    for name in source_file.env_vars[yaml_key].keys():
+                        for env_var in source_file.env_vars[yaml_key][name]:
+                            if env_var in changed_vars:
+                                if file_id not in env_vars_changed_schema_files:
+                                    env_vars_changed_schema_files[file_id] = {}
+                                if yaml_key not in env_vars_changed_schema_files[file_id]:
+                                    env_vars_changed_schema_files[file_id][yaml_key] = []
+                                if name not in env_vars_changed_schema_files[file_id][yaml_key]:
+                                    env_vars_changed_schema_files[file_id][yaml_key].append(name)
+                                break  # if one env_var is changed we can stop
+
+            else:
+                for env_var in source_file.env_vars:
+                    if env_var in changed_vars:
+                        env_vars_changed_source_files.append(file_id)
+                        break  # if one env_var is changed we can stop
+
+        return (env_vars_changed_source_files, env_vars_changed_schema_files)
diff --git a/core/dbt/parser/schema_renderer.py b/core/dbt/parser/schema_renderer.py
new file mode 100644
index 00000000000..2483bb8579c
--- /dev/null
+++ b/core/dbt/parser/schema_renderer.py
@@ -0,0 +1,68 @@
+from typing import Dict, Any
+
+from dbt.config.renderer import BaseRenderer, Keypath
+
+
+# This class renders dictionaries derived from "schema" yaml files.
+# It calls Jinja on strings (in deep_map_render), except for certain
+# keys which are skipped because they need to be rendered later
+# (tests and description).
+class SchemaYamlRenderer(BaseRenderer):
+    def __init__(self, context: Dict[str, Any], key: str) -> None:
+        super().__init__(context)
+        self.key = key
+
+    @property
+    def name(self):
+        return 'Rendering yaml'
+
+    def _is_norender_key(self, keypath: Keypath) -> bool:
+        """
+        models:
+            - name: blah
+              description: blah
+              tests: ...
+              columns:
+                - name:
+                  description: blah
+                  tests: ...
+
+        Return True if it's tests or description - those aren't rendered now
+        because they're rendered later in parse_generic_tests or process_docs.
+ """ + if len(keypath) >= 1 and keypath[0] in ('tests', 'description'): + return True + + if len(keypath) == 2 and keypath[1] in ('tests', 'description'): + return True + + if ( + len(keypath) >= 3 and + keypath[0] == 'columns' and + keypath[2] in ('tests', 'description') + ): + return True + + return False + + # don't render descriptions or test keyword arguments + def should_render_keypath(self, keypath: Keypath) -> bool: + if len(keypath) < 1: + return True + + if self.key == 'sources': + if keypath[0] == 'description': + return False + if keypath[0] == 'tables': + if self._is_norender_key(keypath[2:]): + return False + elif self.key == 'macros': + if keypath[0] == 'arguments': + if self._is_norender_key(keypath[1:]): + return False + elif self._is_norender_key(keypath[0:]): + return False + else: # models, seeds, snapshots, analyses + if self._is_norender_key(keypath[0:]): + return False + return True diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 4f770a806e5..27366b5d14f 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -13,11 +13,11 @@ from dbt.adapters.factory import get_adapter, get_adapter_package_names from dbt.clients.jinja import get_rendered, add_rendered_test_kwargs from dbt.clients.yaml_helper import load_yaml_text -from dbt.config.renderer import SchemaYamlRenderer +from dbt.parser.schema_renderer import SchemaYamlRenderer from dbt.context.context_config import ( ContextConfig, ) -from dbt.context.configured import generate_schema_yml_context +from dbt.context.configured import generate_schema_yml_context, SchemaYamlVars from dbt.context.providers import ( generate_parse_exposure, generate_test_context ) @@ -46,7 +46,7 @@ from dbt.exceptions import ( warn_invalid_patch, validator_error_message, JSONValidationException, raise_invalid_schema_yml_version, ValidationException, - CompilationException, raise_duplicate_patch_name, + ParsingException, raise_duplicate_patch_name, raise_duplicate_macro_patch_name, InternalException, raise_duplicate_source_patch_name, warn_or_error, ) @@ -107,7 +107,7 @@ def yaml_from_file( return load_yaml_text(source_file.contents) except ValidationException as e: reason = validator_error_message(e) - raise CompilationException( + raise ParsingException( 'Error reading {}: {} - {}' .format(source_file.project_name, path, reason) ) @@ -168,12 +168,12 @@ def __init__( ) -> None: super().__init__(project, manifest, root_project) + self.schema_yaml_vars = SchemaYamlVars() self.render_ctx = generate_schema_yml_context( - self.root_project, self.project.project_name + self.root_project, + self.project.project_name, + self.schema_yaml_vars ) - - self.raw_renderer = SchemaYamlRenderer(self.render_ctx) - internal_package_names = get_adapter_package_names( self.root_project.credentials.type ) @@ -216,6 +216,7 @@ def create_test_node( name: str, raw_sql: str, test_metadata: Dict[str, Any], + file_key_name: str, column_name: Optional[str], ) -> ParsedGenericTestNode: @@ -256,6 +257,7 @@ def get_hashable_md( 'test_metadata': test_metadata, 'column_name': column_name, 'checksum': FileHash.empty().to_dict(omit_none=True), + 'file_key_name': file_key_name, } try: ParsedGenericTestNode.validate(dct) @@ -270,7 +272,7 @@ def get_hashable_md( original_file_path=target.original_file_path, raw_sql=raw_sql, ) - raise CompilationException(msg, node=node) from exc + raise ParsingException(msg, node=node) from exc # lots of time spent in this method def _parse_generic_test( @@ -279,6 +281,7 @@ def _parse_generic_test( 
         test: Dict[str, Any],
         tags: List[str],
         column_name: Optional[str],
+        schema_file_id: str,
     ) -> ParsedGenericTestNode:
 
         try:
             builder = TestBuilder(
                 test=test,
                 target=target,
                 column_name=column_name,
                 package_name=target.package_name,
                 render_ctx=self.render_ctx,
             )
-        except CompilationException as exc:
+            if self.schema_yaml_vars.env_vars:
+                self.store_env_vars(target, schema_file_id, self.schema_yaml_vars.env_vars)
+                self.schema_yaml_vars.env_vars = {}
+
+        except ParsingException as exc:
             context = _trimmed(str(target))
             msg = (
                 'Invalid test config given in {}:'
                 '\n\t{}\n\t@: {}'
                 .format(target.original_file_path, exc.msg, context)
             )
-            raise CompilationException(msg) from exc
+            raise ParsingException(msg) from exc
 
         original_name = os.path.basename(target.original_file_path)
         compiled_path = get_pseudo_test_path(builder.compiled_name, original_name)
@@ -316,6 +323,11 @@ def _parse_generic_test(
         }
         tags = sorted(set(itertools.chain(tags, builder.tags())))
 
+        if isinstance(target, UnpatchedSourceDefinition):
+            file_key_name = f"{target.source.yaml_key}.{target.source.name}"
+        else:
+            file_key_name = f"{target.yaml_key}.{target.name}"
+
         node = self.create_test_node(
             target=target,
             path=compiled_path,
@@ -326,11 +338,23 @@ def _parse_generic_test(
             raw_sql=builder.build_raw_sql(),
             column_name=column_name,
             test_metadata=metadata,
+            file_key_name=file_key_name,
         )
-        self.render_test_update(node, config, builder)
+        self.render_test_update(node, config, builder, schema_file_id)
 
         return node
 
+    def store_env_vars(self, target, schema_file_id, env_vars):
+        self.manifest.env_vars.update(env_vars)
+        if schema_file_id in self.manifest.files:
+            schema_file = self.manifest.files[schema_file_id]
+            search_name = target.search_name
+            yaml_key = target.yaml_key
+            if '.' in search_name:  # source file definitions
+                (search_name, _) = search_name.split('.')
+            for var in env_vars.keys():
+                schema_file.add_env_var(var, yaml_key, search_name)
+
     # This does special shortcut processing for the two
     # most common internal macros, not_null and unique,
     # which avoids the jinja rendering to resolve config
     # and variables, etc, which might be in the macro.
     # In the future we will look at generalizing this
     # more to handle additional macros or to use static
     # parsing to avoid jinja overhead.
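     # For example (hypothetical names): a "unique" test on a model in package
     # my_project resolves macro_unique_id in the method below to
     # 'macro.dbt.test_unique', which takes the shortcut path and skips Jinja
     # rendering entirely.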
-    def render_test_update(self, node, config, builder):
+    def render_test_update(self, node, config, builder, schema_file_id):
         macro_unique_id = self.macro_resolver.get_macro_id(
             node.package_name, 'test_' + builder.name)
         # Add the depends_on here so we can limit the macros added
@@ -370,10 +394,11 @@ def render_test_update(self, node, config, builder, schema_file_id):
                     node.raw_sql, context, node, capture_macros=True
                 )
                 self.update_parsed_node_config(node, config)
+                # env_vars should have been updated in the context env_var method
             except ValidationError as exc:
                 # we got a ValidationError - probably bad types in config()
                 msg = validator_error_message(exc)
-                raise CompilationException(msg, node=node) from exc
+                raise ParsingException(msg, node=node) from exc
 
     def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode:
         """In schema parsing, we rewrite most of the part of parse_node that
@@ -385,6 +410,7 @@ def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode:
             test=block.test,
             tags=block.tags,
             column_name=block.column_name,
+            schema_file_id=block.file.file_id,
         )
         self.add_test_node(block, node)
         return node
@@ -456,15 +482,6 @@ def parse_file(self, block: FileBlock, dct: Dict = None) -> None:
             dct = yaml_from_file(block.file)
 
         if dct:
-            try:
-                # This does a deep_map which will fail if there are circular references
-                dct = self.raw_renderer.render_data(dct)
-            except CompilationException as exc:
-                raise CompilationException(
-                    f'Failed to render {block.path.original_file_path} from '
-                    f'project {self.project.project_name}: {exc}'
-                ) from exc
-
             # contains the FileBlock and the data (dictionary)
             yaml_block = YamlBlock.from_file_block(block, dct)
 
@@ -559,6 +576,13 @@ def __init__(
         # analyses, exposures
         self.key = key
         self.yaml = yaml
+        self.schema_yaml_vars = SchemaYamlVars()
+        self.render_ctx = generate_schema_yml_context(
+            self.schema_parser.root_project,
+            self.schema_parser.project.project_name,
+            self.schema_yaml_vars
+        )
+        self.renderer = SchemaYamlRenderer(self.render_ctx, self.key)
 
     @property
     def manifest(self):
@@ -582,7 +606,7 @@ def root_project(self):
     def get_key_dicts(self) -> Iterable[Dict[str, Any]]:
         data = self.yaml.data.get(self.key, [])
         if not isinstance(data, list):
-            raise CompilationException(
+            raise ParsingException(
                 '{} must be a list, got {} instead: ({})'
                 .format(self.key, type(data), _trimmed(str(data)))
             )
 
         # for each dict in the data (which is a list of dicts)
         for entry in data:
+
             # check that entry is a dict and that all dict values
             # are strings
-            if coerce_dict_str(entry) is not None:
-                yield entry
-            else:
+            if coerce_dict_str(entry) is None:
                 msg = error_context(
                     path, self.key, data, 'expected a dict with string keys'
                 )
-                raise CompilationException(msg)
+                raise ParsingException(msg)
+
+            if 'name' not in entry:
+                raise ParsingException("Entry did not contain a name")
+
+            # render the data
+            entry = self.render_entry(entry)
+            if self.schema_yaml_vars.env_vars:
+                self.schema_parser.manifest.env_vars.update(self.schema_yaml_vars.env_vars)
+                schema_file = self.yaml.file
+                assert isinstance(schema_file, SchemaSourceFile)
+                for var in self.schema_yaml_vars.env_vars.keys():
+                    schema_file.add_env_var(var, self.key, entry['name'])
+                self.schema_yaml_vars.env_vars = {}
+
+            yield entry
+
+    def render_entry(self, dct):
+        try:
+            # This does a deep_map which will fail if there are circular references
+            dct = self.renderer.render_data(dct)
+        except ParsingException as exc:
+            raise ParsingException(
+                f'Failed to render {self.yaml.file.path.original_file_path} from '
+                f'project {self.project.project_name}: {exc}'
+            ) from exc
+        return dct
 
 
 class YamlDocsReader(YamlReader):
@@ -619,7 +668,7 @@ def _target_from_dict(self, cls: Type[T], data: Dict[str, Any]) -> T:
             return cls.from_dict(data)
         except (ValidationError, JSONValidationException) as exc:
             msg = error_context(path, self.key, data, exc)
-            raise CompilationException(msg) from exc
+            raise ParsingException(msg) from exc
 
     # The other parse method returns TestBlocks. This one doesn't.
     # This takes the yaml dictionaries in 'sources' keys and uses them
@@ -747,7 +796,7 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]:
                 node = self._target_type().from_dict(data)
             except (ValidationError, JSONValidationException) as exc:
                 msg = error_context(path, self.key, data, exc)
-                raise CompilationException(msg) from exc
+                raise ParsingException(msg) from exc
             else:
                 yield node
 
@@ -756,7 +805,7 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]:
     def normalize_meta_attribute(self, data, path):
         if 'meta' in data:
             if 'config' in data and 'meta' in data['config']:
-                raise CompilationException(f"""
+                raise ParsingException(f"""
In {path}: found meta dictionary in 'config' dictionary and as top-level key.
Remove the top-level key and define it under 'config' dictionary only.
""".strip())
@@ -951,6 +1000,6 @@ def parse(self) -> Iterable[ParsedExposure]:
                 unparsed = UnparsedExposure.from_dict(data)
             except (ValidationError, JSONValidationException) as exc:
                 msg = error_context(self.yaml.path, self.key, data, exc)
-                raise CompilationException(msg) from exc
+                raise ParsingException(msg) from exc
             parsed = self.parse_exposure(unparsed)
             yield parsed
diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py
index 98eb418f4e0..7b419e85042 100644
--- a/core/dbt/parser/sources.py
+++ b/core/dbt/parser/sources.py
@@ -268,7 +268,8 @@ def parse_source_test(
             target=target,
             test=test,
             tags=tags,
-            column_name=column_name
+            column_name=column_name,
+            schema_file_id=target.file_id,
         )
         return node
 
diff --git a/core/dbt/utils.py b/core/dbt/utils.py
index 9d16aff2e50..2564adad4cb 100644
--- a/core/dbt/utils.py
+++ b/core/dbt/utils.py
@@ -165,7 +165,7 @@ def deep_merge_item(destination, key, value):
         destination[key] = value
 
 
-def _deep_map(
+def _deep_map_render(
     func: Callable[[Any, Tuple[Union[str, int], ...]], Any],
     value: Any,
     keypath: Tuple[Union[str, int], ...],
@@ -176,12 +176,12 @@
 
     if isinstance(value, list):
         ret = [
-            _deep_map(func, v, (keypath + (idx,)))
+            _deep_map_render(func, v, (keypath + (idx,)))
             for idx, v in enumerate(value)
         ]
     elif isinstance(value, dict):
         ret = {
-            k: _deep_map(func, v, (keypath + (str(k),)))
+            k: _deep_map_render(func, v, (keypath + (str(k),)))
             for k, v in value.items()
         }
     elif isinstance(value, atomic_types):
@@ -190,20 +190,24 @@
         container_types: Tuple[Type[Any], ...] = (list, dict)
         ok_types = container_types + atomic_types
         raise dbt.exceptions.DbtConfigError(
-            'in _deep_map, expected one of {!r}, got {!r}'
+            'in _deep_map_render, expected one of {!r}, got {!r}'
             .format(ok_types, type(value))
         )
 
     return ret
 
 
-def deep_map(
+def deep_map_render(
     func: Callable[[Any, Tuple[Union[str, int], ...]], Any],
     value: Any
 ) -> Any:
-    """map the function func() onto each non-container value in 'value'
+    """ This function renders a nested dictionary derived from a yaml
+    file. It is used to render dbt_project.yml, profiles.yml, and
+    schema files.
+
+    It maps the function func() onto each non-container value in 'value'
     recursively, returning a new value. As long as func does not manipulate
-    value, then deep_map will also not manipulate it.
+    value, then deep_map_render will also not manipulate it.
 
     value should be a value returned by `yaml.safe_load` or `json.load` - the
     only expected types are list, dict, native python number, str, NoneType,
@@ -217,11 +221,11 @@
         dbt.exceptions.RecursionException
     """
     try:
-        return _deep_map(func, value, ())
+        return _deep_map_render(func, value, ())
     except RuntimeError as exc:
         if 'maximum recursion depth exceeded' in str(exc):
             raise dbt.exceptions.RecursionException(
-                'Cycle detected in deep_map'
+                'Cycle detected in deep_map_render'
             )
         raise
 
diff --git a/test/integration/008_schema_tests_test/test_schema_v2_tests.py b/test/integration/008_schema_tests_test/test_schema_v2_tests.py
index be792d2b970..239a29876d4 100644
--- a/test/integration/008_schema_tests_test/test_schema_v2_tests.py
+++ b/test/integration/008_schema_tests_test/test_schema_v2_tests.py
@@ -2,7 +2,7 @@
 import os
 
 from dbt.task.test import TestTask
-from dbt.exceptions import CompilationException
+from dbt.exceptions import CompilationException, ParsingException
 from dbt.contracts.results import TestStatus
@@ -353,7 +353,7 @@ def run_schema_validations(self):
 
     @use_profile('postgres')
     def test_postgres_malformed_schema_will_break_run(self):
-        with self.assertRaises(CompilationException):
+        with self.assertRaises(ParsingException):
             self.run_dbt()
 
diff --git a/test/integration/039_config_test/test_configs_in_schema_files.py b/test/integration/039_config_test/test_configs_in_schema_files.py
index fc93e9aa9fd..31badb8d041 100644
--- a/test/integration/039_config_test/test_configs_in_schema_files.py
+++ b/test/integration/039_config_test/test_configs_in_schema_files.py
@@ -2,7 +2,7 @@
 import shutil
 
 from test.integration.base import DBTIntegrationTest, use_profile, get_manifest, normalize
-from dbt.exceptions import CompilationException
+from dbt.exceptions import CompilationException, ParsingException
 
 
 class TestSchemaFileConfigs(DBTIntegrationTest):
@@ -98,7 +98,7 @@ def test_postgres_config_layering(self):
 
         # copy a schema file with multiple metas
         shutil.copyfile('extra-alt/untagged.yml', 'models-alt/untagged.yml')
-        with self.assertRaises(CompilationException):
+        with self.assertRaises(ParsingException):
             results = self.run_dbt(["run"])
 
         # copy a schema file with config key in top-level of test and in config dict
diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml
new file mode 100644
index 00000000000..f8cf1ed9d67
--- /dev/null
+++ b/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml
@@ -0,0 +1,6 @@
+version: 2
+
+models:
+  - name: model_one
+    config:
+      materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
diff --git a/test/integration/068_partial_parsing_tests/test_pp_vars.py b/test/integration/068_partial_parsing_tests/test_pp_vars.py
index 6adbe116a20..641a5c32093 100644
--- a/test/integration/068_partial_parsing_tests/test_pp_vars.py
+++ b/test/integration/068_partial_parsing_tests/test_pp_vars.py
@@ -1,4 +1,4 @@
-from dbt.exceptions import CompilationException, UndefinedMacroException
+from dbt.exceptions import CompilationException, ParsingException
 from dbt.contracts.graph.manifest import Manifest
 from dbt.contracts.files import ParseFileType
 from dbt.contracts.results import TestStatus
@@ -28,11 +28,6 @@ def project_config(self):
             'data-paths': ['seeds'],
             'test-paths': ['tests'],
             'macro-paths': ['macros'],
-            'analysis-paths': ['analyses'],
-            'snapshot-paths': ['snapshots'],
-            'seeds': {
-                'quote_columns': False,
-            },
         }
 
     def setup_directories(self):
@@ -41,10 +36,7 @@ def setup_directories(self):
         # delete files in this directory without tests interfering with each other.
         os.mkdir(os.path.join(self.test_root_dir, 'models'))
         os.mkdir(os.path.join(self.test_root_dir, 'tests'))
-        os.mkdir(os.path.join(self.test_root_dir, 'seeds'))
         os.mkdir(os.path.join(self.test_root_dir, 'macros'))
-        os.mkdir(os.path.join(self.test_root_dir, 'analyses'))
-        os.mkdir(os.path.join(self.test_root_dir, 'snapshots'))
 
@@ -61,7 +53,7 @@ def test_postgres_env_vars_models(self):
 
         # copy a file with an env_var call without an env_var
         self.copy_file('test-files/env_var_model.sql', 'models/env_var_model.sql')
-        with self.assertRaises(UndefinedMacroException):
+        with self.assertRaises(ParsingException):
             results = self.run_dbt(["--partial-parse", "run"])
 
         # set the env var
@@ -84,5 +76,18 @@ def test_postgres_env_vars_models(self):
         self.assertEqual(expected_env_vars, manifest.env_vars)
         self.assertNotEqual(model_created_at, manifest.nodes[model_id].created_at)
 
+        # set an env_var in a schema file
+        self.copy_file('test-files/env_var_schema.yml', 'models/schema.yml')
+        with self.assertRaises(ParsingException):
+            results = self.run_dbt(["--partial-parse", "run"])
+
+        # actually set the env_var
+        os.environ['TEST_SCHEMA_VAR'] = 'view'
+        results = self.run_dbt(["--partial-parse", "run"])
+        manifest = get_manifest()
+        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view"}
+        self.assertEqual(expected_env_vars, manifest.env_vars)
+
+        # delete the env var to cleanup
         del os.environ['ENV_VAR_TEST']
 
diff --git a/test/unit/test_utils.py b/test/unit/test_utils.py
index e4b2f005fc5..9b314298304 100644
--- a/test/unit/test_utils.py
+++ b/test/unit/test_utils.py
@@ -87,10 +87,10 @@ def test__simple_cases(self):
                 },
             ],
         }
-        actual = dbt.utils.deep_map(self.intify_all, self.input_value)
+        actual = dbt.utils.deep_map_render(self.intify_all, self.input_value)
         self.assertEqual(actual, expected)
 
-        actual = dbt.utils.deep_map(self.intify_all, expected)
+        actual = dbt.utils.deep_map_render(self.intify_all, expected)
         self.assertEqual(actual, expected)
 
     @staticmethod
@@ -119,24 +119,24 @@ def test__keypath(self):
                 },
             ],
         }
-        actual = dbt.utils.deep_map(self.special_keypath, self.input_value)
+        actual = dbt.utils.deep_map_render(self.special_keypath, self.input_value)
         self.assertEqual(actual, expected)
 
-        actual = dbt.utils.deep_map(self.special_keypath, expected)
+        actual = dbt.utils.deep_map_render(self.special_keypath, expected)
         self.assertEqual(actual, expected)
 
     def test__noop(self):
-        actual = dbt.utils.deep_map(lambda x, _: x, self.input_value)
+        actual = dbt.utils.deep_map_render(lambda x, _: x, self.input_value)
         self.assertEqual(actual, self.input_value)
 
     def test_trivial(self):
         cases = [[], {}, 1, 'abc', None, True]
         for case in cases:
-            result = dbt.utils.deep_map(lambda x, _: x, case)
+            result = dbt.utils.deep_map_render(lambda x, _: x, case)
             self.assertEqual(result, case)
 
         with self.assertRaises(dbt.exceptions.DbtConfigError):
-            dbt.utils.deep_map(lambda x, _: x, {'foo': object()})
+            dbt.utils.deep_map_render(lambda x, _: x, {'foo': object()})
 
 
 class TestBytesFormatting(unittest.TestCase):
diff --git a/test/unit/test_yaml_renderer.py b/test/unit/test_yaml_renderer.py
new file mode 100644
index 00000000000..758067ba484
--- /dev/null
+++ b/test/unit/test_yaml_renderer.py
@@ -0,0 +1,103 @@
+import unittest
+
+import dbt.exceptions
+import dbt.utils
+from dbt.parser.schema_renderer import SchemaYamlRenderer
+
+
+class TestYamlRendering(unittest.TestCase):
+
+    def test__models(self):
+
+        context = {
+            "test_var": "1234",
+            "alt_var": "replaced",
+        }
+        renderer = SchemaYamlRenderer(context, 'models')
+
+        # Verify description is not rendered and misc attribute is rendered
+        dct = {
+            "name": "my_model",
+            "description": "{{ test_var }}",
+            "attribute": "{{ test_var }}",
+        }
+        expected = {
+            "name": "my_model",
+            "description": "{{ test_var }}",
+            "attribute": "1234",
+        }
+        dct = renderer.render_data(dct)
+        self.assertEqual(expected, dct)
+
+        # Verify description in columns is not rendered
+        dct = {
+            'name': 'my_test',
+            'attribute': "{{ test_var }}",
+            'columns': [
+                {'description': "{{ test_var }}", 'name': 'id'},
+            ]
+        }
+        expected = {
+            'name': 'my_test',
+            'attribute': "1234",
+            'columns': [
+                {'description': "{{ test_var }}", 'name': 'id'},
+            ]
+        }
+        dct = renderer.render_data(dct)
+        self.assertEqual(expected, dct)
+
+    def test__sources(self):
+
+        context = {
+            "test_var": "1234",
+            "alt_var": "replaced",
+        }
+        renderer = SchemaYamlRenderer(context, 'sources')
+
+        # Only descriptions have jinja, none should be rendered
+        dct = {
+            "name": "my_source",
+            "description": "{{ alt_var }}",
+            "tables": [
+                {
+                    "name": "my_table",
+                    "description": "{{ alt_var }}",
+                    "columns": [
+                        {
+                            "name": "id",
+                            "description": "{{ alt_var }}",
+                        }
+                    ]
+                }
+            ]
+        }
+        rendered = renderer.render_data(dct)
+        self.assertEqual(dct, rendered)
+
+    def test__macros(self):
+
+        context = {
+            "test_var": "1234",
+            "alt_var": "replaced",
+        }
+        renderer = SchemaYamlRenderer(context, 'macros')
+
+        # Look for description in arguments
+        dct = {
+            "name": "my_macro",
+            "arguments": [
+                {"name": "my_arg", "attr": "{{ alt_var }}"},
+                {"name": "an_arg", "description": "{{ alt_var }}"}
+            ]
+        }
+        expected = {
+            "name": "my_macro",
+            "arguments": [
+                {"name": "my_arg", "attr": "replaced"},
+                {"name": "an_arg", "description": "{{ alt_var }}"}
+            ]
+        }
+        dct = renderer.render_data(dct)
+        self.assertEqual(dct, expected)
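Tying the pieces above together, here is a hedged end-to-end sketch of rendering one schema entry. The setup is simplified: a plain dict context with a hand-rolled `env_var` that records usage the way `SchemaYamlVars.env_vars` does via `SchemaYamlContext.env_var`; the real parser wires this up through `generate_schema_yml_context`:

```python
import os
from dbt.parser.schema_renderer import SchemaYamlRenderer

collected = {}


def env_var(var, default=None):
    # simplified stand-in for SchemaYamlContext.env_var: record what was used
    value = os.environ.get(var, default)
    if value is None:
        raise Exception(f"Env var required but not provided: '{var}'")
    collected[var] = value
    return value


os.environ['TEST_SCHEMA_VAR'] = 'view'
renderer = SchemaYamlRenderer({'env_var': env_var}, 'models')
entry = {
    'name': 'model_one',
    'description': "{{ env_var('TEST_SCHEMA_VAR') }}",  # skipped, rendered later
    'config': {'materialized': "{{ env_var('TEST_SCHEMA_VAR') }}"},
}
rendered = renderer.render_data(entry)
assert rendered['config']['materialized'] == 'view'
assert rendered['description'] == "{{ env_var('TEST_SCHEMA_VAR') }}"
assert collected == {'TEST_SCHEMA_VAR': 'view'}
```

The recorded var/value pair is what partial parsing later compares against `os.getenv` in `build_env_vars_to_files` to decide which schema entries to reparse.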