From d379e9e45c4387145da940aa41569f4cb2612700 Mon Sep 17 00:00:00 2001 From: Mike Alfare <13974384+mikealfare@users.noreply.github.com> Date: Mon, 29 Apr 2024 20:29:46 -0400 Subject: [PATCH 01/15] Release prep for `1.8.0b5` (#75) --- .changes/1.8.0-b5.md | 10 ++++++++++ .../Features-20240323-160222.yaml | 0 .../{unreleased => 1.8.0}/Fixes-20240423-180916.yaml | 0 .../{unreleased => 1.8.0}/Fixes-20240425-133401.yaml | 0 CHANGELOG.md | 11 +++++++++++ dbt/adapters/postgres/__version__.py | 2 +- 6 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 .changes/1.8.0-b5.md rename .changes/{unreleased => 1.8.0}/Features-20240323-160222.yaml (100%) rename .changes/{unreleased => 1.8.0}/Fixes-20240423-180916.yaml (100%) rename .changes/{unreleased => 1.8.0}/Fixes-20240425-133401.yaml (100%) diff --git a/.changes/1.8.0-b5.md b/.changes/1.8.0-b5.md new file mode 100644 index 00000000..196d68a7 --- /dev/null +++ b/.changes/1.8.0-b5.md @@ -0,0 +1,10 @@ +## dbt-postgres 1.8.0-b5 - April 29, 2024 + +### Features + +* Debug log when `type_code` fails to convert to a `data_type` + +### Fixes + +* remove materialized views from renambeable relation and remove a quote +* Replace usage of `Set` with `List` to fix issue with index updates intermittently happening out of order diff --git a/.changes/unreleased/Features-20240323-160222.yaml b/.changes/1.8.0/Features-20240323-160222.yaml similarity index 100% rename from .changes/unreleased/Features-20240323-160222.yaml rename to .changes/1.8.0/Features-20240323-160222.yaml diff --git a/.changes/unreleased/Fixes-20240423-180916.yaml b/.changes/1.8.0/Fixes-20240423-180916.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240423-180916.yaml rename to .changes/1.8.0/Fixes-20240423-180916.yaml diff --git a/.changes/unreleased/Fixes-20240425-133401.yaml b/.changes/1.8.0/Fixes-20240425-133401.yaml similarity index 100% rename from .changes/unreleased/Fixes-20240425-133401.yaml rename to .changes/1.8.0/Fixes-20240425-133401.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index f9ef4153..df8d5dec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,17 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), and is generated by [Changie](https://github.com/miniscruff/changie). 
+## dbt-postgres 1.8.0-b5 - April 29, 2024 + +### Features + +* Debug log when `type_code` fails to convert to a `data_type` + +### Fixes + +* remove materialized views from renambeable relation and remove a quote +* Replace usage of `Set` with `List` to fix issue with index updates intermittently happening out of order + ## dbt-postgres 1.8.0-b2 - April 03, 2024 ### Under the Hood diff --git a/dbt/adapters/postgres/__version__.py b/dbt/adapters/postgres/__version__.py index 6b76061f..c904307f 100644 --- a/dbt/adapters/postgres/__version__.py +++ b/dbt/adapters/postgres/__version__.py @@ -1 +1 @@ -version = "1.8.0b4" +version = "1.8.0b5" From 18b93ae3cca1d55638cc55ee8b8f72e4270d154d Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Wed, 1 May 2024 14:46:21 -0700 Subject: [PATCH 02/15] Add branch param to release job (#79) Co-authored-by: Mila Page --- .github/workflows/release.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 69542465..27fb9f4f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,6 +10,11 @@ on: - prod - test default: prod + ref: + description: "The ref (sha or branch name) to use" + type: string + default: "main" + required: true permissions: read-all @@ -33,6 +38,7 @@ jobs: uses: actions/checkout@v4 with: persist-credentials: false + ref: "${{ inputs.ref }}" - name: Setup `hatch` uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main From 3c90ecb850859be6d74fe5444fab9c2bbbfb5308 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Wed, 1 May 2024 15:26:48 -0700 Subject: [PATCH 03/15] Bump version to new alpha (#80) Co-authored-by: Mila Page --- .changes/0.0.0.md | 3 ++ .changes/1.0.8-b3.md | 9 ----- .changes/1.8.0-b2.md | 14 ------- .changes/1.8.0-b5.md | 10 ----- .../1.8.0/Dependencies-20240328-133507.yaml | 6 --- .../1.8.0/Dependencies-20240403-135902.yaml | 6 --- .changes/1.8.0/Features-20240323-160222.yaml | 6 --- .changes/1.8.0/Fixes-20240423-180916.yaml | 6 --- .changes/1.8.0/Fixes-20240425-133401.yaml | 6 --- .changes/1.8.0/Security-20240327-193942.yaml | 6 --- .../1.8.0/Under the Hood-20240226-225642.yaml | 6 --- CHANGELOG.md | 38 ++----------------- dbt/adapters/postgres/__version__.py | 2 +- 13 files changed, 7 insertions(+), 111 deletions(-) create mode 100644 .changes/0.0.0.md delete mode 100644 .changes/1.0.8-b3.md delete mode 100644 .changes/1.8.0-b2.md delete mode 100644 .changes/1.8.0-b5.md delete mode 100644 .changes/1.8.0/Dependencies-20240328-133507.yaml delete mode 100644 .changes/1.8.0/Dependencies-20240403-135902.yaml delete mode 100644 .changes/1.8.0/Features-20240323-160222.yaml delete mode 100644 .changes/1.8.0/Fixes-20240423-180916.yaml delete mode 100644 .changes/1.8.0/Fixes-20240425-133401.yaml delete mode 100644 .changes/1.8.0/Security-20240327-193942.yaml delete mode 100644 .changes/1.8.0/Under the Hood-20240226-225642.yaml diff --git a/.changes/0.0.0.md b/.changes/0.0.0.md new file mode 100644 index 00000000..660fbd3b --- /dev/null +++ b/.changes/0.0.0.md @@ -0,0 +1,3 @@ +## Previous Releases +For information on prior major and minor releases, see their changelogs: +- [1.8](https://github.com/dbt-labs/dbt-postgres/blob/1.8.latest/CHANGELOG.md) diff --git a/.changes/1.0.8-b3.md b/.changes/1.0.8-b3.md deleted file mode 100644 index d73520b5..00000000 --- a/.changes/1.0.8-b3.md +++ /dev/null @@ -1,9 +0,0 @@ -## dbt-postgres 1.0.8-b3 - April 
16, 2024 - -### Fixes - -* Determine `psycopg2` based on `platform_system` (Linux or other), remove usage of `DBT_PSYCOPG2_NAME` environment variable - -### Under the Hood - -* Update dependabot configuration to cover GHA diff --git a/.changes/1.8.0-b2.md b/.changes/1.8.0-b2.md deleted file mode 100644 index 193206cc..00000000 --- a/.changes/1.8.0-b2.md +++ /dev/null @@ -1,14 +0,0 @@ -## dbt-postgres 1.8.0-b2 - April 03, 2024 - -### Under the Hood - -* Add unit test for transaction semantics. - -### Dependencies - -* add "no-binary" install option -* Add `dbt-core` as a dependency to preserve backwards compatibility for installation - -### Security - -* Pin `black>=24.3` in `pyproject.toml` diff --git a/.changes/1.8.0-b5.md b/.changes/1.8.0-b5.md deleted file mode 100644 index 196d68a7..00000000 --- a/.changes/1.8.0-b5.md +++ /dev/null @@ -1,10 +0,0 @@ -## dbt-postgres 1.8.0-b5 - April 29, 2024 - -### Features - -* Debug log when `type_code` fails to convert to a `data_type` - -### Fixes - -* remove materialized views from renambeable relation and remove a quote -* Replace usage of `Set` with `List` to fix issue with index updates intermittently happening out of order diff --git a/.changes/1.8.0/Dependencies-20240328-133507.yaml b/.changes/1.8.0/Dependencies-20240328-133507.yaml deleted file mode 100644 index c7dbd319..00000000 --- a/.changes/1.8.0/Dependencies-20240328-133507.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Dependencies -body: add "no-binary" install option -time: 2024-03-28T13:35:07.300121-07:00 -custom: - Author: colin-rogers-dbt - Issue: "6" diff --git a/.changes/1.8.0/Dependencies-20240403-135902.yaml b/.changes/1.8.0/Dependencies-20240403-135902.yaml deleted file mode 100644 index 126b2178..00000000 --- a/.changes/1.8.0/Dependencies-20240403-135902.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Dependencies -body: Add `dbt-core` as a dependency to preserve backwards compatibility for installation -time: 2024-04-03T13:59:02.539298-04:00 -custom: - Author: mikealfare - Issue: "44" diff --git a/.changes/1.8.0/Features-20240323-160222.yaml b/.changes/1.8.0/Features-20240323-160222.yaml deleted file mode 100644 index c5af1aca..00000000 --- a/.changes/1.8.0/Features-20240323-160222.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Debug log when `type_code` fails to convert to a `data_type` -time: 2024-03-23T16:02:22.153674-06:00 -custom: - Author: dbeatty10 - Issue: "8912" diff --git a/.changes/1.8.0/Fixes-20240423-180916.yaml b/.changes/1.8.0/Fixes-20240423-180916.yaml deleted file mode 100644 index 48015bcb..00000000 --- a/.changes/1.8.0/Fixes-20240423-180916.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: remove materialized views from renambeable relation and remove a quote -time: 2024-04-23T18:09:16.865258-05:00 -custom: - Author: McKnight-42 - Issue: "127" diff --git a/.changes/1.8.0/Fixes-20240425-133401.yaml b/.changes/1.8.0/Fixes-20240425-133401.yaml deleted file mode 100644 index cb6d14da..00000000 --- a/.changes/1.8.0/Fixes-20240425-133401.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Replace usage of `Set` with `List` to fix issue with index updates intermittently happening out of order -time: 2024-04-25T13:34:01.018399-04:00 -custom: - Author: mikealfare - Issue: "72" diff --git a/.changes/1.8.0/Security-20240327-193942.yaml b/.changes/1.8.0/Security-20240327-193942.yaml deleted file mode 100644 index 66dee543..00000000 --- a/.changes/1.8.0/Security-20240327-193942.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Security -body: Pin `black>=24.3` in 
`pyproject.toml` -time: 2024-03-27T19:39:42.633016-04:00 -custom: - Author: mikealfare - Issue: "40" diff --git a/.changes/1.8.0/Under the Hood-20240226-225642.yaml b/.changes/1.8.0/Under the Hood-20240226-225642.yaml deleted file mode 100644 index dd5d0645..00000000 --- a/.changes/1.8.0/Under the Hood-20240226-225642.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Add unit test for transaction semantics. -time: 2024-02-26T22:56:42.202429-08:00 -custom: - Author: versusfacit - Issue: "23" diff --git a/CHANGELOG.md b/CHANGELOG.md index df8d5dec..5beb02ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,38 +5,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), and is generated by [Changie](https://github.com/miniscruff/changie). -## dbt-postgres 1.8.0-b5 - April 29, 2024 - -### Features - -* Debug log when `type_code` fails to convert to a `data_type` - -### Fixes - -* remove materialized views from renambeable relation and remove a quote -* Replace usage of `Set` with `List` to fix issue with index updates intermittently happening out of order - -## dbt-postgres 1.8.0-b2 - April 03, 2024 - -### Under the Hood - -* Add unit test for transaction semantics. - -### Dependencies - -* add "no-binary" install option -* Add `dbt-core` as a dependency to preserve backwards compatibility for installation - -### Security - -* Pin `black>=24.3` in `pyproject.toml` - -## dbt-postgres 1.0.8-b3 - April 16, 2024 - -### Fixes - -* Determine `psycopg2` based on `platform_system` (Linux or other), remove usage of `DBT_PSYCOPG2_NAME` environment variable - -### Under the Hood - -* Update dependabot configuration to cover GHA +## Previous Releases +For information on prior major and minor releases, see their changelogs: +- [1.8](https://github.com/dbt-labs/dbt-postgres/blob/1.8.latest/CHANGELOG.md) diff --git a/dbt/adapters/postgres/__version__.py b/dbt/adapters/postgres/__version__.py index c904307f..6698ed64 100644 --- a/dbt/adapters/postgres/__version__.py +++ b/dbt/adapters/postgres/__version__.py @@ -1 +1 @@ -version = "1.8.0b5" +version = "1.9.0a1" From 8446b977a01b1443f1ae14b78ce4d58d46f765c8 Mon Sep 17 00:00:00 2001 From: Mike Alfare <13974384+mikealfare@users.noreply.github.com> Date: Thu, 2 May 2024 18:29:58 -0400 Subject: [PATCH 04/15] [Tech Debt] Remove context methods test suite (#83) --- .../context_methods/first_dependency.py | 95 -------- .../context_methods/test_builtin_functions.py | 143 ------------ .../context_methods/test_cli_var_override.py | 66 ------ .../context_methods/test_cli_vars.py | 205 ------------------ .../context_methods/test_custom_env_vars.py | 36 --- .../context_methods/test_env_vars.py | 195 ----------------- .../context_methods/test_secret_env_vars.py | 185 ---------------- .../context_methods/test_var_dependency.py | 82 ------- .../test_var_in_generate_name.py | 43 ---- .../context_methods/test_yaml_functions.py | 49 ----- 10 files changed, 1099 deletions(-) delete mode 100644 tests/functional/context_methods/first_dependency.py delete mode 100644 tests/functional/context_methods/test_builtin_functions.py delete mode 100644 tests/functional/context_methods/test_cli_var_override.py delete mode 100644 tests/functional/context_methods/test_cli_vars.py delete mode 100644 tests/functional/context_methods/test_custom_env_vars.py delete mode 100644 tests/functional/context_methods/test_env_vars.py delete mode 100644 
tests/functional/context_methods/test_secret_env_vars.py delete mode 100644 tests/functional/context_methods/test_var_dependency.py delete mode 100644 tests/functional/context_methods/test_var_in_generate_name.py delete mode 100644 tests/functional/context_methods/test_yaml_functions.py diff --git a/tests/functional/context_methods/first_dependency.py b/tests/functional/context_methods/first_dependency.py deleted file mode 100644 index 8e1365be..00000000 --- a/tests/functional/context_methods/first_dependency.py +++ /dev/null @@ -1,95 +0,0 @@ -from dbt.tests.fixtures.project import write_project_files -import pytest - - -first_dependency__dbt_project_yml = """ -name: 'first_dep' -version: '1.0' -config-version: 2 - -profile: 'default' - -model-paths: ["models"] -analysis-paths: ["analyses"] -test-paths: ["tests"] -seed-paths: ["seeds"] -macro-paths: ["macros"] - -require-dbt-version: '>=0.1.0' - -target-path: "target" # directory which will store compiled SQL files -clean-targets: # directories to be removed by `dbt clean` - - "target" - - "dbt_packages" - -vars: - first_dep: - first_dep_global: 'first_dep_global_value_overridden' - test_config_root_override: 'configured_from_dependency' - test_config_package: 'configured_from_dependency' - -seeds: - quote_columns: True - -""" - -first_dependency__models__nested__first_dep_model_sql = """ -select - '{{ var("first_dep_global") }}' as first_dep_global, - '{{ var("from_root_to_first") }}' as from_root -""" - -first_dependency__seeds__first_dep_expected_csv = """first_dep_global,from_root -first_dep_global_value_overridden,root_first_value -""" - -first_dependency__models__nested__first_dep_model_var_expected_csv = """test_config_root_override,test_config_package -configured_from_root,configured_from_dependency -""" - -first_dependency__models__nested__first_dep_model_var_sql = """ -select - '{{ config.get("test_config_root_override") }}' as test_config_root_override, - '{{ config.get("test_config_package") }}' as test_config_package -""" - -first_dependency__model_var_in_config_schema = """ -models: -- name: first_dep_model - config: - test_config_root_override: "{{ var('test_config_root_override') }}" - test_config_package: "{{ var('test_config_package') }}" -""" - - -class FirstDependencyProject: - @pytest.fixture(scope="class") - def first_dependency(self, project): - first_dependency_files = { - "dbt_project.yml": first_dependency__dbt_project_yml, - "models": { - "nested": { - "first_dep_model.sql": first_dependency__models__nested__first_dep_model_sql - } - }, - "seeds": {"first_dep_expected.csv": first_dependency__seeds__first_dep_expected_csv}, - } - write_project_files(project.project_root, "first_dependency", first_dependency_files) - - -class FirstDependencyConfigProject: - @pytest.fixture(scope="class") - def first_dependency(self, project): - first_dependency_files = { - "dbt_project.yml": first_dependency__dbt_project_yml, - "models": { - "nested": { - "first_dep_model.sql": first_dependency__models__nested__first_dep_model_var_sql, - "schema.yml": first_dependency__model_var_in_config_schema, - } - }, - "seeds": { - "first_dep_expected.csv": first_dependency__models__nested__first_dep_model_var_expected_csv - }, - } - write_project_files(project.project_root, "first_dependency", first_dependency_files) diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py deleted file mode 100644 index b8a47b34..00000000 --- 
a/tests/functional/context_methods/test_builtin_functions.py +++ /dev/null @@ -1,143 +0,0 @@ -import json - -from dbt.tests.util import write_file -from dbt_common.exceptions import CompilationError -import pytest - -from tests.functional.utils import run_dbt, run_dbt_and_capture - - -macros__validate_set_sql = """ -{% macro validate_set() %} - {% set set_result = set([1, 2, 2, 3, 'foo', False]) %} - {{ log("set_result: " ~ set_result) }} - {% set set_strict_result = set_strict([1, 2, 2, 3, 'foo', False]) %} - {{ log("set_strict_result: " ~ set_strict_result) }} -{% endmacro %} -""" - -macros__validate_zip_sql = """ -{% macro validate_zip() %} - {% set list_a = [1, 2] %} - {% set list_b = ['foo', 'bar'] %} - {% set zip_result = zip(list_a, list_b) | list %} - {{ log("zip_result: " ~ zip_result) }} - {% set zip_strict_result = zip_strict(list_a, list_b) | list %} - {{ log("zip_strict_result: " ~ zip_strict_result) }} -{% endmacro %} -""" - -macros__validate_invocation_sql = """ -{% macro validate_invocation(my_variable) %} - -- check a specific value - {{ log("use_colors: "~ invocation_args_dict['use_colors']) }} - -- whole dictionary (as string) - {{ log("invocation_result: "~ invocation_args_dict) }} -{% endmacro %} -""" - -macros__validate_dbt_metadata_envs_sql = """ -{% macro validate_dbt_metadata_envs() %} - {{ log("dbt_metadata_envs_result:"~ dbt_metadata_envs) }} -{% endmacro %} -""" - -models__set_exception_sql = """ -{% set set_strict_result = set_strict(1) %} -""" - -models__zip_exception_sql = """ -{% set zip_strict_result = zip_strict(1) %} -""" - - -def parse_json_logs(json_log_output): - parsed_logs = [] - for line in json_log_output.split("\n"): - try: - log = json.loads(line) - except ValueError: - continue - - parsed_logs.append(log) - - return parsed_logs - - -def find_result_in_parsed_logs(parsed_logs, result_name): - return next( - ( - item["data"]["msg"] - for item in parsed_logs - if result_name in item["data"].get("msg", "msg") - ), - False, - ) - - -class TestContextBuiltins: - @pytest.fixture(scope="class") - def macros(self): - return { - "validate_set.sql": macros__validate_set_sql, - "validate_zip.sql": macros__validate_zip_sql, - "validate_invocation.sql": macros__validate_invocation_sql, - "validate_dbt_metadata_envs.sql": macros__validate_dbt_metadata_envs_sql, - } - - def test_builtin_set_function(self, project): - _, log_output = run_dbt_and_capture(["--debug", "run-operation", "validate_set"]) - - # The order of the set isn't guaranteed so we can't check for the actual set in the logs - assert "set_result: " in log_output - assert "False" in log_output - assert "set_strict_result: " in log_output - - def test_builtin_zip_function(self, project): - _, log_output = run_dbt_and_capture(["--debug", "run-operation", "validate_zip"]) - - expected_zip = [(1, "foo"), (2, "bar")] - assert f"zip_result: {expected_zip}" in log_output - assert f"zip_strict_result: {expected_zip}" in log_output - - def test_builtin_invocation_args_dict_function(self, project): - _, log_output = run_dbt_and_capture( - [ - "--debug", - "--log-format=json", - "run-operation", - "validate_invocation", - "--args", - "{my_variable: test_variable}", - ] - ) - - parsed_logs = parse_json_logs(log_output) - use_colors = result = find_result_in_parsed_logs(parsed_logs, "use_colors") - assert use_colors == "use_colors: True" - invocation_dict = find_result_in_parsed_logs(parsed_logs, "invocation_result") - assert result - # The result should include a dictionary of all flags with values that aren't 
None - expected = ( - "'send_anonymous_usage_stats': False", - "'quiet': False", - "'print': True", - "'cache_selected_only': False", - "'macro': 'validate_invocation'", - "'args': {'my_variable': 'test_variable'}", - "'which': 'run-operation'", - "'indirect_selection': 'eager'", - ) - assert all(element in invocation_dict for element in expected) - - -class TestContextBuiltinExceptions: - # Assert compilation errors are raised with _strict equivalents - def test_builtin_function_exception(self, project): - write_file(models__set_exception_sql, project.project_root, "models", "raise.sql") - with pytest.raises(CompilationError): - run_dbt(["compile"]) - - write_file(models__zip_exception_sql, project.project_root, "models", "raise.sql") - with pytest.raises(CompilationError): - run_dbt(["compile"]) diff --git a/tests/functional/context_methods/test_cli_var_override.py b/tests/functional/context_methods/test_cli_var_override.py deleted file mode 100644 index 757ab521..00000000 --- a/tests/functional/context_methods/test_cli_var_override.py +++ /dev/null @@ -1,66 +0,0 @@ -from dbt.tests.util import run_dbt -import pytest - - -models_override__schema_yml = """ -version: 2 -models: -- name: test_vars - columns: - - name: field - data_tests: - - accepted_values: - values: - - override -""" - -models_override__test_vars_sql = """ -select '{{ var("required") }}'::varchar as field -""" - - -# Tests that cli vars override vars set in the project config -class TestCLIVarOverride: - @pytest.fixture(scope="class") - def models(self): - return { - "schema.yml": models_override__schema_yml, - "test_vars.sql": models_override__test_vars_sql, - } - - @pytest.fixture(scope="class") - def project_config_update(self): - return { - "vars": { - "required": "present", - }, - } - - def test__override_vars_global(self, project): - run_dbt(["run", "--vars", "{required: override}"]) - run_dbt(["test"]) - - -# This one switches to setting a var in 'test' -class TestCLIVarOverridePorject: - @pytest.fixture(scope="class") - def models(self): - return { - "schema.yml": models_override__schema_yml, - "test_vars.sql": models_override__test_vars_sql, - } - - @pytest.fixture(scope="class") - def project_config_update(self): - return { - "vars": { - "test": { - "required": "present", - }, - }, - } - - def test__override_vars_project_level(self, project): - # This should be "override" - run_dbt(["run", "--vars", "{required: override}"]) - run_dbt(["test"]) diff --git a/tests/functional/context_methods/test_cli_vars.py b/tests/functional/context_methods/test_cli_vars.py deleted file mode 100644 index 8f6d6e8d..00000000 --- a/tests/functional/context_methods/test_cli_vars.py +++ /dev/null @@ -1,205 +0,0 @@ -from dbt.tests.fixtures.project import write_project_files -from dbt.tests.util import get_artifact, run_dbt, write_config_file -from dbt_common.exceptions import CompilationError, DbtRuntimeError -import pytest -import yaml - - -models_complex__schema_yml = """ -version: 2 -models: -- name: complex_model - columns: - - name: var_1 - data_tests: - - accepted_values: - values: - - abc - - name: var_2 - data_tests: - - accepted_values: - values: - - def - - name: var_3 - data_tests: - - accepted_values: - values: - - jkl -""" - -models_complex__complex_model_sql = """ -select - '{{ var("variable_1") }}'::varchar as var_1, - '{{ var("variable_2")[0] }}'::varchar as var_2, - '{{ var("variable_3")["value"] }}'::varchar as var_3 -""" - -models_simple__schema_yml = """ -version: 2 -models: -- name: simple_model - columns: - - name: 
simple - data_tests: - - accepted_values: - values: - - abc -""" - -models_simple__simple_model_sql = """ -select - '{{ var("simple") }}'::varchar as simple -""" - -really_simple_model_sql = """ -select 'abc' as simple -""" - - -class TestCLIVars: - @pytest.fixture(scope="class") - def models(self): - return { - "schema.yml": models_complex__schema_yml, - "complex_model.sql": models_complex__complex_model_sql, - } - - def test__cli_vars_longform(self, project): - cli_vars = { - "variable_1": "abc", - "variable_2": ["def", "ghi"], - "variable_3": {"value": "jkl"}, - } - results = run_dbt(["run", "--vars", yaml.dump(cli_vars)]) - assert len(results) == 1 - results = run_dbt(["test", "--vars", yaml.dump(cli_vars)]) - assert len(results) == 3 - - -class TestCLIVarsSimple: - @pytest.fixture(scope="class") - def models(self): - return { - "schema.yml": models_simple__schema_yml, - "simple_model.sql": models_simple__simple_model_sql, - } - - def test__cli_vars_shorthand(self, project): - results = run_dbt(["run", "--vars", "simple: abc"]) - assert len(results) == 1 - results = run_dbt(["test", "--vars", "simple: abc"]) - assert len(results) == 1 - - def test__cli_vars_longer(self, project): - results = run_dbt(["run", "--vars", "{simple: abc, unused: def}"]) - assert len(results) == 1 - results = run_dbt(["test", "--vars", "{simple: abc, unused: def}"]) - assert len(results) == 1 - run_results = get_artifact(project.project_root, "target", "run_results.json") - assert run_results["args"]["vars"] == {"simple": "abc", "unused": "def"} - - -class TestCLIVarsProfile: - @pytest.fixture(scope="class") - def models(self): - return { - "schema.yml": models_simple__schema_yml, - "simple_model.sql": really_simple_model_sql, - } - - def test_cli_vars_in_profile(self, project, dbt_profile_data): - profile = dbt_profile_data - profile["test"]["outputs"]["default"]["host"] = "{{ var('db_host') }}" - write_config_file(profile, project.profiles_dir, "profiles.yml") - with pytest.raises(DbtRuntimeError): - results = run_dbt(["run"]) - results = run_dbt(["run", "--vars", "db_host: localhost"]) - assert len(results) == 1 - - -class TestCLIVarsPackages: - @pytest.fixture(scope="class", autouse=True) - def setUp(self, project_root, dbt_integration_project): # noqa: F811 - write_project_files(project_root, "dbt_integration_project", dbt_integration_project) - - @pytest.fixture(scope="class") - def models(self): - return { - "schema.yml": models_simple__schema_yml, - "simple_model.sql": really_simple_model_sql, - } - - @pytest.fixture(scope="class") - def packages_config(self): - return {"packages": [{"local": "dbt_integration_project"}]} - - def test_cli_vars_in_packages(self, project, packages_config): - # Run working deps and run commands - run_dbt(["deps"]) - results = run_dbt(["run"]) - assert len(results) == 1 - - # Change packages.yml to contain a var - packages = packages_config - packages["packages"][0]["local"] = "{{ var('path_to_project') }}" - write_config_file(packages, project.project_root, "packages.yml") - - # Without vars args deps fails - with pytest.raises(DbtRuntimeError): - run_dbt(["deps"]) - - # With vars arg deps succeeds - results = run_dbt(["deps", "--vars", "path_to_project: dbt_integration_project"]) - assert results is None - - -initial_selectors_yml = """ -selectors: - - name: dev_defer_snapshots - default: "{{ target.name == 'dev' | as_bool }}" - definition: - method: fqn - value: '*' - exclude: - - method: config.materialized - value: snapshot -""" - -var_selectors_yml = """ -selectors: 
- - name: dev_defer_snapshots - default: "{{ var('snapshot_target') == 'dev' | as_bool }}" - definition: - method: fqn - value: '*' - exclude: - - method: config.materialized - value: snapshot -""" - - -class TestCLIVarsSelectors: - @pytest.fixture(scope="class") - def models(self): - return { - "schema.yml": models_simple__schema_yml, - "simple_model.sql": really_simple_model_sql, - } - - @pytest.fixture(scope="class") - def selectors(self): - return initial_selectors_yml - - def test_vars_in_selectors(self, project): - # initially runs ok - results = run_dbt(["run"]) - assert len(results) == 1 - - # Update the selectors.yml file to have a var - write_config_file(var_selectors_yml, project.project_root, "selectors.yml") - with pytest.raises(CompilationError): - run_dbt(["run"]) - - # Var in cli_vars works - results = run_dbt(["run", "--vars", "snapshot_target: dev"]) - assert len(results) == 1 diff --git a/tests/functional/context_methods/test_custom_env_vars.py b/tests/functional/context_methods/test_custom_env_vars.py deleted file mode 100644 index 50a9b00c..00000000 --- a/tests/functional/context_methods/test_custom_env_vars.py +++ /dev/null @@ -1,36 +0,0 @@ -import json -import os - -import pytest - -from tests.functional.utils import run_dbt_and_capture - - -def parse_json_logs(json_log_output): - parsed_logs = [] - for line in json_log_output.split("\n"): - try: - log = json.loads(line) - except ValueError: - continue - - parsed_logs.append(log) - - return parsed_logs - - -class TestCustomVarInLogs: - @pytest.fixture(scope="class", autouse=True) - def setup(self): - # on windows, python uppercases env var names because windows is case insensitive - os.environ["DBT_ENV_CUSTOM_ENV_SOME_VAR"] = "value" - yield - del os.environ["DBT_ENV_CUSTOM_ENV_SOME_VAR"] - - def test_extra_filled(self, project): - _, log_output = run_dbt_and_capture( - ["--log-format=json", "deps"], - ) - logs = parse_json_logs(log_output) - for log in logs: - assert log["info"].get("extra") == {"SOME_VAR": "value"} diff --git a/tests/functional/context_methods/test_env_vars.py b/tests/functional/context_methods/test_env_vars.py deleted file mode 100644 index 0bfbd01c..00000000 --- a/tests/functional/context_methods/test_env_vars.py +++ /dev/null @@ -1,195 +0,0 @@ -import os - -from dbt.constants import DEFAULT_ENV_PLACEHOLDER, SECRET_ENV_PREFIX -from dbt.tests.util import get_manifest -import pytest - -from tests.functional.utils import run_dbt, run_dbt_and_capture - - -context_sql = """ - -{{ - config( - materialized='table' - ) -}} - -select - - -- compile-time variables - '{{ this }}' as "this", - '{{ this.name }}' as "this.name", - '{{ this.schema }}' as "this.schema", - '{{ this.table }}' as "this.table", - - '{{ target.dbname }}' as "target.dbname", - '{{ target.host }}' as "target.host", - '{{ target.name }}' as "target.name", - '{{ target.schema }}' as "target.schema", - '{{ target.type }}' as "target.type", - '{{ target.user }}' as "target.user", - '{{ target.get("pass", "") }}' as "target.pass", -- not actually included, here to test that it is _not_ present! 
- {{ target.port }} as "target.port", - {{ target.threads }} as "target.threads", - - -- runtime variables - '{{ run_started_at }}' as run_started_at, - '{{ invocation_id }}' as invocation_id, - '{{ thread_id }}' as thread_id, - - '{{ env_var("DBT_TEST_ENV_VAR") }}' as env_var, - '{{ env_var("DBT_TEST_IGNORE_DEFAULT", "ignored_default_val") }}' as env_var_ignore_default, - '{{ env_var("DBT_TEST_USE_DEFAULT", "use_my_default_val") }}' as env_var_use_default, - 'secret_variable' as env_var_secret, -- make sure the value itself is scrubbed from the logs - '{{ env_var("DBT_TEST_NOT_SECRET") }}' as env_var_not_secret - -""" - - -class TestEnvVars: - @pytest.fixture(scope="class") - def models(self): - return {"context.sql": context_sql} - - @pytest.fixture(scope="class", autouse=True) - def setup(self): - os.environ["DBT_TEST_ENV_VAR"] = "1" - os.environ["DBT_TEST_USER"] = "root" - os.environ["DBT_TEST_PASS"] = "password" - os.environ[SECRET_ENV_PREFIX + "SECRET"] = "secret_variable" - os.environ["DBT_TEST_NOT_SECRET"] = "regular_variable" - os.environ["DBT_TEST_IGNORE_DEFAULT"] = "ignored_default" - yield - del os.environ["DBT_TEST_ENV_VAR"] - del os.environ["DBT_TEST_USER"] - del os.environ[SECRET_ENV_PREFIX + "SECRET"] - del os.environ["DBT_TEST_NOT_SECRET"] - del os.environ["DBT_TEST_IGNORE_DEFAULT"] - - @pytest.fixture(scope="class") - def profiles_config_update(self, unique_schema): - return { - "test": { - "outputs": { - # don't use env_var's here so the integration tests can run - # seed sql statements and the like. default target is used - "dev": { - "type": "postgres", - "threads": 1, - "host": "localhost", - "port": 5432, - "user": "root", - "pass": "password", - "dbname": "dbt", - "schema": unique_schema, - }, - "prod": { - "type": "postgres", - "threads": 1, - "host": "localhost", - "port": 5432, - # root/password - "user": "{{ env_var('DBT_TEST_USER') }}", - "pass": "{{ env_var('DBT_TEST_PASS') }}", - "dbname": "dbt", - "schema": unique_schema, - }, - }, - "target": "dev", - } - } - - def get_ctx_vars(self, project): - fields = [ - "this", - "this.name", - "this.schema", - "this.table", - "target.dbname", - "target.host", - "target.name", - "target.port", - "target.schema", - "target.threads", - "target.type", - "target.user", - "target.pass", - "run_started_at", - "invocation_id", - "thread_id", - "env_var", - ] - field_list = ", ".join(['"{}"'.format(f) for f in fields]) - query = "select {field_list} from {schema}.context".format( - field_list=field_list, schema=project.test_schema - ) - vals = project.run_sql(query, fetch="all") - ctx = dict([(k, v) for (k, v) in zip(fields, vals[0])]) - return ctx - - def test_env_vars_dev( - self, - project, - ): - results = run_dbt(["run"]) - assert len(results) == 1 - ctx = self.get_ctx_vars(project) - - manifest = get_manifest(project.project_root) - expected = { - "DBT_TEST_ENV_VAR": "1", - "DBT_TEST_NOT_SECRET": "regular_variable", - "DBT_TEST_IGNORE_DEFAULT": "ignored_default", - "DBT_TEST_USE_DEFAULT": DEFAULT_ENV_PLACEHOLDER, - } - assert manifest.env_vars == expected - - this = '"{}"."{}"."context"'.format(project.database, project.test_schema) - assert ctx["this"] == this - - assert ctx["this.name"] == "context" - assert ctx["this.schema"] == project.test_schema - assert ctx["this.table"] == "context" - - assert ctx["target.dbname"] == "dbt" - assert ctx["target.host"] == "localhost" - assert ctx["target.name"] == "dev" - assert ctx["target.port"] == 5432 - assert ctx["target.schema"] == project.test_schema - assert 
ctx["target.threads"] == 1 - assert ctx["target.type"] == "postgres" - assert ctx["target.user"] == "root" - assert ctx["target.pass"] == "" - - assert ctx["env_var"] == "1" - - def test_env_vars_prod(self, project): - results = run_dbt(["run", "--target", "prod"]) - assert len(results) == 1 - ctx = self.get_ctx_vars(project) - - this = '"{}"."{}"."context"'.format(project.database, project.test_schema) - assert ctx["this"] == this - - assert ctx["this.name"] == "context" - assert ctx["this.schema"] == project.test_schema - assert ctx["this.table"] == "context" - - assert ctx["target.dbname"] == "dbt" - assert ctx["target.host"] == "localhost" - assert ctx["target.name"] == "prod" - assert ctx["target.port"] == 5432 - assert ctx["target.schema"] == project.test_schema - assert ctx["target.threads"] == 1 - assert ctx["target.type"] == "postgres" - assert ctx["target.user"] == "root" - assert ctx["target.pass"] == "" - assert ctx["env_var"] == "1" - - def test_env_vars_secrets(self, project): - os.environ["DBT_DEBUG"] = "True" - _, log_output = run_dbt_and_capture(["run", "--target", "prod"]) - - assert not ("secret_variable" in log_output) - assert "regular_variable" in log_output diff --git a/tests/functional/context_methods/test_secret_env_vars.py b/tests/functional/context_methods/test_secret_env_vars.py deleted file mode 100644 index a6a5537a..00000000 --- a/tests/functional/context_methods/test_secret_env_vars.py +++ /dev/null @@ -1,185 +0,0 @@ -import os - -from dbt.constants import SECRET_ENV_PREFIX -from dbt.exceptions import ParsingError -from dbt.tests.util import read_file -from dbt_common.exceptions import DbtInternalError -import pytest - -from tests.functional.context_methods.first_dependency import FirstDependencyProject -from tests.functional.utils import run_dbt, run_dbt_and_capture - - -secret_bad__context_sql = """ - -{{ - config( - materialized='table' - ) -}} - -select - - '{{ env_var("DBT_TEST_ENV_VAR") }}' as env_var, - '{{ env_var("DBT_ENV_SECRET_SECRET") }}' as env_var_secret, -- this should raise an error! - '{{ env_var("DBT_TEST_NOT_SECRET") }}' as env_var_not_secret - -""" - - -class TestDisallowSecretModel: - @pytest.fixture(scope="class") - def models(self): - return {"context.sql": secret_bad__context_sql} - - def test_disallow_secret(self, project): - with pytest.raises(ParsingError): - run_dbt(["compile"]) - - -models__context_sql = """ -{{ - config( - materialized='table' - ) -}} - -select - - -- compile-time variables - '{{ this }}' as "this", - '{{ this.name }}' as "this.name", - '{{ this.schema }}' as "this.schema", - '{{ this.table }}' as "this.table", - - '{{ target.dbname }}' as "target.dbname", - '{{ target.host }}' as "target.host", - '{{ target.name }}' as "target.name", - '{{ target.schema }}' as "target.schema", - '{{ target.type }}' as "target.type", - '{{ target.user }}' as "target.user", - '{{ target.get("pass", "") }}' as "target.pass", -- not actually included, here to test that it is _not_ present! 
- {{ target.port }} as "target.port", - {{ target.threads }} as "target.threads", - - -- runtime variables - '{{ run_started_at }}' as run_started_at, - '{{ invocation_id }}' as invocation_id, - '{{ thread_id }}' as thread_id, - - '{{ env_var("DBT_TEST_ENV_VAR") }}' as env_var, - 'secret_variable' as env_var_secret, -- make sure the value itself is scrubbed from the logs - '{{ env_var("DBT_TEST_NOT_SECRET") }}' as env_var_not_secret -""" - - -class TestAllowSecretProfilePackage(FirstDependencyProject): - @pytest.fixture(scope="class", autouse=True) - def setup(self): - os.environ[SECRET_ENV_PREFIX + "USER"] = "root" - os.environ[SECRET_ENV_PREFIX + "PASS"] = "password" - os.environ[SECRET_ENV_PREFIX + "PACKAGE"] = "first_dependency" - os.environ[SECRET_ENV_PREFIX + "GIT_TOKEN"] = "abc123" - yield - del os.environ[SECRET_ENV_PREFIX + "USER"] - del os.environ[SECRET_ENV_PREFIX + "PASS"] - del os.environ[SECRET_ENV_PREFIX + "PACKAGE"] - del os.environ[SECRET_ENV_PREFIX + "GIT_TOKEN"] - - @pytest.fixture(scope="class") - def models(self): - return {"context.sql": models__context_sql} - - @pytest.fixture(scope="class") - def packages(self): - return { - "packages": [ - { - # the raw value of this secret *will* be written to lock file - "local": "{{ env_var('DBT_ENV_SECRET_PACKAGE') }}" - }, - { - # this secret env var will *not* be written to lock file - "git": "https://{{ env_var('DBT_ENV_SECRET_GIT_TOKEN') }}@github.com/dbt-labs/dbt-external-tables.git" - }, - { - # this secret env var will *not* be written to lock file - "tarball": "https://{{ env_var('DBT_ENV_SECRET_GIT_TOKEN') }}@github.com/dbt-labs/dbt-utils/archive/refs/tags/1.1.1.tar.gz", - "name": "dbt_utils", - }, - ] - } - - @pytest.fixture(scope="class") - def profile_target(self): - return { - "type": "postgres", - "threads": 1, - "host": "localhost", - "port": 5432, - # root/password - "user": "{{ env_var('DBT_ENV_SECRET_USER') }}", - "pass": "{{ env_var('DBT_ENV_SECRET_PASS') }}", - "dbname": "dbt", - } - - def test_allow_secrets(self, project, first_dependency): - _, log_output = run_dbt_and_capture(["deps"]) - lock_file_contents = read_file("package-lock.yml") - - # this will not be written to logs or lock file - assert not ("abc123" in log_output) - assert not ("abc123" in lock_file_contents) - assert "{{ env_var('DBT_ENV_SECRET_GIT_TOKEN') }}" in lock_file_contents - - # this will be scrubbed from logs, but not from the lock file - assert not ("first_dependency" in log_output) - assert "first_dependency" in lock_file_contents - - -class TestCloneFailSecretScrubbed: - @pytest.fixture(scope="class", autouse=True) - def setup(self): - os.environ[SECRET_ENV_PREFIX + "GIT_TOKEN"] = "abc123" - - @pytest.fixture(scope="class") - def models(self): - return {"context.sql": models__context_sql} - - @pytest.fixture(scope="class") - def packages(self): - return { - "packages": [ - { - "git": "https://fakeuser:{{ env_var('DBT_ENV_SECRET_GIT_TOKEN') }}@github.com/dbt-labs/fake-repo.git" - }, - ] - } - - def test_fail_clone_with_scrubbing(self, project): - with pytest.raises(DbtInternalError) as excinfo: - _, log_output = run_dbt_and_capture(["deps"]) - - assert "abc123" not in str(excinfo.value) - - -class TestCloneFailSecretNotRendered(TestCloneFailSecretScrubbed): - # as above, with some Jinja manipulation - @pytest.fixture(scope="class") - def packages(self): - return { - "packages": [ - { - "git": "https://fakeuser:{{ env_var('DBT_ENV_SECRET_GIT_TOKEN') | join(' ') }}@github.com/dbt-labs/fake-repo.git" - }, - ] - } - - def 
test_fail_clone_with_scrubbing(self, project): - with pytest.raises(DbtInternalError) as excinfo: - _, log_output = run_dbt_and_capture(["deps"]) - - # we should not see any manipulated form of the secret value (abc123) here - # we should see a manipulated form of the placeholder instead - assert "a b c 1 2 3" not in str(excinfo.value) - assert "D B T _ E N V _ S E C R E T _ G I T _ T O K E N" in str(excinfo.value) diff --git a/tests/functional/context_methods/test_var_dependency.py b/tests/functional/context_methods/test_var_dependency.py deleted file mode 100644 index a0c06db7..00000000 --- a/tests/functional/context_methods/test_var_dependency.py +++ /dev/null @@ -1,82 +0,0 @@ -from dbt.tests.util import check_relations_equal, run_dbt -import pytest - -from tests.functional.context_methods.first_dependency import ( - FirstDependencyConfigProject, - FirstDependencyProject, -) - - -dependency_seeds__root_model_expected_csv = """first_dep_global,from_root -dep_never_overridden,root_root_value -""" - -dependency_models__inside__model_sql = """ -select - '{{ var("first_dep_override") }}' as first_dep_global, - '{{ var("from_root_to_root") }}' as from_root - -""" - - -class TestVarDependencyInheritance(FirstDependencyProject): - @pytest.fixture(scope="class") - def seeds(self): - return {"root_model_expected.csv": dependency_seeds__root_model_expected_csv} - - @pytest.fixture(scope="class") - def models(self): - return {"inside": {"model.sql": dependency_models__inside__model_sql}} - - @pytest.fixture(scope="class") - def packages(self): - return { - "packages": [ - {"local": "first_dependency"}, - ] - } - - @pytest.fixture(scope="class") - def project_config_update(self): - return { - "vars": { - "first_dep_override": "dep_never_overridden", - "test": { - "from_root_to_root": "root_root_value", - }, - "first_dep": { - "from_root_to_first": "root_first_value", - }, - }, - } - - def test_var_mutual_overrides_v1_conversion(self, project, first_dependency): - run_dbt(["deps"]) - assert len(run_dbt(["seed"])) == 2 - assert len(run_dbt(["run"])) == 2 - check_relations_equal(project.adapter, ["root_model_expected", "model"]) - check_relations_equal(project.adapter, ["first_dep_expected", "first_dep_model"]) - - -class TestVarConfigDependencyInheritance(FirstDependencyConfigProject): - @pytest.fixture(scope="class") - def packages(self): - return { - "packages": [ - {"local": "first_dependency"}, - ] - } - - @pytest.fixture(scope="class") - def project_config_update(self): - return { - "vars": { - "test_config_root_override": "configured_from_root", - }, - } - - def test_root_var_overrides_package_var(self, project, first_dependency): - run_dbt(["deps"]) - run_dbt(["seed"]) - assert len(run_dbt(["run"])) == 1 - check_relations_equal(project.adapter, ["first_dep_expected", "first_dep_model"]) diff --git a/tests/functional/context_methods/test_var_in_generate_name.py b/tests/functional/context_methods/test_var_in_generate_name.py deleted file mode 100644 index f36bec3a..00000000 --- a/tests/functional/context_methods/test_var_in_generate_name.py +++ /dev/null @@ -1,43 +0,0 @@ -from dbt.tests.util import run_dbt, update_config_file -from dbt_common.exceptions import CompilationError -import pytest - - -model_sql = """ -select 1 as id -""" - -bad_generate_macros__generate_names_sql = """ -{% macro generate_schema_name(custom_schema_name, node) -%} - {% do var('somevar') %} - {% do return(dbt.generate_schema_name(custom_schema_name, node)) %} -{%- endmacro %} - -""" - - -class 
TestMissingVarGenerateNameMacro: - @pytest.fixture(scope="class") - def macros(self): - return {"generate_names.sql": bad_generate_macros__generate_names_sql} - - @pytest.fixture(scope="class") - def models(self): - return {"model.sql": model_sql} - - def test_generate_schema_name_var(self, project): - # var isn't set, so generate_name macro fails - with pytest.raises(CompilationError) as excinfo: - run_dbt(["compile"]) - - assert "Required var 'somevar' not found in config" in str(excinfo.value) - - # globally scoped -- var is set at top-level - update_config_file({"vars": {"somevar": 1}}, project.project_root, "dbt_project.yml") - run_dbt(["compile"]) - - # locally scoped -- var is set in 'test' scope - update_config_file( - {"vars": {"test": {"somevar": 1}}}, project.project_root, "dbt_project.yml" - ) - run_dbt(["compile"]) diff --git a/tests/functional/context_methods/test_yaml_functions.py b/tests/functional/context_methods/test_yaml_functions.py deleted file mode 100644 index 8996abc9..00000000 --- a/tests/functional/context_methods/test_yaml_functions.py +++ /dev/null @@ -1,49 +0,0 @@ -from dbt.tests.util import run_dbt -import pytest - - -tests__from_yaml_sql = """ -{% set simplest = (fromyaml('a: 1') == {'a': 1}) %} -{% set nested_data %} -a: - b: - - c: 1 - d: 2 - - c: 3 - d: 4 -{% endset %} -{% set nested = (fromyaml(nested_data) == {'a': {'b': [{'c': 1, 'd': 2}, {'c': 3, 'd': 4}]}}) %} - -(select 'simplest' as name {% if simplest %}limit 0{% endif %}) -union all -(select 'nested' as name {% if nested %}limit 0{% endif %}) -""" - -tests__to_yaml_sql = """ -{% set simplest = (toyaml({'a': 1}) == 'a: 1\\n') %} -{% set default_sort = (toyaml({'b': 2, 'a': 1}) == 'b: 2\\na: 1\\n') %} -{% set unsorted = (toyaml({'b': 2, 'a': 1}, sort_keys=False) == 'b: 2\\na: 1\\n') %} -{% set sorted = (toyaml({'b': 2, 'a': 1}, sort_keys=True) == 'a: 1\\nb: 2\\n') %} -{% set default_results = (toyaml({'a': adapter}, 'failed') == 'failed') %} - -(select 'simplest' as name {% if simplest %}limit 0{% endif %}) -union all -(select 'default_sort' as name {% if default_sort %}limit 0{% endif %}) -union all -(select 'unsorted' as name {% if unsorted %}limit 0{% endif %}) -union all -(select 'sorted' as name {% if sorted %}limit 0{% endif %}) -union all -(select 'default_results' as name {% if default_results %}limit 0{% endif %}) -""" - - -class TestContextVars: - # This test has no actual models - - @pytest.fixture(scope="class") - def tests(self): - return {"from_yaml.sql": tests__from_yaml_sql, "to_yaml.sql": tests__to_yaml_sql} - - def test_json_data_tests(self, project): - assert len(run_dbt(["test"])) == 2 From f873247b0df2e51fafa8f9eb645df98804762f9b Mon Sep 17 00:00:00 2001 From: Mike Alfare <13974384+mikealfare@users.noreply.github.com> Date: Wed, 8 May 2024 10:37:49 -0400 Subject: [PATCH 05/15] Create CODEOWNERS (#86) --- .github/CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..02ed72d4 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,3 @@ +# This codeowners file is used to ensure all PRs require reviews from the adapters team + +* @dbt-labs/adapters From 09518ecb4cc21ea05b216b0376b49bf32daa168b Mon Sep 17 00:00:00 2001 From: Colin Rogers <111200756+colin-rogers-dbt@users.noreply.github.com> Date: Thu, 9 May 2024 11:47:36 -0700 Subject: [PATCH 06/15] remove defer_state tests (#84) Co-authored-by: Mike Alfare <13974384+mikealfare@users.noreply.github.com> --- 
tests/functional/defer_state/fixtures.py | 424 -------- .../defer_state/test_defer_state.py | 329 ------ .../defer_state/test_group_updates.py | 108 -- .../defer_state/test_modified_state.py | 964 ------------------ .../defer_state/test_run_results_state.py | 481 --------- 5 files changed, 2306 deletions(-) delete mode 100644 tests/functional/defer_state/fixtures.py delete mode 100644 tests/functional/defer_state/test_defer_state.py delete mode 100644 tests/functional/defer_state/test_group_updates.py delete mode 100644 tests/functional/defer_state/test_modified_state.py delete mode 100644 tests/functional/defer_state/test_run_results_state.py diff --git a/tests/functional/defer_state/fixtures.py b/tests/functional/defer_state/fixtures.py deleted file mode 100644 index 8b1d3d35..00000000 --- a/tests/functional/defer_state/fixtures.py +++ /dev/null @@ -1,424 +0,0 @@ -seed_csv = """id,name -1,Alice -2,Bob -""" - -table_model_sql = """ -{{ config(materialized='table') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} -""" - -table_model_now_view_sql = """ -{{ config(materialized='view') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} -""" - -table_model_now_incremental_sql = """ -{{ config(materialized='incremental', on_schema_change='append_new_columns') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} -""" - -changed_table_model_sql = """ -{{ config(materialized='table') }} -select 1 as fun -""" - -view_model_sql = """ -select * from {{ ref('seed') }} - --- establish a macro dependency that trips infinite recursion if not handled --- depends on: {{ my_infinitely_recursive_macro() }} -""" - -view_model_now_table_sql = """ -{{ config(materialized='table') }} -select * from {{ ref('seed') }} - --- establish a macro dependency that trips infinite recursion if not handled --- depends on: {{ my_infinitely_recursive_macro() }} -""" - -changed_view_model_sql = """ -select * from no.such.table -""" - -ephemeral_model_sql = """ -{{ config(materialized='ephemeral') }} -select * from {{ ref('view_model') }} -""" - -changed_ephemeral_model_sql = """ -{{ config(materialized='ephemeral') }} -select * from no.such.table -""" - -schema_yml = """ -version: 2 -models: - - name: view_model - columns: - - name: id - data_tests: - - unique: - severity: error - - not_null - - name: name -""" - -no_contract_schema_yml = """ -version: 2 -models: - - name: table_model - config: {} - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: name - data_type: text -""" - -contract_schema_yml = """ -version: 2 -models: - - name: table_model - config: - contract: - enforced: True - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: name - data_type: text -""" - -modified_contract_schema_yml = """ -version: 2 -models: - - name: table_model - config: - contract: - enforced: True - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: user_name - data_type: text -""" - -disabled_contract_schema_yml = """ -version: 2 -models: - - name: table_model - config: - contract: - enforced: False - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: 
error - - not_null - - name: name - data_type: text -""" - -versioned_no_contract_schema_yml = """ -version: 2 -models: - - name: table_model - config: {} - versions: - - v: 1 - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: name - data_type: text -""" - -versioned_contract_schema_yml = """ -version: 2 -models: - - name: table_model - config: - contract: - enforced: True - versions: - - v: 1 - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: name - data_type: text -""" - -versioned_modified_contract_schema_yml = """ -version: 2 -models: - - name: table_model - config: - contract: - enforced: True - versions: - - v: 1 - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: user_name - data_type: text -""" - -versioned_disabled_contract_schema_yml = """ -version: 2 -models: - - name: table_model - config: - contract: - enforced: False - versions: - - v: 1 - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: name - data_type: text -""" - -constraint_schema_yml = """ -version: 2 -models: - - name: view_model - columns: - - name: id - data_tests: - - unique: - severity: error - - not_null - - name: name - - name: table_model - config: - contract: - enforced: True - constraints: - - type: primary_key - columns: [id] - columns: - - name: id - constraints: - - type: not_null - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: name - data_type: text -""" - -modified_column_constraint_schema_yml = """ -version: 2 -models: - - name: view_model - columns: - - name: id - data_tests: - - unique: - severity: error - - not_null - - name: name - - name: table_model - config: - contract: - enforced: True - constraints: - - type: primary_key - columns: [id] - columns: - - name: id - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: name - data_type: text -""" - -modified_model_constraint_schema_yml = """ -version: 2 -models: - - name: view_model - columns: - - name: id - data_tests: - - unique: - severity: error - - not_null - - name: name - - name: table_model - config: - contract: - enforced: True - columns: - - name: id - constraints: - - type: not_null - data_type: integer - data_tests: - - unique: - severity: error - - not_null - - name: name - data_type: text -""" - -exposures_yml = """ -version: 2 -exposures: - - name: my_exposure - type: application - depends_on: - - ref('view_model') - owner: - email: test@example.com -""" - -macros_sql = """ -{% macro my_macro() %} - {% do log('in a macro' ) %} -{% endmacro %} -""" - -infinite_macros_sql = """ -{# trigger infinite recursion if not handled #} - -{% macro my_infinitely_recursive_macro() %} - {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }} -{% endmacro %} - -{% macro default__my_infinitely_recursive_macro() %} - {% if unmet_condition %} - {{ my_infinitely_recursive_macro() }} - {% else %} - {{ return('') }} - {% endif %} -{% endmacro %} -""" - -snapshot_sql = """ -{% snapshot my_cool_snapshot %} - - {{ - config( - target_database=database, - target_schema=schema, - unique_key='id', - strategy='check', - check_cols=['id'], - ) - }} - select * from {{ ref('view_model') }} - -{% endsnapshot %} -""" - -model_1_sql = """ -select * from {{ ref('seed') }} -""" - -modified_model_1_sql = """ -select * from {{ ref('seed') }} -order by 1 
-""" - -model_2_sql = """ -select id from {{ ref('model_1') }} -""" - -modified_model_2_sql = """ -select * from {{ ref('model_1') }} -order by 1 -""" - - -group_schema_yml = """ -groups: - - name: finance - owner: - email: finance@jaffleshop.com - -models: - - name: model_1 - config: - group: finance - - name: model_2 - config: - group: finance -""" - - -group_modified_schema_yml = """ -groups: - - name: accounting - owner: - email: finance@jaffleshop.com -models: - - name: model_1 - config: - group: accounting - - name: model_2 - config: - group: accounting -""" - -group_modified_fail_schema_yml = """ -groups: - - name: finance - owner: - email: finance@jaffleshop.com -models: - - name: model_1 - config: - group: accounting - - name: model_2 - config: - group: finance -""" diff --git a/tests/functional/defer_state/test_defer_state.py b/tests/functional/defer_state/test_defer_state.py deleted file mode 100644 index 45c1d93c..00000000 --- a/tests/functional/defer_state/test_defer_state.py +++ /dev/null @@ -1,329 +0,0 @@ -from copy import deepcopy -import json -import os -import shutil - -from dbt.contracts.results import RunStatus -from dbt.exceptions import DbtRuntimeError -from dbt.tests.util import rm_file, run_dbt, write_file -import pytest - -from tests.functional.defer_state import fixtures - - -class BaseDeferState: - @pytest.fixture(scope="class") - def models(self): - return { - "table_model.sql": fixtures.table_model_sql, - "view_model.sql": fixtures.view_model_sql, - "ephemeral_model.sql": fixtures.ephemeral_model_sql, - "schema.yml": fixtures.schema_yml, - "exposures.yml": fixtures.exposures_yml, - } - - @pytest.fixture(scope="class") - def macros(self): - return { - "macros.sql": fixtures.macros_sql, - "infinite_macros.sql": fixtures.infinite_macros_sql, - } - - @pytest.fixture(scope="class") - def seeds(self): - return { - "seed.csv": fixtures.seed_csv, - } - - @pytest.fixture(scope="class") - def snapshots(self): - return { - "snapshot.sql": fixtures.snapshot_sql, - } - - @pytest.fixture(scope="class") - def other_schema(self, unique_schema): - return unique_schema + "_other" - - @property - def project_config_update(self): - return { - "seeds": { - "test": { - "quote_columns": False, - } - } - } - - @pytest.fixture(scope="class") - def profiles_config_update(self, dbt_profile_target, unique_schema, other_schema): - outputs = {"default": dbt_profile_target, "otherschema": deepcopy(dbt_profile_target)} - outputs["default"]["schema"] = unique_schema - outputs["otherschema"]["schema"] = other_schema - return {"test": {"outputs": outputs, "target": "default"}} - - def copy_state(self, project_root): - state_path = os.path.join(project_root, "state") - if not os.path.exists(state_path): - os.makedirs(state_path) - shutil.copyfile( - f"{project_root}/target/manifest.json", f"{project_root}/state/manifest.json" - ) - - def run_and_save_state(self, project_root, with_snapshot=False): - results = run_dbt(["seed"]) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = run_dbt(["run"]) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = run_dbt(["test"]) - assert len(results) == 2 - - if with_snapshot: - results = run_dbt(["snapshot"]) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - - # copy files - self.copy_state(project_root) - - -class TestDeferStateUnsupportedCommands(BaseDeferState): - def test_no_state(self, project): - # no "state" files present, snapshot fails - with 
pytest.raises(DbtRuntimeError): - run_dbt(["snapshot", "--state", "state", "--defer"]) - - -class TestRunCompileState(BaseDeferState): - def test_run_and_compile_defer(self, project): - self.run_and_save_state(project.project_root) - - # defer test, it succeeds - # Change directory to ensure that state directory is underneath - # project directory. - os.chdir(project.profiles_dir) - results = run_dbt(["compile", "--state", "state", "--defer"]) - assert len(results.results) == 6 - assert results.results[0].node.name == "seed" - - -class TestSnapshotState(BaseDeferState): - def test_snapshot_state_defer(self, project): - self.run_and_save_state(project.project_root) - # snapshot succeeds without --defer - run_dbt(["snapshot"]) - # copy files - self.copy_state(project.project_root) - # defer test, it succeeds - run_dbt(["snapshot", "--state", "state", "--defer"]) - # favor_state test, it succeeds - run_dbt(["snapshot", "--state", "state", "--defer", "--favor-state"]) - - -class TestRunDeferState(BaseDeferState): - def test_run_and_defer(self, project, unique_schema, other_schema): - project.create_test_schema(other_schema) - self.run_and_save_state(project.project_root) - - # test tests first, because run will change things - # no state, wrong schema, failure. - run_dbt(["test", "--target", "otherschema"], expect_pass=False) - - # test generate docs - # no state, wrong schema, empty nodes - catalog = run_dbt(["docs", "generate", "--target", "otherschema"]) - assert not catalog.nodes - - # no state, run also fails - run_dbt(["run", "--target", "otherschema"], expect_pass=False) - - # defer test, it succeeds - results = run_dbt( - ["test", "-m", "view_model+", "--state", "state", "--defer", "--target", "otherschema"] - ) - - # defer docs generate with state, catalog refers schema from the happy times - catalog = run_dbt( - [ - "docs", - "generate", - "-m", - "view_model+", - "--state", - "state", - "--defer", - "--target", - "otherschema", - ] - ) - assert "seed.test.seed" not in catalog.nodes - - # with state it should work though - results = run_dbt( - ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"] - ) - assert other_schema not in results[0].node.compiled_code - assert unique_schema in results[0].node.compiled_code - - with open("target/manifest.json") as fp: - data = json.load(fp) - assert data["nodes"]["seed.test.seed"]["deferred"] - - assert len(results) == 1 - - -class TestRunDeferStateChangedModel(BaseDeferState): - def test_run_defer_state_changed_model(self, project): - self.run_and_save_state(project.project_root) - - # change "view_model" - write_file(fixtures.changed_view_model_sql, "models", "view_model.sql") - - # the sql here is just wrong, so it should fail - run_dbt( - ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"], - expect_pass=False, - ) - # but this should work since we just use the old happy model - run_dbt( - ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"], - expect_pass=True, - ) - - # change "ephemeral_model" - write_file(fixtures.changed_ephemeral_model_sql, "models", "ephemeral_model.sql") - # this should fail because the table model refs a broken ephemeral - # model, which it should see - run_dbt( - ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"], - expect_pass=False, - ) - - -class TestRunDeferStateIFFNotExists(BaseDeferState): - def test_run_defer_iff_not_exists(self, project, unique_schema, other_schema): - 
project.create_test_schema(other_schema) - self.run_and_save_state(project.project_root) - - results = run_dbt(["seed", "--target", "otherschema"]) - assert len(results) == 1 - results = run_dbt(["run", "--state", "state", "--defer", "--target", "otherschema"]) - assert len(results) == 2 - - # because the seed now exists in our "other" schema, we should prefer it over the one - # available from state - assert other_schema in results[0].node.compiled_code - - # this time with --favor-state: even though the seed now exists in our "other" schema, - # we should still favor the one available from state - results = run_dbt( - ["run", "--state", "state", "--defer", "--favor-state", "--target", "otherschema"] - ) - assert len(results) == 2 - assert other_schema not in results[0].node.compiled_code - - -class TestDeferStateDeletedUpstream(BaseDeferState): - def test_run_defer_deleted_upstream(self, project, unique_schema, other_schema): - project.create_test_schema(other_schema) - self.run_and_save_state(project.project_root) - - # remove "ephemeral_model" + change "table_model" - rm_file("models", "ephemeral_model.sql") - write_file(fixtures.changed_table_model_sql, "models", "table_model.sql") - - # ephemeral_model is now gone. previously this caused a - # keyerror (dbt#2875), now it should pass - run_dbt( - ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"], - expect_pass=True, - ) - - # despite deferral, we should use models just created in our schema - results = run_dbt(["test", "--state", "state", "--defer", "--target", "otherschema"]) - assert other_schema in results[0].node.compiled_code - - # this time with --favor-state: prefer the models in the "other" schema, even though they exist in ours - run_dbt( - [ - "run", - "-m", - "view_model", - "--state", - "state", - "--defer", - "--favor-state", - "--target", - "otherschema", - ], - expect_pass=True, - ) - results = run_dbt(["test", "--state", "state", "--defer", "--favor-state"]) - assert other_schema not in results[0].node.compiled_code - - -class TestDeferStateFlag(BaseDeferState): - def test_defer_state_flag(self, project, unique_schema, other_schema): - project.create_test_schema(other_schema) - - # test that state deferral works correctly - run_dbt(["compile", "--target-path", "target_compile"]) - write_file(fixtures.view_model_now_table_sql, "models", "table_model.sql") - - results = run_dbt(["ls", "--select", "state:modified", "--state", "target_compile"]) - assert results == ["test.table_model"] - - run_dbt(["seed", "--target", "otherschema", "--target-path", "target_otherschema"]) - - # this will fail because we haven't loaded the seed in the default schema - run_dbt( - [ - "run", - "--select", - "state:modified", - "--defer", - "--state", - "target_compile", - "--favor-state", - ], - expect_pass=False, - ) - - # this will fail because we haven't passed in --state - with pytest.raises( - DbtRuntimeError, match="Got a state selector method, but no comparison manifest" - ): - run_dbt( - [ - "run", - "--select", - "state:modified", - "--defer", - "--defer-state", - "target_otherschema", - "--favor-state", - ], - expect_pass=False, - ) - - # this will succeed because we've loaded the seed in other schema and are successfully deferring to it instead - results = run_dbt( - [ - "run", - "--select", - "state:modified", - "--defer", - "--state", - "target_compile", - "--defer-state", - "target_otherschema", - "--favor-state", - ] - ) - - assert len(results.results) == 1 - assert results.results[0].status 
== RunStatus.Success
-        assert results.results[0].node.name == "table_model"
-        assert results.results[0].adapter_response["rows_affected"] == 2
diff --git a/tests/functional/defer_state/test_group_updates.py b/tests/functional/defer_state/test_group_updates.py
deleted file mode 100644
index 5f3e8006..00000000
--- a/tests/functional/defer_state/test_group_updates.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import os
-
-from dbt.exceptions import ParsingError
-from dbt.tests.util import copy_file, run_dbt, write_file
-import pytest
-
-from tests.functional.defer_state import fixtures
-
-
-class GroupSetup:
-    @pytest.fixture(scope="class")
-    def models(self):
-        return {
-            "model_1.sql": fixtures.model_1_sql,
-            "model_2.sql": fixtures.model_2_sql,
-            "schema.yml": fixtures.group_schema_yml,
-        }
-
-    @pytest.fixture(scope="class")
-    def seeds(self):
-        return {"seed.csv": fixtures.seed_csv}
-
-    def group_setup(self):
-        # save initial state
-        run_dbt(["seed"])
-        results = run_dbt(["compile"])
-
-        # add sanity checks for first result
-        assert len(results) == 3
-        seed_result = results[0].node
-        assert seed_result.unique_id == "seed.test.seed"
-        model_1_result = results[1].node
-        assert model_1_result.unique_id == "model.test.model_1"
-        assert model_1_result.group == "finance"
-        model_2_result = results[2].node
-        assert model_2_result.unique_id == "model.test.model_2"
-        assert model_2_result.group == "finance"
-
-
-class TestFullyModifiedGroups(GroupSetup):
-    def test_changed_groups(self, project):
-        self.group_setup()
-
-        # copy manifest.json to "state" directory
-        os.makedirs("state")
-        target_path = os.path.join(project.project_root, "target")
-        copy_file(target_path, "manifest.json", project.project_root, ["state", "manifest.json"])
-
-        # update group name, modify both models so they get picked up
-        write_file(fixtures.modified_model_1_sql, "models", "model_1.sql")
-        write_file(fixtures.modified_model_2_sql, "models", "model_2.sql")
-        write_file(fixtures.group_modified_schema_yml, "models", "schema.yml")
-
-        # this test is flaky if you don't clean first before the build
-        run_dbt(["clean"])
-        # both modified models should be in the results
-        results = run_dbt(["build", "-s", "state:modified", "--defer", "--state", "./state"])
-
-        assert len(results) == 2
-        model_1_result = results[0].node
-        assert model_1_result.unique_id == "model.test.model_1"
-        assert model_1_result.group == "accounting"  # new group name!
-        model_2_result = results[1].node
-        assert model_2_result.unique_id == "model.test.model_2"
-        assert model_2_result.group == "accounting"  # new group name!
-
-
-class TestPartiallyModifiedGroups(GroupSetup):
-    def test_changed_groups(self, project):
-        self.group_setup()
-
-        # copy manifest.json to "state" directory
-        os.makedirs("state")
-        target_path = os.path.join(project.project_root, "target")
-        copy_file(target_path, "manifest.json", project.project_root, ["state", "manifest.json"])
-
-        # update group name, modify model so it gets picked up
-        write_file(fixtures.modified_model_1_sql, "models", "model_1.sql")
-        write_file(fixtures.group_modified_schema_yml, "models", "schema.yml")
-
-        # this test is flaky if you don't clean first before the build
-        run_dbt(["clean"])
-        # only thing in results should be model_1
-        results = run_dbt(["build", "-s", "state:modified", "--defer", "--state", "./state"])
-
-        assert len(results) == 1
-        model_1_result = results[0].node
-        assert model_1_result.unique_id == "model.test.model_1"
-        assert model_1_result.group == "accounting"  # new group name!
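
The two group-update tests above follow the state-comparison recipe used throughout this suite: build once, snapshot `target/manifest.json` into a state directory, edit a file, then select with `state:modified`. For reference, a minimal standalone sketch of that recipe, assuming the same `dbt.tests.util` helpers and a hypothetical project containing a single `model_1`:

    import os
    import shutil

    from dbt.tests.util import run_dbt, write_file


    def test_minimal_state_modified(project):
        # build once and keep the manifest as the comparison state
        run_dbt(["run"])
        os.makedirs("state", exist_ok=True)
        shutil.copyfile("target/manifest.json", "state/manifest.json")

        # nothing has changed yet, so nothing is selected
        assert not run_dbt(["ls", "--select", "state:modified", "--state", "./state"])

        # any edit to the model body makes it selectable
        write_file("select 2 as id", "models", "model_1.sql")
        results = run_dbt(["ls", "--select", "state:modified", "--state", "./state"])
        assert results == ["test.model_1"]
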
- - -class TestBadGroups(GroupSetup): - def test_changed_groups(self, project): - self.group_setup() - - # copy manifest.json to "state" directory - os.makedirs("state") - target_path = os.path.join(project.project_root, "target") - copy_file(target_path, "manifest.json", project.project_root, ["state", "manifest.json"]) - - # update group with invalid name, modify model so it gets picked up - write_file(fixtures.modified_model_1_sql, "models", "model_1.sql") - write_file(fixtures.group_modified_fail_schema_yml, "models", "schema.yml") - - # this test is flaky if you don't clean first before the build - run_dbt(["clean"]) - with pytest.raises(ParsingError, match="Invalid group 'accounting'"): - run_dbt(["build", "-s", "state:modified", "--defer", "--state", "./state"]) diff --git a/tests/functional/defer_state/test_modified_state.py b/tests/functional/defer_state/test_modified_state.py deleted file mode 100644 index e108fe9f..00000000 --- a/tests/functional/defer_state/test_modified_state.py +++ /dev/null @@ -1,964 +0,0 @@ -import os -import random -import shutil -import string - -from dbt.exceptions import ContractBreakingChangeError -from dbt.tests.util import get_manifest, update_config_file, write_file -from dbt_common.exceptions import CompilationError -import pytest - -from tests.functional.defer_state import fixtures -from tests.functional.utils import run_dbt, run_dbt_and_capture - - -class BaseModifiedState: - @pytest.fixture(scope="class") - def models(self): - return { - "table_model.sql": fixtures.table_model_sql, - "view_model.sql": fixtures.view_model_sql, - "ephemeral_model.sql": fixtures.ephemeral_model_sql, - "schema.yml": fixtures.schema_yml, - "exposures.yml": fixtures.exposures_yml, - } - - @pytest.fixture(scope="class") - def macros(self): - return { - "macros.sql": fixtures.macros_sql, - "infinite_macros.sql": fixtures.infinite_macros_sql, - } - - @pytest.fixture(scope="class") - def seeds(self): - return {"seed.csv": fixtures.seed_csv} - - @property - def project_config_update(self): - return { - "seeds": { - "test": { - "quote_columns": False, - } - } - } - - def copy_state(self): - if not os.path.exists("state"): - os.makedirs("state") - shutil.copyfile("target/manifest.json", "state/manifest.json") - - def run_and_save_state(self): - run_dbt(["seed"]) - run_dbt(["run"]) - self.copy_state() - - -class TestChangedSeedContents(BaseModifiedState): - def test_changed_seed_contents_state(self, project): - self.run_and_save_state() - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--exclude", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--select", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 1 - - # add a new row to the seed - changed_seed_contents = fixtures.seed_csv + "\n" + "3,carl" - write_file(changed_seed_contents, "seeds", "seed.csv") - - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--exclude", - "state:unmodified", - "--state", - "./state", - ] - ) - assert len(results) == 1 - assert results[0] == "test.seed" - 
- results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:unmodified", "--state", "./state"] - ) - assert len(results) == 0 - - results = run_dbt(["ls", "--select", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "state:unmodified", "--state", "./state"]) - assert len(results) == 6 - - results = run_dbt(["ls", "--select", "state:modified+", "--state", "./state"]) - assert len(results) == 7 - assert set(results) == { - "test.seed", - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - results = run_dbt(["ls", "--select", "state:unmodified+", "--state", "./state"]) - assert len(results) == 6 - assert set(results) == { - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - shutil.rmtree("./state") - self.copy_state() - - # make a very big seed - # assume each line is ~2 bytes + len(name) - target_size = 1 * 1024 * 1024 - line_size = 64 - num_lines = target_size // line_size - maxlines = num_lines + 4 - seed_lines = [fixtures.seed_csv] - for idx in range(4, maxlines): - value = "".join(random.choices(string.ascii_letters, k=62)) - seed_lines.append(f"{idx},{value}") - seed_contents = "\n".join(seed_lines) - write_file(seed_contents, "seeds", "seed.csv") - - # now if we run again, we should get a warning - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - with pytest.raises(CompilationError) as exc: - run_dbt( - [ - "--warn-error", - "ls", - "--resource-type", - "seed", - "--select", - "state:modified", - "--state", - "./state", - ] - ) - assert ">1MB" in str(exc.value) - - # now check if unmodified returns none - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:unmodified", "--state", "./state"] - ) - assert len(results) == 0 - - shutil.rmtree("./state") - self.copy_state() - - # once it"s in path mode, we don"t mark it as modified if it changes - write_file(seed_contents + "\n1,test", "seeds", "seed.csv") - - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--exclude", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--select", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 1 - - -class TestChangedSeedConfig(BaseModifiedState): - def test_changed_seed_config(self, project): - self.run_and_save_state() - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--exclude", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", 
- "--select", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 1 - - update_config_file({"seeds": {"test": {"quote_columns": False}}}, "dbt_project.yml") - - # quoting change -> seed changed - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--exclude", - "state:unmodified", - "--state", - "./state", - ] - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:unmodified", "--state", "./state"] - ) - assert len(results) == 0 - - -class TestUnrenderedConfigSame(BaseModifiedState): - def test_unrendered_config_same(self, project): - self.run_and_save_state() - results = run_dbt( - ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "model", - "--exclude", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "model", - "--select", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 3 - - # although this is the default value, dbt will recognize it as a change - # for previously-unconfigured models, because it"s been explicitly set - update_config_file({"models": {"test": {"materialized": "view"}}}, "dbt_project.yml") - results = run_dbt( - ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"] - ) - assert len(results) == 1 - assert results[0] == "test.view_model" - - # converse of above statement - results = run_dbt( - [ - "ls", - "--resource-type", - "model", - "--exclude", - "state:unmodified", - "--state", - "./state", - ] - ) - assert len(results) == 1 - assert results[0] == "test.view_model" - - results = run_dbt( - [ - "ls", - "--resource-type", - "model", - "--select", - "state:unmodified", - "--state", - "./state", - ] - ) - assert len(results) == 2 - assert set(results) == { - "test.table_model", - "test.ephemeral_model", - } - - -class TestChangedModelContents(BaseModifiedState): - def test_changed_model_contents(self, project): - self.run_and_save_state() - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 0 - - table_model_update = """ - {{ config(materialized="table") }} - - select * from {{ ref("seed") }} - """ - - write_file(table_model_update, "models", "table_model.sql") - - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - -class TestNewMacro(BaseModifiedState): - def test_new_macro(self, project): - self.run_and_save_state() - - new_macro = """ - {% macro my_other_macro() %} - {% endmacro %} - """ - - # add a new macro to a new file - write_file(new_macro, "macros", "second_macro.sql") - - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 0 - - os.remove("macros/second_macro.sql") - # add a new macro to the existing file - with 
open("macros/macros.sql", "a") as fp: - fp.write(new_macro) - - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 0 - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 0 - - -class TestChangedMacroContents(BaseModifiedState): - def test_changed_macro_contents(self, project): - self.run_and_save_state() - - # modify an existing macro - updated_macro = """ - {% macro my_macro() %} - {% do log("in a macro", info=True) %} - {% endmacro %} - """ - write_file(updated_macro, "macros", "macros.sql") - - # table_model calls this macro - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - - -class TestChangedExposure(BaseModifiedState): - def test_changed_exposure(self, project): - self.run_and_save_state() - - # add an "owner.name" to existing exposure - updated_exposure = fixtures.exposures_yml + "\n name: John Doe\n" - write_file(updated_exposure, "models", "exposures.yml") - - results = run_dbt(["run", "--models", "+state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "view_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 0 - - -class TestChangedContractUnversioned(BaseModifiedState): - MODEL_UNIQUE_ID = "model.test.table_model" - CONTRACT_SCHEMA_YML = fixtures.contract_schema_yml - MODIFIED_SCHEMA_YML = fixtures.modified_contract_schema_yml - DISABLED_SCHEMA_YML = fixtures.disabled_contract_schema_yml - NO_CONTRACT_SCHEMA_YML = fixtures.no_contract_schema_yml - - def test_changed_contract(self, project): - self.run_and_save_state() - - # update contract for table_model - write_file(self.CONTRACT_SCHEMA_YML, "models", "schema.yml") - - # This will find the table_model node modified both through a config change - # and by a non-breaking change to contract: true - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - manifest = get_manifest(project.project_root) - model_unique_id = self.MODEL_UNIQUE_ID - model = manifest.nodes[model_unique_id] - expected_unrendered_config = {"contract": {"enforced": True}, "materialized": "table"} - assert model.unrendered_config == expected_unrendered_config - - # Run it again with "state:modified:contract", still finds modified due to contract: true - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 1 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - first_contract_checksum = model.contract.checksum - assert first_contract_checksum - # save a new state - self.copy_state() - - # This should raise because a column name has changed - write_file(self.MODIFIED_SCHEMA_YML, "models", "schema.yml") - results = run_dbt(["run"], expect_pass=False) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # double check different contract_checksums - assert first_contract_checksum != second_contract_checksum 
- - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"], expect_pass=False - ) - expected_error = "This model has an enforced contract that failed." - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Please ensure the name, data_type, and number of columns in your contract match the columns in your model's definition" - assert expected_error in logs - assert expected_warning in logs - assert expected_change in logs - - # Go back to schema file without contract. Should throw a warning. - write_file(self.NO_CONTRACT_SCHEMA_YML, "models", "schema.yml") - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Contract enforcement was removed" - - # Now disable the contract. Should throw a warning - force warning into an error. - write_file(self.DISABLED_SCHEMA_YML, "models", "schema.yml") - with pytest.raises(CompilationError): - _, logs = run_dbt_and_capture( - [ - "--warn-error", - "run", - "--models", - "state:modified.contract", - "--state", - "./state", - ] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Contract enforcement was removed" - - -class TestChangedContractVersioned(BaseModifiedState): - MODEL_UNIQUE_ID = "model.test.table_model.v1" - CONTRACT_SCHEMA_YML = fixtures.versioned_contract_schema_yml - MODIFIED_SCHEMA_YML = fixtures.versioned_modified_contract_schema_yml - DISABLED_SCHEMA_YML = fixtures.versioned_disabled_contract_schema_yml - NO_CONTRACT_SCHEMA_YML = fixtures.versioned_no_contract_schema_yml - - def test_changed_contract_versioned(self, project): - self.run_and_save_state() - - # update contract for table_model - write_file(self.CONTRACT_SCHEMA_YML, "models", "schema.yml") - - # This will find the table_model node modified both through a config change - # and by a non-breaking change to contract: true - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - manifest = get_manifest(project.project_root) - model_unique_id = self.MODEL_UNIQUE_ID - model = manifest.nodes[model_unique_id] - expected_unrendered_config = {"contract": {"enforced": True}, "materialized": "table"} - assert model.unrendered_config == expected_unrendered_config - - # Run it again with "state:modified:contract", still finds modified due to contract: true - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 1 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - first_contract_checksum = model.contract.checksum - assert first_contract_checksum - # save a new state - self.copy_state() - - # This should raise because a column name has changed - write_file(self.MODIFIED_SCHEMA_YML, "models", "schema.yml") - results = run_dbt(["run"], expect_pass=False) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - 
# double check different contract_checksums - assert first_contract_checksum != second_contract_checksum - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - - # Go back to schema file without contract. Should raise an error. - write_file(self.NO_CONTRACT_SCHEMA_YML, "models", "schema.yml") - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - - # Now disable the contract. Should raise an error. - write_file(self.DISABLED_SCHEMA_YML, "models", "schema.yml") - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - - -class TestChangedConstraintUnversioned(BaseModifiedState): - def test_changed_constraint(self, project): - self.run_and_save_state() - - # update constraint for table_model - write_file(fixtures.constraint_schema_yml, "models", "schema.yml") - - # This will find the table_model node modified both through adding constraint - # and by a non-breaking change to contract: true - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - manifest = get_manifest(project.project_root) - model_unique_id = "model.test.table_model" - model = manifest.nodes[model_unique_id] - expected_unrendered_config = {"contract": {"enforced": True}, "materialized": "table"} - assert model.unrendered_config == expected_unrendered_config - - # Run it again with "state:modified:contract", still finds modified due to contract: true - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 1 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - first_contract_checksum = model.contract.checksum - assert first_contract_checksum - # save a new state - self.copy_state() - - # This should raise because a column level constraint was removed - write_file(fixtures.modified_column_constraint_schema_yml, "models", "schema.yml") - # we don't have a way to know this failed unless we have a previous state to refer to, so the run succeeds - results = run_dbt(["run"]) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # double check different contract_checksums - assert first_contract_checksum != second_contract_checksum - # since the models are unversioned, they raise a warning but not an error - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Enforced column level constraints were removed" - assert expected_warning in logs - assert expected_change in logs - - # This should raise because a model level constraint was removed (primary_key on id) - write_file(fixtures.modified_model_constraint_schema_yml, "models", "schema.yml") - # we don't have a way to know this failed unless we have a previous state to refer to, so the run succeeds - results = run_dbt(["run"]) - assert len(results) == 2 - manifest = 
get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # double check different contract_checksums - assert first_contract_checksum != second_contract_checksum - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Enforced model level constraints were removed" - assert expected_warning in logs - assert expected_change in logs - - -class TestChangedMaterializationConstraint(BaseModifiedState): - def test_changed_materialization(self, project): - self.run_and_save_state() - - # update constraint for table_model - write_file(fixtures.constraint_schema_yml, "models", "schema.yml") - - # This will find the table_model node modified both through adding constraint - # and by a non-breaking change to contract: true - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - manifest = get_manifest(project.project_root) - model_unique_id = "model.test.table_model" - model = manifest.nodes[model_unique_id] - expected_unrendered_config = {"contract": {"enforced": True}, "materialized": "table"} - assert model.unrendered_config == expected_unrendered_config - - # Run it again with "state:modified:contract", still finds modified due to contract: true - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 1 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - first_contract_checksum = model.contract.checksum - assert first_contract_checksum - # save a new state - self.copy_state() - - # This should raise because materialization changed from table to view - write_file(fixtures.table_model_now_view_sql, "models", "table_model.sql") - # we don't have a way to know this failed unless we have a previous state to refer to, so the run succeeds - results = run_dbt(["run"]) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # double check different contract_checksums - assert first_contract_checksum != second_contract_checksum - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Materialization changed with enforced constraints" - assert expected_warning in logs - assert expected_change in logs - - # This should not raise because materialization changed from table to incremental, both enforce constraints - write_file(fixtures.table_model_now_incremental_sql, "models", "table_model.sql") - # we don't have a way to know this failed unless we have a previous state to refer to, so the run succeeds - results = run_dbt(["run"]) - assert len(results) == 2 - - # This should pass because materialization changed from view to table which is the same as just adding new constraint, not breaking - write_file(fixtures.view_model_now_table_sql, "models", "view_model.sql") - 
write_file(fixtures.table_model_sql, "models", "table_model.sql") - results = run_dbt(["run"]) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # contract_checksums should be equal because we only save constraint related changes if the materialization is table/incremental - assert first_contract_checksum == second_contract_checksum - run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 2 - - -my_model_sql = """ -select 1 as id -""" - -modified_my_model_sql = """ --- a comment -select 1 as id -""" - -modified_my_model_non_breaking_sql = """ --- a comment -select 1 as id, 'blue' as color -""" - -my_model_yml = """ -models: - - name: my_model - latest_version: 1 - config: - contract: - enforced: true - columns: - - name: id - data_type: int - versions: - - v: 1 -""" - -modified_my_model_yml = """ -models: - - name: my_model - latest_version: 1 - config: - contract: - enforced: true - columns: - - name: id - data_type: text - versions: - - v: 1 -""" - -modified_my_model_non_breaking_yml = """ -models: - - name: my_model - latest_version: 1 - config: - contract: - enforced: true - columns: - - name: id - data_type: int - - name: color - data_type: text - versions: - - v: 1 -""" - - -class TestModifiedBodyAndContract: - @pytest.fixture(scope="class") - def models(self): - return { - "my_model.sql": my_model_sql, - "my_model.yml": my_model_yml, - } - - def copy_state(self): - if not os.path.exists("state"): - os.makedirs("state") - shutil.copyfile("target/manifest.json", "state/manifest.json") - - def test_modified_body_and_contract(self, project): - results = run_dbt(["run"]) - assert len(results) == 1 - self.copy_state() - - # Change both body and contract in a *breaking* way (= changing data_type of existing column) - write_file(modified_my_model_yml, "models", "my_model.yml") - write_file(modified_my_model_sql, "models", "my_model.sql") - - # Should raise even without specifying state:modified.contract - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "-s", "state:modified", "--state", "./state"]) - - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - - # Change both body and contract in a *non-breaking* way (= adding a new column) - write_file(modified_my_model_non_breaking_yml, "models", "my_model.yml") - write_file(modified_my_model_non_breaking_sql, "models", "my_model.sql") - - # Should pass - run_dbt(["run", "-s", "state:modified", "--state", "./state"]) - - # The model's contract has changed, even if non-breaking, so it should be selected by 'state:modified.contract' - results = run_dbt(["list", "-s", "state:modified.contract", "--state", "./state"]) - assert results == ["test.my_model.v1"] - - -modified_table_model_access_yml = """ -version: 2 -models: - - name: table_model - access: public -""" - - -class TestModifiedAccess(BaseModifiedState): - def test_changed_access(self, project): - self.run_and_save_state() - - # No access change - assert not run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - - # Modify access (protected -> public) - write_file(modified_table_model_access_yml, "models", "schema.yml") - assert run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - - results = run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - assert results == 
["test.table_model"] - - -modified_table_model_access_yml = """ -version: 2 -models: - - name: table_model - deprecation_date: 2020-01-01 -""" - - -class TestModifiedDeprecationDate(BaseModifiedState): - def test_changed_access(self, project): - self.run_and_save_state() - - # No access change - assert not run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - - # Modify deprecation_date (None -> 2020-01-01) - write_file(modified_table_model_access_yml, "models", "schema.yml") - assert run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - - results = run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - assert results == ["test.table_model"] - - -modified_table_model_version_yml = """ -version: 2 -models: - - name: table_model - versions: - - v: 1 - defined_in: table_model -""" - - -class TestModifiedVersion(BaseModifiedState): - def test_changed_access(self, project): - self.run_and_save_state() - - # Change version (null -> v1) - write_file(modified_table_model_version_yml, "models", "schema.yml") - - results = run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - assert results == ["test.table_model.v1"] - - -table_model_latest_version_yml = """ -version: 2 -models: - - name: table_model - latest_version: 1 - versions: - - v: 1 - defined_in: table_model -""" - - -modified_table_model_latest_version_yml = """ -version: 2 -models: - - name: table_model - latest_version: 2 - versions: - - v: 1 - defined_in: table_model - - v: 2 -""" - - -class TestModifiedLatestVersion(BaseModifiedState): - def test_changed_access(self, project): - # Setup initial latest_version: 1 - write_file(table_model_latest_version_yml, "models", "schema.yml") - - self.run_and_save_state() - - # Bump latest version - write_file(fixtures.table_model_sql, "models", "table_model_v2.sql") - write_file(modified_table_model_latest_version_yml, "models", "schema.yml") - - results = run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - assert results == ["test.table_model.v1", "test.table_model.v2"] diff --git a/tests/functional/defer_state/test_run_results_state.py b/tests/functional/defer_state/test_run_results_state.py deleted file mode 100644 index ae5941c7..00000000 --- a/tests/functional/defer_state/test_run_results_state.py +++ /dev/null @@ -1,481 +0,0 @@ -import os -import shutil - -from dbt.tests.util import run_dbt, write_file -import pytest - -from tests.functional.defer_state import fixtures - - -class BaseRunResultsState: - @pytest.fixture(scope="class") - def models(self): - return { - "table_model.sql": fixtures.table_model_sql, - "view_model.sql": fixtures.view_model_sql, - "ephemeral_model.sql": fixtures.ephemeral_model_sql, - "schema.yml": fixtures.schema_yml, - "exposures.yml": fixtures.exposures_yml, - } - - @pytest.fixture(scope="class") - def macros(self): - return { - "macros.sql": fixtures.macros_sql, - "infinite_macros.sql": fixtures.infinite_macros_sql, - } - - @pytest.fixture(scope="class") - def seeds(self): - return {"seed.csv": fixtures.seed_csv} - - @property - def project_config_update(self): - return { - "seeds": { - "test": { - "quote_columns": False, - } - } - } - - def clear_state(self): - shutil.rmtree("./state") - - def copy_state(self): - if not os.path.exists("state"): - os.makedirs("state") - shutil.copyfile("target/manifest.json", "state/manifest.json") - shutil.copyfile("target/run_results.json", "state/run_results.json") - - def run_and_save_state(self): - run_dbt(["build"]) - self.copy_state() - - def rebuild_run_dbt(self, 
expect_pass=True): - self.clear_state() - run_dbt(["build"], expect_pass=expect_pass) - self.copy_state() - - def update_view_model_bad_sql(self): - # update view model to generate a failure case - not_unique_sql = "select * from forced_error" - write_file(not_unique_sql, "models", "view_model.sql") - - def update_view_model_failing_tests(self, with_dupes=True, with_nulls=False): - # test failure on build tests - # fail the unique test - select_1 = "select 1 as id" - select_stmts = [select_1] - if with_dupes: - select_stmts.append(select_1) - if with_nulls: - select_stmts.append("select null as id") - failing_tests_sql = " union all ".join(select_stmts) - write_file(failing_tests_sql, "models", "view_model.sql") - - def update_unique_test_severity_warn(self): - # change the unique test severity from error to warn and reuse the same view_model.sql changes above - new_config = fixtures.schema_yml.replace("error", "warn") - write_file(new_config, "models", "schema.yml") - - -class TestSeedRunResultsState(BaseRunResultsState): - def test_seed_run_results_state(self, project): - self.run_and_save_state() - self.clear_state() - run_dbt(["seed"]) - self.copy_state() - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "result:success", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "result:success", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "result:success+", "--state", "./state"]) - assert len(results) == 7 - assert set(results) == { - "test.seed", - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - # add a new faulty row to the seed - changed_seed_contents = fixtures.seed_csv + "\n" + "\\\3,carl" - write_file(changed_seed_contents, "seeds", "seed.csv") - - self.clear_state() - run_dbt(["seed"], expect_pass=False) - self.copy_state() - - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "result:error", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "result:error", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"]) - assert len(results) == 7 - assert set(results) == { - "test.seed", - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - -class TestBuildRunResultsState(BaseRunResultsState): - def test_build_run_results_state(self, project): - self.run_and_save_state() - results = run_dbt(["build", "--select", "result:error", "--state", "./state"]) - assert len(results) == 0 - - self.update_view_model_bad_sql() - self.rebuild_run_dbt(expect_pass=False) - - results = run_dbt( - ["build", "--select", "result:error", "--state", "./state"], expect_pass=False - ) - assert len(results) == 3 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"view_model", "not_null_view_model_id", "unique_view_model_id"} - - results = run_dbt(["ls", "--select", "result:error", "--state", "./state"]) - assert len(results) == 3 - assert set(results) == { - "test.view_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - } 
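
The `result:<status>` selectors exercised in this file are driven by the statuses recorded in `run_results.json`, which `copy_state` above preserves next to the manifest. A simplified sketch of that lookup, not dbt's real selector code:

    import json


    def ids_with_status(run_results_path, status):
        # run_results.json stores one entry per executed node, each carrying
        # a unique_id and a status such as "success", "error", "fail" or "warn"
        with open(run_results_path) as fp:
            artifact = json.load(fp)
        return {r["unique_id"] for r in artifact["results"] if r["status"] == status}


    # e.g. the starting points of `--select result:error+ --state ./state`
    # errored = ids_with_status("state/run_results.json", "error")
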
- - results = run_dbt( - ["build", "--select", "result:error+", "--state", "./state"], expect_pass=False - ) - assert len(results) == 4 - nodes = set([elem.node.name for elem in results]) - assert nodes == { - "table_model", - "view_model", - "not_null_view_model_id", - "unique_view_model_id", - } - - results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"]) - assert len(results) == 6 # includes exposure - assert set(results) == { - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - self.update_view_model_failing_tests() - self.rebuild_run_dbt(expect_pass=False) - - results = run_dbt( - ["build", "--select", "result:fail", "--state", "./state"], expect_pass=False - ) - assert len(results) == 1 - assert results[0].node.name == "unique_view_model_id" - - results = run_dbt(["ls", "--select", "result:fail", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.unique_view_model_id" - - results = run_dbt( - ["build", "--select", "result:fail+", "--state", "./state"], expect_pass=False - ) - assert len(results) == 1 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"unique_view_model_id"} - - results = run_dbt(["ls", "--select", "result:fail+", "--state", "./state"]) - assert len(results) == 1 - assert set(results) == {"test.unique_view_model_id"} - - self.update_unique_test_severity_warn() - self.rebuild_run_dbt(expect_pass=True) - - results = run_dbt( - ["build", "--select", "result:warn", "--state", "./state"], expect_pass=True - ) - assert len(results) == 1 - assert results[0].node.name == "unique_view_model_id" - - results = run_dbt(["ls", "--select", "result:warn", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.unique_view_model_id" - - results = run_dbt( - ["build", "--select", "result:warn+", "--state", "./state"], expect_pass=True - ) - assert len(results) == 1 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"unique_view_model_id"} - - results = run_dbt(["ls", "--select", "result:warn+", "--state", "./state"]) - assert len(results) == 1 - assert set(results) == {"test.unique_view_model_id"} - - -class TestRunRunResultsState(BaseRunResultsState): - def test_run_run_results_state(self, project): - self.run_and_save_state() - results = run_dbt( - ["run", "--select", "result:success", "--state", "./state"], expect_pass=True - ) - assert len(results) == 2 - assert results[0].node.name == "view_model" - assert results[1].node.name == "table_model" - - # clear state and rerun upstream view model to test + operator - self.clear_state() - run_dbt(["run", "--select", "view_model"], expect_pass=True) - self.copy_state() - results = run_dbt( - ["run", "--select", "result:success+", "--state", "./state"], expect_pass=True - ) - assert len(results) == 2 - assert results[0].node.name == "view_model" - assert results[1].node.name == "table_model" - - # check we are starting from a place with 0 errors - results = run_dbt(["run", "--select", "result:error", "--state", "./state"]) - assert len(results) == 0 - - self.update_view_model_bad_sql() - self.clear_state() - run_dbt(["run"], expect_pass=False) - self.copy_state() - - # test single result selector on error - results = run_dbt( - ["run", "--select", "result:error", "--state", "./state"], expect_pass=False - ) - assert len(results) == 1 - assert results[0].node.name == "view_model" - - # test + operator selection on 
error
-        results = run_dbt(
-            ["run", "--select", "result:error+", "--state", "./state"], expect_pass=False
-        )
-        assert len(results) == 2
-        assert results[0].node.name == "view_model"
-        assert results[1].node.name == "table_model"
-
-        # single result selector on skipped. Expect this to pass because underlying view already defined above
-        results = run_dbt(
-            ["run", "--select", "result:skipped", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "table_model"
-
-        # add a downstream model that depends on table_model for skipped+ selector
-        downstream_model_sql = "select * from {{ref('table_model')}}"
-        write_file(downstream_model_sql, "models", "table_model_downstream.sql")
-
-        self.clear_state()
-        run_dbt(["run"], expect_pass=False)
-        self.copy_state()
-
-        results = run_dbt(
-            ["run", "--select", "result:skipped+", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 2
-        assert results[0].node.name == "table_model"
-        assert results[1].node.name == "table_model_downstream"
-
-
-class TestTestRunResultsState(BaseRunResultsState):
-    def test_test_run_results_state(self, project):
-        self.run_and_save_state()
-        # run passed nodes
-        results = run_dbt(
-            ["test", "--select", "result:pass", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 2
-        nodes = set([elem.node.name for elem in results])
-        assert nodes == {"unique_view_model_id", "not_null_view_model_id"}
-
-        # run passed nodes with + operator
-        results = run_dbt(
-            ["test", "--select", "result:pass+", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 2
-        nodes = set([elem.node.name for elem in results])
-        assert nodes == {"unique_view_model_id", "not_null_view_model_id"}
-
-        self.update_view_model_failing_tests()
-        self.rebuild_run_dbt(expect_pass=False)
-
-        # test with failure selector
-        results = run_dbt(
-            ["test", "--select", "result:fail", "--state", "./state"], expect_pass=False
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "unique_view_model_id"
-
-        # test with failure selector and + operator
-        results = run_dbt(
-            ["test", "--select", "result:fail+", "--state", "./state"], expect_pass=False
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "unique_view_model_id"
-
-        self.update_unique_test_severity_warn()
-        # rebuild - expect_pass = True because we changed the error to a warning this time around
-        self.rebuild_run_dbt(expect_pass=True)
-
-        # test with warn selector
-        results = run_dbt(
-            ["test", "--select", "result:warn", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "unique_view_model_id"
-
-        # test with warn selector and + operator
-        results = run_dbt(
-            ["test", "--select", "result:warn+", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "unique_view_model_id"
-
-
-class TestConcurrentSelectionRunResultsState(BaseRunResultsState):
-    def test_concurrent_selection_run_run_results_state(self, project):
-        self.run_and_save_state()
-        results = run_dbt(
-            ["run", "--select", "state:modified+", "result:error+", "--state", "./state"]
-        )
-        assert len(results) == 0
-
-        self.update_view_model_bad_sql()
-        self.clear_state()
-        run_dbt(["run"], expect_pass=False)
-        self.copy_state()
-
-        # add a new failing dbt model
-        bad_sql = "select * from forced_error"
-        write_file(bad_sql, "models", "table_model_modified_example.sql")
-
-        results = run_dbt(
-            ["run", "--select", "state:modified+", 
"result:error+", "--state", "./state"], - expect_pass=False, - ) - assert len(results) == 3 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"view_model", "table_model_modified_example", "table_model"} - - -class TestConcurrentSelectionTestRunResultsState(BaseRunResultsState): - def test_concurrent_selection_test_run_results_state(self, project): - self.run_and_save_state() - # create failure test case for result:fail selector - self.update_view_model_failing_tests(with_nulls=True) - - # run dbt build again to trigger test errors - self.rebuild_run_dbt(expect_pass=False) - - # get the failures from - results = run_dbt( - [ - "test", - "--select", - "result:fail", - "--exclude", - "not_null_view_model_id", - "--state", - "./state", - ], - expect_pass=False, - ) - assert len(results) == 1 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"unique_view_model_id"} - - -class TestConcurrentSelectionBuildRunResultsState(BaseRunResultsState): - def test_concurrent_selectors_build_run_results_state(self, project): - self.run_and_save_state() - results = run_dbt( - ["build", "--select", "state:modified+", "result:error+", "--state", "./state"] - ) - assert len(results) == 0 - - self.update_view_model_bad_sql() - self.rebuild_run_dbt(expect_pass=False) - - # add a new failing dbt model - bad_sql = "select * from forced_error" - write_file(bad_sql, "models", "table_model_modified_example.sql") - - results = run_dbt( - ["build", "--select", "state:modified+", "result:error+", "--state", "./state"], - expect_pass=False, - ) - assert len(results) == 5 - nodes = set([elem.node.name for elem in results]) - assert nodes == { - "table_model_modified_example", - "view_model", - "table_model", - "not_null_view_model_id", - "unique_view_model_id", - } - - self.update_view_model_failing_tests() - - # create error model case for result:error selector - more_bad_sql = "select 1 as id from not_exists" - write_file(more_bad_sql, "models", "error_model.sql") - - # create something downstream from the error model to rerun - downstream_model_sql = "select * from {{ ref('error_model') }} )" - write_file(downstream_model_sql, "models", "downstream_of_error_model.sql") - - # regenerate build state - self.rebuild_run_dbt(expect_pass=False) - - # modify model again to trigger the state:modified selector - bad_again_sql = "select * from forced_anothererror" - write_file(bad_again_sql, "models", "table_model_modified_example.sql") - - results = run_dbt( - [ - "build", - "--select", - "state:modified+", - "result:error+", - "result:fail+", - "--state", - "./state", - ], - expect_pass=False, - ) - assert len(results) == 4 - nodes = set([elem.node.name for elem in results]) - assert nodes == { - "error_model", - "downstream_of_error_model", - "table_model_modified_example", - "unique_view_model_id", - } From c51b214df31a1211578fa93ae2bc3753c9fbccd0 Mon Sep 17 00:00:00 2001 From: Matthew McKnight <91097623+McKnight-42@users.noreply.github.com> Date: Fri, 10 May 2024 10:24:56 -0500 Subject: [PATCH 07/15] delete tests taht should be in core (#92) --- .../test_custom_materialization.py | 80 ------------------- 1 file changed, 80 deletions(-) delete mode 100644 tests/functional/materializations/test_custom_materialization.py diff --git a/tests/functional/materializations/test_custom_materialization.py b/tests/functional/materializations/test_custom_materialization.py deleted file mode 100644 index 6aa69a4b..00000000 --- a/tests/functional/materializations/test_custom_materialization.py +++ 
/dev/null @@ -1,80 +0,0 @@ -from dbt.tests.util import run_dbt -import pytest - - -models__model_sql = """ -{{ config(materialized='view') }} -select 1 as id - -""" - - -@pytest.fixture(scope="class") -def models(): - return {"model.sql": models__model_sql} - - -class TestOverrideAdapterDependency: - # make sure that if there's a dependency with an adapter-specific - # materialization, we honor that materialization - @pytest.fixture(scope="class") - def packages(self): - return {"packages": [{"local": "override-view-adapter-dep"}]} - - def test_adapter_dependency(self, project, override_view_adapter_dep): - run_dbt(["deps"]) - # this should error because the override is buggy - run_dbt(["run"], expect_pass=False) - - -class TestOverrideDefaultDependency: - @pytest.fixture(scope="class") - def packages(self): - return {"packages": [{"local": "override-view-default-dep"}]} - - def test_default_dependency(self, project, override_view_default_dep): - run_dbt(["deps"]) - # this should error because the override is buggy - run_dbt(["run"], expect_pass=False) - - -class TestOverrideAdapterDependencyPassing: - @pytest.fixture(scope="class") - def packages(self): - return {"packages": [{"local": "override-view-adapter-pass-dep"}]} - - def test_default_dependency(self, project, override_view_adapter_pass_dep): - run_dbt(["deps"]) - # this should pass because the override is ok - run_dbt(["run"]) - - -class TestOverrideAdapterLocal: - # make sure that the local default wins over the dependency - # adapter-specific - - @pytest.fixture(scope="class") - def packages(self): - return {"packages": [{"local": "override-view-adapter-pass-dep"}]} - - @pytest.fixture(scope="class") - def project_config_update(self): - return {"macro-paths": ["override-view-adapter-macros"]} - - def test_default_dependency( - self, project, override_view_adapter_pass_dep, override_view_adapter_macros - ): - run_dbt(["deps"]) - # this should error because the override is buggy - run_dbt(["run"], expect_pass=False) - - -class TestOverrideDefaultReturn: - @pytest.fixture(scope="class") - def project_config_update(self): - return {"macro-paths": ["override-view-return-no-relation"]} - - def test_default_dependency(self, project, override_view_return_no_relation): - run_dbt(["deps"]) - results = run_dbt(["run"], expect_pass=False) - assert "did not explicitly return a list of relations" in results[0].message From dc5c0f5acb8df8c34d4c56e385aa38cc65b4582f Mon Sep 17 00:00:00 2001 From: Colin Rogers <111200756+colin-rogers-dbt@users.noreply.github.com> Date: Mon, 13 May 2024 15:44:51 -0700 Subject: [PATCH 08/15] delete unneeded list command (#94) --- tests/functional/list/fixtures.py | 213 -------- tests/functional/list/test_list.py | 798 ----------------------------- 2 files changed, 1011 deletions(-) delete mode 100644 tests/functional/list/fixtures.py delete mode 100644 tests/functional/list/test_list.py diff --git a/tests/functional/list/fixtures.py b/tests/functional/list/fixtures.py deleted file mode 100644 index ae5514c6..00000000 --- a/tests/functional/list/fixtures.py +++ /dev/null @@ -1,213 +0,0 @@ -import pytest -from dbt.tests.fixtures.project import write_project_files - - -snapshots__snapshot_sql = """ -{% snapshot my_snapshot %} - {{ - config( - target_database=var('target_database', database), - target_schema=schema, - unique_key='id', - strategy='timestamp', - updated_at='updated_at', - ) - }} - select * from {{database}}.{{schema}}.seed -{% endsnapshot %} - -""" - -tests__t_sql = """ -select 1 as id limit 0 - -""" - 
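
These fixtures feed the `dbt ls` tests deleted next, which compare each line of `--output json` against expected node metadata. A small sketch of consuming that output, assuming the same `run_dbt` helper used throughout this suite:

    import json

    from dbt.tests.util import run_dbt


    def ls_nodes(selector):
        # `dbt ls --output json` emits one JSON document per selected node;
        # run_dbt returns those lines as a list of strings
        lines = run_dbt(["ls", "--select", selector, "--output", "json"])
        return [json.loads(line) for line in lines]


    # e.g. everything downstream of the `outer` fixture model
    # nodes = ls_nodes("outer+")
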
-models__schema_yml = """ -version: 2 -models: - - name: outer - description: The outer table - columns: - - name: id - description: The id value - data_tests: - - unique - - not_null - -sources: - - name: my_source - tables: - - name: my_table - -""" - -models__ephemeral_sql = """ - -{{ config(materialized='ephemeral') }} - -select - 1 as id, - {{ dbt.date_trunc('day', dbt.current_timestamp()) }} as created_at - -""" - -models__metric_flow = """ - -select - {{ dbt.date_trunc('day', dbt.current_timestamp()) }} as date_day - -""" - -models__incremental_sql = """ -{{ - config( - materialized = "incremental", - incremental_strategy = "delete+insert", - ) -}} - -select * from {{ ref('seed') }} - -{% if is_incremental() %} - where a > (select max(a) from {{this}}) -{% endif %} - -""" - -models__docs_md = """ -{% docs my_docs %} - some docs -{% enddocs %} - -""" - -models__outer_sql = """ -select * from {{ ref('ephemeral') }} - -""" - -models__sub__inner_sql = """ -select * from {{ ref('outer') }} - -""" - -macros__macro_stuff_sql = """ -{% macro cool_macro() %} - wow! -{% endmacro %} - -{% macro other_cool_macro(a, b) %} - cool! -{% endmacro %} - -""" - -seeds__seed_csv = """a,b -1,2 -""" - -analyses__a_sql = """ -select 4 as id - -""" - -semantic_models__sm_yml = """ -semantic_models: - - name: my_sm - model: ref('outer') - defaults: - agg_time_dimension: created_at - entities: - - name: my_entity - type: primary - expr: id - dimensions: - - name: created_at - type: time - type_params: - time_granularity: day - measures: - - name: total_outer_count - agg: count - expr: 1 - -""" - -metrics__m_yml = """ -metrics: - - name: total_outer - type: simple - description: The total count of outer - label: Total Outer - type_params: - measure: total_outer_count -""" - - -@pytest.fixture(scope="class") -def snapshots(): - return {"snapshot.sql": snapshots__snapshot_sql} - - -@pytest.fixture(scope="class") -def tests(): - return {"t.sql": tests__t_sql} - - -@pytest.fixture(scope="class") -def models(): - return { - "schema.yml": models__schema_yml, - "ephemeral.sql": models__ephemeral_sql, - "incremental.sql": models__incremental_sql, - "docs.md": models__docs_md, - "outer.sql": models__outer_sql, - "metricflow_time_spine.sql": models__metric_flow, - "sm.yml": semantic_models__sm_yml, - "m.yml": metrics__m_yml, - "sub": {"inner.sql": models__sub__inner_sql}, - } - - -@pytest.fixture(scope="class") -def macros(): - return {"macro_stuff.sql": macros__macro_stuff_sql} - - -@pytest.fixture(scope="class") -def seeds(): - return {"seed.csv": seeds__seed_csv} - - -@pytest.fixture(scope="class") -def analyses(): - return {"a.sql": analyses__a_sql} - - -@pytest.fixture(scope="class") -def semantic_models(): - return {"sm.yml": semantic_models__sm_yml} - - -@pytest.fixture(scope="class") -def metrics(): - return {"m.yml": metrics__m_yml} - - -@pytest.fixture(scope="class") -def project_files( - project_root, - snapshots, - tests, - models, - macros, - seeds, - analyses, -): - write_project_files(project_root, "snapshots", snapshots) - write_project_files(project_root, "tests", tests) - write_project_files(project_root, "models", models) - write_project_files(project_root, "macros", macros) - write_project_files(project_root, "seeds", seeds) - write_project_files(project_root, "analyses", analyses) diff --git a/tests/functional/list/test_list.py b/tests/functional/list/test_list.py deleted file mode 100644 index f932cba7..00000000 --- a/tests/functional/list/test_list.py +++ /dev/null @@ -1,798 +0,0 @@ -import json 
-from os.path import normcase, normpath - -from dbt.logger import log_manager -from dbt.tests.util import run_dbt -import pytest - - -class TestList: - def dir(self, value): - return normpath(value) - - @pytest.fixture(scope="class") - def project_config_update(self): - return { - "config-version": 2, - "analysis-paths": [self.dir("analyses")], - "snapshot-paths": [self.dir("snapshots")], - "macro-paths": [self.dir("macros")], - "seed-paths": [self.dir("seeds")], - "test-paths": [self.dir("tests")], - "seeds": { - "quote_columns": False, - }, - } - - def run_dbt_ls(self, args=None, expect_pass=True): - log_manager.stdout_console() - full_args = ["ls"] - if args is not None: - full_args += args - - result = run_dbt(args=full_args, expect_pass=expect_pass) - - log_manager.stdout_console() - return result - - def assert_json_equal(self, json_str, expected): - assert json.loads(json_str) == expected - - def expect_given_output(self, args, expectations): - for key, values in expectations.items(): - ls_result = self.run_dbt_ls(args + ["--output", key]) - if not isinstance(values, (list, tuple)): - values = [values] - assert len(ls_result) == len(values) - for got, expected in zip(ls_result, values): - if key == "json": - self.assert_json_equal(got, expected) - else: - assert got == expected - - def expect_snapshot_output(self, project): - expectations = { - "name": "my_snapshot", - "selector": "test.snapshot.my_snapshot", - "json": { - "name": "my_snapshot", - "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "snapshot", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "target_database": project.database, - "target_schema": project.test_schema, - "unique_key": "id", - "strategy": "timestamp", - "updated_at": "updated_at", - "full_refresh": None, - "database": None, - "schema": None, - "alias": None, - "check_cols": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - }, - "unique_id": "snapshot.test.my_snapshot", - "original_file_path": normalize("snapshots/snapshot.sql"), - "alias": "my_snapshot", - "resource_type": "snapshot", - }, - "path": self.dir("snapshots/snapshot.sql"), - } - self.expect_given_output(["--resource-type", "snapshot"], expectations) - - def expect_analyses_output(self): - expectations = { - "name": "a", - "selector": "test.analysis.a", - "json": { - "name": "a", - "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "unique_key": None, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - }, - "unique_id": "analysis.test.a", - "original_file_path": normalize("analyses/a.sql"), - "alias": "a", - "resource_type": "analysis", - }, - "path": self.dir("analyses/a.sql"), - } - self.expect_given_output(["--resource-type", 
"analysis"], expectations) - - def expect_model_output(self): - expectations = { - "name": ("ephemeral", "incremental", "inner", "metricflow_time_spine", "outer"), - "selector": ( - "test.ephemeral", - "test.incremental", - "test.sub.inner", - "test.metricflow_time_spine", - "test.outer", - ), - "json": ( - { - "name": "ephemeral", - "package_name": "test", - "depends_on": { - "nodes": [], - "macros": ["macro.dbt.current_timestamp", "macro.dbt.date_trunc"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "ephemeral", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/ephemeral.sql"), - "unique_id": "model.test.ephemeral", - "alias": "ephemeral", - "resource_type": "model", - }, - { - "name": "incremental", - "package_name": "test", - "depends_on": { - "nodes": ["seed.test.seed"], - "macros": ["macro.dbt.is_incremental"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "incremental", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": "delete+insert", - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/incremental.sql"), - "unique_id": "model.test.incremental", - "alias": "incremental", - "resource_type": "model", - }, - { - "name": "inner", - "package_name": "test", - "depends_on": { - "nodes": ["model.test.outer"], - "macros": [], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/sub/inner.sql"), - "unique_id": "model.test.inner", - "alias": "inner", - "resource_type": "model", - }, - { - "name": "metricflow_time_spine", - "package_name": "test", - "depends_on": { - "nodes": [], - "macros": ["macro.dbt.current_timestamp", "macro.dbt.date_trunc"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, 
- "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/metricflow_time_spine.sql"), - "unique_id": "model.test.metricflow_time_spine", - "alias": "metricflow_time_spine", - "resource_type": "model", - }, - { - "name": "outer", - "package_name": "test", - "depends_on": { - "nodes": ["model.test.ephemeral"], - "macros": [], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/outer.sql"), - "unique_id": "model.test.outer", - "alias": "outer", - "resource_type": "model", - }, - ), - "path": ( - self.dir("models/ephemeral.sql"), - self.dir("models/incremental.sql"), - self.dir("models/sub/inner.sql"), - self.dir("models/metricflow_time_spine.sql"), - self.dir("models/outer.sql"), - ), - } - self.expect_given_output(["--resource-type", "model"], expectations) - - # Do not include ephemeral model - it was not selected - def expect_model_ephemeral_output(self): - expectations = { - "name": ("outer"), - "selector": ("test.outer"), - "json": ( - { - "name": "outer", - "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, - "tags": [], - "config": { - "enabled": True, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "access": "protected", - }, - "unique_id": "model.test.ephemeral", - "original_file_path": normalize("models/ephemeral.sql"), - "alias": "outer", - "resource_type": "model", - }, - ), - "path": (self.dir("models/outer.sql"),), - } - self.expect_given_output(["--model", "outer"], expectations) - - def expect_source_output(self): - expectations = { - "name": "my_source.my_table", - "selector": "source:test.my_source.my_table", - "json": { - "config": { - "enabled": True, - }, - "unique_id": "source.test.my_source.my_table", - "original_file_path": normalize("models/schema.yml"), - "package_name": "test", - "name": "my_table", - "source_name": "my_source", - "resource_type": "source", - "tags": [], - }, - "path": self.dir("models/schema.yml"), - } - # should we do this --select automatically for a user if if 'source' is - # in the resource types and there is no '--select' or '--exclude'? 
- self.expect_given_output( - ["--resource-type", "source", "--select", "source:*"], expectations - ) - - def expect_seed_output(self): - expectations = { - "name": "seed", - "selector": "test.seed", - "json": { - "name": "seed", - "package_name": "test", - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "seed", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "delimiter": ",", - "persist_docs": {}, - "quote_columns": False, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - }, - "depends_on": {"macros": []}, - "unique_id": "seed.test.seed", - "original_file_path": normalize("seeds/seed.csv"), - "alias": "seed", - "resource_type": "seed", - }, - "path": self.dir("seeds/seed.csv"), - } - self.expect_given_output(["--resource-type", "seed"], expectations) - - def expect_test_output(self): - expectations = { - "name": ("not_null_outer_id", "t", "unique_outer_id"), - "selector": ("test.not_null_outer_id", "test.t", "test.unique_outer_id"), - "json": ( - { - "name": "not_null_outer_id", - "package_name": "test", - "depends_on": { - "nodes": ["model.test.outer"], - "macros": ["macro.dbt.test_not_null"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "test", - "severity": "ERROR", - "store_failures": None, - "store_failures_as": None, - "warn_if": "!= 0", - "error_if": "!= 0", - "fail_calc": "count(*)", - "where": None, - "limit": None, - "tags": [], - "database": None, - "schema": "dbt_test__audit", - "alias": None, - "meta": {}, - }, - "unique_id": "test.test.not_null_outer_id.a226f4fb36", - "original_file_path": normalize("models/schema.yml"), - "alias": "not_null_outer_id", - "resource_type": "test", - }, - { - "name": "t", - "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "test", - "severity": "ERROR", - "store_failures": None, - "store_failures_as": None, - "warn_if": "!= 0", - "error_if": "!= 0", - "fail_calc": "count(*)", - "where": None, - "limit": None, - "tags": [], - "database": None, - "schema": "dbt_test__audit", - "alias": None, - "meta": {}, - }, - "unique_id": "test.test.t", - "original_file_path": normalize("tests/t.sql"), - "alias": "t", - "resource_type": "test", - }, - { - "name": "unique_outer_id", - "package_name": "test", - "depends_on": { - "nodes": ["model.test.outer"], - "macros": ["macro.dbt.test_unique"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "test", - "severity": "ERROR", - "store_failures": None, - "store_failures_as": None, - "warn_if": "!= 0", - "error_if": "!= 0", - "fail_calc": "count(*)", - "where": None, - "limit": None, - "tags": [], - "database": None, - "schema": "dbt_test__audit", - "alias": None, - "meta": {}, - }, - "unique_id": "test.test.unique_outer_id.2195e332d3", - "original_file_path": normalize("models/schema.yml"), - "alias": "unique_outer_id", - "resource_type": "test", - }, - ), - "path": ( - self.dir("models/schema.yml"), - self.dir("tests/t.sql"), - self.dir("models/schema.yml"), - ), - } - self.expect_given_output(["--resource-type", "test"], expectations) - - def 
expect_all_output(self): - # generic test FQNS include the resource + column they're defined on - # models are just package, subdirectory path, name - # sources are like models, ending in source_name.table_name - expected_default = { - "test.ephemeral", - "test.incremental", - "test.snapshot.my_snapshot", - "test.sub.inner", - "test.outer", - "test.seed", - "source:test.my_source.my_table", - "test.not_null_outer_id", - "test.unique_outer_id", - "test.metricflow_time_spine", - "test.t", - "semantic_model:test.my_sm", - "metric:test.total_outer", - } - # analyses have their type inserted into their fqn like tests - expected_all = expected_default | {"test.analysis.a"} - - results = self.run_dbt_ls(["--resource-type", "all", "--select", "*", "source:*"]) - assert set(results) == expected_all - - results = self.run_dbt_ls(["--select", "*", "source:*"]) - assert set(results) == expected_default - - results = self.run_dbt_ls(["--resource-type", "default", "--select", "*", "source:*"]) - assert set(results) == expected_default - - results = self.run_dbt_ls - - def expect_select(self): - results = self.run_dbt_ls(["--resource-type", "test", "--select", "outer"]) - assert set(results) == {"test.not_null_outer_id", "test.unique_outer_id"} - - self.run_dbt_ls(["--resource-type", "test", "--select", "inner"], expect_pass=True) - - results = self.run_dbt_ls(["--resource-type", "test", "--select", "+inner"]) - assert set(results) == {"test.not_null_outer_id", "test.unique_outer_id"} - - results = self.run_dbt_ls(["--resource-type", "semantic_model"]) - assert set(results) == {"semantic_model:test.my_sm"} - - results = self.run_dbt_ls(["--resource-type", "metric"]) - assert set(results) == {"metric:test.total_outer"} - - results = self.run_dbt_ls(["--resource-type", "model", "--select", "outer+"]) - assert set(results) == {"test.outer", "test.sub.inner"} - - results = self.run_dbt_ls(["--resource-type", "model", "--exclude", "inner"]) - assert set(results) == { - "test.ephemeral", - "test.outer", - "test.metricflow_time_spine", - "test.incremental", - } - - results = self.run_dbt_ls(["--select", "config.incremental_strategy:delete+insert"]) - assert set(results) == {"test.incremental"} - - self.run_dbt_ls( - ["--select", "config.incremental_strategy:insert_overwrite"], expect_pass=True - ) - - def expect_resource_type_multiple(self): - """Expect selected resources when --resource-type given multiple times""" - results = self.run_dbt_ls(["--resource-type", "test", "--resource-type", "model"]) - assert set(results) == { - "test.ephemeral", - "test.incremental", - "test.not_null_outer_id", - "test.outer", - "test.sub.inner", - "test.metricflow_time_spine", - "test.t", - "test.unique_outer_id", - } - - results = self.run_dbt_ls( - [ - "--resource-type", - "test", - "--resource-type", - "model", - "--exclude", - "unique_outer_id", - ] - ) - assert set(results) == { - "test.ephemeral", - "test.incremental", - "test.not_null_outer_id", - "test.outer", - "test.metricflow_time_spine", - "test.sub.inner", - "test.t", - } - - results = self.run_dbt_ls( - [ - "--resource-type", - "test", - "--resource-type", - "model", - "--select", - "+inner", - "outer+", - "--exclude", - "inner", - ] - ) - assert set(results) == { - "test.ephemeral", - "test.not_null_outer_id", - "test.unique_outer_id", - "test.outer", - } - - def expect_selected_keys(self, project): - """Expect selected fields of the the selected model""" - expectations = [ - {"database": project.database, "schema": project.test_schema, "alias": "inner"} - ] - 
results = self.run_dbt_ls( - [ - "--model", - "inner", - "--output", - "json", - "--output-keys", - "database", - "schema", - "alias", - ] - ) - assert len(results) == len(expectations) - - for got, expected in zip(results, expectations): - self.assert_json_equal(got, expected) - - """Expect selected fields when --output-keys given multiple times - """ - expectations = [{"database": project.database, "schema": project.test_schema}] - results = self.run_dbt_ls( - [ - "--model", - "inner", - "--output", - "json", - "--output-keys", - "database", - "--output-keys", - "schema", - ] - ) - assert len(results) == len(expectations) - - for got, expected in zip(results, expectations): - self.assert_json_equal(got, expected) - - """Expect selected fields of the test resource types - """ - expectations = [ - {"name": "not_null_outer_id", "column_name": "id"}, - {"name": "t"}, - {"name": "unique_outer_id", "column_name": "id"}, - ] - results = self.run_dbt_ls( - [ - "--resource-type", - "test", - "--output", - "json", - "--output-keys", - "name", - "column_name", - ] - ) - assert len(results) == len(expectations) - - for got, expected in zip( - sorted(results, key=lambda x: json.loads(x).get("name")), - sorted(expectations, key=lambda x: x.get("name")), - ): - self.assert_json_equal(got, expected) - - """Expect nothing (non-existent keys) for the selected models - """ - expectations = [{}, {}] - results = self.run_dbt_ls( - [ - "--model", - "inner outer", - "--output", - "json", - "--output-keys", - "non_existent_key", - ] - ) - assert len(results) == len(expectations) - - for got, expected in zip(results, expectations): - self.assert_json_equal(got, expected) - - @pytest.mark.skip("The actual is not getting loaded, so all actuals are 0.") - def test_ls(self, project): - self.expect_snapshot_output(project) - self.expect_analyses_output() - self.expect_model_output() - self.expect_source_output() - self.expect_seed_output() - self.expect_test_output() - self.expect_select() - self.expect_resource_type_multiple() - self.expect_all_output() - self.expect_selected_keys(project) - - -def normalize(path): - """On windows, neither is enough on its own: - normcase('C:\\documents/ALL CAPS/subdir\\..') - 'c:\\documents\\all caps\\subdir\\..' 
- normpath('C:\\documents/ALL CAPS/subdir\\..') - 'C:\\documents\\ALL CAPS' - normpath(normcase('C:\\documents/ALL CAPS/subdir\\..')) - 'c:\\documents\\all caps' - """ - return normcase(normpath(path)) From 3aba80e4e0c93d3395d681d2caf776bb24df9d41 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Mon, 13 May 2024 17:33:31 -0600 Subject: [PATCH 09/15] Cross-database `date` macro (#81) Co-authored-by: Colin Rogers <111200756+colin-rogers-dbt@users.noreply.github.com> --- .changes/unreleased/Features-20240501-151856.yaml | 6 ++++++ tests/functional/shared_tests/test_utils.py | 5 +++++ 2 files changed, 11 insertions(+) create mode 100644 .changes/unreleased/Features-20240501-151856.yaml diff --git a/.changes/unreleased/Features-20240501-151856.yaml b/.changes/unreleased/Features-20240501-151856.yaml new file mode 100644 index 00000000..2dda7193 --- /dev/null +++ b/.changes/unreleased/Features-20240501-151856.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Cross-database `date` macro +time: 2024-05-01T15:18:56.758715-06:00 +custom: + Author: dbeatty10 + Issue: 82 diff --git a/tests/functional/shared_tests/test_utils.py b/tests/functional/shared_tests/test_utils.py index 9934a240..3811638a 100644 --- a/tests/functional/shared_tests/test_utils.py +++ b/tests/functional/shared_tests/test_utils.py @@ -8,6 +8,7 @@ from dbt.tests.adapter.utils.test_cast_bool_to_text import BaseCastBoolToText from dbt.tests.adapter.utils.test_concat import BaseConcat from dbt.tests.adapter.utils.test_current_timestamp import BaseCurrentTimestampAware +from dbt.tests.adapter.utils.test_date import BaseDate from dbt.tests.adapter.utils.test_dateadd import BaseDateAdd from dbt.tests.adapter.utils.test_datediff import BaseDateDiff from dbt.tests.adapter.utils.test_date_spine import BaseDateSpine @@ -69,6 +70,10 @@ class TestCurrentTimestamp(BaseCurrentTimestampAware): pass +class TestDate(BaseDate): + pass + + class TestDateSpine(BaseDateSpine): pass From 862e8e9e68a9fc452110db3fd97e246fb8705bb0 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Tue, 14 May 2024 13:15:09 -0600 Subject: [PATCH 10/15] Import relevant pytest(s) for cross-database `cast` macro (#77) --- .changes/unreleased/Features-20240430-185700.yaml | 6 ++++++ tests/functional/shared_tests/test_utils.py | 5 +++++ 2 files changed, 11 insertions(+) create mode 100644 .changes/unreleased/Features-20240430-185700.yaml diff --git a/.changes/unreleased/Features-20240430-185700.yaml b/.changes/unreleased/Features-20240430-185700.yaml new file mode 100644 index 00000000..638d1062 --- /dev/null +++ b/.changes/unreleased/Features-20240430-185700.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Add tests for cross-database `cast` macro +time: 2024-04-30T18:57:00.437045-06:00 +custom: + Author: dbeatty10 + Issue: "76" diff --git a/tests/functional/shared_tests/test_utils.py b/tests/functional/shared_tests/test_utils.py index 3811638a..d1a8ea1d 100644 --- a/tests/functional/shared_tests/test_utils.py +++ b/tests/functional/shared_tests/test_utils.py @@ -5,6 +5,7 @@ from dbt.tests.adapter.utils.test_array_concat import BaseArrayConcat from dbt.tests.adapter.utils.test_array_construct import BaseArrayConstruct from dbt.tests.adapter.utils.test_bool_or import BaseBoolOr +from dbt.tests.adapter.utils.test_cast import BaseCast from dbt.tests.adapter.utils.test_cast_bool_to_text import BaseCastBoolToText from dbt.tests.adapter.utils.test_concat import BaseConcat from 
dbt.tests.adapter.utils.test_current_timestamp import BaseCurrentTimestampAware @@ -58,6 +59,10 @@ class TestBoolOr(BaseBoolOr): pass +class TestCast(BaseCast): + pass + + class TestCastBoolToText(BaseCastBoolToText): pass From e23d0a7bb7d4a32598778922ce28578ad4be0e77 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Tue, 14 May 2024 20:46:23 -0700 Subject: [PATCH 11/15] Fix the semicolon semantics for indexes while respecting other bug fix. (#97) Co-authored-by: Mila Page --- .../unreleased/Fixes-20240514-193201.yaml | 6 +++++ .../relations/materialized_view/alter.sql | 3 ++- .../relations/materialized_view/create.sql | 2 +- tests/functional/test_multiple_indexes.py | 27 +++++++++++++++++++ 4 files changed, 36 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Fixes-20240514-193201.yaml create mode 100644 tests/functional/test_multiple_indexes.py diff --git a/.changes/unreleased/Fixes-20240514-193201.yaml b/.changes/unreleased/Fixes-20240514-193201.yaml new file mode 100644 index 00000000..95ab2467 --- /dev/null +++ b/.changes/unreleased/Fixes-20240514-193201.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Fix the semicolon semantics for indexes while respecting other bug fix +time: 2024-05-14T19:32:01.149383-07:00 +custom: + Author: versusfacit + Issue: "85" diff --git a/dbt/include/postgres/macros/relations/materialized_view/alter.sql b/dbt/include/postgres/macros/relations/materialized_view/alter.sql index ee53c113..429b7e53 100644 --- a/dbt/include/postgres/macros/relations/materialized_view/alter.sql +++ b/dbt/include/postgres/macros/relations/materialized_view/alter.sql @@ -30,13 +30,14 @@ {%- if _index_change.action == "drop" -%} - {{ postgres__get_drop_index_sql(relation, _index.name) }}; + {{ postgres__get_drop_index_sql(relation, _index.name) }} {%- elif _index_change.action == "create" -%} {{ postgres__get_create_index_sql(relation, _index.as_node_config) }} {%- endif -%} + {{ ';' if not loop.last else "" }} {%- endfor -%} diff --git a/dbt/include/postgres/macros/relations/materialized_view/create.sql b/dbt/include/postgres/macros/relations/materialized_view/create.sql index 17e5cb06..89c18234 100644 --- a/dbt/include/postgres/macros/relations/materialized_view/create.sql +++ b/dbt/include/postgres/macros/relations/materialized_view/create.sql @@ -2,7 +2,7 @@ create materialized view if not exists {{ relation }} as {{ sql }}; {% for _index_dict in config.get('indexes', []) -%} - {{- get_create_index_sql(relation, _index_dict) -}} + {{- get_create_index_sql(relation, _index_dict) -}}{{ ';' if not loop.last else "" }} {%- endfor -%} {% endmacro %} diff --git a/tests/functional/test_multiple_indexes.py b/tests/functional/test_multiple_indexes.py new file mode 100644 index 00000000..1d30a6d4 --- /dev/null +++ b/tests/functional/test_multiple_indexes.py @@ -0,0 +1,27 @@ +import pytest + +from tests.functional.utils import run_dbt + + +REF_MULTIPLE_INDEX_MODEL = """ +{{ + config( + materialized="materialized_view", + indexes=[ + {"columns": ["foo"], "type": "btree"}, + {"columns": ["bar"], "type": "btree"}, + ], + ) +}} + +SELECT 1 AS foo, 2 AS bar +""" + + +class TestUnrestrictedPackageAccess: + @pytest.fixture(scope="class") + def models(self): + return {"index_test.sql": REF_MULTIPLE_INDEX_MODEL} + + def test_unrestricted_protected_ref(self, project): + run_dbt() From e308e2a682110c08ee3593e0a8e964fba61e3bc7 Mon Sep 17 00:00:00 2001 From: Mike Alfare <13974384+mikealfare@users.noreply.github.com> Date: Mon, 20 May 2024 
18:35:36 -0400 Subject: [PATCH 12/15] Add docker release to the full release process for final releases (#51) Co-authored-by: Emily Rockman Co-authored-by: Emily Rockman --- .github/dependabot.yml | 5 + .github/workflows/release.yml | 115 ++++-- .github/workflows/release_prep_hatch.yml | 467 +++++++++++++++++++++++ docker/Dockerfile | 37 ++ docker/README.md | 58 +++ docker/dev.Dockerfile | 54 +++ pyproject.toml | 6 + 7 files changed, 710 insertions(+), 32 deletions(-) create mode 100644 .github/workflows/release_prep_hatch.yml create mode 100644 docker/Dockerfile create mode 100644 docker/README.md create mode 100644 docker/dev.Dockerfile diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4673f47c..ae2be43a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -10,3 +10,8 @@ updates: schedule: interval: "weekly" rebase-strategy: "disabled" + - package-ecosystem: "docker" + directory: "/docker" + schedule: + interval: "weekly" + rebase-strategy: "disabled" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 27fb9f4f..1139380a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -3,60 +3,111 @@ name: Release on: workflow_dispatch: inputs: - deploy-to: - type: choice - description: Choose where to publish (test/prod) - options: - - prod - - test - default: prod - ref: - description: "The ref (sha or branch name) to use" + branch: + description: "The branch to release from" type: string default: "main" + version: + description: "The version to release" required: true + type: string + deploy-to: + description: "Deploy to test or prod" + type: environment + default: prod + only_docker: + description: "Only release Docker image, skip GitHub & PyPI" + type: boolean + default: false -permissions: read-all +permissions: + contents: write # this is the permission that allows creating a new release # will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise concurrency: - group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}-${{ inputs.deploy-to }} + group: "${{ github.workflow }}-${{ github.event_name }}-${{ inputs.version }}-${{ inputs.deploy-to }}" cancel-in-progress: true jobs: - release: - name: PyPI - ${{ inputs.deploy-to }} - runs-on: ubuntu-latest - environment: - name: ${{ inputs.deploy-to }} - url: ${{ vars.PYPI_PROJECT_URL }} - permissions: - id-token: write # IMPORTANT: this permission is mandatory for trusted publishing + release-prep: + name: "Release prep: generate changelog, bump version" + uses: dbt-labs/dbt-postgres/.github/workflows/release_prep_hatch.yml@main + with: + branch: ${{ inputs.branch }} + version: ${{ inputs.version }} + deploy-to: ${{ inputs.deploy-to }} + secrets: inherit + build-release: + name: "Build release" + needs: release-prep + runs-on: ubuntu-latest + outputs: + archive-name: ${{ steps.archive.outputs.name }} steps: - - name: Check out repository + - name: "Checkout ${{ github.event.repository.name }}@${{ needs.release-prep.outputs.release-branch }}" uses: actions/checkout@v4 with: + ref: ${{ needs.release-prep.outputs.release-branch }} persist-credentials: false - ref: "${{ inputs.ref }}" - - name: Setup `hatch` + - name: "Setup `hatch`" uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main - - name: Inputs - id: release-inputs + - name: "Set archive name" + id: archive run: | - version=$(hatch version) - 
archive_name=dbt-postgres-$version-${{ inputs.deploy-to }}
-          echo "archive-name=$archive_name" >> $GITHUB_OUTPUT
+          archive_name=${{ github.event.repository.name }}-${{ inputs.version }}-${{ inputs.deploy-to }}
+          echo "name=$archive_name" >> $GITHUB_OUTPUT
 
-      - name: Build `dbt-postgres`
+      - name: "Build ${{ github.event.repository.name }}"
         uses: dbt-labs/dbt-adapters/.github/actions/build-hatch@main
         with:
-          archive-name: ${{ steps.release-inputs.outputs.archive-name }}
+          archive-name: ${{ steps.archive.outputs.name }}
 
-      - name: Publish to PyPI
+  pypi-release:
+    name: "PyPI release"
+    if: ${{ !failure() && !cancelled() && !inputs.only_docker }}
+    runs-on: ubuntu-latest
+    needs: build-release
+    environment:
+      name: ${{ inputs.deploy-to }}
+      url: ${{ vars.PYPI_PROJECT_URL }}
+    permissions:
+      # this permission is required for trusted publishing
+      # see https://github.com/marketplace/actions/pypi-publish
+      id-token: write
+    steps:
+      - name: "Publish to PyPI"
         uses: dbt-labs/dbt-adapters/.github/actions/publish-pypi@main
         with:
-          pypi-repository-url: ${{ vars.PYPI_REPOSITORY_URL }}
-          archive-name: ${{ steps.release-inputs.outputs.archive-name }}
+          repository-url: ${{ vars.PYPI_REPOSITORY_URL }}
+          archive-name: ${{ needs.build-release.outputs.archive-name }}
+
+  github-release:
+    name: "GitHub release"
+    if: ${{ !failure() && !cancelled() && !inputs.only_docker }}
+    needs:
+      - build-release
+      - release-prep
+    uses: dbt-labs/dbt-adapters/.github/workflows/github-release.yml@main
+    with:
+      sha: ${{ needs.release-prep.outputs.release-sha }}
+      version_number: ${{ inputs.version }}
+      changelog_path: ${{ needs.release-prep.outputs.changelog-path }}
+      test_run: ${{ inputs.deploy-to == 'test' }}
+      archive_name: ${{ needs.build-release.outputs.archive-name }}
+
+  docker-release:
+    name: "Docker release"
+    # We cannot release to docker on a test run because it uses the tag in GitHub as
+    # what we need to release, but draft releases don't actually tag the commit, so it
+    # finds nothing to release
+    if: ${{ !failure() && !cancelled() && (inputs.deploy-to == 'prod' || inputs.only_docker) }}
+    needs: github-release # docker relies on the published tag from github-release
+    permissions:
+      packages: write # this permission is required for publishing to GHCR
+    uses: dbt-labs/dbt-release/.github/workflows/release-docker.yml@main
+    with:
+      version_number: ${{ inputs.version }}
+      test_run: ${{ inputs.deploy-to == 'test' }}
diff --git a/.github/workflows/release_prep_hatch.yml b/.github/workflows/release_prep_hatch.yml
new file mode 100644
index 00000000..37129c97
--- /dev/null
+++ b/.github/workflows/release_prep_hatch.yml
@@ -0,0 +1,467 @@
+# **what?**
+# Perform the version bump, generate the changelog and run tests.
+#
+# Inputs:
+#   branch: The branch that we will release from
+#   version: The release version number (i.e. 1.0.0b1, 1.2.3rc2, 1.0.0)
+#   deploy-to: If we are deploying to prod or test; if test, release from the branch
+#   is-nightly-release: Identifier that this is a nightly release
+#
+# Outputs:
+#   release-sha: The sha that will actually be released. This can differ from the
+#   input sha if adding a version bump and/or changelog
+#   changelog-path: Path to the changelog file (ex .changes/1.2.3-rc1.md)
+#
+# Branching strategy:
+# - During workflow execution a temp branch will be generated.
+# - For normal runs, the temp branch will be removed once changes are merged to the target branch;
+# - For test runs, we keep the temp branch and use it for release;
+# Naming strategy:
+# - For normal runs: prep-release/${{ inputs.deploy-to}}/${{ inputs.version }}_$GITHUB_RUN_ID
+# - For nightly releases: prep-release/nightly-release/${{ inputs.version }}_$GITHUB_RUN_ID
+#
+# **why?**
+# Reusable and consistent GitHub release process.
+#
+# **when?**
+# Call when ready to kick off a build and release
+#
+# Validation Checks
+#
+# 1. Bump the version if it has not been bumped
+# 2. Generate the changelog (via changie) if there is no markdown file for this version
+name: "Release prep"
+run-name: "Release prep: Generate changelog and bump ${{ inputs.package }} to ${{ inputs.version }} for release to ${{ inputs.deploy-to }}"
+on:
+  workflow_call:
+    inputs:
+      branch:
+        description: "The branch to release from"
+        type: string
+        default: "main"
+      version:
+        description: "The version to release"
+        required: true
+        type: string
+      deploy-to:
+        description: "Deploy to test or prod"
+        type: string
+        default: "prod"
+      is-nightly-release:
+        description: "Identify if this is a nightly release"
+        type: boolean
+        default: false
+    outputs:
+      release-branch:
+        description: "The branch to be released from"
+        value: ${{ jobs.release.outputs.branch }}
+      release-sha:
+        description: "The SHA to be released"
+        value: ${{ jobs.release.outputs.sha }}
+      changelog-path:
+        description: "The path to the changelog from the repo root for this version, e.g. .changes/1.8.0-b1.md"
+        value: ${{ jobs.release-inputs.outputs.changelog-path }}
+    secrets:
+      FISHTOWN_BOT_PAT:
+        description: "Token to commit/merge changes into branches"
+        required: true
+      IT_TEAM_MEMBERSHIP:
+        description: "Token that can view org level teams"
+        required: true
+
+permissions:
+  contents: write
+
+defaults:
+  run:
+    shell: bash
+
+env:
+  PYTHON_TARGET_VERSION: 3.11
+  NOTIFICATION_PREFIX: "[Release Prep]"
+
+jobs:
+  release-inputs:
+    runs-on: ubuntu-latest
+    outputs:
+      changelog-path: ${{ steps.changelog.outputs.path }}
+      changelog-exists: ${{ steps.changelog.outputs.exists }}
+      base-version: ${{ steps.semver.outputs.base-version }}
+      pre-release: ${{ steps.semver.outputs.pre-release }}
+      is-pre-release: ${{ steps.semver.outputs.is-pre-release }}
+      version-is-current: ${{ steps.version.outputs.is-current }}
+
+    steps:
+      - name: "[DEBUG] Log inputs"
+        run: |
+          # WORKFLOW INPUTS
+          echo Branch: ${{ inputs.branch }}
+          echo Release version: ${{ inputs.version }}
+          echo Deploy to: ${{ inputs.deploy-to }}
+          echo Nightly release: ${{ inputs.is-nightly-release }}
+          # ENVIRONMENT VARIABLES
+          echo Python version: ${{ env.PYTHON_TARGET_VERSION }}
+          echo Notification prefix: ${{ env.NOTIFICATION_PREFIX }}
+
+      - name: "Checkout ${{ github.event.repository.name }}@${{ inputs.branch }}"
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.branch }}
+
+      - name: "Setup `hatch`"
+        uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main
+
+      - name: "Parse input version"
+        id: semver
+        uses: dbt-labs/actions/parse-semver@v1.1.0
+        with:
+          version: ${{ inputs.version }}
+
+      - name: "Audit version"
+        id: version
+        run: |
+          is_current=false
+          current_version=$(hatch version)
+          if test "$current_version" = "${{ inputs.version }}"
+          then
+            is_current=true
+          fi
+          echo "is-current=$is_updated" >> $GITHUB_OUTPUT
+
+      - name: "[INFO] Skip version bump"
+        if: steps.version.outputs.is-current == 'true'
+        run: |
+          title="Skip version bump"
+          message="The version matches the 
input version ${{ inputs.version }}, skipping version bump" + echo "::notice title=${{ env.NOTIFICATION_PREFIX }}: $title::$message" + + - name: "Audit changelog" + id: changelog + run: | + path=".changes/" + if [[ ${{ steps.semver.outputs.is-pre-release }} -eq 1 ]] + then + path+="${{ steps.semver.outputs.base-version }}-${{ steps.semver.outputs.pre-release }}.md" + else + path+="${{ steps.semver.outputs.base-version }}.md" + fi + echo "path=$path" >> $GITHUB_OUTPUT + + does_exist=false + if test -f $path + then + does_exist=true + fi + echo "exists=$does_exist">> $GITHUB_OUTPUT + + - name: "[INFO] Skip changelog generation" + if: steps.changelog.outputs.exists == 'true' + run: | + title="Skip changelog generation" + message="A changelog already exists at ${{ steps.changelog.outputs.path }}, skipping generating changelog" + echo "::notice title=${{ env.NOTIFICATION_PREFIX }}: $title::$message" + + release-branch: + runs-on: ubuntu-latest + needs: release-inputs + if: | + needs.release-inputs.outputs.changelog-exists == 'false' || + needs.release-inputs.outputs.version-is-current == 'false' + outputs: + name: ${{ steps.release-branch.outputs.name }} + + steps: + - name: "Checkout ${{ github.event.repository.name }}@${{ inputs.branch }}" + uses: actions/checkout@v4 + with: + ref: ${{ inputs.branch }} + + - name: "Set release branch" + id: release-branch + run: | + name="prep-release/" + if [[ ${{ inputs.is-nightly-release }} == true ]] + then + name+="nightly-release/" + else + name+="${{ inputs.deploy-to }}/" + fi + name+="${{ inputs.version }}_$GITHUB_RUN_ID" + echo "name=$name" >> $GITHUB_OUTPUT + + - name: "Create release branch ${{ steps.release-branch.outputs.name }}" + run: | + git checkout -b ${{ steps.release-branch.outputs.name }} + git push -u origin ${{ steps.release-branch.outputs.name }} + + - name: "[INFO] Create release branch" + run: | + title="Create release branch" + message="Create release branch: ${{ steps.release-branch.outputs.name }}" + echo "::notice title=${{ env.NOTIFICATION_PREFIX }}: $title::$message" + + core-team: + if: needs.release-inputs.outputs.changelog-exists == 'false' + uses: dbt-labs/actions/.github/workflows/determine-team-membership.yml@main + with: + github_team: "core-group" + + generate-changelog: + runs-on: ubuntu-latest + if: needs.release-inputs.outputs.changelog-exists == 'false' + # only runs if we need to make changes, determined by not skipping release-branch + needs: + - release-inputs + - release-branch + - core-team + + steps: + - name: "Checkout ${{ github.event.repository.name }}@${{ needs.release-branch.outputs.name }}" + uses: actions/checkout@v3 + with: + ref: ${{ needs.release-branch.outputs.name }} + + - name: "Setup `hatch`" + uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main + + - name: "Install `changie`" + run: | + brew tap miniscruff/changie https://github.com/miniscruff/changie + brew install changie + + - name: "Generate changelog at ${{ needs.release-inputs.outputs.changelog-path }}" + run: | + if [[ ${{ needs.release-inputs.outputs.is-pre-release }} -eq 1 ]] + then + changie batch ${{ needs.release-inputs.outputs.base-version }} \ + --move-dir '${{ needs.release-inputs.outputs.base-version }}' \ + --prerelease ${{ needs.release-inputs.outputs.pre-release }} + elif [[ -d ".changes/${{ needs.release-inputs.outputs.base-version }}" ]] + then + changie batch ${{ needs.release-inputs.outputs.base-version }} \ + --include '${{ needs.release-inputs.outputs.base-version }}' \ + --remove-prereleases + else # releasing 
a final patch with no pre-releases + changie batch ${{ needs.release-inputs.outputs.base-version }} + fi + changie merge + env: + CHANGIE_CORE_TEAM: ${{ needs.core-team.outputs.team_membership }} + + - name: "Remove trailing whitespace and missing new lines" + # this step will fail on whitespace errors but also correct them + continue-on-error: true + run: hatch run code-quality + + - name: "Commit & push changes" + run: | + git config user.name "$USER" + git config user.email "$EMAIL" + git pull + git add . + git commit -m "$COMMIT_MESSAGE" + git push + env: + USER: "GitHub Build Bot" + EMAIL: "buildbot@fishtownanalytics.com" + COMMIT_MESSAGE: "Generate changelog at ${{ needs.release-inputs.outputs.changelog-path }}" + + - name: "[INFO] Generated changelog at ${{ needs.release-inputs.outputs.changelog-path }}" + run: | + title="Changelog generation" + if [[ -f ${{ needs.release-inputs.outputs.changelog-path }} ]] + then + message="Generated changelog file successfully" + echo "::notice title=${{ env.NOTIFICATION_PREFIX }}: $title::$message" + else + message="Failed to generate changelog file" + echo "::error title=${{ env.NOTIFICATION_PREFIX }}: $title::$message" + exit 1 + fi + + bump-version: + runs-on: ubuntu-latest + if: needs.release-inputs.outputs.version-is-current == 'false' + # only runs if we need to make changes, determined by not skipping release-branch + needs: + - release-inputs + - release-branch + - generate-changelog + + steps: + - name: "Checkout ${{ github.event.repository.name }}@${{ needs.release-branch.outputs.name }}" + uses: actions/checkout@v3 + with: + ref: ${{ needs.release-branch.outputs.name }} + + - name: "Setup `hatch`" + uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main + + - name: "Bump version to ${{ inputs.version }}" + run: hatch version ${{ inputs.version }} + + - name: "Commit & push changes" + run: | + git config user.name "$USER" + git config user.email "$EMAIL" + git pull + git add . 
+ git commit -m "$COMMIT_MESSAGE" + git push + env: + USER: "GitHub Build Bot" + EMAIL: "buildbot@fishtownanalytics.com" + COMMIT_MESSAGE: "Bump version to ${{ inputs.version }}" + + - name: "[INFO] Bumped version to ${{ inputs.version }}" + run: | + title="Version bump" + message="Bumped version to ${{ inputs.version }}" + echo "::notice title=${{ env.NOTIFICATION_PREFIX }}: $title::$message" + + unit-tests: + runs-on: ubuntu-latest + # only run unit tests if we created a release branch and already bumped the version and generated the changelog + if: | + !failure() && !cancelled() && + needs.release-branch.outputs.name != '' + needs: + - release-branch + - generate-changelog + - bump-version + + steps: + - name: "Checkout ${{ github.event.repository.name }}@${{ needs.release-branch.outputs.name }}" + uses: actions/checkout@v4 + with: + ref: ${{ needs.release-branch.outputs.name }} + + - name: "Setup `hatch`" + uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main + + - name: "Run unit tests" + run: hatch run unit-tests:all + + integration-tests: + runs-on: ubuntu-latest + # only run integration tests if we created a release branch and already bumped the version and generated the changelog + if: | + !failure() && !cancelled() && + needs.release-branch.outputs.name != '' + needs: + - release-branch + - generate-changelog + - bump-version + + services: + postgres: + image: postgres + env: + POSTGRES_PASSWORD: postgres + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + + steps: + - name: "Checkout ${{ github.event.repository.name }}@${{ needs.release-branch.outputs.name }}" + uses: actions/checkout@v4 + with: + ref: ${{ needs.release-branch.outputs.name }} + + - name: Setup postgres + run: psql -f ./scripts/setup_test_database.sql + env: + PGHOST: localhost + PGPORT: 5432 + PGUSER: postgres + PGPASSWORD: postgres + PGDATABASE: postgres + + - name: "Set up `hatch`" + uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main + + - name: "Run integration tests" + run: hatch run integration-tests:all + env: + POSTGRES_TEST_HOST: localhost + POSTGRES_TEST_PORT: 5432 + POSTGRES_TEST_USER: root + POSTGRES_TEST_PASS: password + POSTGRES_TEST_DATABASE: dbt + POSTGRES_TEST_THREADS: 4 + + merge-release-branch: + runs-on: ubuntu-latest + needs: + - unit-tests + - integration-tests + - release-branch + - release-inputs + if: | + !failure() && !cancelled() && + needs.release-branch.result == 'success' && + inputs.deploy-to == 'prod' + + steps: + - name: "Checkout ${{ github.event.repository.name }}" + uses: actions/checkout@v3 + + - name: "Merge changes into ${{ inputs.branch }}" + uses: everlytic/branch-merge@1.1.5 + with: + source_ref: ${{ needs.release-branch.outputs.name }} + target_branch: ${{ inputs.branch }} + github_token: ${{ secrets.FISHTOWN_BOT_PAT }} + commit_message_template: "[Automated] Merged {source_ref} into target {target_branch} during release process" + + - name: "[INFO] Merge changes into ${{ inputs.branch }}" + run: | + title="Merge changes" + message="Merge ${{ needs.release-branch.outputs.name }} into ${{ inputs.branch }}" + echo "::notice title=${{ env.NOTIFICATION_PREFIX }}: $title::$message" + + release: + runs-on: ubuntu-latest + needs: + - release-branch + - merge-release-branch + if: ${{ !failure() && !cancelled() }} + + # Get the SHA that will be released. + # If the changelog already exists and the version was already current on the input branch, then release from there. 
+    # Otherwise, we generated a changelog and/or did the version bump in this workflow and there is a
+    # new sha to use from the merge we just did. Grab that here instead.
+    outputs:
+      branch: ${{ steps.branch.outputs.name }}
+      sha: ${{ steps.sha.outputs.sha }}
+
+    steps:
+      - name: "Set release branch"
+        id: branch
+        # If a release branch was created and not merged, use the release branch
+        # Otherwise, use the input branch because either nothing was done, or the changes were merged back in
+        run: |
+          if [ ${{ needs.release-branch.result == 'success' }} && ${{ needs.merge-release-branch.result == 'skipped' }} ]; then
+            branch="${{ needs.release-branch.outputs.name }}"
+          else
+            branch="${{ inputs.branch }}"
+          fi
+          echo "name=$branch" >> $GITHUB_OUTPUT
+
+      - name: "Checkout ${{ github.event.repository.name }}@${{ steps.branch.outputs.name }}"
+        uses: actions/checkout@v3
+        with:
+          ref: ${{ steps.branch.outputs.name }}
+
+      - name: "Set release SHA"
+        id: sha
+        run: echo "sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
+
+      # if this is a real release and a release branch was created, delete it
+      - name: "Delete release branch: ${{ needs.branch.outputs.name }}"
+        if: ${{ inputs.deploy-to == 'prod' && inputs.is-nightly-release == 'false' && needs.release-branch.outputs.name != '' }}
+        run: git push origin -d ${{ needs.branch.outputs.name }}
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 00000000..7c8dc14e
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,37 @@
+# this image gets published to GHCR for production use
+ARG py_version=3.11.2
+
+FROM python:$py_version-slim-bullseye as base
+
+RUN apt-get update \
+    && apt-get dist-upgrade -y \
+    && apt-get install -y --no-install-recommends \
+    build-essential=12.9 \
+    ca-certificates=20210119 \
+    git=1:2.30.2-1+deb11u2 \
+    libpq-dev=13.14-0+deb11u1 \
+    make=4.3-4.1 \
+    openssh-client=1:8.4p1-5+deb11u3 \
+    software-properties-common=0.96.20.2-2.1 \
+    && apt-get clean \
+    && rm -rf \
+    /var/lib/apt/lists/* \
+    /tmp/* \
+    /var/tmp/*
+
+ENV PYTHONIOENCODING=utf-8
+ENV LANG=C.UTF-8
+
+RUN python -m pip install --upgrade "pip==24.0" "setuptools==69.2.0" "wheel==0.43.0" --no-cache-dir
+
+
+FROM base as dbt-postgres
+
+ARG commit_ref=main
+
+HEALTHCHECK CMD dbt --version || exit 1
+
+WORKDIR /usr/app/dbt/
+ENTRYPOINT ["dbt"]
+
+RUN python -m pip install --no-cache-dir "dbt-postgres @ git+https://github.com/dbt-labs/dbt-postgres@${commit_ref}"
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 00000000..22af3fe9
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,58 @@
+# Docker for dbt
+This docker file is suitable for building dbt Docker images locally or for use with CI/CD to automate populating a container registry.
+
+
+## Building an image:
+This Dockerfile can create images for the following target: `dbt-postgres`
+
+In order to build a new image, run the following docker command.
+```shell
+docker build --tag <my_image_name> --target dbt-postgres <path-to-dockerfile>
+```
+---
+> **Note:** Docker must be configured to use [BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/) in order for images to build properly!

+---
+
+By default the image will be populated with the latest version of `dbt-postgres` on `main`. 
+If you need to use a different version, you can specify it by git ref using the `--build-arg` flag:
+```shell
+docker build --tag <my_image_name> \
+  --target dbt-postgres \
+  --build-arg commit_ref=<commit_ref> \
+  <path-to-dockerfile>
+```
+
+### Examples:
+To build an image named "my-dbt" that supports Postgres using the latest releases:
+```shell
+cd dbt-postgres/docker
+docker build --tag my-dbt --target dbt-postgres .
+```
+
+To build an image named "my-other-dbt" that supports Postgres using the adapter version 1.0.0b1:
+```shell
+cd dbt-postgres/docker
+docker build \
+  --tag my-other-dbt \
+  --target dbt-postgres \
+  --build-arg commit_ref=v1.0.0b1 \
+  .
+```
+
+## Running an image in a container:
+The `ENTRYPOINT` for this Dockerfile is the command `dbt` so you can bind-mount your project to `/usr/app` and use dbt as normal:
+```shell
+docker run \
+  --network=host \
+  --mount type=bind,source=path/to/project,target=/usr/app \
+  --mount type=bind,source=path/to/profiles.yml,target=/root/.dbt/profiles.yml \
+  my-dbt \
+  ls
+```
+---
+**Notes:**
+* Bind-mount sources _must_ be absolute paths
+* You may need to make adjustments to the docker networking setting depending on the specifics of your data warehouse/database host.
+
+---
diff --git a/docker/dev.Dockerfile b/docker/dev.Dockerfile
new file mode 100644
index 00000000..a7d2eca3
--- /dev/null
+++ b/docker/dev.Dockerfile
@@ -0,0 +1,54 @@
+# this image does not get published, it is intended for local development only, see `Makefile` for usage
+FROM ubuntu:22.04 as base
+
+# prevent python installation from asking for time zone region
+ARG DEBIAN_FRONTEND=noninteractive
+
+# add python repository
+RUN apt-get update \
+    && apt-get install -y software-properties-common=0.99.22.9 \
+    && add-apt-repository -y ppa:deadsnakes/ppa \
+    && apt-get clean \
+    && rm -rf \
+    /var/lib/apt/lists/* \
+    /tmp/* \
+    /var/tmp/*
+
+# install python
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+    build-essential=12.9ubuntu3 \
+    git-all=1:2.34.1-1ubuntu1.10 \
+    libpq-dev=14.11-0ubuntu0.22.04.1 \
+    python3.8=3.8.19-1+jammy1 \
+    python3.8-dev=3.8.19-1+jammy1 \
+    python3.8-distutils=3.8.19-1+jammy1 \
+    python3.8-venv=3.8.19-1+jammy1 \
+    python3-pip=22.0.2+dfsg-1ubuntu0.4 \
+    python3-wheel=0.37.1-2ubuntu0.22.04.1 \
+    && apt-get clean \
+    && rm -rf \
+    /var/lib/apt/lists/* \
+    /tmp/* \
+    /var/tmp/*
+
+# update the default system interpreter to the newly installed version
+RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1
+
+# install python dependencies
+RUN python3 -m pip install --upgrade --no-cache-dir "hatch==1.9.1"
+
+
+FROM base as dbt-postgres-dev
+
+HEALTHCHECK CMD python3 --version || exit 1
+
+# send stdout/stderr to terminal
+ENV PYTHONUNBUFFERED=1
+
+# setup mount for local code
+WORKDIR /opt/code
+VOLUME /opt/code
+
+# create a virtual environment
+RUN python3 -m venv /opt/venv
diff --git a/pyproject.toml b/pyproject.toml
index 0e93423c..a99829d9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -65,6 +65,12 @@ dependencies = [
 [tool.hatch.envs.default.scripts]
 dev = "pre-commit install"
 code-quality = "pre-commit run --all-files"
+docker-dev = [
+    "echo Does not support integration testing, only development and unit testing. See issue https://github.com/dbt-labs/dbt-postgres/issues/99",
+    "docker build -f docker/dev.Dockerfile -t dbt-postgres-dev .",
+    "docker run --rm -it --name dbt-postgres-dev -v $(pwd):/opt/code dbt-postgres-dev",
+]
+docker-prod = "docker build -f docker/Dockerfile -t dbt-postgres ."
 
[tool.hatch.envs.unit-tests] dependencies = [ From 86349f04dc585f24f1031011b44ed852aed94e9b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 01:00:55 -0400 Subject: [PATCH 13/15] Bump ubuntu from 22.04 to 24.04 in /docker (#100) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docker/dev.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/dev.Dockerfile b/docker/dev.Dockerfile index a7d2eca3..e137ff84 100644 --- a/docker/dev.Dockerfile +++ b/docker/dev.Dockerfile @@ -1,5 +1,5 @@ # this image does not get published, it is intended for local development only, see `Makefile` for usage -FROM ubuntu:22.04 as base +FROM ubuntu:24.04 as base # prevent python installation from asking for time zone region ARG DEBIAN_FRONTEND=noninteractive From 0717373f7caa064e4d9be3b08d9693e647319db3 Mon Sep 17 00:00:00 2001 From: Mike Alfare <13974384+mikealfare@users.noreply.github.com> Date: Thu, 23 May 2024 15:08:16 -0400 Subject: [PATCH 14/15] Fix release workflow (#106) --- .github/workflows/release_prep_hatch.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release_prep_hatch.yml b/.github/workflows/release_prep_hatch.yml index 37129c97..8e9ded04 100644 --- a/.github/workflows/release_prep_hatch.yml +++ b/.github/workflows/release_prep_hatch.yml @@ -126,7 +126,7 @@ jobs: then is_current=true fi - echo "is-current=$is_updated" >> $GITHUB_OUTPUT + echo "is-current=$is_current" >> $GITHUB_OUTPUT - name: "[INFO] Skip version bump" if: steps.version.outputs.is-current == 'true' @@ -202,9 +202,11 @@ jobs: core-team: if: needs.release-inputs.outputs.changelog-exists == 'false' + needs: release-inputs uses: dbt-labs/actions/.github/workflows/determine-team-membership.yml@main with: github_team: "core-group" + secrets: inherit generate-changelog: runs-on: ubuntu-latest @@ -445,7 +447,7 @@ jobs: # If a release branch was created and not merged, use the release branch # Otherwise, use the input branch because either nothing was done, or the changes were merged back in run: | - if [ ${{ needs.release-branch.result == 'success' }} && ${{ needs.merge-release-branch.result == 'skipped' }} ]; then + if [[ ${{ needs.release-branch.result == 'success' }} && ${{ needs.merge-release-branch.result == 'skipped' }} ]]; then branch="${{ needs.release-branch.outputs.name }}" else branch="${{ inputs.branch }}" From 96dd860b7b857c93736cba00d71280a5ec2cb37c Mon Sep 17 00:00:00 2001 From: Mike Alfare <13974384+mikealfare@users.noreply.github.com> Date: Thu, 23 May 2024 17:54:31 -0400 Subject: [PATCH 15/15] Add a changelog entry check for pull requests (#108) --- .github/workflows/changelog-entry-check.yml | 29 +++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 .github/workflows/changelog-entry-check.yml diff --git a/.github/workflows/changelog-entry-check.yml b/.github/workflows/changelog-entry-check.yml new file mode 100644 index 00000000..889c0995 --- /dev/null +++ b/.github/workflows/changelog-entry-check.yml @@ -0,0 +1,29 @@ +name: Changelog entry check + +on: + pull_request: + types: + - opened + - reopened + - labeled + - unlabeled + - synchronize + +defaults: + run: + shell: bash + +permissions: + contents: read + pull-requests: write + +jobs: + changelog-entry-check: + uses: dbt-labs/actions/.github/workflows/changelog-existence.yml@main + with: + changelog_comment: >- + Thank you for 
your pull request! We could not find a changelog entry for this change. + For details on how to document a change, see the + [dbt-postgres contributing guide](https://github.com/dbt-labs/dbt-postgres/blob/main/CONTRIBUTING.md). + skip_label: "Skip Changelog" + secrets: inherit
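
A closing note for contributors affected by the changelog-entry-check workflow added in the final patch above: the check looks for a new entry under `.changes/unreleased/`, which is normally generated interactively with `changie new`. As a rough sketch only, such an entry is a small YAML file with the same shape as the `Features-*.yaml` and `Fixes-*.yaml` entries earlier in this series; the kind, body, timestamp, author, and issue number below are all placeholders, not a real entry:

```yaml
# hypothetical .changes/unreleased/Fixes-YYYYMMDD-HHMMSS.yaml (changie generates the filename)
kind: Fixes
body: One-line description of the change
time: 2024-05-23T00:00:00.000000-04:00
custom:
  Author: your_github_handle
  Issue: "123"
```

Pull requests that intentionally need no changelog entry can instead be labeled `Skip Changelog`, per the `skip_label` setting in the workflow.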