From eb6e59022013ac857e27e5c13bd3e6f59d545572 Mon Sep 17 00:00:00 2001 From: runleonarun Date: Mon, 30 Sep 2024 17:46:35 -0700 Subject: [PATCH 01/24] adding upgrade guide --- website/dbt-versions.js | 4 + .../core-upgrade/06-upgrading-to-v1.9.md | 89 +++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md diff --git a/website/dbt-versions.js b/website/dbt-versions.js index 60efef64f75..26a6b9b6db3 100644 --- a/website/dbt-versions.js +++ b/website/dbt-versions.js @@ -18,6 +18,10 @@ exports.versions = [ version: "1.9.1", customDisplay: "Cloud (Versionless)", }, + { + version: "1.9", + isPrerelease: true, + }, { version: "1.8", EOLDate: "2025-04-15", diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md new file mode 100644 index 00000000000..b17fdef6f8d --- /dev/null +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -0,0 +1,89 @@ +--- +title: "Upgrading to v1.9 (beta)" +id: upgrading-to-v1.9 +description: New features and changes in dbt Core v1.9 +displayed_sidebar: "docs" +--- + +## Resources + +- Changelog INSERT HERE - LINK to 1.9 changelog +- [dbt Core CLI Installation guide](/docs/core/installation-overview) +- [Cloud upgrade guide](/docs/dbt-versions/upgrade-dbt-version-in-cloud) + +## What to know before upgrading + +dbt Labs is committed to providing backward compatibility for all versions 1.x, except for any changes explicitly mentioned on this page. If you encounter an error upon upgrading, please let us know by [opening an issue](https://github.com/dbt-labs/dbt-core/issues/new). + +Remember from version 1.8 that we're [going versionless](/docs/dbt-versions/core-upgrade/upgrading-to-v1.8#versionless) and we have a new [adapter installation procedure](/docs/dbt-versions/core-upgrade/upgrading-to-v1.8#new-dbt-core-adapter-installation-procedure). 
+ +## New and changed features and functionality + +Features and functionality new in dbt v1.9. + +### New microbatch `incremental_strategy` + +INSERT HERE - link to docs + +Historically, managing incremental models involved several manual steps and responsibilities, which involved using: + +* Explicit filtering to define "new" data by writing your SQL within an `is_incremental` block. +* Custom logic for incremental loads by implementing your own logic to handle different loading strategies, such as `append` or `delete+insert`. +* Handle batches manually by implementing custom logic using variables. + +These steps made the process error-prone and introduced performance concerns because you had to run a single large SQL query to process all new and updated records. + +Starting in Core 1.9, you can use the new microbatch strategy, which streamlines this process and automates many of these tasks. The benefits include: + +* Simplified query design: Write your model query for a single day of data and no longer need `is_incremental()` logic or manual SQL for determining "new" records. +* Automatic batch processing: dbt automatically breaks down the loading process into smaller batches based on the specified `batch_size` and handles the SQL queries for each batch independently, improving efficiency and reducing the risk of query timeouts. +* Dynamic filtering: Use `event_time`, `lookback`, and `batch_size` configurations to generate necessary filters for you, making the process more streamlined and reducing the need for you to manage these details. +* Handling updates: Use the `lookback` configuration to keep track of late-arriving records instead of you making that calculation. + + +### Snapshots improvements + +Originally, snapshots were defined directly in the `dbt_project.yml` file, which involved YAML configuration for source and target schemas without any SQL logic. This method was cumbersome, as it limited flexibility and made managing snapshots more complex. 
Over time, snapshots evolved to use Jinja blocks, allowing for SQL logic within `.sql` files, but this added parsing complexity and made the development process less efficient. Beginning in dbt Core 1.9, we've streamlined snapshot configuration by defining snapshots purely in YAML without any SQL logic. This improvement includes: * New snapshot specification: Snapshots are now configured in a YAML file for a cleaner, more structured set up. * New `snapshot_meta_column_names` config: Allows you to customize the names of meta fields (for example, `dbt_valid_from`, `dbt_valid_to`, etc.) that dbt automatically adds to snapshots. This increases flexibility to tailor metadata to your needs. * `target_schema` now optional for snapshots: This schema is now optional. When omitted, snapshots will use the schema defined for the current environment. * Standard schema and database configs supported: Snapshots will now be consistent with other dbt resources. You can specify where snapshots should be stored. * Warning for incorrect `updated_at` data type: To ensure data integrity, you'll see a warning if the `updated_at` field specified in the snapshot configuration is not the proper data type or timestamp. ### `state:modified` improvements INSERT HERE Point me to a resource for this? Fewer false positives in state:modified state_modified_compare_more_unrendered_values state_modified_compare_vars ### Deprecated functionality INSERT HERE - any deprecated functionality to call out? ### Managing changes to legacy behaviors dbt Core v1.9 has introduced flags for [managing changes to legacy behaviors](/reference/global-configs/behavior-changes). You may opt into recently introduced changes (disabled by default), or opt out of mature changes (enabled by default), by setting `True` / `False` values, respectively, for `flags` in `dbt_project.yml`. You can read more about each of these behavior changes in the following links: INSERT HERE! 
Any behavior changes? + +## Quick hits + +We also made some quality-of-life improvements in Core 1.9, enabling you to: + +- Document [singular data tests](/docs/build/data-tests#document-singular-tests). +- Use `ref` and `source` in foreign key constraints +- New CLI flag for `dbt test`. Choose which resource types are included or excluded when you run the `dbt test` by including [`--resource-type`/`--exclude-resource-type`](/reference/global-configs/resource-type) +- New CLI flag for [`dbt show`](/reference/commands/show). `--inline-direct` enables you to avoid loading the entire manifest and +skip rendering any Jinja templates. + +We also made improvements for adapters, enabling you to: +- Use arbitrary config options in `data_test` For example, you can set `snowflake_warehouse` for tests. +- Use behavior flags INSERT HERE MORE INFO From 5cf5f67a24f3e5bce92f3711eae8388a27143b22 Mon Sep 17 00:00:00 2001 From: runleonarun Date: Tue, 1 Oct 2024 12:39:43 -0700 Subject: [PATCH 02/24] updating with internal changes --- .../core-upgrade/06-upgrading-to-v1.9.md | 86 +++++++++---------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index b17fdef6f8d..b71a5a155e0 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -5,17 +5,22 @@ description: New features and changes in dbt Core v1.9 displayed_sidebar: "docs" --- -## Resources +## Resources -- Changelog INSERT HERE - LINK to 1.9 changelog +- [dbt Core 1.9 changelog](https://github.com/dbt-labs/dbt-core/blob/1.9.latest/CHANGELOG.md) - [dbt Core CLI Installation guide](/docs/core/installation-overview) - [Cloud upgrade guide](/docs/dbt-versions/upgrade-dbt-version-in-cloud) ## What to know before upgrading -dbt Labs is committed to providing backward compatibility for 
all versions 1.x, except for any changes explicitly mentioned on this page. If you encounter an error upon upgrading, please let us know by [opening an issue](https://github.com/dbt-labs/dbt-core/issues/new). +dbt Labs is committed to providing backward compatibility for all versions 1.x, except for any changes explicitly mentioned in this guide or as a [behavior change flag](/reference/global-configs/behavior-changes#behavior-change-flags). If you encounter an error upon upgrading, please let us know by [opening an issue](https://github.com/dbt-labs/dbt-core/issues/new). -Remember from version 1.8 that we're [going versionless](/docs/dbt-versions/core-upgrade/upgrading-to-v1.8#versionless) and we have a new [adapter installation procedure](/docs/dbt-versions/core-upgrade/upgrading-to-v1.8#new-dbt-core-adapter-installation-procedure). +dbt Cloud is now [versionless](/docs/dbt-versions/versionless-cloud). If you have selected "Versionless" in dbt Cloud, you already have access to all the features, fixes, and other functionality that is included in dbt Core v1.9. +For users of dbt Core, since v1.8 we recommend explicitly installing both `dbt-core` and `dbt-`. This may become required for a future version of dbt. For example: + +```sql +python3 -m pip install dbt-core dbt-snowflake +``` ## New and changed features and functionality @@ -23,67 +28,62 @@ Features and functionality new in dbt v1.9. ### New microbatch `incremental_strategy` -INSERT HERE - link to docs +Incremental models are, and have always been, a *performance optimization —* for datasets that are too large to be dropped and recreated from scratch every time you do a `dbt run`. -Historically, managing incremental models involved several manual steps and responsibilities, which involved using: +Historically, managing incremental models involved several manual steps and responsibilities, including: -* Explicit filtering to define "new" data by writing your SQL within an `is_incremental` block. 
-* Custom logic for incremental loads by implementing your own logic to handle different loading strategies, such as `append` or `delete+insert`. -* Handle batches manually by implementing custom logic using variables. +- Add a snippet of dbt code (in an `is_incremental()` block) that uses the already-existing table (`this`) as a rough bookmark, so that only new data gets processed. +- Pick one of the strategies for smushing old and new data together (`append`, `delete+insert`, or `merge`). +- If anything goes wrong, or your schema changes, you can always "full-refresh", by running the same simple query that rebuilds the whole table from scratch. -These steps made the process error-prone and introduced performance concerns because you had to run a single large SQL query to process all new and updated records. +While this works for many use-cases, there’s a clear limitation with this approach: *Some datasets are just too big to fit into one query.* -Starting in Core 1.9, you can use the new microbatch strategy, which streamlines this process and automates many of these tasks. The benefits include: +Starting in Core 1.9, you can use the [new microbatch strategy](/docs/build/incremental-microbatch) to optimize your largest datasets -- **process your event data in discrete periods with their own SQL queries, rather than all at once.** The benefits include: -* Simplified query design: Write your model query for a single day of data and no longer need `is_incremental()` logic or manual SQL for determining "new" records. -* Automatic batch processing: dbt automatically breaks down the loading process into smaller batches based on the specified `batch_size` and handles the SQL queries for each batch independently, improving efficiency and reducing the risk of query timeouts. 
-* Dynamic filtering: Use `event_time`, `lookback`, and `batch_size` configurations to generate necessary filters for you, making the process more streamlined and reducing the need for you to manage these details. -* Handling updates: Use the `lookback` configuration to keep track of late-arriving records instead of you making that calculation. +- Simplified query design: Write your model query for a single batch of data and no longer need manual filtering for determining "new" records. Use `event_time`, `lookback`, and `batch_size` configurations to generate necessary filters for you, making the process more streamlined and reducing the need for you to manage these details. +- Independent batch processing: dbt automatically breaks down the data to load into smaller batches based on the specified `batch_size` and processes each batch independently, improving efficiency and reducing the risk of query timeouts. If some of your batches fail, you can use `dbt retry` to load only the failed batches. +- Targeted reprocessing: To load a *specific* batch or batches, you can use the CLI arguments `--event-time-start` and `--event-time-end`. +While microbatch is in "beta", this functionality is still gated behind an env var, which will change to a behavior flag when 1.9 is GA. To use microbatch: -### Snapshots improvements +- Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` in your project -Originally, snapshots were defined directly in the `dbt_project.yml` file, which involved YAML configuration for source and target schemas without any SQL logic. This method was cumbersome, as it limited flexibility and made managing snapshots more complex. Over time, snapshots evolved to use Jinja blocks, allowing for SQL logic within `.sql` files, but this added parsing complexity and made the development process less efficient. +### Snapshots improvements -Beginning in dbt Core 1.9, we've streamlined snapshot configuration by defining snapshots purely in YAML without any SQL logic. 
This improvement includes: +Beginning in dbt Core 1.9, we've streamlined snapshot configuration and added a handful of new configurations to make dbt **snapshots easier to configure, run, and customize.** These improvements include: -* New snapshot specification: Snapshots are now configured in a YAML file doe a cleaner more structured set up. -* New `snapshot_meta_column_names` config: Allows you to customize the names of meta fields (for example, `dbt_valid_from`, `dbt_valid_to`, etc.) that dbt automatically adds to snapshots. This increases flexibility to tailor metadata to your needs. -* `target_schema` now optional for snapshots: This schema is now optional When ommitted, snapshots will use the schema defined for the current environment. -* Standard schema and database configs supported: Snapshots will now be consistent with other dbt resources You can specify where snapshots should be stored. -* Warning for incorrect `updated_at` data type: To ensure data integrity, you'll see a warning if the `updated_at` field specified in the snapshot configuration is not the proper data type or timestamp. +- New snapshot specification: Snapshots can now be configured in a YAML file, which provides a cleaner and more consistent set up. +- New `snapshot_meta_column_names` config: Allows you to customize the names of meta fields (for example, `dbt_valid_from`, `dbt_valid_to`, etc.) that dbt automatically adds to snapshots. This increases flexibility to tailor metadata to your needs. +- `target_schema` is now optional for snapshots: When omitted, snapshots will use the schema defined for the current environment. +- Standard `schema` and `database` configs supported: Snapshots will now be consistent with other dbt resources. You can specify where environment-aware snapshots should be stored. 
+- Warning for incorrect `updated_at` data type: To ensure data integrity, you'll see a warning if the `updated_at` field specified in the snapshot configuration is not the proper data type or timestamp. ### `state:modified` improvements -INSERT HERE Point me to a resource for this? - -Fewer false positives in state:modified -state_modified_compare_more_unrendered_values -state_modified_compare_vars - -### Deprecated functionality - -INSERT HERE - any deprecated functionality to call out? +We’ve made a number of improvements to `state:modified` behaviors to help reduce the risk of false positives/negatives, including: +- Added environment-aware enhancements for environments where the logic purposefully differs (for example, materializing as a table in `prod` but a `view` in dev). +- Enhanced performance so that models that use `var` or `env_var` are included in `state:modified`. ### Managing changes to legacy behaviors -dbt Core v1.9 has introduced flags for [managing changes to legacy behaviors](/reference/global-configs/behavior-changes). You may opt into recently introduced changes (disabled by default), or opt out of mature changes (enabled by default), by setting `True` / `False` values, respectively, for `flags` in `dbt_project.yml`. +dbt Core v1.9 has introduced flags for [managing changes to legacy behaviors](/reference/global-configs/behavior-changes). You may opt into recently introduced changes (disabled by default), or opt out of mature changes (enabled by default), by setting `True` / `False` values, respectively, for `flags` in `dbt_project.yml`. You can read more about each of these behavior changes in the following links: -INSERT HERE! Any behavior changes? +- (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values` and `state_modified_compare_vars`](/reference/global-configs/behavior-changes#behavior-change-flags) . 
+- (Introduced, disabled by default) new [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped + - `on-run-start/end` hooks are **always** run, regardless of whether they passed or failed last time +- [Removing a contracted model by deleting, renaming, or disabling]/docs/collaborate/govern/model-contracts#how-are-breaking-changes-handled) it will return an error (versioned models) or warning (unversioned models). + +## Adapter specific features and functionalities + +TBD ## Quick hits We also made some quality-of-life improvements in Core 1.9, enabling you to: -- Document [singular data tests](/docs/build/data-tests#document-singular-tests). -- Use `ref` and `source` in foreign key constraints -- New CLI flag for `dbt test`. Choose which resource types are included or excluded when you run the `dbt test` by including [`--resource-type`/`--exclude-resource-type`](/reference/global-configs/resource-type) -- New CLI flag for [`dbt show`](/reference/commands/show). `--inline-direct` enables you to avoid loading the entire manifest and -skip rendering any Jinja templates. - -We also made improvements for adapters, enabling you to: -- Use arbitrary config options in `data_test` For example, you can set `snowflake_warehouse` for tests. -- Use behavior flags INSERT HERE MORE INFO +- Document [singular data tests](/docs/build/data-tests#document-singular-tests). +- Use `ref` and `source` in foreign key constraints. +- `dbt test` supports the `--resource-type` / `--exclude-resource-type` flag, making it possible to include or exclude data tests (`test`) or unit tests (`unit_test`). 
From 77b743d439b31558fe5ac1da4021c52785a320a4 Mon Sep 17 00:00:00 2001 From: runleonarun Date: Tue, 1 Oct 2024 12:47:46 -0700 Subject: [PATCH 03/24] fixing link --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index b71a5a155e0..7835eac167b 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -74,7 +74,7 @@ You can read more about each of these behavior changes in the following links: - (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values` and `state_modified_compare_vars`](/reference/global-configs/behavior-changes#behavior-change-flags) . - (Introduced, disabled by default) new [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped - `on-run-start/end` hooks are **always** run, regardless of whether they passed or failed last time -- [Removing a contracted model by deleting, renaming, or disabling]/docs/collaborate/govern/model-contracts#how-are-breaking-changes-handled) it will return an error (versioned models) or warning (unversioned models). +- [Removing a contracted model by deleting, renaming, or disabling](/docs/collaborate/govern/model-contracts#how-are-breaking-changes-handled) it will return an error (versioned models) or warning (unversioned models). ## Adapter specific features and functionalities From 26abb9931afef286aa1aa4a0c3adc1a230d2187b Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Tue, 1 Oct 2024 13:07:47 -0700 Subject: [PATCH 04/24] Update website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 7835eac167b..0bd820a615b 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -38,7 +38,7 @@ Historically, managing incremental models involved several manual steps and resp While this works for many use-cases, there’s a clear limitation with this approach: *Some datasets are just too big to fit into one query.* -Starting in Core 1.9, you can use the [new microbatch strategy](/docs/build/incremental-microbatch) to optimize your largest datasets -- **process your event data in discrete periods with their own SQL queries, rather than all at once.** The benefits include: +Starting in Core 1.9, you can use the new microbatch strategy to optimize your largest datasets -- **process your event data in discrete periods with their own SQL queries, rather than all at once.** The benefits include: - Simplified query design: Write your model query for a single batch of data and no longer need manual filtering for determining "new" records. Use `event_time`, `lookback`, and `batch_size` configurations to generate necessary filters for you, making the process more streamlined and reducing the need for you to manage these details. - Independent batch processing: dbt automatically breaks down the data to load into smaller batches based on the specified `batch_size` and processes each batch independently, improving efficiency and reducing the risk of query timeouts. 
If some of your batches fail, you can use `dbt retry` to load only the failed batches. From a6e475ce8264b00fb41b19dd11f7a0a0a3c782a4 Mon Sep 17 00:00:00 2001 From: "Leona B. Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Tue, 1 Oct 2024 13:17:12 -0700 Subject: [PATCH 05/24] Update website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md --- .../docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 0bd820a615b..81231d1215d 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -47,7 +47,12 @@ Starting in Core 1.9, you can use the new microbatch strategy to optimize your l While microbatch is in "beta", this functionality is still gated behind an env var, which will change to a behavior flag when 1.9 is GA. 
To use microbatch: - Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` in your project +Currently microbatch is supported on these adapters, with more to come: +- postgres +- snowflake +- bigquery +- spark ### Snapshots improvements Beginning in dbt Core 1.9, we've streamlined snapshot configuration and added a handful of new configurations to make dbt **snapshots easier to configure, run, and customize.** These improvements include: From 8c8eff18505248230e27451af2ca42701cd05fa7 Mon Sep 17 00:00:00 2001 From: runleonarun Date: Tue, 1 Oct 2024 15:01:54 -0700 Subject: [PATCH 06/24] fixing link --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 81231d1215d..f1f782e8da8 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -90,5 +90,5 @@ We also made some quality-of-life improvements in Core 1.9, enabling you to: - Document [singular data tests](/docs/build/data-tests#document-singular-tests). -- Use `ref` and `source` in foreign key constraints. +- Use `ref` and `source` in [foreign key constraints](/reference/resource-properties/constraints). - `dbt test` supports the `--resource-type` / `--exclude-resource-type` flag, making it possible to include or exclude data tests (`test`) or unit tests (`unit_test`). 
From 18bcb9f164bb0f24cb2fbafa859e64b89eb19009 Mon Sep 17 00:00:00 2001 From: runleonarun Date: Tue, 1 Oct 2024 16:28:55 -0700 Subject: [PATCH 07/24] adding adapters --- .../core-upgrade/06-upgrading-to-v1.9.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index f1f782e8da8..5662250e82a 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -83,7 +83,22 @@ You can read more about each of these behavior changes in the following links: ## Adapter specific features and functionalities -TBD +### Redshift + +- We are changing the adapter's behavior when accessing metadata on Redshift. It’s currently under a [behavior flag](/reference/global-configs/redshift-changes#the-restrict_direct_pg_catalog_access-flag) to mitigate any breaking changes. + +### Snowflake + +- Iceberg Table Format support will be available on three out of the box materializations: table, incremental, dynamic tables. INSERT HERE link to docs. + +### Bigquery + +- Can cancel running queries on keyboard interrupt +- auto-drop intermediate tables created by incremental models to save resources + +### Spark + +- Support overriding the ODBC driver connection string which now enables you to provide custom connections ## Quick hits From 00462f22110660458e89ae1572fa34bb152ca658 Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Tue, 1 Oct 2024 16:41:43 -0700 Subject: [PATCH 08/24] Apply suggestions from code review --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 5662250e82a..c6447e08806 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -85,7 +85,7 @@ You can read more about each of these behavior changes in the following links: ### Redshift -- We are changing the adapter's behavior when accessing metadata on Redshift. It’s currently under a [behavior flag](/reference/global-configs/redshift-changes#the-restrict_direct_pg_catalog_access-flag) to mitigate any breaking changes. +- We are changing the adapter's behavior when accessing metadata on Redshift. It’s currently under a behavior flag to mitigate any breaking changes. ### Snowflake From d2143e7ab58f8d4b4e8119aa0526a2b706032908 Mon Sep 17 00:00:00 2001 From: runleonarun Date: Tue, 1 Oct 2024 17:13:57 -0700 Subject: [PATCH 09/24] removing INSERT link --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index c6447e08806..0271591bf50 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -89,7 +89,7 @@ You can read more about each of these behavior changes in the following links: ### Snowflake -- Iceberg Table Format support will be available on three out of the box materializations: table, incremental, dynamic tables. 
INSERT HERE link to docs. +- Iceberg Table Format support will be available on three out of the box materializations: table, incremental, dynamic tables. ### Bigquery From 7a5391d5e08918b33055e07ccc6baa29db62d26c Mon Sep 17 00:00:00 2001 From: Amy Chen <46451573+amychen1776@users.noreply.github.com> Date: Wed, 2 Oct 2024 09:41:45 -0400 Subject: [PATCH 10/24] Update website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md Co-authored-by: Grace Goheen <53586774+graciegoheen@users.noreply.github.com> --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 0271591bf50..2333c20a743 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -94,7 +94,7 @@ You can read more about each of these behavior changes in the following links: ### Bigquery - Can cancel running queries on keyboard interrupt -- auto-drop intermediate tables created by incremental models to save resources +- Auto-drop intermediate tables created by incremental models to save resources ### Spark From 932874bae26dcf1ecbc54fce699a66fec73b4eda Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Wed, 2 Oct 2024 10:17:53 -0700 Subject: [PATCH 11/24] Apply suggestions from code review Co-authored-by: Grace Goheen <53586774+graciegoheen@users.noreply.github.com> --- .../core-upgrade/06-upgrading-to-v1.9.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 2333c20a743..3fbb5d96d28 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -9,11 +9,11 @@ displayed_sidebar: "docs" - [dbt Core 1.9 changelog](https://github.com/dbt-labs/dbt-core/blob/1.9.latest/CHANGELOG.md) - [dbt Core CLI Installation guide](/docs/core/installation-overview) -- [Cloud upgrade guide](/docs/dbt-versions/upgrade-dbt-version-in-cloud) +- [Cloud upgrade guide](/docs/dbt-versions/upgrade-dbt-version-in-cloud#versionless) ## What to know before upgrading -dbt Labs is committed to providing backward compatibility for all versions 1.x, except for any changes explicitly mentioned in this guide or as a [behavior change flag](/reference/global-configs/behavior-changes#behavior-change-flags). If you encounter an error upon upgrading, please let us know by [opening an issue](https://github.com/dbt-labs/dbt-core/issues/new). +dbt Labs is committed to providing backward compatibility for all versions 1.x. Any behavior changes will be accompanied by a [behavior change flag](/reference/global-configs/behavior-changes#behavior-change-flags) to provide a migration window for existing projects. If you encounter an error upon upgrading, please let us know by [opening an issue](https://github.com/dbt-labs/dbt-core/issues/new). dbt Cloud is now [versionless](/docs/dbt-versions/versionless-cloud). 
If you have selected "Versionless" in dbt Cloud, you already have access to all the features, fixes, and other functionality that is included in dbt Core v1.9. For users of dbt Core, since v1.8 we recommend explicitly installing both `dbt-core` and `dbt-<adapter>`. This may become required for a future version of dbt. For example: @@ -28,7 +28,7 @@ Features and functionality new in dbt v1.9. ### New microbatch `incremental_strategy` -Incremental models are, and have always been, a *performance optimization —* for datasets that are too large to be dropped and recreated from scratch every time you do a `dbt run`. +Incremental models are, and have always been, a *performance optimization* — for datasets that are too large to be dropped and recreated from scratch every time you do a `dbt run`. Historically, managing incremental models involved several manual steps and responsibilities, including: @@ -40,7 +40,7 @@ While this works for many use-cases, there’s a clear limitation with this appr Starting in Core 1.9, you can use the new microbatch strategy to optimize your largest datasets -- **process your event data in discrete periods with their own SQL queries, rather than all at once.** The benefits include: -- Simplified query design: Write your model query for a single batch of data and no longer need manual filtering for determining "new" records. Use `event_time`, `lookback`, and `batch_size` configurations to generate necessary filters for you, making the process more streamlined and reducing the need for you to manage these details. +- Simplified query design: Write your model query for a single batch of data. dbt will use your `event_time`, `lookback`, and `batch_size` configurations to automatically generate the necessary filters for you, making the process more streamlined and reducing the need for you to manage these details. 
- Independent batch processing: dbt automatically breaks down the data to load into smaller batches based on the specified `batch_size` and processes each batch independently, improving efficiency and reducing the risk of query timeouts. If some of your batches fail, you can use `dbt retry` to load only the failed batches. - Targeted reprocessing: To load a *specific* batch or batches, you can use the CLI arguments `--event-time-start` and `--event-time-end`. @@ -60,7 +60,7 @@ Beginning in dbt Core 1.9, we've streamlined snapshot configuration and added a - New snapshot specification: Snapshots can now be configured in a YAML file, which provides a cleaner and more consistent set up. - New `snapshot_meta_column_names` config: Allows you to customize the names of meta fields (for example, `dbt_valid_from`, `dbt_valid_to`, etc.) that dbt automatically adds to snapshots. This increases flexibility to tailor metadata to your needs. - `target_schema` is now optional for snapshots: When omitted, snapshots will use the schema defined for the current environment. -- Standard `schema` and `database` configs supported: Snapshots will now be consistent with other dbt resources. You can specify where environment-aware snapshots should be stored. +- Standard `schema` and `database` configs supported: Snapshots will now be consistent with other dbt resource types. You can specify where environment-aware snapshots should be stored. - Warning for incorrect `updated_at` data type: To ensure data integrity, you'll see a warning if the `updated_at` field specified in the snapshot configuration is not the proper data type or timestamp. ### `state:modified` improvements @@ -72,7 +72,7 @@ We’ve made a number of improvements to `state:modified` behaviors to help redu ### Managing changes to legacy behaviors -dbt Core v1.9 has introduced flags for [managing changes to legacy behaviors](/reference/global-configs/behavior-changes). 
You may opt into recently introduced changes (disabled by default), or opt out of mature changes (enabled by default), by setting `True` / `False` values, respectively, for `flags` in `dbt_project.yml`. +dbt Core v1.9 has a handful of new flags for [managing changes to legacy behaviors](/reference/global-configs/behavior-changes). You may opt into recently introduced changes (disabled by default), or opt out of mature changes (enabled by default), by setting `True` / `False` values, respectively, for `flags` in `dbt_project.yml`. You can read more about each of these behavior changes in the following links: @@ -85,7 +85,7 @@ You can read more about each of these behavior changes in the following links: ### Redshift -- We are changing the adapter's behavior when accessing metadata on Redshift. It’s currently under a behavior flag to mitigate any breaking changes. +- We are changing the adapter's behavior when accessing metadata on Redshift. It’s currently under a [behavior flag](reference/global-configs/redshift-changes#the-restrict_direct_pg_catalog_access-flag) to mitigate any breaking changes. There are no expected impacts to the user experience. ### Snowflake From 1365f9d2fe7c4b370875c308e321c0d5e9c82dd3 Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Wed, 2 Oct 2024 10:38:08 -0700 Subject: [PATCH 12/24] Update website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 3fbb5d96d28..75397a59303 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -65,7 +65,7 @@ Beginning in dbt Core 1.9, we've streamlined snapshot configuration and added a ### `state:modified` improvements -We’ve made a number of improvements to `state:modified` behaviors to help reduce the risk of false positives/negatives, including: +We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives/negatives. Read more about [the `state:modified` behavior flags](#managing-changes-to-legacy-behaviors) that leverage these improvements: - Added environment-aware enhancements for environments where the logic purposefully differs (for example, materializing as a table in `prod` but a `view` in dev). - Enhanced performance so that models that use `var` or `env_var` are included in `state:modified`. From 56c362be711535f2bbc3011aab66df5e45e0dc09 Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Wed, 2 Oct 2024 11:43:46 -0700 Subject: [PATCH 13/24] Apply suggestions from code review Co-authored-by: Amy Chen <46451573+amychen1776@users.noreply.github.com> --- .../docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 75397a59303..d46083c3fb2 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -65,7 +65,7 @@ Beginning in dbt Core 1.9, we've streamlined snapshot configuration and added a ### `state:modified` improvements -We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives/negatives. Read more about [the `state:modified` behavior flags](#managing-changes-to-legacy-behaviors) that leverage these improvements: +We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives and negatives. Read more about [the `state:modified` behavior flags](#managing-changes-to-legacy-behaviors) that leverage these improvements: - Added environment-aware enhancements for environments where the logic purposefully differs (for example, materializing as a table in `prod` but a `view` in dev). - Enhanced performance so that models that use `var` or `env_var` are included in `state:modified`. @@ -79,13 +79,14 @@ You can read more about each of these behavior changes in the following links: - (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values` and `state_modified_compare_vars`](/reference/global-configs/behavior-changes#behavior-change-flags) . 
- (Introduced, disabled by default) new [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped - `on-run-start/end` hooks are **always** run, regardless of whether they passed or failed last time -- [Removing a contracted model by deleting, renaming, or disabling](/docs/collaborate/govern/model-contracts#how-are-breaking-changes-handled) it will return an error (versioned models) or warning (unversioned models). ## Adapter specific features and functionalities ### Redshift -- We are changing the adapter's behavior when accessing metadata on Redshift. It’s currently under a [behavior flag](reference/global-configs/redshift-changes#the-restrict_direct_pg_catalog_access-flag) to mitigate any breaking changes. There are no expected impacts to the user experience. +- We are changing the adapter's behavior when accessing metadata on Redshift. It’s currently under a [behavior flag](/reference/global-configs/redshift-changes#the-restrict_direct_pg_catalog_access-flag) to mitigate any breaking changes. There are no expected impacts to the user experience. + +- Support IAM Role auth ### Snowflake From e7a2d460df7cf2e65945e6ada20d0feb287654d0 Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Wed, 2 Oct 2024 14:18:54 -0700 Subject: [PATCH 14/24] Apply suggestions from code review Co-authored-by: Grace Goheen <53586774+graciegoheen@users.noreply.github.com> --- .../docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index d46083c3fb2..407d9efbc37 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -68,7 +68,7 @@ Beginning in dbt Core 1.9, we've streamlined snapshot configuration and added a We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives and negatives. Read more about [the `state:modified` behavior flags](#managing-changes-to-legacy-behaviors) that leverage these improvements: - Added environment-aware enhancements for environments where the logic purposefully differs (for example, materializing as a table in `prod` but a `view` in dev). -- Enhanced performance so that models that use `var` or `env_var` are included in `state:modified`. +- Models that use `var` or `env_var` are included in `state:modified`. ### Managing changes to legacy behaviors @@ -77,7 +77,7 @@ dbt Core v1.9 has a handful of new flags for [managing changes to legacy behavi You can read more about each of these behavior changes in the following links: - (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values` and `state_modified_compare_vars`](/reference/global-configs/behavior-changes#behavior-change-flags) . -- (Introduced, disabled by default) new [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). 
If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped +- (Introduced, disabled by default) [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped - `on-run-start/end` hooks are **always** run, regardless of whether they passed or failed last time ## Adapter specific features and functionalities @@ -105,6 +105,7 @@ You can read more about each of these behavior changes in the following links: We also made some quality-of-life improvements in Core 1.9, enabling you to: +- dbt now returns an an error (versioned models) or warning (unversioned models) when you [remove a contracted model by deleting, renaming, or disabling](/docs/collaborate/govern/model-contracts#how-are-breaking-changes-handled) it. - Document [singular data tests](/docs/build/data-tests#document-singular-tests). - Use `ref` and `source` in [foreign key constraints](/reference/resource-properties/constraints). -- `dbt test` supports the `--resource-type` / `--exclude-resource-type` flag, making it possible to include or exclude data tests (`test`) or unit tests (`unit_test`). +- Use `dbt test` with the `--resource-type` / `--exclude-resource-type` flag, making it possible to include or exclude data tests (`test`) or unit tests (`unit_test`). From 4c6bf6caca22db6a92697d363d18344b030041bb Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Wed, 2 Oct 2024 14:19:12 -0700 Subject: [PATCH 15/24] Apply suggestions from code review Co-authored-by: Grace Goheen <53586774+graciegoheen@users.noreply.github.com> --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 407d9efbc37..bd1d3998abc 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -26,7 +26,7 @@ python3 -m pip install dbt-core dbt-snowflake Features and functionality new in dbt v1.9. -### New microbatch `incremental_strategy` +### Microbatch `incremental_strategy` Incremental models are, and have always been, a *performance optimization* — for datasets that are too large to be dropped and recreated from scratch every time you do a `dbt run`. From 74f519c85764adc6c804e9d3cf88fc43810787ea Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Wed, 2 Oct 2024 14:20:40 -0700 Subject: [PATCH 16/24] Update 06-upgrading-to-v1.9.md --- .../dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index bd1d3998abc..5bae57e6a23 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -48,11 +48,11 @@ While microbatch is in "beta", this functionality is still gated behind an env v - Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` in your project Currently microbatch is supported on these adapters with more to come: - -• postgres -• snowflake -• bigquery -• spark +* postgres +* snowflake +* bigquery +* spark + ### Snapshots improvements Beginning in dbt Core 1.9, we've streamlined snapshot configuration and added a handful of new configurations to make dbt **snapshots easier to configure, run, and customize.** These improvements include: From fef4db35436c54b2d74a6c3f316570923428954c Mon Sep 17 00:00:00 2001 From: runleonarun Date: Wed, 2 Oct 2024 15:04:34 -0700 Subject: [PATCH 17/24] @grace feedback --- .../core-upgrade/06-upgrading-to-v1.9.md | 22 ++++++++++--------- .../docs/docs/dbt-versions/release-notes.md | 2 +- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 5bae57e6a23..e8f7592da1a 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -28,7 +28,7 @@ Features and functionality new in dbt v1.9. 
### Microbatch `incremental_strategy` -Incremental models are, and have always been, a *performance optimization* — for datasets that are too large to be dropped and recreated from scratch every time you do a `dbt run`. +Incremental models are, and have always been, a *performance optimization* — for datasets that are too large to be dropped and recreated from scratch every time you do a `dbt run`. Learn more about [incremental models](/docs/build/incremental-models-overview). Historically, managing incremental models involved several manual steps and responsibilities, including: @@ -48,10 +48,10 @@ While microbatch is in "beta", this functionality is still gated behind an env v - Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` in your project Currently microbatch is supported on these adapters with more to come: -* postgres -* snowflake -* bigquery -* spark + * postgres + * snowflake + * bigquery + * spark ### Snapshots improvements @@ -63,6 +63,8 @@ Beginning in dbt Core 1.9, we've streamlined snapshot configuration and added a - Standard `schema` and `database` configs supported: Snapshots will now be consistent with other dbt resource types. You can specify where environment-aware snapshots should be stored. - Warning for incorrect `updated_at` data type: To ensure data integrity, you'll see a warning if the `updated_at` field specified in the snapshot configuration is not the proper data type or timestamp. +Read more about [Snapshots meta fields](/docs/build/snapshots#snapshot-meta-fields). + ### `state:modified` improvements We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives and negatives. 
Read more about [the `state:modified` behavior flags](#managing-changes-to-legacy-behaviors) that leverage these improvements: @@ -76,16 +78,16 @@ dbt Core v1.9 has a handful of new flags for [managing changes to legacy behavi You can read more about each of these behavior changes in the following links: -- (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values` and `state_modified_compare_vars`](/reference/global-configs/behavior-changes#behavior-change-flags) . -- (Introduced, disabled by default) [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped - - `on-run-start/end` hooks are **always** run, regardless of whether they passed or failed last time +- (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values`](/reference/global-configs/behavior-changes#behavior-change-flags). Set to `True` to start persisting unrendered_database and unrendered_schema configs during source parsing, and do comparison on unrendered values during `state:modified` checks. +- (Introduced, disabled by default) [`state_modified_compare_vars`](/reference/global-configs/behavior-changes#behavior-change-flags). Set to `True` if a model uses a `var` or `env_var` in its definition. dbt will be able to identify its lineage to include the model in `state:modified` because the var or env_var value has changed. +- (Introduced, disabled by default) [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped. + - `on-run-start/end` hooks are **always** run, regardless of whether they passed or failed last time. 
+- (Introduced, disabled by default) [[Redshift] `restrict_direct_pg_catalog_access`](/reference/global-configs/behavior-changes#redshift-restrict_direct_pg_catalog_access). If the flag is set the adapter will use the Redshift API (through the Python client) if available, or query Redshift's `information_schema` tables instead of using `pg_` tables. ## Adapter specific features and functionalities ### Redshift -- We are changing the adapter's behavior when accessing metadata on Redshift. It’s currently under a [behavior flag](/reference/global-configs/redshift-changes#the-restrict_direct_pg_catalog_access-flag) to mitigate any breaking changes. There are no expected impacts to the user experience. - - Support IAM Role auth ### Snowflake diff --git a/website/docs/docs/dbt-versions/release-notes.md b/website/docs/docs/dbt-versions/release-notes.md index 0d1668c959f..533e933ee2a 100644 --- a/website/docs/docs/dbt-versions/release-notes.md +++ b/website/docs/docs/dbt-versions/release-notes.md @@ -21,7 +21,7 @@ Release notes are grouped by month for both multi-tenant and virtual private clo ## October 2024 - **Enhancement**: In dbt Cloud Versionless, snapshots defined in SQL files can now use `config` defined in `schema.yml` YAML files. This update resolves the previous limitation that required snapshot properties to be defined exclusively in `dbt_project.yml` and/or a `config()` block within the SQL file. This enhancement will be included in the upcoming dbt Core v1.9 release. -- **Enhancement**: In May 2024, dbt Cloud versionless began inferring a model's `primary_key` based on configured data tests and/or constraints within `manifest.json`. The inferred `primary_key` is visible in dbt Explorer and utilized by the dbt Cloud [compare changes](/docs/deploy/run-visibility#compare-tab) feature. This will also be released in dbt Core 1.9. 
+- **Enhancement**: In dbt Cloud versionless, dbt infers a model's `primary_key` based on configured data tests and/or constraints within `manifest.json`. The inferred `primary_key` is visible in dbt Explorer and utilized by the dbt Cloud [compare changes](/docs/deploy/run-visibility#compare-tab) feature. This will also be released in dbt Core 1.9. Read about the [order dbt infers columns can be used as primary key of a model](https://github.com/dbt-labs/dbt-core/blob/7940ad5c7858ff11ef100260a372f2f06a86e71f/core/dbt/contracts/graph/nodes.py#L534-L541). - **New:** dbt Explorer now includes trust signal icons, which is currently available as a [Preview](/docs/dbt-versions/product-lifecycles#dbt-cloud). Trust signals offer a quick, at-a-glance view of data health when browsing your dbt models in Explorer. These icons indicate whether a model is **Healthy**, **Caution**, **Degraded**, or **Unknown**. For accurate health data, ensure the resource is up-to-date and has had a recent job run. Refer to [Trust signals](/docs/collaborate/explore-projects#trust-signals-for-resources) for more information. - **New:** Auto exposures are now available in Preview in dbt Cloud. Auto-exposures helps users understand how their models are used in downstream analytics tools to inform investments and reduce incidents. It imports and auto-generates exposures based on Tableau dashboards, with user-defined curation. To learn more, refer to [Auto exposures](/docs/collaborate/auto-exposures). From 24b0dd53b268f0f1b9fd3f553d1ad6acdeb2d5fc Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Wed, 2 Oct 2024 16:13:18 -0700 Subject: [PATCH 18/24] Apply suggestions from code review Co-authored-by: Grace Goheen <53586774+graciegoheen@users.noreply.github.com> --- .../docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index e8f7592da1a..49d5e7fd8cb 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -47,6 +47,7 @@ Starting in Core 1.9, you can use the new microbatch strategy to optimize your l While microbatch is in "beta", this functionality is still gated behind an env var, which will change to a behavior flag when 1.9 is GA. To use microbatch: - Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` in your project + Currently microbatch is supported on these adapters with more to come: * postgres * snowflake @@ -70,7 +71,7 @@ Read more about [Snapshots meta fields](/docs/build/snapshots#snapshot-meta-fiel We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives and negatives. Read more about [the `state:modified` behavior flags](#managing-changes-to-legacy-behaviors) that leverage these improvements: - Added environment-aware enhancements for environments where the logic purposefully differs (for example, materializing as a table in `prod` but a `view` in dev). -- Models that use `var` or `env_var` are included in `state:modified`. +- Models that use `var` or `env_var` in their definition are included in `state:modified` when their values change. 
### Managing changes to legacy behaviors From a6b0a6260ab91d425881acde0f42d521bfbd1ab6 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Wed, 2 Oct 2024 17:34:22 -0600 Subject: [PATCH 19/24] Remove `state_modified_compare_vars` from the upgrade guide --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 49d5e7fd8cb..c5b9a2e4bf4 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -71,7 +71,6 @@ Read more about [Snapshots meta fields](/docs/build/snapshots#snapshot-meta-fiel We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives and negatives. Read more about [the `state:modified` behavior flags](#managing-changes-to-legacy-behaviors) that leverage these improvements: - Added environment-aware enhancements for environments where the logic purposefully differs (for example, materializing as a table in `prod` but a `view` in dev). -- Models that use `var` or `env_var` in their definition are included in `state:modified` when their values change. ### Managing changes to legacy behaviors @@ -80,7 +79,6 @@ dbt Core v1.9 has a handful of new flags for [managing changes to legacy behavi You can read more about each of these behavior changes in the following links: - (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values`](/reference/global-configs/behavior-changes#behavior-change-flags). Set to `True` to start persisting unrendered_database and unrendered_schema configs during source parsing, and do comparison on unrendered values during `state:modified` checks. 
-- (Introduced, disabled by default) [`state_modified_compare_vars`](/reference/global-configs/behavior-changes#behavior-change-flags). Set to `True` if a model uses a `var` or `env_var` in its definition. dbt will be able to identify its lineage to include the model in `state:modified` because the var or env_var value has changed. - (Introduced, disabled by default) [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped. - `on-run-start/end` hooks are **always** run, regardless of whether they passed or failed last time. - (Introduced, disabled by default) [[Redshift] `restrict_direct_pg_catalog_access`](/reference/global-configs/behavior-changes#redshift-restrict_direct_pg_catalog_access). If the flag is set the adapter will use the Redshift API (through the Python client) if available, or query Redshift's `information_schema` tables instead of using `pg_` tables. From 33628ebddc5d596f53634ecab75f2f8ee1e1b163 Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Wed, 2 Oct 2024 16:52:55 -0700 Subject: [PATCH 20/24] Update website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index c5b9a2e4bf4..9ab42751ad5 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -106,7 +106,7 @@ You can read more about each of these behavior changes in the following links: We also made some quality-of-life improvements in Core 1.9, enabling you to: -- dbt now returns an an error (versioned models) or warning (unversioned models) when you [remove a contracted model by deleting, renaming, or disabling](/docs/collaborate/govern/model-contracts#how-are-breaking-changes-handled) it. +- Maintain data quality now that dbt returns an error (versioned models) or warning (unversioned models) when someone [removes a contracted model by deleting, renaming, or disabling](/docs/collaborate/govern/model-contracts#how-are-breaking-changes-handled) it. - Document [singular data tests](/docs/build/data-tests#document-singular-tests). - Use `ref` and `source` in [foreign key constraints](/reference/resource-properties/constraints). - Use `dbt test` with the `--resource-type` / `--exclude-resource-type` flag, making it possible to include or exclude data tests (`test`) or unit tests (`unit_test`). From 74a30ef40f147ce102c692b335a9eeaa1578808d Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:27:22 -0700 Subject: [PATCH 21/24] Apply suggestions from code review Co-authored-by: Grace Goheen <53586774+graciegoheen@users.noreply.github.com> --- .../docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 9ab42751ad5..d1865192e76 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -68,7 +68,7 @@ Read more about [Snapshots meta fields](/docs/build/snapshots#snapshot-meta-fiel ### `state:modified` improvements -We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives and negatives. Read more about [the `state:modified` behavior flags](#managing-changes-to-legacy-behaviors) that leverage these improvements: +We’ve made improvements to `state:modified` behaviors to help reduce the risk of false positives and negatives. Read more about [the `state:modified` behavior flag](#managing-changes-to-legacy-behaviors) that unlocks this improvement: - Added environment-aware enhancements for environments where the logic purposefully differs (for example, materializing as a table in `prod` but a `view` in dev). @@ -78,7 +78,7 @@ dbt Core v1.9 has a handful of new flags for [managing changes to legacy behavi You can read more about each of these behavior changes in the following links: -- (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values`](/reference/global-configs/behavior-changes#behavior-change-flags). Set to `True` to start persisting unrendered_database and unrendered_schema configs during source parsing, and do comparison on unrendered values during `state:modified` checks. 
+- (Introduced, disabled by default) [`state_modified_compare_more_unrendered_values`](/reference/global-configs/behavior-changes#behavior-change-flags). Set to `True` to start persisting `unrendered_database` and `unrendered_schema` configs during source parsing, and do comparison on unrendered values during `state:modified` checks to reduce false positives due to environment-aware logic when selecting `state:modified`. - (Introduced, disabled by default) [`skip_nodes_if_on_run_start_fails` project config flag](/reference/global-configs/behavior-changes#behavior-change-flags). If the flag is set and **any** `on-run-start` hook fails, mark all selected nodes as skipped. - `on-run-start/end` hooks are **always** run, regardless of whether they passed or failed last time. - (Introduced, disabled by default) [[Redshift] `restrict_direct_pg_catalog_access`](/reference/global-configs/behavior-changes#redshift-restrict_direct_pg_catalog_access). If the flag is set the adapter will use the Redshift API (through the Python client) if available, or query Redshift's `information_schema` tables instead of using `pg_` tables. From 5d8ae6ecc5ebaf32c0365d810f198703c9e9fdde Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:28:28 -0700 Subject: [PATCH 22/24] Update website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md Co-authored-by: Grace Goheen <53586774+graciegoheen@users.noreply.github.com> --- .../docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index d1865192e76..78bec39eb11 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -46,7 +46,7 @@ Starting in Core 1.9, you can use the new microbatch strategy to optimize your l While microbatch is in "beta", this functionality is still gated behind an env var, which will change to a behavior flag when 1.9 is GA. To use microbatch: -- Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` in your project +- Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` wherever you're running dbt Core Currently microbatch is supported on these adapters with more to come: * postgres From 06bbe85dd261455d92e74f8143bbc19e01fbc0bd Mon Sep 17 00:00:00 2001 From: "Leona B. Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:35:06 -0700 Subject: [PATCH 23/24] Update 06-upgrading-to-v1.9.md --- .../docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 78bec39eb11..42e756ef419 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -28,6 +28,10 @@ Features and functionality new in dbt v1.9. 
### Microbatch `incremental_strategy` +:::info +While microbatch is in "beta", this functionality is still gated behind an env var, which will change to a behavior flag when 1.9 is GA. +::: + Incremental models are, and have always been, a *performance optimization* — for datasets that are too large to be dropped and recreated from scratch every time you do a `dbt run`. Learn more about [incremental models](/docs/build/incremental-models-overview). Historically, managing incremental models involved several manual steps and responsibilities, including: @@ -44,7 +48,7 @@ Starting in Core 1.9, you can use the new microbatch strategy to optimize your l - Independent batch processing: dbt automatically breaks down the data to load into smaller batches based on the specified `batch_size` and processes each batch independently, improving efficiency and reducing the risk of query timeouts. If some of your batches fail, you can use `dbt retry` to load only the failed batches. - Targeted reprocessing: To load a *specific* batch or batches, you can use the CLI arguments `--event-time-start` and `--event-time-end`. -While microbatch is in "beta", this functionality is still gated behind an env var, which will change to a behavior flag when 1.9 is GA. To use microbatch: +To use microbatch: - Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` wherever you're running dbt Core From 5613080ec82ffaaeec63df38391b87c1d20ca5f3 Mon Sep 17 00:00:00 2001 From: "Leona B. 
Campbell" <3880403+runleonarun@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:41:57 -0700 Subject: [PATCH 24/24] Update 06-upgrading-to-v1.9.md --- .../docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md index 42e756ef419..cf9b9eaed4e 100644 --- a/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md +++ b/website/docs/docs/dbt-versions/core-upgrade/06-upgrading-to-v1.9.md @@ -29,7 +29,7 @@ Features and functionality new in dbt v1.9. ### Microbatch `incremental_strategy` :::info -While microbatch is in "beta", this functionality is still gated behind an env var, which will change to a behavior flag when 1.9 is GA. +While microbatch is in "beta", this functionality is still gated behind an env var, which will change to a behavior flag when 1.9 is GA. To use microbatch, set `DBT_EXPERIMENTAL_MICROBATCH` to `true` wherever you're running dbt Core. ::: Incremental models are, and have always been, a *performance optimization* — for datasets that are too large to be dropped and recreated from scratch every time you do a `dbt run`. Learn more about [incremental models](/docs/build/incremental-models-overview). @@ -48,10 +48,6 @@ Starting in Core 1.9, you can use the new microbatch strategy to optimize your l - Independent batch processing: dbt automatically breaks down the data to load into smaller batches based on the specified `batch_size` and processes each batch independently, improving efficiency and reducing the risk of query timeouts. If some of your batches fail, you can use `dbt retry` to load only the failed batches. - Targeted reprocessing: To load a *specific* batch or batches, you can use the CLI arguments `--event-time-start` and `--event-time-end`. 
-To use microbatch: - -- Set `DBT_EXPERIMENTAL_MICROBATCH` to `true` wherever you're running dbt Core - Currently microbatch is supported on these adapters with more to come: * postgres * snowflake