diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6fcaacea8..7185cef3e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -70,14 +70,15 @@ To run functional tests we rely on [dagger](https://dagger.io/). This launches a
 
 ```sh
 pip install -r dagger/requirements.txt
-python dagger/run_dbt_spark_tests.py --profile databricks_sql_endpoint --test-path tests/functional/adapter/test_basic.py::TestSimpleMaterializationsSpark::test_base
-```
+python dagger/run_dbt_spark_tests.py --profile apache_spark --test-path tests/functional/adapter/incremental_strategies/test_microbatch.py
+```
 
 `--profile`: required, this is the kind of spark connection to test against
 
 _options_:
  - "apache_spark"
  - "spark_session"
+ - "spark_http_odbc"
  - "databricks_sql_endpoint"
  - "databricks_cluster"
  - "databricks_http_cluster"
diff --git a/tests/functional/adapter/incremental_strategies/test_microbatch.py b/tests/functional/adapter/incremental_strategies/test_microbatch.py
index 3cc366142..060033efa 100644
--- a/tests/functional/adapter/incremental_strategies/test_microbatch.py
+++ b/tests/functional/adapter/incremental_strategies/test_microbatch.py
@@ -6,7 +6,7 @@
 
 # No requirement for a unique_id for spark microbatch!
 _microbatch_model_no_unique_id_sql = """
-{{ config(materialized='incremental', incremental_strategy='microbatch', event_time='event_time', batch_size='day', begin=modules.datetime.datetime(2020, 1, 1, 0, 0, 0)) }}
+{{ config(materialized='incremental', incremental_strategy='microbatch', event_time='event_time', batch_size='day', begin=modules.datetime.datetime(2020, 1, 1, 0, 0, 0), partition_by=['date_day']) }}
 select *, cast(event_time as date) as date_day from {{ ref('input_model') }}
 """
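
A note for reviewers on the second hunk: Spark's microbatch strategy replaces data partition by partition (insert_overwrite semantics), which appears to be why the test model now declares `partition_by=['date_day']` on the column it derives from `event_time`. Below is a minimal sketch of how the updated fixture would be consumed, assuming the `BaseMicrobatch` suite from `dbt-tests-adapter` exposes an overridable `microbatch_model_sql` fixture; the test class name is illustrative, not necessarily the one in this file:

```python
import pytest

from dbt.tests.adapter.incremental.test_incremental_microbatch import BaseMicrobatch

# Illustrative wiring only: assumes BaseMicrobatch builds whatever model SQL
# the `microbatch_model_sql` fixture returns, then asserts batch-by-batch
# incremental behavior against it.
class TestSparkMicrobatchNoUniqueId(BaseMicrobatch):
    @pytest.fixture(scope="class")
    def microbatch_model_sql(self) -> str:
        # The partition_by-enabled model from the hunk above: each daily
        # batch overwrites only its own `date_day` partition.
        return _microbatch_model_no_unique_id_sql
```

With that wiring in place, the dagger command shown in the CONTRIBUTING.md hunk runs this file end to end against the `apache_spark` profile.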