From 38b85501ce8962248cd93f219258ea5f2a849aa8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Wed, 25 Sep 2024 15:04:50 +0200 Subject: [PATCH 01/12] wip --- pkg/resources/task.go | 774 ++++++++++++++++++------------- pkg/resources/task_parameters.go | 380 +++++++++++++++ pkg/schemas/task_gen.go | 38 +- pkg/schemas/task_parameters.go | 29 ++ pkg/sdk/parameters.go | 64 +++ pkg/sdk/tasks_gen.go | 1 + pkg/sdk/tasks_impl_gen.go | 8 + 7 files changed, 965 insertions(+), 329 deletions(-) create mode 100644 pkg/resources/task_parameters.go create mode 100644 pkg/schemas/task_parameters.go diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 078e29686d..9d6aace245 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -2,10 +2,13 @@ package resources import ( "context" + "errors" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "log" "slices" "strconv" + "strings" "time" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -13,407 +16,338 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/util" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) +// TODO: Go through descriptions + var taskSchema = map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Specifies if the task should be started (enabled) after creation or should remain suspended (default).", - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: "Specifies the identifier for the task; must be unique for the database and schema in which the task is created.", - ForceNew: true, - }, "database": { - Type: schema.TypeString, - Required: true, - Description: "The database in which to create the task.", - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressIdentifierQuoting, + Description: blocklistedCharactersFieldDescription("The database in which to create the task."), }, "schema": { - Type: schema.TypeString, - Required: true, - Description: "The schema in which to create the task.", - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressIdentifierQuoting, + Description: blocklistedCharactersFieldDescription("The schema in which to create the task."), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressIdentifierQuoting, + Description: blocklistedCharactersFieldDescription("Specifies the identifier for the task; must be unique for the database and schema in which the task is created."), + }, + "enabled": { + Type: schema.TypeString, + Optional: true, + Default: BooleanDefault, + ValidateDiagFunc: validateBooleanString, + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShowWithMapping("state", func(state any) any { return state.(string) == string(sdk.TaskStateStarted) }), + Description: booleanStringFieldDescription("Specifies if the task should be started (enabled) after creation or should remain suspended (default)."), }, "warehouse": { - Type: schema.TypeString, - Optional: true, - Description: "The warehouse the task will use. 
Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)", - ForceNew: false, - ConflictsWith: []string{"user_task_managed_initial_warehouse_size"}, + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: IsValidIdentifier[sdk.AccountObjectIdentifier](), + DiffSuppressFunc: suppressIdentifierQuoting, + Description: "The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)", + ConflictsWith: []string{"user_task_managed_initial_warehouse_size"}, + }, + "user_task_managed_initial_warehouse_size": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: sdkValidation(sdk.ToWarehouseSize), + DiffSuppressFunc: SuppressIfAny( + NormalizeAndCompare(sdk.ToWarehouseSize), + IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskManagedInitialWarehouseSize))), + ), + Description: fmt.Sprintf("Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse)", possibleValuesListed(sdk.ValidWarehouseSizesString)), + ConflictsWith: []string{"warehouse"}, }, "schedule": { - Type: schema.TypeString, - Optional: true, - Description: "The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with after)", - ConflictsWith: []string{"after"}, + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("schedule"), + Description: "The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with finalize and after)", + ConflictsWith: []string{"finalize", "after"}, + }, + "config": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("config"), + // TODO: it could be retrieved with system function and show/desc (which should be used?) + // TODO: Doc request: there's no schema for JSON config format + Description: "Specifies a string representation of key value pairs that can be accessed by all tasks in the task graph. Must be in JSON format.", + }, + "allow_overlapping_execution": { + Type: schema.TypeBool, + Optional: true, + Default: BooleanDefault, + ValidateDiagFunc: validateBooleanString, + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("allow_overlapping_execution"), + Description: booleanStringFieldDescription("By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap."), }, "session_parameters": { - Type: schema.TypeMap, - Elem: &schema.Schema{Type: schema.TypeString}, + // TODO: Description and validation + Type: schema.TypeList, // TODO: make it actual schema (check user) + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "a": {}, + // TODO: + }, + }, Optional: true, Description: "Specifies session parameters to set for the session when the task runs. 
A task supports all session parameters.", }, "user_task_timeout_ms": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 86400000), - Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds).", + Type: schema.TypeInt, + Optional: true, + Default: IntDefault, + ValidateFunc: validation.IntAtLeast(0), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskTimeoutMs))), + Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds).", }, "suspend_task_after_num_failures": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - ValidateFunc: validation.IntAtLeast(0), - Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension).", + Type: schema.TypeInt, + Optional: true, + Default: IntDefault, + ValidateFunc: validation.IntAtLeast(0), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterSuspendTaskAfterNumFailures))), + Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension).", + }, + "error_integration": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: IsValidIdentifier[sdk.AccountObjectIdentifier](), + DiffSuppressFunc: SuppressIfAny(suppressIdentifierQuoting, IgnoreChangeToCurrentSnowflakeValueInShow("error_integration")), + Description: "Specifies the name of the notification integration used for error notifications.", }, "comment": { Type: schema.TypeString, Optional: true, Description: "Specifies a comment for the task.", }, + "finalize": { + Optional: true, + ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), + DiffSuppressFunc: SuppressIfAny( + suppressIdentifierQuoting, + IgnoreChangeToCurrentSnowflakeValueInShow("task_relations.0.finalizer"), + ), + ConflictsWith: []string{"schedule", "after"}, + }, + "task_auto_retry_attempts": { + Type: schema.TypeInt, + Optional: true, + Default: IntDefault, + ValidateFunc: validation.IntAtLeast(0), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterTaskAutoRetryAttempts))), + Description: "Specifies the number of automatic task graph retry attempts. If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed.", + }, + "user_task_minimum_trigger_interval_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: IntDefault, + ValidateFunc: validation.IntAtLeast(15), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds))), + Description: "Defines how frequently a task can execute in seconds. 
If data changes occur more often than the specified minimum, changes will be grouped and processed together.", + }, "after": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: suppressIdentifierQuoting, + ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), + }, + Optional: true, + // TODO: Check + // Cannot use IgnoreChangeToCurrentSnowflakeValueInShow because output from predecessors may be ordered Description: "Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies.", - ConflictsWith: []string{"schedule"}, + ConflictsWith: []string{"schedule", "finalize"}, }, "when": { - Type: schema.TypeString, - Optional: true, - Description: "Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported.", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: SuppressIfAny(DiffSuppressStatement, IgnoreChangeToCurrentSnowflakeValueInShow("condition")), + Description: "Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported.", }, - "sql_statement": { + "sql_statement": { // TODO: Test all possibilities of this field (procedure, procedural logic, single sql statement) Type: schema.TypeString, Required: true, - Description: "Any single SQL statement, or a call to a stored procedure, executed when the task runs.", ForceNew: false, - DiffSuppressFunc: DiffSuppressStatement, - }, - "user_task_managed_initial_warehouse_size": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "XSMALL", "X-SMALL", "SMALL", "MEDIUM", "LARGE", "XLARGE", "X-LARGE", "XXLARGE", "X2LARGE", "2X-LARGE", - }, true), - Description: "Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. 
(Conflicts with warehouse)", - ConflictsWith: []string{"warehouse"}, + DiffSuppressFunc: SuppressIfAny(DiffSuppressStatement, IgnoreChangeToCurrentSnowflakeValueInShow("definition")), + Description: "Any single SQL statement, or a call to a stored procedure, executed when the task runs.", }, - "error_integration": { - Type: schema.TypeString, - Optional: true, - Description: "Specifies the name of the notification integration used for error notifications.", + FullyQualifiedNameAttributeName: schemas.FullyQualifiedNameSchema, + ShowOutputAttributeName: { + Type: schema.TypeList, + Computed: true, + Description: "Outputs the result of `SHOW TASKS` for the given task.", + Elem: &schema.Resource{ + Schema: schemas.ShowTaskSchema, + }, }, - "allow_overlapping_execution": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap.", + ParametersAttributeName: { + Type: schema.TypeList, + Computed: true, + Description: "Outputs the result of `SHOW PARAMETERS IN TASK` for the given task.", + Elem: &schema.Resource{ + Schema: schemas.ShowTaskParametersSchema, + }, }, - FullyQualifiedNameAttributeName: schemas.FullyQualifiedNameSchema, -} - -// difference find keys in 'a' but not in 'b'. -func difference(a, b map[string]any) map[string]any { - diff := make(map[string]any) - for k := range a { - if _, ok := b[k]; !ok { - diff[k] = a[k] - } - } - return diff } -// differentValue find keys present both in 'a' and 'b' but having different values. -func differentValue(a, b map[string]any) map[string]any { - diff := make(map[string]any) - for k, va := range a { - if vb, ok := b[k]; ok { - if vb != va { - diff[k] = vb - } - } - } - return diff -} - -// Task returns a pointer to the resource representing a task. func Task() *schema.Resource { return &schema.Resource{ - Create: CreateTask, - Read: ReadTask, - Update: UpdateTask, - Delete: DeleteTask, - CustomizeDiff: customdiff.ForceNewIfChange("when", func(ctx context.Context, old, new, meta any) bool { - return old.(string) != "" && new.(string) == "" - }), + CreateContext: CreateTask, + UpdateContext: UpdateTask, + ReadContext: ReadTask(true), + DeleteContext: DeleteTask, Schema: taskSchema, Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: schema.ImportStatePassthroughContext, // TODO: Import }, } } -// ReadTask implements schema.ReadFunc. 
-func ReadTask(d *schema.ResourceData, meta interface{}) error { +func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() - - taskId := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) - - task, err := client.Tasks.ShowByID(ctx, taskId) - if err != nil { - // If not found, mark resource to be removed from state file during apply or refresh - log.Printf("[DEBUG] task (%s) not found", d.Id()) - d.SetId("") - return nil - } - if err := d.Set(FullyQualifiedNameAttributeName, taskId.FullyQualifiedName()); err != nil { - return err - } - - if err := d.Set("enabled", task.State == sdk.TaskStateStarted); err != nil { - return err - } - - if err := d.Set("name", task.Name); err != nil { - return err - } - - if err := d.Set("database", task.DatabaseName); err != nil { - return err - } - - if err := d.Set("schema", task.SchemaName); err != nil { - return err - } - - if err := d.Set("warehouse", task.Warehouse); err != nil { - return err - } - - if err := d.Set("schedule", task.Schedule); err != nil { - return err - } - - if err := d.Set("comment", task.Comment); err != nil { - return err - } - - if err := d.Set("allow_overlapping_execution", task.AllowOverlappingExecution); err != nil { - return err - } - - if err := d.Set("error_integration", task.ErrorIntegration); err != nil { - return err - } - - predecessors := make([]string, len(task.Predecessors)) - for i, p := range task.Predecessors { - predecessors[i] = p.Name() - } - if err := d.Set("after", predecessors); err != nil { - return err - } - - if err := d.Set("when", task.Condition); err != nil { - return err - } - - if err := d.Set("sql_statement", task.Definition); err != nil { - return err - } - - opts := &sdk.ShowParametersOptions{In: &sdk.ParametersIn{Task: taskId}} - params, err := client.Parameters.ShowParameters(ctx, opts) - if err != nil { - return err - } - - if len(params) > 0 { - sessionParameters := make(map[string]any) - fieldParameters := map[string]interface{}{ - "user_task_managed_initial_warehouse_size": "", - } - - for _, param := range params { - if param.Level != "TASK" { - continue - } - switch param.Key { - case "USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE": - fieldParameters["user_task_managed_initial_warehouse_size"] = param.Value - case "USER_TASK_TIMEOUT_MS": - timeout, err := strconv.ParseInt(param.Value, 10, 64) - if err != nil { - return err - } - - fieldParameters["user_task_timeout_ms"] = timeout - case "SUSPEND_TASK_AFTER_NUM_FAILURES": - num, err := strconv.ParseInt(param.Value, 10, 64) - if err != nil { - return err - } - - fieldParameters["suspend_task_after_num_failures"] = num - default: - sessionParameters[param.Key] = param.Value - } - } - - if err := d.Set("session_parameters", sessionParameters); err != nil { - return err - } - - for key, value := range fieldParameters { - // lintignore:R001 - err = d.Set(key, value) - if err != nil { - return err - } - } - } - - return nil -} - -// CreateTask implements schema.CreateFunc. 
-func CreateTask(d *schema.ResourceData, meta interface{}) error { - client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) name := d.Get("name").(string) + id := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, name) + req := sdk.NewCreateTaskRequest(id, d.Get("sql_statement").(string)) - sqlStatement := d.Get("sql_statement").(string) - - taskId := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, name) - createRequest := sdk.NewCreateTaskRequest(taskId, sqlStatement) - - // Set optionals if v, ok := d.GetOk("warehouse"); ok { - warehouseId := sdk.NewAccountObjectIdentifier(v.(string)) - createRequest.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(warehouseId)) + warehouseId, err := sdk.ParseAccountObjectIdentifier(v.(string)) + if err != nil { + return diag.FromErr(err) + } + req.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(warehouseId)) } if v, ok := d.GetOk("user_task_managed_initial_warehouse_size"); ok { size, err := sdk.ToWarehouseSize(v.(string)) if err != nil { - return err + return diag.FromErr(err) } - createRequest.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(size)) + req.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(size)) } if v, ok := d.GetOk("schedule"); ok { - createRequest.WithSchedule(v.(string)) + req.WithSchedule(v.(string)) // TODO: What about cron, how do we track changed (only through show) + } + + if v, ok := d.GetOk("config"); ok { + req.WithConfig(v.(string)) + } + + if v, ok := d.GetOk("allow_overlapping_execution"); ok { + req.WithAllowOverlappingExecution(v.(bool)) } if v, ok := d.GetOk("session_parameters"); ok { sessionParameters, err := sdk.GetSessionParametersFrom(v.(map[string]any)) if err != nil { - return err + return diag.FromErr(err) } - createRequest.WithSessionParameters(*sessionParameters) + req.WithSessionParameters(*sessionParameters) } - if v, ok := d.GetOk("user_task_timeout_ms"); ok { - createRequest.WithUserTaskTimeoutMs(v.(int)) + if v := d.Get("user_task_timeout_ms"); v != IntDefault { + req.WithUserTaskTimeoutMs(v.(int)) } - if v, ok := d.GetOk("suspend_task_after_num_failures"); ok { - createRequest.WithSuspendTaskAfterNumFailures(v.(int)) + if v := d.Get("suspend_task_after_num_failures"); v != IntDefault { + req.WithSuspendTaskAfterNumFailures(v.(int)) } - if v, ok := d.GetOk("comment"); ok { - createRequest.WithComment(v.(string)) + // TODO: Decide on name (error_notification_integration ?) 
+ if v, ok := d.GetOk("error_integration"); ok { + notificationIntegrationId, err := sdk.ParseAccountObjectIdentifier(v.(string)) + if err != nil { + return diag.FromErr(err) + } + req.WithErrorNotificationIntegration(notificationIntegrationId) } - if v, ok := d.GetOk("allow_overlapping_execution"); ok { - createRequest.WithAllowOverlappingExecution(v.(bool)) + if v, ok := d.GetOk("comment"); ok { + req.WithComment(v.(string)) } - if v, ok := d.GetOk("error_integration"); ok { - errorIntegrationId, err := sdk.ParseAccountObjectIdentifier(v.(string)) + if v, ok := d.GetOk("finalize"); ok { + rootTaskId, err := sdk.ParseSchemaObjectIdentifier(v.(string)) if err != nil { - return err + return diag.FromErr(err) } - createRequest.WithErrorNotificationIntegration(errorIntegrationId) + req.WithFinalize(rootTaskId) } - if v, ok := d.GetOk("after"); ok { + if v := d.Get("task_auto_retry_attempts"); v != IntDefault { + req.WithTaskAutoRetryAttempts(v.(int)) + } + + if v := d.Get("user_task_minimum_trigger_interval_in_seconds"); v != IntDefault { + req.WithUserTaskMinimumTriggerIntervalInSeconds(v.(int)) + } + + if v, ok := d.GetOk("after"); ok { // TODO: Should after take in task names or fully qualified names? after := expandStringList(v.([]interface{})) precedingTasks := make([]sdk.SchemaObjectIdentifier, 0) for _, dep := range after { precedingTaskId := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, dep) - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, precedingTaskId, taskId) + tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, precedingTaskId, id) // TODO: What if this fails and only half of the tasks are suspended? defer func() { if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { log.Printf("[WARN] failed to resume tasks: %s", err) } }() if err != nil { - return err + return diag.FromErr(err) } - precedingTasks = append(precedingTasks, precedingTaskId) } - createRequest.WithAfter(precedingTasks) + req.WithAfter(precedingTasks) } if v, ok := d.GetOk("when"); ok { - createRequest.WithWhen(v.(string)) + req.WithWhen(v.(string)) } - if err := client.Tasks.Create(ctx, createRequest); err != nil { - return fmt.Errorf("error creating task %s err = %w", taskId.FullyQualifiedName(), err) + if err := client.Tasks.Create(ctx, req); err != nil { + return diag.FromErr(err) } - d.SetId(helpers.EncodeSnowflakeID(taskId)) - - enabled := d.Get("enabled").(bool) - if enabled { - if err := waitForTaskStart(ctx, client, taskId); err != nil { - log.Printf("[WARN] failed to resume task %s", name) - } - } + // TODO: State upgrader for "id" + d.SetId(helpers.EncodeResourceIdentifier(id)) - return ReadTask(d, meta) -} - -func waitForTaskStart(ctx context.Context, client *sdk.Client, id sdk.SchemaObjectIdentifier) error { - err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithResume(true)) - if err != nil { - return fmt.Errorf("error starting task %s err = %w", id.FullyQualifiedName(), err) - } - return util.Retry(5, 5*time.Second, func() (error, bool) { - task, err := client.Tasks.ShowByID(ctx, id) + if v := d.Get("enabled").(string); v != BooleanDefault { + enabled, err := booleanStringToBool(v) if err != nil { - return fmt.Errorf("error starting task %s err = %w", id.FullyQualifiedName(), err), false + return diag.FromErr(err) } - if task.State != sdk.TaskStateStarted { - return nil, false + if enabled { + if err := waitForTaskStart(ctx, client, id); err != nil { + log.Printf("[WARN] failed to resume task %s", name) + } } - return nil, true - }) + } + + return 
ReadTask(false)(ctx, d, meta) } -// UpdateTask implements schema.UpdateFunc. -func UpdateTask(d *schema.ResourceData, meta interface{}) error { +func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() - taskId := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, taskId, taskId) @@ -423,7 +357,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } }() if err != nil { - return err + return diag.FromErr(err) } if d.HasChange("warehouse") { @@ -436,7 +370,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating warehouse on task %s err = %w", taskId.FullyQualifiedName(), err) + return diag.FromErr(fmt.Errorf("error updating warehouse on task %s err = %w", taskId.FullyQualifiedName(), err)) } } @@ -447,12 +381,12 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { if warehouse == "" && newSize != "" { size, err := sdk.ToWarehouseSize(newSize.(string)) if err != nil { - return err + return diag.FromErr(err) } alterRequest := sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithUserTaskManagedInitialWarehouseSize(size)) err = client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating user_task_managed_initial_warehouse_size on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating user_task_managed_initial_warehouse_size on task %s", taskId.FullyQualifiedName())) } } } @@ -465,13 +399,13 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } else { newErrorIntegrationId, err := sdk.ParseAccountObjectIdentifier(newErrorIntegration.(string)) if err != nil { - return err + return diag.FromErr(err) } alterRequest.WithSet(*sdk.NewTaskSetRequest().WithErrorNotificationIntegration(newErrorIntegrationId)) } err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating error integration on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating error integration on task %s", taskId.FullyQualifiedName())) } } @@ -480,7 +414,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { // (the task will be brought up to the correct running state in the "enabled" check at the bottom of Update function). 
err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSuspend(true)) if err != nil { - return fmt.Errorf("error suspending task %s, err: %w", taskId.FullyQualifiedName(), err) + return diag.FromErr(fmt.Errorf("error suspending task %s, err: %w", taskId.FullyQualifiedName(), err)) } o, n := d.GetChange("after") @@ -490,7 +424,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { if len(newAfter) > 0 { // preemptively removing schedule because a task cannot have both after and schedule if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true))); err != nil { - return fmt.Errorf("error updating schedule on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating schedule on task %s", taskId.FullyQualifiedName())) } } @@ -503,7 +437,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } if len(toRemove) > 0 { if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithRemoveAfter(toRemove)); err != nil { - return fmt.Errorf("error removing after dependencies from task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error removing after dependencies from task %s", taskId.FullyQualifiedName())) } } @@ -523,12 +457,12 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } }() if err != nil { - return err + return diag.FromErr(err) } } if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithAddAfter(toAdd)); err != nil { - return fmt.Errorf("error adding after dependencies from task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error adding after dependencies from task %s", taskId.FullyQualifiedName())) } } } @@ -543,7 +477,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating schedule on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating schedule on task %s", taskId.FullyQualifiedName())) } } @@ -557,7 +491,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating user task timeout on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating user task timeout on task %s", taskId.FullyQualifiedName())) } } @@ -571,7 +505,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating suspend task after num failures on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating suspend task after num failures on task %s", taskId.FullyQualifiedName())) } } @@ -585,7 +519,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating comment on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating comment on task %s", taskId.FullyQualifiedName())) } } @@ -599,7 +533,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating allow overlapping execution on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating allow overlapping execution on task %s", taskId.FullyQualifiedName())) } 
} @@ -622,30 +556,30 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { if len(remove) > 0 { sessionParametersUnset, err := sdk.GetSessionParametersUnsetFrom(remove) if err != nil { - return err + return diag.FromErr(err) } if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithUnset(*sdk.NewTaskUnsetRequest().WithSessionParametersUnset(*sessionParametersUnset))); err != nil { - return fmt.Errorf("error removing session_parameters on task %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error removing session_parameters on task %v err = %w", d.Id(), err)) } } if len(add) > 0 { sessionParameters, err := sdk.GetSessionParametersFrom(add) if err != nil { - return err + return diag.FromErr(err) } if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { - return fmt.Errorf("error adding session_parameters to task %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error adding session_parameters to task %v err = %w", d.Id(), err)) } } if len(change) > 0 { sessionParameters, err := sdk.GetSessionParametersFrom(change) if err != nil { - return err + return diag.FromErr(err) } if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { - return fmt.Errorf("error updating session_parameters in task %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error updating session_parameters in task %v err = %w", d.Id(), err)) } } } @@ -655,7 +589,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { alterRequest := sdk.NewAlterTaskRequest(taskId).WithModifyWhen(n.(string)) err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating when condition on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating when condition on task %s", taskId.FullyQualifiedName())) } } @@ -664,7 +598,7 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { alterRequest := sdk.NewAlterTaskRequest(taskId).WithModifyAs(n.(string)) err := client.Tasks.Alter(ctx, alterRequest) if err != nil { - return fmt.Errorf("error updating sql statement on task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("error updating sql statement on task %s", taskId.FullyQualifiedName())) } } @@ -675,36 +609,224 @@ func UpdateTask(d *schema.ResourceData, meta interface{}) error { } } else { if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSuspend(true)); err != nil { - return fmt.Errorf("failed to suspend task %s", taskId.FullyQualifiedName()) + return diag.FromErr(fmt.Errorf("failed to suspend task %s", taskId.FullyQualifiedName())) } } - return ReadTask(d, meta) + return ReadTask(false)(ctx, d, meta) } -// DeleteTask implements schema.DeleteFunc. 
-func DeleteTask(d *schema.ResourceData, meta interface{}) error { - client := meta.(*provider.Context).Client - ctx := context.Background() +func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { + return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + client := meta.(*provider.Context).Client + id, err := sdk.ParseSchemaObjectIdentifier(d.Id()) + if err != nil { + return diag.FromErr(err) + } - taskId := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) + task, err := client.Tasks.ShowByID(ctx, id) + if err != nil { + if errors.Is(err, sdk.ErrObjectNotFound) { + d.SetId("") + return diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Failed to query task. Marking the resource as removed.", + Detail: fmt.Sprintf("task name: %s, Err: %s", id.FullyQualifiedName(), err), + }, + } + } + return diag.FromErr(err) + } - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, taskId, taskId) + taskParameters, err := client.Tasks.ShowParameters(ctx, id) + if err != nil { + return diag.FromErr(err) + } + + if withExternalChangesMarking { + if err = handleExternalChangesToObjectInShow(d, + showMapping{"", "", task.Config, task.Config, nil}, + ); err != nil { + return diag.FromErr(err) + } + } else { + if err = setStateToValuesFromConfig(d, taskSchema, []string{ + "abc", + }); err != nil { + return diag.FromErr(err) + } + } + + if errs := errors.Join( + // TODO: handleTaskParametersRead(d, taskParameters) + d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()), + d.Set(ShowOutputAttributeName, []map[string]any{schemas.TaskToSchema(task)}), + d.Set(ParametersAttributeName, []map[string]any{schemas.TaskParametersToSchema(taskParameters)}), + ); errs != nil { + return diag.FromErr(errs) + } + + if err := d.Set("enabled", task.State == sdk.TaskStateStarted); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("warehouse", task.Warehouse); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("schedule", task.Schedule); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("comment", task.Comment); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("allow_overlapping_execution", task.AllowOverlappingExecution); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("error_integration", task.ErrorIntegration); err != nil { + return diag.FromErr(err) + } + + predecessors := make([]string, len(task.Predecessors)) + for i, p := range task.Predecessors { + predecessors[i] = p.Name() + } + if err := d.Set("after", predecessors); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("when", task.Condition); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("sql_statement", task.Definition); err != nil { + return diag.FromErr(err) + } + + opts := &sdk.ShowParametersOptions{In: &sdk.ParametersIn{Task: id}} + params, err := client.Parameters.ShowParameters(ctx, opts) + if err != nil { + return diag.FromErr(err) + } + + if len(params) > 0 { + sessionParameters := make(map[string]any) + fieldParameters := map[string]interface{}{ + "user_task_managed_initial_warehouse_size": "", + } + + for _, param := range params { + if param.Level != "TASK" { + continue + } + switch param.Key { + case "USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE": + fieldParameters["user_task_managed_initial_warehouse_size"] = param.Value + case "USER_TASK_TIMEOUT_MS": + timeout, err := strconv.ParseInt(param.Value, 10, 64) + if err != nil { + return diag.FromErr(err) + } + + 
fieldParameters["user_task_timeout_ms"] = timeout + case "SUSPEND_TASK_AFTER_NUM_FAILURES": + num, err := strconv.ParseInt(param.Value, 10, 64) + if err != nil { + return diag.FromErr(err) + } + + fieldParameters["suspend_task_after_num_failures"] = num + default: + sessionParameters[param.Key] = param.Value + } + } + + if err := d.Set("session_parameters", sessionParameters); err != nil { + return diag.FromErr(err) + } + + for key, value := range fieldParameters { + // lintignore:R001 + err = d.Set(key, value) + if err != nil { + return diag.FromErr(err) + } + } + } + + return nil + } +} + +func DeleteTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + client := meta.(*provider.Context).Client + id, err := sdk.ParseSchemaObjectIdentifier(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, id, id) defer func() { if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { log.Printf("[WARN] failed to resume tasks: %s", err) } }() if err != nil { - return err + return diag.FromErr(err) } - dropRequest := sdk.NewDropTaskRequest(taskId) - err = client.Tasks.Drop(ctx, dropRequest) + err = client.Tasks.Drop(ctx, sdk.NewDropTaskRequest(id).WithIfExists(true)) if err != nil { - return fmt.Errorf("error deleting task %s err = %w", taskId.FullyQualifiedName(), err) + return diag.FromErr(fmt.Errorf("error deleting task %s err = %w", id.FullyQualifiedName(), err)) } d.SetId("") return nil } + +func waitForTaskStart(ctx context.Context, client *sdk.Client, id sdk.SchemaObjectIdentifier) error { + err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithResume(true)) + if err != nil { + return fmt.Errorf("error starting task %s err = %w", id.FullyQualifiedName(), err) + } + return util.Retry(5, 5*time.Second, func() (error, bool) { + task, err := client.Tasks.ShowByID(ctx, id) + if err != nil { + return fmt.Errorf("error starting task %s err = %w", id.FullyQualifiedName(), err), false + } + if task.State != sdk.TaskStateStarted { + return nil, false + } + return nil, true + }) +} + +// TODO: Remove functions below + +// difference find keys in 'a' but not in 'b'. +func difference(a, b map[string]any) map[string]any { + diff := make(map[string]any) + for k := range a { + if _, ok := b[k]; !ok { + diff[k] = a[k] + } + } + return diff +} + +// differentValue find keys present both in 'a' and 'b' but having different values. 
+func differentValue(a, b map[string]any) map[string]any { + diff := make(map[string]any) + for k, va := range a { + if vb, ok := b[k]; ok { + if vb != va { + diff[k] = vb + } + } + } + return diff +} diff --git a/pkg/resources/task_parameters.go b/pkg/resources/task_parameters.go new file mode 100644 index 0000000000..b5fe06023b --- /dev/null +++ b/pkg/resources/task_parameters.go @@ -0,0 +1,380 @@ +package resources + +//var ( +// userParametersSchema = make(map[string]*schema.Schema) +// userParametersCustomDiff = ParametersCustomDiff( +// userParametersProvider, +// parameter[sdk.UserParameter]{sdk.UserParameterAbortDetachedQuery, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterAutocommit, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterBinaryInputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterBinaryOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterClientMemoryLimit, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterClientMetadataRequestUseConnectionCtx, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterClientPrefetchThreads, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterClientResultChunkSize, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterClientResultColumnCaseInsensitive, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterClientSessionKeepAlive, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterClientTimestampTypeMapping, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterDateInputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterDateOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterEnableUnloadPhysicalTypeOptimization, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterErrorOnNondeterministicMerge, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterErrorOnNondeterministicUpdate, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterGeographyOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterGeometryOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterJdbcTreatDecimalAsInt, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterJdbcTreatTimestampNtzAsUtc, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterJdbcUseSessionTimezone, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterJsonIndent, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterLockTimeout, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterLogLevel, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterMultiStatementCount, valueTypeInt, sdk.ParameterTypeUser}, +// 
parameter[sdk.UserParameter]{sdk.UserParameterNoorderSequenceAsDefault, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterOdbcTreatDecimalAsInt, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterQueryTag, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterQuotedIdentifiersIgnoreCase, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterRowsPerResultset, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterS3StageVpceDnsName, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterSearchPath, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterSimulatedDataSharingConsumer, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterStatementQueuedTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterStatementTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterStrictJsonOutput, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimestampDayIsAlways24h, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimestampInputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimestampLtzOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimestampNtzOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimestampOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimestampTypeMapping, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimestampTzOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimezone, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimeInputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTimeOutputFormat, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTraceLevel, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTransactionAbortOnError, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTransactionDefaultIsolationLevel, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterTwoDigitCenturyStart, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterUnsupportedDdlAction, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterUseCachedResult, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterWeekOfYearPolicy, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterWeekStart, valueTypeInt, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterEnableUnredactedQuerySyntaxError, valueTypeBool, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterNetworkPolicy, valueTypeString, sdk.ParameterTypeUser}, +// parameter[sdk.UserParameter]{sdk.UserParameterPreventUnloadToInternalStages, valueTypeBool, 
sdk.ParameterTypeUser}, +// ) +//) +// +//type parameterDef[T ~string] struct { +// Name T +// Type schema.ValueType +// Description string +// DiffSuppress schema.SchemaDiffSuppressFunc +// ValidateDiag schema.SchemaValidateDiagFunc +//} +// +//func init() { +// // TODO [SNOW-1645342]: move to the SDK +// userParameterFields := []parameterDef[sdk.UserParameter]{ +// // session params +// {Name: sdk.UserParameterAbortDetachedQuery, Type: schema.TypeBool, Description: "Specifies the action that Snowflake performs for in-progress queries if connectivity is lost due to abrupt termination of a session (e.g. network outage, browser termination, service interruption)."}, +// {Name: sdk.UserParameterAutocommit, Type: schema.TypeBool, Description: "Specifies whether autocommit is enabled for the session. Autocommit determines whether a DML statement, when executed without an active transaction, is automatically committed after the statement successfully completes. For more information, see [Transactions](https://docs.snowflake.com/en/sql-reference/transactions)."}, +// {Name: sdk.UserParameterBinaryInputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToBinaryInputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToBinaryInputFormat), Description: "The format of VARCHAR values passed as input to VARCHAR-to-BINARY conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output)."}, +// {Name: sdk.UserParameterBinaryOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToBinaryOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToBinaryOutputFormat), Description: "The format for VARCHAR values returned as output by BINARY-to-VARCHAR conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output)."}, +// {Name: sdk.UserParameterClientMemoryLimit, Type: schema.TypeInt, Description: "Parameter that specifies the maximum amount of memory the JDBC driver or ODBC driver should use for the result set from queries (in MB)."}, +// {Name: sdk.UserParameterClientMetadataRequestUseConnectionCtx, Type: schema.TypeBool, Description: "For specific ODBC functions and JDBC methods, this parameter can change the default search scope from all databases/schemas to the current database/schema. The narrower search typically returns fewer rows and executes more quickly."}, +// {Name: sdk.UserParameterClientPrefetchThreads, Type: schema.TypeInt, Description: "Parameter that specifies the number of threads used by the client to pre-fetch large result sets. The driver will attempt to honor the parameter value, but defines the minimum and maximum values (depending on your system’s resources) to improve performance."}, +// {Name: sdk.UserParameterClientResultChunkSize, Type: schema.TypeInt, Description: "Parameter that specifies the maximum size of each set (or chunk) of query results to download (in MB). 
The JDBC driver downloads query results in chunks."}, +// {Name: sdk.UserParameterClientResultColumnCaseInsensitive, Type: schema.TypeBool, Description: "Parameter that indicates whether to match column name case-insensitively in ResultSet.get* methods in JDBC."}, +// {Name: sdk.UserParameterClientSessionKeepAlive, Type: schema.TypeBool, Description: "Parameter that indicates whether to force a user to log in again after a period of inactivity in the session."}, +// {Name: sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency, Type: schema.TypeInt, Description: "Number of seconds in-between client attempts to update the token for the session."}, +// {Name: sdk.UserParameterClientTimestampTypeMapping, Type: schema.TypeString, Description: "Specifies the [TIMESTAMP_* variation](https://docs.snowflake.com/en/sql-reference/data-types-datetime.html#label-datatypes-timestamp-variations) to use when binding timestamp variables for JDBC or ODBC applications that use the bind API to load data."}, +// {Name: sdk.UserParameterDateInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, +// {Name: sdk.UserParameterDateOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, +// {Name: sdk.UserParameterEnableUnloadPhysicalTypeOptimization, Type: schema.TypeBool, Description: "Specifies whether to set the schema for unloaded Parquet files based on the logical column data types (i.e. the types in the unload SQL query or source table) or on the unloaded column values (i.e. 
the smallest data types and precision that support the values in the output columns of the unload SQL statement or source table)."}, +// {Name: sdk.UserParameterErrorOnNondeterministicMerge, Type: schema.TypeBool, Description: "Specifies whether to return an error when the [MERGE](https://docs.snowflake.com/en/sql-reference/sql/merge) command is used to update or delete a target row that joins multiple source rows and the system cannot determine the action to perform on the target row."}, +// {Name: sdk.UserParameterErrorOnNondeterministicUpdate, Type: schema.TypeBool, Description: "Specifies whether to return an error when the [UPDATE](https://docs.snowflake.com/en/sql-reference/sql/update) command is used to update a target row that joins multiple source rows and the system cannot determine the action to perform on the target row."}, +// {Name: sdk.UserParameterGeographyOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToGeographyOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToGeographyOutputFormat), Description: "Display format for [GEOGRAPHY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geography)."}, +// {Name: sdk.UserParameterGeometryOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToGeometryOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToGeometryOutputFormat), Description: "Display format for [GEOMETRY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geometry)."}, +// {Name: sdk.UserParameterJdbcTreatDecimalAsInt, Type: schema.TypeBool, Description: "Specifies how JDBC processes columns that have a scale of zero (0)."}, +// {Name: sdk.UserParameterJdbcTreatTimestampNtzAsUtc, Type: schema.TypeBool, Description: "Specifies how JDBC processes TIMESTAMP_NTZ values."}, +// {Name: sdk.UserParameterJdbcUseSessionTimezone, Type: schema.TypeBool, Description: "Specifies whether the JDBC Driver uses the time zone of the JVM or the time zone of the session (specified by the [TIMEZONE](https://docs.snowflake.com/en/sql-reference/parameters#label-timezone) parameter) for the getDate(), getTime(), and getTimestamp() methods of the ResultSet class."}, +// {Name: sdk.UserParameterJsonIndent, Type: schema.TypeInt, Description: "Specifies the number of blank spaces to indent each new element in JSON output in the session. Also specifies whether to insert newline characters after each element."}, +// {Name: sdk.UserParameterLockTimeout, Type: schema.TypeInt, Description: "Number of seconds to wait while trying to lock a resource, before timing out and aborting the statement."}, +// {Name: sdk.UserParameterLogLevel, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToLogLevel), DiffSuppress: NormalizeAndCompare(sdk.ToLogLevel), Description: "Specifies the severity level of messages that should be ingested and made available in the active event table. Messages at the specified level (and at more severe levels) are ingested. For more information about log levels, see [Setting log level](https://docs.snowflake.com/en/developer-guide/logging-tracing/logging-log-level)."}, +// {Name: sdk.UserParameterMultiStatementCount, Type: schema.TypeInt, Description: "Number of statements to execute when using the multi-statement capability."}, +// {Name: sdk.UserParameterNoorderSequenceAsDefault, Type: schema.TypeBool, Description: "Specifies whether the ORDER or NOORDER property is set by default when you create a new sequence or add a new table column. 
The ORDER and NOORDER properties determine whether or not the values are generated for the sequence or auto-incremented column in [increasing or decreasing order](https://docs.snowflake.com/en/user-guide/querying-sequences.html#label-querying-sequences-increasing-values)."}, +// {Name: sdk.UserParameterOdbcTreatDecimalAsInt, Type: schema.TypeBool, Description: "Specifies how ODBC processes columns that have a scale of zero (0)."}, +// {Name: sdk.UserParameterQueryTag, Type: schema.TypeString, Description: "Optional string that can be used to tag queries and other SQL statements executed within a session. The tags are displayed in the output of the [QUERY_HISTORY, QUERY_HISTORY_BY_*](https://docs.snowflake.com/en/sql-reference/functions/query_history) functions."}, +// {Name: sdk.UserParameterQuotedIdentifiersIgnoreCase, Type: schema.TypeBool, Description: "Specifies whether letters in double-quoted object identifiers are stored and resolved as uppercase letters. By default, Snowflake preserves the case of alphabetic characters when storing and resolving double-quoted identifiers (see [Identifier resolution](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing)). You can use this parameter in situations in which [third-party applications always use double quotes around identifiers](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing-parameter)."}, +// {Name: sdk.UserParameterRowsPerResultset, Type: schema.TypeInt, Description: "Specifies the maximum number of rows returned in a result set. A value of 0 specifies no maximum."}, +// {Name: sdk.UserParameterS3StageVpceDnsName, Type: schema.TypeString, Description: "Specifies the DNS name of an Amazon S3 interface endpoint. Requests sent to the internal stage of an account via [AWS PrivateLink for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html) use this endpoint to connect. For more information, see [Accessing Internal stages with dedicated interface endpoints](https://docs.snowflake.com/en/user-guide/private-internal-stages-aws.html#label-aws-privatelink-internal-stage-network-isolation)."}, +// {Name: sdk.UserParameterSearchPath, Type: schema.TypeString, Description: "Specifies the path to search to resolve unqualified object names in queries. For more information, see [Name resolution in queries](https://docs.snowflake.com/en/sql-reference/name-resolution.html#label-object-name-resolution-search-path). Comma-separated list of identifiers. An identifier can be a fully or partially qualified schema name."}, +// {Name: sdk.UserParameterSimulatedDataSharingConsumer, Type: schema.TypeString, Description: "Specifies the name of a consumer account to simulate for testing/validating shared data, particularly shared secure views. When this parameter is set in a session, shared views return rows as if executed in the specified consumer account rather than the provider account. For more information, see [Introduction to Secure Data Sharing](https://docs.snowflake.com/en/user-guide/data-sharing-intro) and [Working with shares](https://docs.snowflake.com/en/user-guide/data-sharing-provider)."}, +// {Name: sdk.UserParameterStatementQueuedTimeoutInSeconds, Type: schema.TypeInt, Description: "Amount of time, in seconds, a SQL statement (query, DDL, DML, etc.) remains queued for a warehouse before it is canceled by the system. 
This parameter can be used in conjunction with the [MAX_CONCURRENCY_LEVEL](https://docs.snowflake.com/en/sql-reference/parameters#label-max-concurrency-level) parameter to ensure a warehouse is never backlogged."}, +// {Name: sdk.UserParameterStatementTimeoutInSeconds, Type: schema.TypeInt, Description: "Amount of time, in seconds, after which a running SQL statement (query, DDL, DML, etc.) is canceled by the system."}, +// {Name: sdk.UserParameterStrictJsonOutput, Type: schema.TypeBool, Description: "This parameter specifies whether JSON output in a session is compatible with the general standard (as described by [http://json.org](http://json.org)). By design, Snowflake allows JSON input that contains non-standard values; however, these non-standard values might result in Snowflake outputting JSON that is incompatible with other platforms and languages. This parameter, when enabled, ensures that Snowflake outputs valid/compatible JSON."}, +// {Name: sdk.UserParameterTimestampDayIsAlways24h, Type: schema.TypeBool, Description: "Specifies whether the [DATEADD](https://docs.snowflake.com/en/sql-reference/functions/dateadd) function (and its aliases) always consider a day to be exactly 24 hours for expressions that span multiple days."}, +// {Name: sdk.UserParameterTimestampInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported timestamp format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of timestamps stored in the system during the session)."}, +// {Name: sdk.UserParameterTimestampLtzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_LTZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, +// {Name: sdk.UserParameterTimestampNtzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_NTZ data type."}, +// {Name: sdk.UserParameterTimestampOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, +// {Name: sdk.UserParameterTimestampTypeMapping, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToTimestampTypeMapping), DiffSuppress: NormalizeAndCompare(sdk.ToTimestampTypeMapping), Description: "Specifies the TIMESTAMP_* variation that the TIMESTAMP data type alias maps to."}, +// {Name: sdk.UserParameterTimestampTzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_TZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, +// {Name: sdk.UserParameterTimezone, Type: schema.TypeString, Description: "Specifies the time zone for the session. 
You can specify a [time zone name](https://data.iana.org/time-zones/tzdb-2021a/zone1970.tab) or a [link name](https://data.iana.org/time-zones/tzdb-2021a/backward) from release 2021a of the [IANA Time Zone Database](https://www.iana.org/time-zones) (e.g. America/Los_Angeles, Europe/London, UTC, Etc/GMT, etc.)."}, +// {Name: sdk.UserParameterTimeInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported time format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of times stored in the system during the session)."}, +// {Name: sdk.UserParameterTimeOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, +// {Name: sdk.UserParameterTraceLevel, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToTraceLevel), DiffSuppress: NormalizeAndCompare(sdk.ToTraceLevel), Description: "Controls how trace events are ingested into the event table. For more information about trace levels, see [Setting trace level](https://docs.snowflake.com/en/developer-guide/logging-tracing/tracing-trace-level)."}, +// {Name: sdk.UserParameterTransactionAbortOnError, Type: schema.TypeBool, Description: "Specifies the action to perform when a statement issued within a non-autocommit transaction returns with an error."}, +// {Name: sdk.UserParameterTransactionDefaultIsolationLevel, Type: schema.TypeString, Description: "Specifies the isolation level for transactions in the user session."}, +// {Name: sdk.UserParameterTwoDigitCenturyStart, Type: schema.TypeInt, Description: "Specifies the β€œcentury start” year for 2-digit years (i.e. the earliest year such dates can represent). This parameter prevents ambiguous dates when importing or converting data with the `YY` date format component (i.e. years represented as 2 digits)."}, +// {Name: sdk.UserParameterUnsupportedDdlAction, Type: schema.TypeString, Description: "Determines if an unsupported (i.e. non-default) value specified for a constraint property returns an error."}, +// {Name: sdk.UserParameterUseCachedResult, Type: schema.TypeBool, Description: "Specifies whether to reuse persisted query results, if available, when a matching query is submitted."}, +// {Name: sdk.UserParameterWeekOfYearPolicy, Type: schema.TypeInt, Description: "Specifies how the weeks in a given year are computed. `0`: The semantics used are equivalent to the ISO semantics, in which a week belongs to a given year if at least 4 days of that week are in that year. `1`: January 1 is included in the first week of the year and December 31 is included in the last week of the year."}, +// {Name: sdk.UserParameterWeekStart, Type: schema.TypeInt, Description: "Specifies the first day of the week (used by week-related date functions). `0`: Legacy Snowflake behavior is used (i.e. ISO-like semantics). `1` (Monday) to `7` (Sunday): All the week-related functions use weeks that start on the specified day of the week."}, +// {Name: sdk.UserParameterEnableUnredactedQuerySyntaxError, Type: schema.TypeBool, Description: "Controls whether query text is redacted if a SQL query fails due to a syntax or parsing error. 
If `FALSE`, the content of a failed query is redacted in the views, pages, and functions that provide a query history. Only users with a role that is granted or inherits the AUDIT privilege can set the ENABLE_UNREDACTED_QUERY_SYNTAX_ERROR parameter. When using the ALTER USER command to set the parameter to `TRUE` for a particular user, modify the user that you want to see the query text, not the user who executed the query (if those are different users)."}, +// {Name: sdk.UserParameterNetworkPolicy, Type: schema.TypeString, Description: "Specifies the network policy to enforce for your account. Network policies enable restricting access to your account based on users’ IP address. For more details, see [Controlling network traffic with network policies](https://docs.snowflake.com/en/user-guide/network-policies). Any existing network policy (created using [CREATE NETWORK POLICY](https://docs.snowflake.com/en/sql-reference/sql/create-network-policy))."}, +// {Name: sdk.UserParameterPreventUnloadToInternalStages, Type: schema.TypeBool, Description: "Specifies whether to prevent data unload operations to internal (Snowflake) stages using [COPY INTO ](https://docs.snowflake.com/en/sql-reference/sql/copy-into-location) statements."}, +// } +// +// // TODO [SNOW-1645342]: extract this method after moving to SDK +// for _, field := range userParameterFields { +// fieldName := strings.ToLower(string(field.Name)) +// +// userParametersSchema[fieldName] = &schema.Schema{ +// Type: field.Type, +// Description: enrichWithReferenceToParameterDocs(field.Name, field.Description), +// Computed: true, +// Optional: true, +// ValidateDiagFunc: field.ValidateDiag, +// DiffSuppressFunc: field.DiffSuppress, +// } +// } +//} +// +//func userParametersProvider(ctx context.Context, d ResourceIdProvider, meta any) ([]*sdk.Parameter, error) { +// return parametersProvider(ctx, d, meta.(*provider.Context), userParametersProviderFunc, sdk.ParseAccountObjectIdentifier) +//} +// +//func userParametersProviderFunc(c *sdk.Client) showParametersFunc[sdk.AccountObjectIdentifier] { +// return c.Users.ShowParameters +//} +// +//// TODO [SNOW-1645342]: make generic based on type definition +//func handleUserParameterRead(d *schema.ResourceData, warehouseParameters []*sdk.Parameter) error { +// for _, p := range warehouseParameters { +// switch p.Key { +// case +// string(sdk.UserParameterClientMemoryLimit), +// string(sdk.UserParameterClientPrefetchThreads), +// string(sdk.UserParameterClientResultChunkSize), +// string(sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency), +// string(sdk.UserParameterJsonIndent), +// string(sdk.UserParameterLockTimeout), +// string(sdk.UserParameterMultiStatementCount), +// string(sdk.UserParameterRowsPerResultset), +// string(sdk.UserParameterStatementQueuedTimeoutInSeconds), +// string(sdk.UserParameterStatementTimeoutInSeconds), +// string(sdk.UserParameterTwoDigitCenturyStart), +// string(sdk.UserParameterWeekOfYearPolicy), +// string(sdk.UserParameterWeekStart): +// value, err := strconv.Atoi(p.Value) +// if err != nil { +// return err +// } +// if err := d.Set(strings.ToLower(p.Key), value); err != nil { +// return err +// } +// case +// string(sdk.UserParameterBinaryInputFormat), +// string(sdk.UserParameterBinaryOutputFormat), +// string(sdk.UserParameterClientTimestampTypeMapping), +// string(sdk.UserParameterDateInputFormat), +// string(sdk.UserParameterDateOutputFormat), +// string(sdk.UserParameterGeographyOutputFormat), +// string(sdk.UserParameterGeometryOutputFormat), +// 
string(sdk.UserParameterLogLevel), +// string(sdk.UserParameterQueryTag), +// string(sdk.UserParameterS3StageVpceDnsName), +// string(sdk.UserParameterSearchPath), +// string(sdk.UserParameterSimulatedDataSharingConsumer), +// string(sdk.UserParameterTimestampInputFormat), +// string(sdk.UserParameterTimestampLtzOutputFormat), +// string(sdk.UserParameterTimestampNtzOutputFormat), +// string(sdk.UserParameterTimestampOutputFormat), +// string(sdk.UserParameterTimestampTypeMapping), +// string(sdk.UserParameterTimestampTzOutputFormat), +// string(sdk.UserParameterTimezone), +// string(sdk.UserParameterTimeInputFormat), +// string(sdk.UserParameterTimeOutputFormat), +// string(sdk.UserParameterTraceLevel), +// string(sdk.UserParameterTransactionDefaultIsolationLevel), +// string(sdk.UserParameterUnsupportedDdlAction), +// string(sdk.UserParameterNetworkPolicy): +// if err := d.Set(strings.ToLower(p.Key), p.Value); err != nil { +// return err +// } +// case +// string(sdk.UserParameterAbortDetachedQuery), +// string(sdk.UserParameterAutocommit), +// string(sdk.UserParameterClientMetadataRequestUseConnectionCtx), +// string(sdk.UserParameterClientResultColumnCaseInsensitive), +// string(sdk.UserParameterClientSessionKeepAlive), +// string(sdk.UserParameterEnableUnloadPhysicalTypeOptimization), +// string(sdk.UserParameterErrorOnNondeterministicMerge), +// string(sdk.UserParameterErrorOnNondeterministicUpdate), +// string(sdk.UserParameterJdbcTreatDecimalAsInt), +// string(sdk.UserParameterJdbcTreatTimestampNtzAsUtc), +// string(sdk.UserParameterJdbcUseSessionTimezone), +// string(sdk.UserParameterNoorderSequenceAsDefault), +// string(sdk.UserParameterOdbcTreatDecimalAsInt), +// string(sdk.UserParameterQuotedIdentifiersIgnoreCase), +// string(sdk.UserParameterStrictJsonOutput), +// string(sdk.UserParameterTimestampDayIsAlways24h), +// string(sdk.UserParameterTransactionAbortOnError), +// string(sdk.UserParameterUseCachedResult), +// string(sdk.UserParameterEnableUnredactedQuerySyntaxError), +// string(sdk.UserParameterPreventUnloadToInternalStages): +// value, err := strconv.ParseBool(p.Value) +// if err != nil { +// return err +// } +// if err := d.Set(strings.ToLower(p.Key), value); err != nil { +// return err +// } +// } +// } +// +// return nil +//} +// +//// TODO [SNOW-1348330]: consider using SessionParameters#setParam during parameters rework +//// (because currently setParam already is able to set the right parameter based on the string value input, +//// but GetConfigPropertyAsPointerAllowingZeroValue receives typed value, +//// so this would be unnecessary running in circles) +//// TODO [SNOW-1645342]: include mappers in the param definition (after moving it to the SDK: identity versus concrete) +//func handleUserParametersCreate(d *schema.ResourceData, createOpts *sdk.CreateUserOptions) diag.Diagnostics { +// return JoinDiags( +// handleParameterCreate(d, sdk.UserParameterAbortDetachedQuery, &createOpts.SessionParameters.AbortDetachedQuery), +// handleParameterCreate(d, sdk.UserParameterAutocommit, &createOpts.SessionParameters.Autocommit), +// handleParameterCreateWithMapping(d, sdk.UserParameterBinaryInputFormat, &createOpts.SessionParameters.BinaryInputFormat, stringToStringEnumProvider(sdk.ToBinaryInputFormat)), +// handleParameterCreateWithMapping(d, sdk.UserParameterBinaryOutputFormat, &createOpts.SessionParameters.BinaryOutputFormat, stringToStringEnumProvider(sdk.ToBinaryOutputFormat)), +// handleParameterCreate(d, sdk.UserParameterClientMemoryLimit, 
&createOpts.SessionParameters.ClientMemoryLimit), +// handleParameterCreate(d, sdk.UserParameterClientMetadataRequestUseConnectionCtx, &createOpts.SessionParameters.ClientMetadataRequestUseConnectionCtx), +// handleParameterCreate(d, sdk.UserParameterClientPrefetchThreads, &createOpts.SessionParameters.ClientPrefetchThreads), +// handleParameterCreate(d, sdk.UserParameterClientResultChunkSize, &createOpts.SessionParameters.ClientResultChunkSize), +// handleParameterCreate(d, sdk.UserParameterClientResultColumnCaseInsensitive, &createOpts.SessionParameters.ClientResultColumnCaseInsensitive), +// handleParameterCreate(d, sdk.UserParameterClientSessionKeepAlive, &createOpts.SessionParameters.ClientSessionKeepAlive), +// handleParameterCreate(d, sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency, &createOpts.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency), +// handleParameterCreateWithMapping(d, sdk.UserParameterClientTimestampTypeMapping, &createOpts.SessionParameters.ClientTimestampTypeMapping, stringToStringEnumProvider(sdk.ToClientTimestampTypeMapping)), +// handleParameterCreate(d, sdk.UserParameterDateInputFormat, &createOpts.SessionParameters.DateInputFormat), +// handleParameterCreate(d, sdk.UserParameterDateOutputFormat, &createOpts.SessionParameters.DateOutputFormat), +// handleParameterCreate(d, sdk.UserParameterEnableUnloadPhysicalTypeOptimization, &createOpts.SessionParameters.EnableUnloadPhysicalTypeOptimization), +// handleParameterCreate(d, sdk.UserParameterErrorOnNondeterministicMerge, &createOpts.SessionParameters.ErrorOnNondeterministicMerge), +// handleParameterCreate(d, sdk.UserParameterErrorOnNondeterministicUpdate, &createOpts.SessionParameters.ErrorOnNondeterministicUpdate), +// handleParameterCreateWithMapping(d, sdk.UserParameterGeographyOutputFormat, &createOpts.SessionParameters.GeographyOutputFormat, stringToStringEnumProvider(sdk.ToGeographyOutputFormat)), +// handleParameterCreateWithMapping(d, sdk.UserParameterGeometryOutputFormat, &createOpts.SessionParameters.GeometryOutputFormat, stringToStringEnumProvider(sdk.ToGeometryOutputFormat)), +// handleParameterCreate(d, sdk.UserParameterJdbcTreatDecimalAsInt, &createOpts.SessionParameters.JdbcTreatDecimalAsInt), +// handleParameterCreate(d, sdk.UserParameterJdbcTreatTimestampNtzAsUtc, &createOpts.SessionParameters.JdbcTreatTimestampNtzAsUtc), +// handleParameterCreate(d, sdk.UserParameterJdbcUseSessionTimezone, &createOpts.SessionParameters.JdbcUseSessionTimezone), +// handleParameterCreate(d, sdk.UserParameterJsonIndent, &createOpts.SessionParameters.JSONIndent), +// handleParameterCreate(d, sdk.UserParameterLockTimeout, &createOpts.SessionParameters.LockTimeout), +// handleParameterCreateWithMapping(d, sdk.UserParameterLogLevel, &createOpts.SessionParameters.LogLevel, stringToStringEnumProvider(sdk.ToLogLevel)), +// handleParameterCreate(d, sdk.UserParameterMultiStatementCount, &createOpts.SessionParameters.MultiStatementCount), +// handleParameterCreate(d, sdk.UserParameterNoorderSequenceAsDefault, &createOpts.SessionParameters.NoorderSequenceAsDefault), +// handleParameterCreate(d, sdk.UserParameterOdbcTreatDecimalAsInt, &createOpts.SessionParameters.OdbcTreatDecimalAsInt), +// handleParameterCreate(d, sdk.UserParameterQueryTag, &createOpts.SessionParameters.QueryTag), +// handleParameterCreate(d, sdk.UserParameterQuotedIdentifiersIgnoreCase, &createOpts.SessionParameters.QuotedIdentifiersIgnoreCase), +// handleParameterCreate(d, sdk.UserParameterRowsPerResultset, 
&createOpts.SessionParameters.RowsPerResultset), +// handleParameterCreate(d, sdk.UserParameterS3StageVpceDnsName, &createOpts.SessionParameters.S3StageVpceDnsName), +// handleParameterCreate(d, sdk.UserParameterSearchPath, &createOpts.SessionParameters.SearchPath), +// handleParameterCreate(d, sdk.UserParameterSimulatedDataSharingConsumer, &createOpts.SessionParameters.SimulatedDataSharingConsumer), +// handleParameterCreate(d, sdk.UserParameterStatementQueuedTimeoutInSeconds, &createOpts.SessionParameters.StatementQueuedTimeoutInSeconds), +// handleParameterCreate(d, sdk.UserParameterStatementTimeoutInSeconds, &createOpts.SessionParameters.StatementTimeoutInSeconds), +// handleParameterCreate(d, sdk.UserParameterStrictJsonOutput, &createOpts.SessionParameters.StrictJSONOutput), +// handleParameterCreate(d, sdk.UserParameterTimestampDayIsAlways24h, &createOpts.SessionParameters.TimestampDayIsAlways24h), +// handleParameterCreate(d, sdk.UserParameterTimestampInputFormat, &createOpts.SessionParameters.TimestampInputFormat), +// handleParameterCreate(d, sdk.UserParameterTimestampLtzOutputFormat, &createOpts.SessionParameters.TimestampLTZOutputFormat), +// handleParameterCreate(d, sdk.UserParameterTimestampNtzOutputFormat, &createOpts.SessionParameters.TimestampNTZOutputFormat), +// handleParameterCreate(d, sdk.UserParameterTimestampOutputFormat, &createOpts.SessionParameters.TimestampOutputFormat), +// handleParameterCreateWithMapping(d, sdk.UserParameterTimestampTypeMapping, &createOpts.SessionParameters.TimestampTypeMapping, stringToStringEnumProvider(sdk.ToTimestampTypeMapping)), +// handleParameterCreate(d, sdk.UserParameterTimestampTzOutputFormat, &createOpts.SessionParameters.TimestampTZOutputFormat), +// handleParameterCreate(d, sdk.UserParameterTimezone, &createOpts.SessionParameters.Timezone), +// handleParameterCreate(d, sdk.UserParameterTimeInputFormat, &createOpts.SessionParameters.TimeInputFormat), +// handleParameterCreate(d, sdk.UserParameterTimeOutputFormat, &createOpts.SessionParameters.TimeOutputFormat), +// handleParameterCreateWithMapping(d, sdk.UserParameterTraceLevel, &createOpts.SessionParameters.TraceLevel, stringToStringEnumProvider(sdk.ToTraceLevel)), +// handleParameterCreate(d, sdk.UserParameterTransactionAbortOnError, &createOpts.SessionParameters.TransactionAbortOnError), +// handleParameterCreateWithMapping(d, sdk.UserParameterTransactionDefaultIsolationLevel, &createOpts.SessionParameters.TransactionDefaultIsolationLevel, stringToStringEnumProvider(sdk.ToTransactionDefaultIsolationLevel)), +// handleParameterCreate(d, sdk.UserParameterTwoDigitCenturyStart, &createOpts.SessionParameters.TwoDigitCenturyStart), +// handleParameterCreateWithMapping(d, sdk.UserParameterUnsupportedDdlAction, &createOpts.SessionParameters.UnsupportedDDLAction, stringToStringEnumProvider(sdk.ToUnsupportedDDLAction)), +// handleParameterCreate(d, sdk.UserParameterUseCachedResult, &createOpts.SessionParameters.UseCachedResult), +// handleParameterCreate(d, sdk.UserParameterWeekOfYearPolicy, &createOpts.SessionParameters.WeekOfYearPolicy), +// handleParameterCreate(d, sdk.UserParameterWeekStart, &createOpts.SessionParameters.WeekStart), +// handleParameterCreate(d, sdk.UserParameterEnableUnredactedQuerySyntaxError, &createOpts.ObjectParameters.EnableUnredactedQuerySyntaxError), +// handleParameterCreateWithMapping(d, sdk.UserParameterNetworkPolicy, &createOpts.ObjectParameters.NetworkPolicy, stringToAccountObjectIdentifier), +// handleParameterCreate(d, 
sdk.UserParameterPreventUnloadToInternalStages, &createOpts.ObjectParameters.PreventUnloadToInternalStages), +// ) +//} +// +//func handleUserParametersUpdate(d *schema.ResourceData, set *sdk.UserSet, unset *sdk.UserUnset) diag.Diagnostics { +// return JoinDiags( +// handleParameterUpdate(d, sdk.UserParameterAbortDetachedQuery, &set.SessionParameters.AbortDetachedQuery, &unset.SessionParameters.AbortDetachedQuery), +// handleParameterUpdate(d, sdk.UserParameterAutocommit, &set.SessionParameters.Autocommit, &unset.SessionParameters.Autocommit), +// handleParameterUpdateWithMapping(d, sdk.UserParameterBinaryInputFormat, &set.SessionParameters.BinaryInputFormat, &unset.SessionParameters.BinaryInputFormat, stringToStringEnumProvider(sdk.ToBinaryInputFormat)), +// handleParameterUpdateWithMapping(d, sdk.UserParameterBinaryOutputFormat, &set.SessionParameters.BinaryOutputFormat, &unset.SessionParameters.BinaryOutputFormat, stringToStringEnumProvider(sdk.ToBinaryOutputFormat)), +// handleParameterUpdate(d, sdk.UserParameterClientMemoryLimit, &set.SessionParameters.ClientMemoryLimit, &unset.SessionParameters.ClientMemoryLimit), +// handleParameterUpdate(d, sdk.UserParameterClientMetadataRequestUseConnectionCtx, &set.SessionParameters.ClientMetadataRequestUseConnectionCtx, &unset.SessionParameters.ClientMetadataRequestUseConnectionCtx), +// handleParameterUpdate(d, sdk.UserParameterClientPrefetchThreads, &set.SessionParameters.ClientPrefetchThreads, &unset.SessionParameters.ClientPrefetchThreads), +// handleParameterUpdate(d, sdk.UserParameterClientResultChunkSize, &set.SessionParameters.ClientResultChunkSize, &unset.SessionParameters.ClientResultChunkSize), +// handleParameterUpdate(d, sdk.UserParameterClientResultColumnCaseInsensitive, &set.SessionParameters.ClientResultColumnCaseInsensitive, &unset.SessionParameters.ClientResultColumnCaseInsensitive), +// handleParameterUpdate(d, sdk.UserParameterClientSessionKeepAlive, &set.SessionParameters.ClientSessionKeepAlive, &unset.SessionParameters.ClientSessionKeepAlive), +// handleParameterUpdate(d, sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency, &set.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency, &unset.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency), +// handleParameterUpdateWithMapping(d, sdk.UserParameterClientTimestampTypeMapping, &set.SessionParameters.ClientTimestampTypeMapping, &unset.SessionParameters.ClientTimestampTypeMapping, stringToStringEnumProvider(sdk.ToClientTimestampTypeMapping)), +// handleParameterUpdate(d, sdk.UserParameterDateInputFormat, &set.SessionParameters.DateInputFormat, &unset.SessionParameters.DateInputFormat), +// handleParameterUpdate(d, sdk.UserParameterDateOutputFormat, &set.SessionParameters.DateOutputFormat, &unset.SessionParameters.DateOutputFormat), +// handleParameterUpdate(d, sdk.UserParameterEnableUnloadPhysicalTypeOptimization, &set.SessionParameters.EnableUnloadPhysicalTypeOptimization, &unset.SessionParameters.EnableUnloadPhysicalTypeOptimization), +// handleParameterUpdate(d, sdk.UserParameterErrorOnNondeterministicMerge, &set.SessionParameters.ErrorOnNondeterministicMerge, &unset.SessionParameters.ErrorOnNondeterministicMerge), +// handleParameterUpdate(d, sdk.UserParameterErrorOnNondeterministicUpdate, &set.SessionParameters.ErrorOnNondeterministicUpdate, &unset.SessionParameters.ErrorOnNondeterministicUpdate), +// handleParameterUpdateWithMapping(d, sdk.UserParameterGeographyOutputFormat, &set.SessionParameters.GeographyOutputFormat, 
&unset.SessionParameters.GeographyOutputFormat, stringToStringEnumProvider(sdk.ToGeographyOutputFormat)), +// handleParameterUpdateWithMapping(d, sdk.UserParameterGeometryOutputFormat, &set.SessionParameters.GeometryOutputFormat, &unset.SessionParameters.GeometryOutputFormat, stringToStringEnumProvider(sdk.ToGeometryOutputFormat)), +// handleParameterUpdate(d, sdk.UserParameterJdbcTreatDecimalAsInt, &set.SessionParameters.JdbcTreatDecimalAsInt, &unset.SessionParameters.JdbcTreatDecimalAsInt), +// handleParameterUpdate(d, sdk.UserParameterJdbcTreatTimestampNtzAsUtc, &set.SessionParameters.JdbcTreatTimestampNtzAsUtc, &unset.SessionParameters.JdbcTreatTimestampNtzAsUtc), +// handleParameterUpdate(d, sdk.UserParameterJdbcUseSessionTimezone, &set.SessionParameters.JdbcUseSessionTimezone, &unset.SessionParameters.JdbcUseSessionTimezone), +// handleParameterUpdate(d, sdk.UserParameterJsonIndent, &set.SessionParameters.JSONIndent, &unset.SessionParameters.JSONIndent), +// handleParameterUpdate(d, sdk.UserParameterLockTimeout, &set.SessionParameters.LockTimeout, &unset.SessionParameters.LockTimeout), +// handleParameterUpdateWithMapping(d, sdk.UserParameterLogLevel, &set.SessionParameters.LogLevel, &unset.SessionParameters.LogLevel, stringToStringEnumProvider(sdk.ToLogLevel)), +// handleParameterUpdate(d, sdk.UserParameterMultiStatementCount, &set.SessionParameters.MultiStatementCount, &unset.SessionParameters.MultiStatementCount), +// handleParameterUpdate(d, sdk.UserParameterNoorderSequenceAsDefault, &set.SessionParameters.NoorderSequenceAsDefault, &unset.SessionParameters.NoorderSequenceAsDefault), +// handleParameterUpdate(d, sdk.UserParameterOdbcTreatDecimalAsInt, &set.SessionParameters.OdbcTreatDecimalAsInt, &unset.SessionParameters.OdbcTreatDecimalAsInt), +// handleParameterUpdate(d, sdk.UserParameterQueryTag, &set.SessionParameters.QueryTag, &unset.SessionParameters.QueryTag), +// handleParameterUpdate(d, sdk.UserParameterQuotedIdentifiersIgnoreCase, &set.SessionParameters.QuotedIdentifiersIgnoreCase, &unset.SessionParameters.QuotedIdentifiersIgnoreCase), +// handleParameterUpdate(d, sdk.UserParameterRowsPerResultset, &set.SessionParameters.RowsPerResultset, &unset.SessionParameters.RowsPerResultset), +// handleParameterUpdate(d, sdk.UserParameterS3StageVpceDnsName, &set.SessionParameters.S3StageVpceDnsName, &unset.SessionParameters.S3StageVpceDnsName), +// handleParameterUpdate(d, sdk.UserParameterSearchPath, &set.SessionParameters.SearchPath, &unset.SessionParameters.SearchPath), +// handleParameterUpdate(d, sdk.UserParameterSimulatedDataSharingConsumer, &set.SessionParameters.SimulatedDataSharingConsumer, &unset.SessionParameters.SimulatedDataSharingConsumer), +// handleParameterUpdate(d, sdk.UserParameterStatementQueuedTimeoutInSeconds, &set.SessionParameters.StatementQueuedTimeoutInSeconds, &unset.SessionParameters.StatementQueuedTimeoutInSeconds), +// handleParameterUpdate(d, sdk.UserParameterStatementTimeoutInSeconds, &set.SessionParameters.StatementTimeoutInSeconds, &unset.SessionParameters.StatementTimeoutInSeconds), +// handleParameterUpdate(d, sdk.UserParameterStrictJsonOutput, &set.SessionParameters.StrictJSONOutput, &unset.SessionParameters.StrictJSONOutput), +// handleParameterUpdate(d, sdk.UserParameterTimestampDayIsAlways24h, &set.SessionParameters.TimestampDayIsAlways24h, &unset.SessionParameters.TimestampDayIsAlways24h), +// handleParameterUpdate(d, sdk.UserParameterTimestampInputFormat, &set.SessionParameters.TimestampInputFormat, 
&unset.SessionParameters.TimestampInputFormat), +// handleParameterUpdate(d, sdk.UserParameterTimestampLtzOutputFormat, &set.SessionParameters.TimestampLTZOutputFormat, &unset.SessionParameters.TimestampLTZOutputFormat), +// handleParameterUpdate(d, sdk.UserParameterTimestampNtzOutputFormat, &set.SessionParameters.TimestampNTZOutputFormat, &unset.SessionParameters.TimestampNTZOutputFormat), +// handleParameterUpdate(d, sdk.UserParameterTimestampOutputFormat, &set.SessionParameters.TimestampOutputFormat, &unset.SessionParameters.TimestampOutputFormat), +// handleParameterUpdateWithMapping(d, sdk.UserParameterTimestampTypeMapping, &set.SessionParameters.TimestampTypeMapping, &unset.SessionParameters.TimestampTypeMapping, stringToStringEnumProvider(sdk.ToTimestampTypeMapping)), +// handleParameterUpdate(d, sdk.UserParameterTimestampTzOutputFormat, &set.SessionParameters.TimestampTZOutputFormat, &unset.SessionParameters.TimestampTZOutputFormat), +// handleParameterUpdate(d, sdk.UserParameterTimezone, &set.SessionParameters.Timezone, &unset.SessionParameters.Timezone), +// handleParameterUpdate(d, sdk.UserParameterTimeInputFormat, &set.SessionParameters.TimeInputFormat, &unset.SessionParameters.TimeInputFormat), +// handleParameterUpdate(d, sdk.UserParameterTimeOutputFormat, &set.SessionParameters.TimeOutputFormat, &unset.SessionParameters.TimeOutputFormat), +// handleParameterUpdateWithMapping(d, sdk.UserParameterTraceLevel, &set.SessionParameters.TraceLevel, &unset.SessionParameters.TraceLevel, stringToStringEnumProvider(sdk.ToTraceLevel)), +// handleParameterUpdate(d, sdk.UserParameterTransactionAbortOnError, &set.SessionParameters.TransactionAbortOnError, &unset.SessionParameters.TransactionAbortOnError), +// handleParameterUpdateWithMapping(d, sdk.UserParameterTransactionDefaultIsolationLevel, &set.SessionParameters.TransactionDefaultIsolationLevel, &unset.SessionParameters.TransactionDefaultIsolationLevel, stringToStringEnumProvider(sdk.ToTransactionDefaultIsolationLevel)), +// handleParameterUpdate(d, sdk.UserParameterTwoDigitCenturyStart, &set.SessionParameters.TwoDigitCenturyStart, &unset.SessionParameters.TwoDigitCenturyStart), +// handleParameterUpdateWithMapping(d, sdk.UserParameterUnsupportedDdlAction, &set.SessionParameters.UnsupportedDDLAction, &unset.SessionParameters.UnsupportedDDLAction, stringToStringEnumProvider(sdk.ToUnsupportedDDLAction)), +// handleParameterUpdate(d, sdk.UserParameterUseCachedResult, &set.SessionParameters.UseCachedResult, &unset.SessionParameters.UseCachedResult), +// handleParameterUpdate(d, sdk.UserParameterWeekOfYearPolicy, &set.SessionParameters.WeekOfYearPolicy, &unset.SessionParameters.WeekOfYearPolicy), +// handleParameterUpdate(d, sdk.UserParameterWeekStart, &set.SessionParameters.WeekStart, &unset.SessionParameters.WeekStart), +// handleParameterUpdate(d, sdk.UserParameterEnableUnredactedQuerySyntaxError, &set.ObjectParameters.EnableUnredactedQuerySyntaxError, &unset.ObjectParameters.EnableUnredactedQuerySyntaxError), +// handleParameterUpdateWithMapping(d, sdk.UserParameterNetworkPolicy, &set.ObjectParameters.NetworkPolicy, &unset.ObjectParameters.NetworkPolicy, stringToAccountObjectIdentifier), +// handleParameterUpdate(d, sdk.UserParameterPreventUnloadToInternalStages, &set.ObjectParameters.PreventUnloadToInternalStages, &unset.ObjectParameters.PreventUnloadToInternalStages), +// ) +//} diff --git a/pkg/schemas/task_gen.go b/pkg/schemas/task_gen.go index a9daac5198..825e66b64e 100644 --- a/pkg/schemas/task_gen.go +++ b/pkg/schemas/task_gen.go 
@@ -3,6 +3,7 @@ package schemas import ( + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -46,7 +47,8 @@ var ShowTaskSchema = map[string]*schema.Schema{ Computed: true, }, "predecessors": { - Type: schema.TypeInvalid, + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, Computed: true, }, "state": { @@ -89,6 +91,27 @@ var ShowTaskSchema = map[string]*schema.Schema{ Type: schema.TypeString, Computed: true, }, + "task_relations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predecessors": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "finalizer": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "last_suspended_reason": { + Type: schema.TypeString, + Computed: true, + }, } var _ = ShowTaskSchema @@ -104,17 +127,26 @@ func TaskToSchema(task *sdk.Task) map[string]any { taskSchema["comment"] = task.Comment taskSchema["warehouse"] = task.Warehouse taskSchema["schedule"] = task.Schedule - taskSchema["predecessors"] = task.Predecessors + taskSchema["predecessors"] = collections.Map(task.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName) taskSchema["state"] = string(task.State) taskSchema["definition"] = task.Definition taskSchema["condition"] = task.Condition taskSchema["allow_overlapping_execution"] = task.AllowOverlappingExecution - taskSchema["error_integration"] = task.ErrorIntegration + if task.ErrorIntegration != nil { + taskSchema["error_integration"] = task.ErrorIntegration.Name() + } taskSchema["last_committed_on"] = task.LastCommittedOn taskSchema["last_suspended_on"] = task.LastSuspendedOn taskSchema["owner_role_type"] = task.OwnerRoleType taskSchema["config"] = task.Config taskSchema["budget"] = task.Budget + taskSchema["last_suspended_reason"] = task.LastSuspendedReason + taskSchema["task_relations"] = []any{ + map[string]any{ + "predecessors": collections.Map(task.TaskRelations.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName), + "finalizer": task.TaskRelations.FinalizerTask, + }, + } return taskSchema } diff --git a/pkg/schemas/task_parameters.go b/pkg/schemas/task_parameters.go new file mode 100644 index 0000000000..1288e1b08c --- /dev/null +++ b/pkg/schemas/task_parameters.go @@ -0,0 +1,29 @@ +package schemas + +import ( + "slices" + "strings" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var ( + ShowTaskParametersSchema = make(map[string]*schema.Schema) +) + +func init() { + for _, param := range sdk.AllTaskParameters { + ShowTaskParametersSchema[strings.ToLower(string(param))] = ParameterListSchema + } +} + +func TaskParametersToSchema(parameters []*sdk.Parameter) map[string]any { + taskParametersValue := make(map[string]any) + for _, param := range parameters { + if slices.Contains(userParameters, sdk.UserParameter(param.Key)) { + taskParametersValue[strings.ToLower(param.Key)] = []map[string]any{ParameterToSchema(param)} + } + } + return taskParametersValue +} diff --git a/pkg/sdk/parameters.go b/pkg/sdk/parameters.go index 8f275752e1..f341ed503a 100644 --- a/pkg/sdk/parameters.go +++ b/pkg/sdk/parameters.go @@ -721,6 +721,70 @@ const ( TaskParameterWeekStart TaskParameter = "WEEK_START" ) +var AllTaskParameters = []TaskParameter{ + // Task 
Parameters + TaskParameterSuspendTaskAfterNumFailures, + TaskParameterTaskAutoRetryAttempts, + TaskParameterUserTaskManagedInitialWarehouseSize, + TaskParameterUserTaskMinimumTriggerIntervalInSeconds, + TaskParameterUserTaskTimeoutMs, + + // Session Parameters (inherited) + TaskParameterAbortDetachedQuery, + TaskParameterAutocommit, + TaskParameterBinaryInputFormat, + TaskParameterBinaryOutputFormat, + TaskParameterClientMemoryLimit, + TaskParameterClientMetadataRequestUseConnectionCtx, + TaskParameterClientPrefetchThreads, + TaskParameterClientResultChunkSize, + TaskParameterClientResultColumnCaseInsensitive, + TaskParameterClientSessionKeepAlive, + TaskParameterClientSessionKeepAliveHeartbeatFrequency, + TaskParameterClientTimestampTypeMapping, + TaskParameterDateInputFormat, + TaskParameterDateOutputFormat, + TaskParameterEnableUnloadPhysicalTypeOptimization, + TaskParameterErrorOnNondeterministicMerge, + TaskParameterErrorOnNondeterministicUpdate, + TaskParameterGeographyOutputFormat, + TaskParameterGeometryOutputFormat, + TaskParameterJdbcTreatTimestampNtzAsUtc, + TaskParameterJdbcUseSessionTimezone, + TaskParameterJsonIndent, + TaskParameterLockTimeout, + TaskParameterLogLevel, + TaskParameterMultiStatementCount, + TaskParameterNoorderSequenceAsDefault, + TaskParameterOdbcTreatDecimalAsInt, + TaskParameterQueryTag, + TaskParameterQuotedIdentifiersIgnoreCase, + TaskParameterRowsPerResultset, + TaskParameterS3StageVpceDnsName, + TaskParameterSearchPath, + TaskParameterStatementQueuedTimeoutInSeconds, + TaskParameterStatementTimeoutInSeconds, + TaskParameterStrictJsonOutput, + TaskParameterTimestampDayIsAlways24h, + TaskParameterTimestampInputFormat, + TaskParameterTimestampLtzOutputFormat, + TaskParameterTimestampNtzOutputFormat, + TaskParameterTimestampOutputFormat, + TaskParameterTimestampTypeMapping, + TaskParameterTimestampTzOutputFormat, + TaskParameterTimezone, + TaskParameterTimeInputFormat, + TaskParameterTimeOutputFormat, + TaskParameterTraceLevel, + TaskParameterTransactionAbortOnError, + TaskParameterTransactionDefaultIsolationLevel, + TaskParameterTwoDigitCenturyStart, + TaskParameterUnsupportedDdlAction, + TaskParameterUseCachedResult, + TaskParameterWeekOfYearPolicy, + TaskParameterWeekStart, +} + type WarehouseParameter string const ( diff --git a/pkg/sdk/tasks_gen.go b/pkg/sdk/tasks_gen.go index 56123086ff..a897433ac6 100644 --- a/pkg/sdk/tasks_gen.go +++ b/pkg/sdk/tasks_gen.go @@ -13,6 +13,7 @@ type Tasks interface { Drop(ctx context.Context, request *DropTaskRequest) error Show(ctx context.Context, request *ShowTaskRequest) ([]Task, error) ShowByID(ctx context.Context, id SchemaObjectIdentifier) (*Task, error) + ShowParameters(ctx context.Context, id SchemaObjectIdentifier) ([]*Parameter, error) Describe(ctx context.Context, id SchemaObjectIdentifier) (*Task, error) Execute(ctx context.Context, request *ExecuteTaskRequest) error SuspendRootTasks(ctx context.Context, taskId SchemaObjectIdentifier, id SchemaObjectIdentifier) ([]SchemaObjectIdentifier, error) diff --git a/pkg/sdk/tasks_impl_gen.go b/pkg/sdk/tasks_impl_gen.go index 9f00526c25..f178182f4c 100644 --- a/pkg/sdk/tasks_impl_gen.go +++ b/pkg/sdk/tasks_impl_gen.go @@ -64,6 +64,14 @@ func (v *tasks) ShowByID(ctx context.Context, id SchemaObjectIdentifier) (*Task, return collections.FindFirst(tasks, func(r Task) bool { return r.Name == id.Name() }) } +func (v *tasks) ShowParameters(ctx context.Context, id SchemaObjectIdentifier) ([]*Parameter, error) { + return v.client.Parameters.ShowParameters(ctx, 
&ShowParametersOptions{ + In: &ParametersIn{ + Task: id, + }, + }) +} + func (v *tasks) Describe(ctx context.Context, id SchemaObjectIdentifier) (*Task, error) { opts := &DescribeTaskOptions{ name: id, From 6837bc5616ce8773c9cfcf7d1b381978b4362953 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Mon, 30 Sep 2024 10:59:19 +0200 Subject: [PATCH 02/12] wip --- .../collections/collection_helpers.go | 13 + pkg/resources/task.go | 406 +++++------------- pkg/schemas/task_gen.go | 4 +- pkg/sdk/sweepers_test.go | 14 +- pkg/sdk/tasks_gen.go | 2 +- pkg/sdk/tasks_impl_gen.go | 2 + 6 files changed, 140 insertions(+), 301 deletions(-) diff --git a/pkg/internal/collections/collection_helpers.go b/pkg/internal/collections/collection_helpers.go index b71c44d62d..f0ed9be9aa 100644 --- a/pkg/internal/collections/collection_helpers.go +++ b/pkg/internal/collections/collection_helpers.go @@ -22,3 +22,16 @@ func Map[T any, R any](collection []T, mapper func(T) R) []R { } return result } + +// TODO: Test +func MapErr[T any, R any](collection []T, mapper func(T) (R, error)) ([]R, error) { + result := make([]R, len(collection)) + for i, elem := range collection { + value, err := mapper(elem) + if err != nil { + return nil, err + } + result[i] = value + } + return result, nil +} diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 9d6aace245..980644a0ac 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -4,10 +4,9 @@ import ( "context" "errors" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "log" - "slices" - "strconv" "strings" "time" @@ -140,7 +139,7 @@ var taskSchema = map[string]*schema.Schema{ ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), DiffSuppressFunc: SuppressIfAny( suppressIdentifierQuoting, - IgnoreChangeToCurrentSnowflakeValueInShow("task_relations.0.finalizer"), + IgnoreChangeToCurrentSnowflakeValueInShow("task_relations.0.finalize"), ), ConflictsWith: []string{"schedule", "after"}, }, @@ -348,9 +347,12 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - taskId := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) + id, err := sdk.ParseSchemaObjectIdentifier(d.Id()) + if err != nil { + return diag.FromErr(err) + } - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, taskId, taskId) + tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, id, id) defer func() { if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { log.Printf("[WARN] failed to resume tasks: %s", err) @@ -360,181 +362,21 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag return diag.FromErr(err) } - if d.HasChange("warehouse") { - newWarehouse := d.Get("warehouse") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if newWarehouse == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithWarehouse(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithWarehouse(sdk.NewAccountObjectIdentifier(newWarehouse.(string)))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating warehouse on task %s err = %w", taskId.FullyQualifiedName(), err)) - } - } - - if d.HasChange("user_task_managed_initial_warehouse_size") { - newSize := 
d.Get("user_task_managed_initial_warehouse_size") - warehouse := d.Get("warehouse") - - if warehouse == "" && newSize != "" { - size, err := sdk.ToWarehouseSize(newSize.(string)) - if err != nil { - return diag.FromErr(err) - } - alterRequest := sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithUserTaskManagedInitialWarehouseSize(size)) - err = client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating user_task_managed_initial_warehouse_size on task %s", taskId.FullyQualifiedName())) - } - } - } - - if d.HasChange("error_integration") { - newErrorIntegration := d.Get("error_integration") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if newErrorIntegration == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithErrorIntegration(true)) - } else { - newErrorIntegrationId, err := sdk.ParseAccountObjectIdentifier(newErrorIntegration.(string)) - if err != nil { - return diag.FromErr(err) - } - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithErrorNotificationIntegration(newErrorIntegrationId)) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating error integration on task %s", taskId.FullyQualifiedName())) - } - } - - if d.HasChange("after") { - // making changes to after require suspending the current task - // (the task will be brought up to the correct running state in the "enabled" check at the bottom of Update function). - err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSuspend(true)) - if err != nil { - return diag.FromErr(fmt.Errorf("error suspending task %s, err: %w", taskId.FullyQualifiedName(), err)) - } - - o, n := d.GetChange("after") - oldAfter := expandStringList(o.([]interface{})) - newAfter := expandStringList(n.([]interface{})) - - if len(newAfter) > 0 { - // preemptively removing schedule because a task cannot have both after and schedule - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true))); err != nil { - return diag.FromErr(fmt.Errorf("error updating schedule on task %s", taskId.FullyQualifiedName())) - } - } - - // Remove old dependencies that are not in new dependencies - toRemove := make([]sdk.SchemaObjectIdentifier, 0) - for _, dep := range oldAfter { - if !slices.Contains(newAfter, dep) { - toRemove = append(toRemove, sdk.NewSchemaObjectIdentifierInSchema(taskId.SchemaId(), dep)) - } - } - if len(toRemove) > 0 { - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithRemoveAfter(toRemove)); err != nil { - return diag.FromErr(fmt.Errorf("error removing after dependencies from task %s", taskId.FullyQualifiedName())) - } - } - - // Add new dependencies that are not in old dependencies - toAdd := make([]sdk.SchemaObjectIdentifier, 0) - for _, dep := range newAfter { - if !slices.Contains(oldAfter, dep) { - toAdd = append(toAdd, sdk.NewSchemaObjectIdentifierInSchema(taskId.SchemaId(), dep)) - } - } - if len(toAdd) > 0 { - for _, depId := range toAdd { - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, depId, taskId) - defer func() { - if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { - log.Printf("[WARN] failed to resume tasks: %s", err) - } - }() - if err != nil { - return diag.FromErr(err) - } - } - - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithAddAfter(toAdd)); err != nil { - return diag.FromErr(fmt.Errorf("error adding after dependencies from task %s", 
taskId.FullyQualifiedName())) - } - } - } - - if d.HasChange("schedule") { - newSchedule := d.Get("schedule") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if newSchedule == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithSchedule(newSchedule.(string))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating schedule on task %s", taskId.FullyQualifiedName())) - } - } - - if d.HasChange("user_task_timeout_ms") { - o, n := d.GetChange("user_task_timeout_ms") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if o.(int) > 0 && n.(int) == 0 { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithUserTaskTimeoutMs(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithUserTaskTimeoutMs(n.(int))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating user task timeout on task %s", taskId.FullyQualifiedName())) - } - } - - if d.HasChange("suspend_task_after_num_failures") { - o, n := d.GetChange("suspend_task_after_num_failures") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if o.(int) > 0 && n.(int) == 0 { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithSuspendTaskAfterNumFailures(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithSuspendTaskAfterNumFailures(n.(int))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating suspend task after num failures on task %s", taskId.FullyQualifiedName())) - } - } + set := sdk.NewTaskSetRequest() + unset := sdk.NewTaskUnsetRequest() - if d.HasChange("comment") { - newComment := d.Get("comment") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if newComment == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithComment(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithComment(newComment.(string))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating comment on task %s", taskId.FullyQualifiedName())) - } - } - - if d.HasChange("allow_overlapping_execution") { - n := d.Get("allow_overlapping_execution") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if n == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithAllowOverlappingExecution(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithAllowOverlappingExecution(n.(bool))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating allow overlapping execution on task %s", taskId.FullyQualifiedName())) - } + err = errors.Join( + accountObjectIdentifierAttributeUpdate(d, "warehouse", &set.Warehouse, &unset.Warehouse), + accountObjectIdentifierAttributeUpdate(d, "error_integration", &set.ErrorNotificationIntegration, &unset.ErrorIntegration), // TODO: name inconsistency + stringAttributeUpdate(d, "schedule", &set.Schedule, &unset.Schedule), + //stringAttributeUpdate(d, "user_task_managed_initial_warehouse_size", &set.UserTaskManagedInitialWarehouseSize, &unset.UserTaskManage)// TODO: Not in unsetUSER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE + intAttributeUpdate(d, "user_task_timeout_ms", &set.UserTaskTimeoutMs, &unset.UserTaskTimeoutMs), + intAttributeUpdate(d, "suspend_task_after_num_failures", &set.SuspendTaskAfterNumFailures, &unset.SuspendTaskAfterNumFailures), + stringAttributeUpdate(d, "comment", 
&set.Comment, &unset.Comment), + booleanStringAttributeUpdate(d, "allow_overlapping_execution", &set.AllowOverlappingExecution, &unset.AllowOverlappingExecution), + ) + if err != nil { + return diag.FromErr(err) } if d.HasChange("session_parameters") { @@ -558,7 +400,7 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag if err != nil { return diag.FromErr(err) } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithUnset(*sdk.NewTaskUnsetRequest().WithSessionParametersUnset(*sessionParametersUnset))); err != nil { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithUnset(*sdk.NewTaskUnsetRequest().WithSessionParametersUnset(*sessionParametersUnset))); err != nil { return diag.FromErr(fmt.Errorf("error removing session_parameters on task %v err = %w", d.Id(), err)) } } @@ -568,7 +410,7 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag if err != nil { return diag.FromErr(err) } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { return diag.FromErr(fmt.Errorf("error adding session_parameters to task %v err = %w", d.Id(), err)) } } @@ -578,38 +420,86 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag if err != nil { return diag.FromErr(err) } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { return diag.FromErr(fmt.Errorf("error updating session_parameters in task %v err = %w", d.Id(), err)) } } } if d.HasChange("when") { - n := d.Get("when") - alterRequest := sdk.NewAlterTaskRequest(taskId).WithModifyWhen(n.(string)) - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating when condition on task %s", taskId.FullyQualifiedName())) + if v := d.Get("when"); v != "" { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithModifyWhen(v.(string))); err != nil { + return diag.FromErr(err) + } + } else { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithRemoveWhen(true)); err != nil { + return diag.FromErr(err) + } } } if d.HasChange("sql_statement") { - n := d.Get("sql_statement") - alterRequest := sdk.NewAlterTaskRequest(taskId).WithModifyAs(n.(string)) - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return diag.FromErr(fmt.Errorf("error updating sql statement on task %s", taskId.FullyQualifiedName())) + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithModifyAs(d.Get("sql_statement").(string))); err != nil { + return diag.FromErr(err) } } - enabled := d.Get("enabled").(bool) - if enabled { - if waitForTaskStart(ctx, client, taskId) != nil { - log.Printf("[WARN] failed to resume task %s", taskId.FullyQualifiedName()) + if d.HasChange("after") { + // TOOD: after + // Making changes to after require suspending the current task + // (the task will be brought up to the correct running state in the "enabled" check at the bottom of Update function). 
+ if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSuspend(true)); err != nil { + return diag.FromErr(err) + } + + oldAfter, newAfter := d.GetChange("after") + addedTasks, removedTasks := ListDiff( + expandStringList(oldAfter.(*schema.Set).List()), + expandStringList(newAfter.(*schema.Set).List()), + ) + + // Order of commands matters: + // The "after"s can only be added when the task doesn't have a "schedule". + // That's why this ALTER has to be below regular ALTER SET/UNSET commands. + if len(addedTasks) > 0 { + addedTaskIds, err := collections.MapErr(addedTasks, sdk.ParseSchemaObjectIdentifier) + if err != nil { + return diag.FromErr(err) + } + + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithAddAfter(addedTaskIds)); err != nil { + return diag.FromErr(err) + } + } + + if len(removedTasks) > 0 { + removedTaskIds, err := collections.MapErr(removedTasks, sdk.ParseSchemaObjectIdentifier) + if err != nil { + return diag.FromErr(err) + } + + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithRemoveAfter(removedTaskIds)); err != nil { + return diag.FromErr(err) + } } - } else { - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSuspend(true)); err != nil { - return diag.FromErr(fmt.Errorf("failed to suspend task %s", taskId.FullyQualifiedName())) + } + + if d.HasChange("enabled") { + if v := d.Get("enabled").(string); v != BooleanDefault { + enabled, err := booleanStringToBool(v) + if err != nil { + return diag.FromErr(err) + } + + if enabled { + if waitForTaskStart(ctx, client, id) != nil { + log.Printf("[WARN] failed to resume task %s", id.FullyQualifiedName()) + } + } else { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSuspend(true)); err != nil { + return diag.FromErr(err) + } + } } } @@ -645,14 +535,35 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { } if withExternalChangesMarking { + finalizedTaskId := "" + if task.TaskRelations.FinalizerTask != nil { + finalizedTaskId = task.TaskRelations.FinalizerTask.FullyQualifiedName() + } if err = handleExternalChangesToObjectInShow(d, - showMapping{"", "", task.Config, task.Config, nil}, + showMapping{"state", "enabled", task.State, task.State == sdk.TaskStateStarted, nil}, + showMapping{"warehouse", "warehouse", task.Warehouse, task.Warehouse, nil}, + showMapping{"schedule", "schedule", task.Schedule, task.Schedule, nil}, + showMapping{"config", "config", task.Config, task.Config, nil}, + showMapping{"allow_overlapping_execution", "allow_overlapping_execution", task.AllowOverlappingExecution, task.AllowOverlappingExecution, nil}, + showMapping{"error_integration", "error_integration", task.ErrorIntegration, task.ErrorIntegration, nil}, + showMapping{"comment", "comment", task.Comment, task.Comment, nil}, + showMapping{"task_relations.0.finalize", "finalize", finalizedTaskId, finalizedTaskId, nil}, + showMapping{"condition", "when", task.Condition, task.Condition, nil}, + showMapping{"definition", "sql_statement", task.Definition, task.Definition, nil}, ); err != nil { return diag.FromErr(err) } } else { if err = setStateToValuesFromConfig(d, taskSchema, []string{ - "abc", + "warehouse", + "schedule", + "config", + "allow_overlapping_execution", + "error_integration", + "comment", + "finalize", + "condition", + "sql_statement", }); err != nil { return diag.FromErr(err) } @@ -660,6 +571,8 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { if errs := errors.Join( // TODO: handleTaskParametersRead(d, 
taskParameters) + d.Set("enabled", task.State == sdk.TaskStateStarted), + d.Set("after", collections.Map(task.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName)), d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()), d.Set(ShowOutputAttributeName, []map[string]any{schemas.TaskToSchema(task)}), d.Set(ParametersAttributeName, []map[string]any{schemas.TaskParametersToSchema(taskParameters)}), @@ -667,97 +580,6 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { return diag.FromErr(errs) } - if err := d.Set("enabled", task.State == sdk.TaskStateStarted); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("warehouse", task.Warehouse); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("schedule", task.Schedule); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("comment", task.Comment); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("allow_overlapping_execution", task.AllowOverlappingExecution); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("error_integration", task.ErrorIntegration); err != nil { - return diag.FromErr(err) - } - - predecessors := make([]string, len(task.Predecessors)) - for i, p := range task.Predecessors { - predecessors[i] = p.Name() - } - if err := d.Set("after", predecessors); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("when", task.Condition); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("sql_statement", task.Definition); err != nil { - return diag.FromErr(err) - } - - opts := &sdk.ShowParametersOptions{In: &sdk.ParametersIn{Task: id}} - params, err := client.Parameters.ShowParameters(ctx, opts) - if err != nil { - return diag.FromErr(err) - } - - if len(params) > 0 { - sessionParameters := make(map[string]any) - fieldParameters := map[string]interface{}{ - "user_task_managed_initial_warehouse_size": "", - } - - for _, param := range params { - if param.Level != "TASK" { - continue - } - switch param.Key { - case "USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE": - fieldParameters["user_task_managed_initial_warehouse_size"] = param.Value - case "USER_TASK_TIMEOUT_MS": - timeout, err := strconv.ParseInt(param.Value, 10, 64) - if err != nil { - return diag.FromErr(err) - } - - fieldParameters["user_task_timeout_ms"] = timeout - case "SUSPEND_TASK_AFTER_NUM_FAILURES": - num, err := strconv.ParseInt(param.Value, 10, 64) - if err != nil { - return diag.FromErr(err) - } - - fieldParameters["suspend_task_after_num_failures"] = num - default: - sessionParameters[param.Key] = param.Value - } - } - - if err := d.Set("session_parameters", sessionParameters); err != nil { - return diag.FromErr(err) - } - - for key, value := range fieldParameters { - // lintignore:R001 - err = d.Set(key, value) - if err != nil { - return diag.FromErr(err) - } - } - } - return nil } } diff --git a/pkg/schemas/task_gen.go b/pkg/schemas/task_gen.go index 825e66b64e..6431bf93ff 100644 --- a/pkg/schemas/task_gen.go +++ b/pkg/schemas/task_gen.go @@ -101,7 +101,7 @@ var ShowTaskSchema = map[string]*schema.Schema{ Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "finalizer": { + "finalize": { Type: schema.TypeString, Computed: true, }, @@ -144,7 +144,7 @@ func TaskToSchema(task *sdk.Task) map[string]any { taskSchema["task_relations"] = []any{ map[string]any{ "predecessors": collections.Map(task.TaskRelations.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName), - "finalizer": task.TaskRelations.FinalizerTask, + "finalize": 
task.TaskRelations.FinalizerTask, }, } return taskSchema diff --git a/pkg/sdk/sweepers_test.go b/pkg/sdk/sweepers_test.go index 8e3d66f3f3..c05894532b 100644 --- a/pkg/sdk/sweepers_test.go +++ b/pkg/sdk/sweepers_test.go @@ -134,7 +134,7 @@ func nukeWarehouses(client *Client, prefix string) func() error { if !slices.Contains(protectedWarehouses, wh.Name) && wh.CreatedOn.Before(time.Now().Add(-2*time.Hour)) { if wh.Owner != "ACCOUNTADMIN" { log.Printf("[DEBUG] Granting ownership on warehouse %s, to ACCOUNTADMIN", wh.ID().FullyQualifiedName()) - err := client.Grants.GrantOwnership( + if err := client.Grants.GrantOwnership( ctx, OwnershipGrantOn{Object: &Object{ ObjectType: ObjectTypeWarehouse, @@ -144,8 +144,9 @@ func nukeWarehouses(client *Client, prefix string) func() error { AccountRoleName: Pointer(NewAccountObjectIdentifier("ACCOUNTADMIN")), }, nil, - ) - errs = append(errs, fmt.Errorf("granting ownership on warehouse %s ended with error, err = %w", wh.ID().FullyQualifiedName(), err)) + ); err != nil { + errs = append(errs, fmt.Errorf("granting ownership on warehouse %s ended with error, err = %w", wh.ID().FullyQualifiedName(), err)) + } continue } @@ -185,7 +186,7 @@ func nukeDatabases(client *Client, prefix string) func() error { for idx, db := range dbs { if db.Owner != "ACCOUNTADMIN" { log.Printf("[DEBUG] Granting ownership on database %s, to ACCOUNTADMIN", db.ID().FullyQualifiedName()) - err := client.Grants.GrantOwnership( + if err := client.Grants.GrantOwnership( ctx, OwnershipGrantOn{Object: &Object{ ObjectType: ObjectTypeDatabase, @@ -195,8 +196,9 @@ func nukeDatabases(client *Client, prefix string) func() error { AccountRoleName: Pointer(NewAccountObjectIdentifier("ACCOUNTADMIN")), }, nil, - ) - errs = append(errs, fmt.Errorf("granting ownership on database %s ended with error, err = %w", db.ID().FullyQualifiedName(), err)) + ); err != nil { + errs = append(errs, fmt.Errorf("granting ownership on database %s ended with error, err = %w", db.ID().FullyQualifiedName(), err)) + } continue } diff --git a/pkg/sdk/tasks_gen.go b/pkg/sdk/tasks_gen.go index a897433ac6..589f834984 100644 --- a/pkg/sdk/tasks_gen.go +++ b/pkg/sdk/tasks_gen.go @@ -187,7 +187,7 @@ type Task struct { SchemaName string Owner string Comment string - Warehouse string + Warehouse string // TODO: *AccountObjectIdentifier Schedule string Predecessors []SchemaObjectIdentifier State TaskState diff --git a/pkg/sdk/tasks_impl_gen.go b/pkg/sdk/tasks_impl_gen.go index f178182f4c..88a95c9e93 100644 --- a/pkg/sdk/tasks_impl_gen.go +++ b/pkg/sdk/tasks_impl_gen.go @@ -340,6 +340,8 @@ func (r taskDBRow) convert() *Task { } } task.Predecessors = ids + } else { + task.Predecessors = make([]SchemaObjectIdentifier, 0) } if len(r.State) > 0 { taskState, err := ToTaskState(r.State) From c2f707deb059126b185478f546aa9cf2a0d7df59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Mon, 30 Sep 2024 11:12:24 +0200 Subject: [PATCH 03/12] wip --- .../resourceassert/gen/resource_schema_def.go | 4 + .../config/model/task_model_ext.go | 16 + .../config/model/task_model_gen.go | 278 +++++ pkg/resources/task_acceptance_test.go | 1046 +++++++++-------- 4 files changed, 853 insertions(+), 491 deletions(-) create mode 100644 pkg/acceptance/bettertestspoc/config/model/task_model_ext.go create mode 100644 pkg/acceptance/bettertestspoc/config/model/task_model_gen.go diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/gen/resource_schema_def.go 
b/pkg/acceptance/bettertestspoc/assert/resourceassert/gen/resource_schema_def.go index d3670bbba6..cc49f937f4 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceassert/gen/resource_schema_def.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/gen/resource_schema_def.go @@ -49,4 +49,8 @@ var allResourceSchemaDefs = []ResourceSchemaDef{ name: "MaskingPolicy", schema: resources.MaskingPolicy().Schema, }, + { + name: "Task", + schema: resources.Task().Schema, + }, } diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go new file mode 100644 index 0000000000..34dce36081 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go @@ -0,0 +1,16 @@ +package model + +import ( + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" +) + +func TaskWithId(resourceName string, id sdk.SchemaObjectIdentifier, sqlStatement string) *TaskModel { + t := &TaskModel{ResourceModelMeta: config.Meta(resourceName, resources.Task)} + t.WithDatabase(id.DatabaseName()) + t.WithSchema(id.SchemaName()) + t.WithName(id.Name()) + t.WithSqlStatement(sqlStatement) + return t +} diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go new file mode 100644 index 0000000000..ec5ab9477d --- /dev/null +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go @@ -0,0 +1,278 @@ +// Code generated by config model builder generator; DO NOT EDIT. + +package model + +import ( + tfconfig "github.com/hashicorp/terraform-plugin-testing/config" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" +) + +type TaskModel struct { + After tfconfig.Variable `json:"after,omitempty"` + AllowOverlappingExecution tfconfig.Variable `json:"allow_overlapping_execution,omitempty"` + Comment tfconfig.Variable `json:"comment,omitempty"` + Config tfconfig.Variable `json:"config,omitempty"` + Database tfconfig.Variable `json:"database,omitempty"` + Enabled tfconfig.Variable `json:"enabled,omitempty"` + ErrorIntegration tfconfig.Variable `json:"error_integration,omitempty"` + Finalize tfconfig.Variable `json:"finalize,omitempty"` + FullyQualifiedName tfconfig.Variable `json:"fully_qualified_name,omitempty"` + Name tfconfig.Variable `json:"name,omitempty"` + Schedule tfconfig.Variable `json:"schedule,omitempty"` + Schema tfconfig.Variable `json:"schema,omitempty"` + SessionParameters tfconfig.Variable `json:"session_parameters,omitempty"` + SqlStatement tfconfig.Variable `json:"sql_statement,omitempty"` + SuspendTaskAfterNumFailures tfconfig.Variable `json:"suspend_task_after_num_failures,omitempty"` + TaskAutoRetryAttempts tfconfig.Variable `json:"task_auto_retry_attempts,omitempty"` + UserTaskManagedInitialWarehouseSize tfconfig.Variable `json:"user_task_managed_initial_warehouse_size,omitempty"` + UserTaskMinimumTriggerIntervalInSeconds tfconfig.Variable `json:"user_task_minimum_trigger_interval_in_seconds,omitempty"` + UserTaskTimeoutMs tfconfig.Variable `json:"user_task_timeout_ms,omitempty"` + Warehouse tfconfig.Variable `json:"warehouse,omitempty"` + When tfconfig.Variable `json:"when,omitempty"` + + *config.ResourceModelMeta +} + 
+///////////////////////////////////////////////// +// Basic builders (resource name and required) // +///////////////////////////////////////////////// + +func Task( + resourceName string, + database string, + name string, + schema string, + sqlStatement string, +) *TaskModel { + t := &TaskModel{ResourceModelMeta: config.Meta(resourceName, resources.Task)} + t.WithDatabase(database) + t.WithName(name) + t.WithSchema(schema) + t.WithSqlStatement(sqlStatement) + return t +} + +func TaskWithDefaultMeta( + database string, + name string, + schema string, + sqlStatement string, +) *TaskModel { + t := &TaskModel{ResourceModelMeta: config.DefaultMeta(resources.Task)} + t.WithDatabase(database) + t.WithName(name) + t.WithSchema(schema) + t.WithSqlStatement(sqlStatement) + return t +} + +///////////////////////////////// +// below all the proper values // +///////////////////////////////// + +// after attribute type is not yet supported, so WithAfter can't be generated + +func (t *TaskModel) WithAllowOverlappingExecution(allowOverlappingExecution bool) *TaskModel { + t.AllowOverlappingExecution = tfconfig.BoolVariable(allowOverlappingExecution) + return t +} + +func (t *TaskModel) WithComment(comment string) *TaskModel { + t.Comment = tfconfig.StringVariable(comment) + return t +} + +func (t *TaskModel) WithConfig(config string) *TaskModel { + t.Config = tfconfig.StringVariable(config) + return t +} + +func (t *TaskModel) WithDatabase(database string) *TaskModel { + t.Database = tfconfig.StringVariable(database) + return t +} + +func (t *TaskModel) WithEnabled(enabled string) *TaskModel { + t.Enabled = tfconfig.StringVariable(enabled) + return t +} + +func (t *TaskModel) WithErrorIntegration(errorIntegration string) *TaskModel { + t.ErrorIntegration = tfconfig.StringVariable(errorIntegration) + return t +} + +// finalize attribute type is not yet supported, so WithFinalize can't be generated + +func (t *TaskModel) WithFullyQualifiedName(fullyQualifiedName string) *TaskModel { + t.FullyQualifiedName = tfconfig.StringVariable(fullyQualifiedName) + return t +} + +func (t *TaskModel) WithName(name string) *TaskModel { + t.Name = tfconfig.StringVariable(name) + return t +} + +func (t *TaskModel) WithSchedule(schedule string) *TaskModel { + t.Schedule = tfconfig.StringVariable(schedule) + return t +} + +func (t *TaskModel) WithSchema(schema string) *TaskModel { + t.Schema = tfconfig.StringVariable(schema) + return t +} + +// session_parameters attribute type is not yet supported, so WithSessionParameters can't be generated + +func (t *TaskModel) WithSqlStatement(sqlStatement string) *TaskModel { + t.SqlStatement = tfconfig.StringVariable(sqlStatement) + return t +} + +func (t *TaskModel) WithSuspendTaskAfterNumFailures(suspendTaskAfterNumFailures int) *TaskModel { + t.SuspendTaskAfterNumFailures = tfconfig.IntegerVariable(suspendTaskAfterNumFailures) + return t +} + +func (t *TaskModel) WithTaskAutoRetryAttempts(taskAutoRetryAttempts int) *TaskModel { + t.TaskAutoRetryAttempts = tfconfig.IntegerVariable(taskAutoRetryAttempts) + return t +} + +func (t *TaskModel) WithUserTaskManagedInitialWarehouseSize(userTaskManagedInitialWarehouseSize string) *TaskModel { + t.UserTaskManagedInitialWarehouseSize = tfconfig.StringVariable(userTaskManagedInitialWarehouseSize) + return t +} + +func (t *TaskModel) WithUserTaskMinimumTriggerIntervalInSeconds(userTaskMinimumTriggerIntervalInSeconds int) *TaskModel { + t.UserTaskMinimumTriggerIntervalInSeconds = 
tfconfig.IntegerVariable(userTaskMinimumTriggerIntervalInSeconds) + return t +} + +func (t *TaskModel) WithUserTaskTimeoutMs(userTaskTimeoutMs int) *TaskModel { + t.UserTaskTimeoutMs = tfconfig.IntegerVariable(userTaskTimeoutMs) + return t +} + +func (t *TaskModel) WithWarehouse(warehouse string) *TaskModel { + t.Warehouse = tfconfig.StringVariable(warehouse) + return t +} + +func (t *TaskModel) WithWhen(when string) *TaskModel { + t.When = tfconfig.StringVariable(when) + return t +} + +////////////////////////////////////////// +// below it's possible to set any value // +////////////////////////////////////////// + +func (t *TaskModel) WithAfterValue(value tfconfig.Variable) *TaskModel { + t.After = value + return t +} + +func (t *TaskModel) WithAllowOverlappingExecutionValue(value tfconfig.Variable) *TaskModel { + t.AllowOverlappingExecution = value + return t +} + +func (t *TaskModel) WithCommentValue(value tfconfig.Variable) *TaskModel { + t.Comment = value + return t +} + +func (t *TaskModel) WithConfigValue(value tfconfig.Variable) *TaskModel { + t.Config = value + return t +} + +func (t *TaskModel) WithDatabaseValue(value tfconfig.Variable) *TaskModel { + t.Database = value + return t +} + +func (t *TaskModel) WithEnabledValue(value tfconfig.Variable) *TaskModel { + t.Enabled = value + return t +} + +func (t *TaskModel) WithErrorIntegrationValue(value tfconfig.Variable) *TaskModel { + t.ErrorIntegration = value + return t +} + +func (t *TaskModel) WithFinalizeValue(value tfconfig.Variable) *TaskModel { + t.Finalize = value + return t +} + +func (t *TaskModel) WithFullyQualifiedNameValue(value tfconfig.Variable) *TaskModel { + t.FullyQualifiedName = value + return t +} + +func (t *TaskModel) WithNameValue(value tfconfig.Variable) *TaskModel { + t.Name = value + return t +} + +func (t *TaskModel) WithScheduleValue(value tfconfig.Variable) *TaskModel { + t.Schedule = value + return t +} + +func (t *TaskModel) WithSchemaValue(value tfconfig.Variable) *TaskModel { + t.Schema = value + return t +} + +func (t *TaskModel) WithSessionParametersValue(value tfconfig.Variable) *TaskModel { + t.SessionParameters = value + return t +} + +func (t *TaskModel) WithSqlStatementValue(value tfconfig.Variable) *TaskModel { + t.SqlStatement = value + return t +} + +func (t *TaskModel) WithSuspendTaskAfterNumFailuresValue(value tfconfig.Variable) *TaskModel { + t.SuspendTaskAfterNumFailures = value + return t +} + +func (t *TaskModel) WithTaskAutoRetryAttemptsValue(value tfconfig.Variable) *TaskModel { + t.TaskAutoRetryAttempts = value + return t +} + +func (t *TaskModel) WithUserTaskManagedInitialWarehouseSizeValue(value tfconfig.Variable) *TaskModel { + t.UserTaskManagedInitialWarehouseSize = value + return t +} + +func (t *TaskModel) WithUserTaskMinimumTriggerIntervalInSecondsValue(value tfconfig.Variable) *TaskModel { + t.UserTaskMinimumTriggerIntervalInSeconds = value + return t +} + +func (t *TaskModel) WithUserTaskTimeoutMsValue(value tfconfig.Variable) *TaskModel { + t.UserTaskTimeoutMs = value + return t +} + +func (t *TaskModel) WithWarehouseValue(value tfconfig.Variable) *TaskModel { + t.Warehouse = value + return t +} + +func (t *TaskModel) WithWhenValue(value tfconfig.Variable) *TaskModel { + t.When = value + return t +} diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 05ffa51176..4d3975a617 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -1,421 +1,485 @@ package resources_test import ( - 
"bytes" "fmt" - "testing" - "text/template" - acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config/model" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "testing" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" - "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" - "github.com/hashicorp/terraform-plugin-testing/config" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfversion" ) -type ( - AccTaskTestSettings struct { - DatabaseName string - WarehouseName string - RootTask *TaskSettings - ChildTask *TaskSettings - SoloTask *TaskSettings - } - - TaskSettings struct { - Name string - Enabled bool - Schema string - SQL string - Schedule string - Comment string - When string - SessionParams map[string]string - UserTaskTimeoutMs int64 - } -) - -var ( - rootname = acc.TestClient().Ids.AlphaContaining("_root_task") - rootId = sdk.NewSchemaObjectIdentifier(acc.TestDatabaseName, acc.TestSchemaName, rootname) - childname = acc.TestClient().Ids.AlphaContaining("_child_task") - childId = sdk.NewSchemaObjectIdentifier(acc.TestDatabaseName, acc.TestSchemaName, childname) - soloname = acc.TestClient().Ids.AlphaContaining("_standalone_task") - - initialState = &AccTaskTestSettings{ //nolint - WarehouseName: acc.TestWarehouseName, - DatabaseName: acc.TestDatabaseName, - RootTask: &TaskSettings{ - Name: rootname, - Schema: acc.TestSchemaName, - SQL: "SHOW FUNCTIONS", - Enabled: true, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 1800000, - SessionParams: map[string]string{ - string(sdk.SessionParameterLockTimeout): "1000", - string(sdk.SessionParameterStrictJSONOutput): "true", - }, - }, - - ChildTask: &TaskSettings{ - Name: childname, - SQL: "SELECT 1", - Enabled: false, - Comment: "initial state", - }, - - SoloTask: &TaskSettings{ - Name: soloname, - Schema: acc.TestSchemaName, - SQL: "SELECT 1", - When: "TRUE", - Enabled: false, - }, - } - - // Enables the Child and changes the SQL. 
- stepOne = &AccTaskTestSettings{ //nolint - WarehouseName: acc.TestWarehouseName, - DatabaseName: acc.TestDatabaseName, - RootTask: &TaskSettings{ - Name: rootname, - Schema: acc.TestSchemaName, - SQL: "SHOW FUNCTIONS", - Enabled: true, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 1800000, - SessionParams: map[string]string{ - string(sdk.SessionParameterLockTimeout): "1000", - string(sdk.SessionParameterStrictJSONOutput): "true", - }, - }, +func TestAcc_Task_Basic(t *testing.T) { + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + configModel := model.TaskWithId("test", id, "SELECT 1") - ChildTask: &TaskSettings{ - Name: childname, - SQL: "SELECT *", - Enabled: true, - Comment: "secondary state", - }, - - SoloTask: &TaskSettings{ - Name: soloname, - Schema: acc.TestSchemaName, - SQL: "SELECT *", - When: "TRUE", - Enabled: true, - SessionParams: map[string]string{ - string(sdk.SessionParameterTimestampInputFormat): "YYYY-MM-DD HH24", - }, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 1800000, - }, - } - - // Changes Root Schedule and SQL. - stepTwo = &AccTaskTestSettings{ //nolint - WarehouseName: acc.TestWarehouseName, - DatabaseName: acc.TestDatabaseName, - RootTask: &TaskSettings{ - Name: rootname, - Schema: acc.TestSchemaName, - SQL: "SHOW TABLES", - Enabled: true, - Schedule: "15 MINUTE", - UserTaskTimeoutMs: 1800000, - SessionParams: map[string]string{ - string(sdk.SessionParameterLockTimeout): "1000", - string(sdk.SessionParameterStrictJSONOutput): "true", - }, - }, - - ChildTask: &TaskSettings{ - Name: childname, - SQL: "SELECT 1", - Enabled: true, - Comment: "third state", - }, - - SoloTask: &TaskSettings{ - Name: soloname, - Schema: acc.TestSchemaName, - SQL: "SELECT *", - When: "FALSE", - Enabled: true, - Schedule: "15 MINUTE", - UserTaskTimeoutMs: 900000, - }, - } - - stepThree = &AccTaskTestSettings{ //nolint - WarehouseName: acc.TestWarehouseName, - DatabaseName: acc.TestDatabaseName, - - RootTask: &TaskSettings{ - Name: rootname, - Schema: acc.TestSchemaName, - SQL: "SHOW FUNCTIONS", - Enabled: false, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 1800000, - // Changes session params: one is updated, one is removed, one is added - SessionParams: map[string]string{ - string(sdk.SessionParameterLockTimeout): "2000", - string(sdk.SessionParameterMultiStatementCount): "5", - }, - }, - - ChildTask: &TaskSettings{ - Name: childname, - SQL: "SELECT 1", - Enabled: false, - Comment: "reset", - }, - - SoloTask: &TaskSettings{ - Name: soloname, - Schema: acc.TestSchemaName, - SQL: "SELECT 1", - When: "TRUE", - Enabled: true, - SessionParams: map[string]string{ - string(sdk.SessionParameterTimestampInputFormat): "YYYY-MM-DD HH24", - }, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 0, - }, - } -) - -func TestAcc_Task(t *testing.T) { resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.RequireAbove(tfversion.Version1_5_0), }, - PreCheck: func() { acc.TestAccPreCheck(t) }, - CheckDestroy: acc.CheckDestroy(t, resources.Task), + CheckDestroy: acc.CheckDestroy(t, resources.ResourceMonitor), Steps: []resource.TestStep{ { - Config: taskConfig(initialState), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", 
rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", initialState.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", initialState.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "after.0", rootname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", initialState.ChildTask.Comment), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", initialState.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", initialState.ChildTask.Schedule), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", initialState.RootTask.UserTaskTimeoutMs), - resource.TestCheckNoResourceAttr("snowflake_task.solo_task", "user_task_timeout_ms"), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), - resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.ResourceMonitorResource(t, "snowflake_resource_monitor.test"). + HasNameString(id.Name()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasNoCreditQuota(). + HasNotifyUsersLen(0). + HasNoFrequency(). + HasNoStartTimestamp(). + HasNoEndTimestamp(). + HasNoNotifyTriggers(). + HasNoSuspendTrigger(). + HasNoSuspendImmediateTrigger(), + resourceshowoutputassert.ResourceMonitorShowOutput(t, "snowflake_resource_monitor.test"). + HasName(id.Name()). + HasCreditQuota(0). + HasUsedCredits(0). + HasRemainingCredits(0). + HasLevel(""). + HasFrequency(sdk.FrequencyMonthly). + HasStartTimeNotEmpty(). + HasEndTime(""). + HasSuspendAt(0). + HasSuspendImmediateAt(0). + HasCreatedOnNotEmpty(). + HasOwnerNotEmpty(). 
+ HasComment(""), ), }, { - Config: taskConfig(stepOne), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepOne.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepOne.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepOne.ChildTask.Comment), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepOne.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepOne.ChildTask.Schedule), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepOne.RootTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepOne.SoloTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), - resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), - ), - }, - { - Config: taskConfig(stepTwo), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepTwo.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepTwo.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepTwo.ChildTask.Comment), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepTwo.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", 
stepTwo.ChildTask.Schedule), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepTwo.RootTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepTwo.SoloTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), - resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), - ), - }, - { - Config: taskConfig(stepThree), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepThree.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepThree.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepThree.ChildTask.Comment), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepThree.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepThree.ChildTask.Schedule), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepThree.RootTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepThree.SoloTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 2000), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT"), - checkInt64("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT", 5), - ), - }, - { - Config: taskConfig(initialState), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - 
resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", initialState.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", initialState.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", initialState.ChildTask.Comment), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepOne.RootTask.UserTaskTimeoutMs), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", initialState.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", initialState.ChildTask.Schedule), - // Terraform SDK is not able to differentiate if the - // attribute has deleted or set to zero value. - // ResourceData.GetChange returns the zero value of defined - // type in schema as new the value. Provider handles 0 for - // `user_task_timeout_ms` by unsetting the - // USER_TASK_TIMEOUT_MS session variable. - checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", initialState.ChildTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), - resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), + ResourceName: "snowflake_resource_monitor.test", + ImportState: true, + ImportStateCheck: assert.AssertThatImport(t, + resourceassert.ImportedResourceMonitorResource(t, helpers.EncodeResourceIdentifier(id)). + HasNameString(id.Name()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasCreditQuotaString("0"). + HasNotifyUsersLen(0). + HasFrequencyString(string(sdk.FrequencyMonthly)). + HasStartTimestampNotEmpty(). + HasEndTimestampString(""). + HasNoNotifyTriggers(). + HasSuspendTriggerString("0"). 
+ HasSuspendImmediateTriggerString("0"), ), }, }, }) } -func taskConfig(settings *AccTaskTestSettings) string { //nolint - config, err := template.New("task_acceptance_test_config").Parse(` -resource "snowflake_warehouse" "wh" { - name = "{{ .WarehouseName }}-{{ .RootTask.Name }}" -} -resource "snowflake_task" "root_task" { - name = "{{ .RootTask.Name }}" - database = "{{ .DatabaseName }}" - schema = "{{ .RootTask.Schema }}" - warehouse = "${snowflake_warehouse.wh.name}" - sql_statement = "{{ .RootTask.SQL }}" - enabled = {{ .RootTask.Enabled }} - schedule = "{{ .RootTask.Schedule }}" - {{ if .RootTask.UserTaskTimeoutMs }} - user_task_timeout_ms = {{ .RootTask.UserTaskTimeoutMs }} - {{- end }} - - {{ if .RootTask.SessionParams }} - session_parameters = { - {{ range $key, $value := .RootTask.SessionParams}} - {{ $key }} = "{{ $value }}", - {{- end }} - } - {{- end }} -} -resource "snowflake_task" "child_task" { - name = "{{ .ChildTask.Name }}" - database = snowflake_task.root_task.database - schema = snowflake_task.root_task.schema - warehouse = snowflake_task.root_task.warehouse - sql_statement = "{{ .ChildTask.SQL }}" - enabled = {{ .ChildTask.Enabled }} - after = [snowflake_task.root_task.name] - comment = "{{ .ChildTask.Comment }}" - {{ if .ChildTask.UserTaskTimeoutMs }} - user_task_timeout_ms = {{ .ChildTask.UserTaskTimeoutMs }} - {{- end }} - - {{ if .ChildTask.SessionParams }} - session_parameters = { - {{ range $key, $value := .ChildTask.SessionParams}} - {{ $key }} = "{{ $value }}", - {{- end }} - } - {{- end }} -} -resource "snowflake_task" "solo_task" { - name = "{{ .SoloTask.Name }}" - database = "{{ .DatabaseName }}" - schema = "{{ .SoloTask.Schema }}" - warehouse = "{{ .WarehouseName }}" - sql_statement = "{{ .SoloTask.SQL }}" - enabled = {{ .SoloTask.Enabled }} - when = "{{ .SoloTask.When }}" - {{ if .SoloTask.Schedule }} - schedule = "{{ .SoloTask.Schedule }}" - {{- end }} - - {{ if .SoloTask.UserTaskTimeoutMs }} - user_task_timeout_ms = {{ .SoloTask.UserTaskTimeoutMs }} - {{- end }} - - {{ if .SoloTask.SessionParams }} - session_parameters = { - {{ range $key, $value := .SoloTask.SessionParams}} - {{ $key }} = "{{ $value }}", - {{- end }} - } - {{- end }} -} - `) - if err != nil { - fmt.Println(err) - } - - var result bytes.Buffer - config.Execute(&result, settings) //nolint - - return result.String() -} +//type ( +// AccTaskTestSettings struct { +// DatabaseName string +// WarehouseName string +// RootTask *TaskSettings +// ChildTask *TaskSettings +// SoloTask *TaskSettings +// } +// +// TaskSettings struct { +// Name string +// Enabled bool +// Schema string +// SQL string +// Schedule string +// Comment string +// When string +// SessionParams map[string]string +// UserTaskTimeoutMs int64 +// } +//) +// +//var ( +// rootname = acc.TestClient().Ids.AlphaContaining("_root_task") +// rootId = sdk.NewSchemaObjectIdentifier(acc.TestDatabaseName, acc.TestSchemaName, rootname) +// childname = acc.TestClient().Ids.AlphaContaining("_child_task") +// childId = sdk.NewSchemaObjectIdentifier(acc.TestDatabaseName, acc.TestSchemaName, childname) +// soloname = acc.TestClient().Ids.AlphaContaining("_standalone_task") +// +// initialState = &AccTaskTestSettings{ //nolint +// WarehouseName: acc.TestWarehouseName, +// DatabaseName: acc.TestDatabaseName, +// RootTask: &TaskSettings{ +// Name: rootname, +// Schema: acc.TestSchemaName, +// SQL: "SHOW FUNCTIONS", +// Enabled: true, +// Schedule: "5 MINUTE", +// UserTaskTimeoutMs: 1800000, +// SessionParams: map[string]string{ +// 
string(sdk.SessionParameterLockTimeout): "1000", +// string(sdk.SessionParameterStrictJSONOutput): "true", +// }, +// }, +// +// ChildTask: &TaskSettings{ +// Name: childname, +// SQL: "SELECT 1", +// Enabled: false, +// Comment: "initial state", +// }, +// +// SoloTask: &TaskSettings{ +// Name: soloname, +// Schema: acc.TestSchemaName, +// SQL: "SELECT 1", +// When: "TRUE", +// Enabled: false, +// }, +// } +// +// // Enables the Child and changes the SQL. +// stepOne = &AccTaskTestSettings{ //nolint +// WarehouseName: acc.TestWarehouseName, +// DatabaseName: acc.TestDatabaseName, +// RootTask: &TaskSettings{ +// Name: rootname, +// Schema: acc.TestSchemaName, +// SQL: "SHOW FUNCTIONS", +// Enabled: true, +// Schedule: "5 MINUTE", +// UserTaskTimeoutMs: 1800000, +// SessionParams: map[string]string{ +// string(sdk.SessionParameterLockTimeout): "1000", +// string(sdk.SessionParameterStrictJSONOutput): "true", +// }, +// }, +// +// ChildTask: &TaskSettings{ +// Name: childname, +// SQL: "SELECT *", +// Enabled: true, +// Comment: "secondary state", +// }, +// +// SoloTask: &TaskSettings{ +// Name: soloname, +// Schema: acc.TestSchemaName, +// SQL: "SELECT *", +// When: "TRUE", +// Enabled: true, +// SessionParams: map[string]string{ +// string(sdk.SessionParameterTimestampInputFormat): "YYYY-MM-DD HH24", +// }, +// Schedule: "5 MINUTE", +// UserTaskTimeoutMs: 1800000, +// }, +// } +// +// // Changes Root Schedule and SQL. +// stepTwo = &AccTaskTestSettings{ //nolint +// WarehouseName: acc.TestWarehouseName, +// DatabaseName: acc.TestDatabaseName, +// RootTask: &TaskSettings{ +// Name: rootname, +// Schema: acc.TestSchemaName, +// SQL: "SHOW TABLES", +// Enabled: true, +// Schedule: "15 MINUTE", +// UserTaskTimeoutMs: 1800000, +// SessionParams: map[string]string{ +// string(sdk.SessionParameterLockTimeout): "1000", +// string(sdk.SessionParameterStrictJSONOutput): "true", +// }, +// }, +// +// ChildTask: &TaskSettings{ +// Name: childname, +// SQL: "SELECT 1", +// Enabled: true, +// Comment: "third state", +// }, +// +// SoloTask: &TaskSettings{ +// Name: soloname, +// Schema: acc.TestSchemaName, +// SQL: "SELECT *", +// When: "FALSE", +// Enabled: true, +// Schedule: "15 MINUTE", +// UserTaskTimeoutMs: 900000, +// }, +// } +// +// stepThree = &AccTaskTestSettings{ //nolint +// WarehouseName: acc.TestWarehouseName, +// DatabaseName: acc.TestDatabaseName, +// +// RootTask: &TaskSettings{ +// Name: rootname, +// Schema: acc.TestSchemaName, +// SQL: "SHOW FUNCTIONS", +// Enabled: false, +// Schedule: "5 MINUTE", +// UserTaskTimeoutMs: 1800000, +// // Changes session params: one is updated, one is removed, one is added +// SessionParams: map[string]string{ +// string(sdk.SessionParameterLockTimeout): "2000", +// string(sdk.SessionParameterMultiStatementCount): "5", +// }, +// }, +// +// ChildTask: &TaskSettings{ +// Name: childname, +// SQL: "SELECT 1", +// Enabled: false, +// Comment: "reset", +// }, +// +// SoloTask: &TaskSettings{ +// Name: soloname, +// Schema: acc.TestSchemaName, +// SQL: "SELECT 1", +// When: "TRUE", +// Enabled: true, +// SessionParams: map[string]string{ +// string(sdk.SessionParameterTimestampInputFormat): "YYYY-MM-DD HH24", +// }, +// Schedule: "5 MINUTE", +// UserTaskTimeoutMs: 0, +// }, +// } +//) + +//func TestAcc_Task(t *testing.T) { +// resource.Test(t, resource.TestCase{ +// ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, +// TerraformVersionChecks: []tfversion.TerraformVersionCheck{ +// tfversion.RequireAbove(tfversion.Version1_5_0), +// }, +// 
PreCheck: func() { acc.TestAccPreCheck(t) }, +// CheckDestroy: acc.CheckDestroy(t, resources.Task), +// Steps: []resource.TestStep{ +// { +// Config: taskConfig(initialState), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", initialState.RootTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", initialState.ChildTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "after.0", rootname), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", initialState.ChildTask.Comment), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", initialState.RootTask.Schedule), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", initialState.ChildTask.Schedule), +// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", initialState.RootTask.UserTaskTimeoutMs), +// resource.TestCheckNoResourceAttr("snowflake_task.solo_task", "user_task_timeout_ms"), +// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), +// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), +// ), +// }, +// { +// Config: taskConfig(stepOne), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepOne.RootTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", 
stepOne.ChildTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepOne.ChildTask.Comment), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepOne.RootTask.Schedule), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepOne.ChildTask.Schedule), +// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepOne.RootTask.UserTaskTimeoutMs), +// checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepOne.SoloTask.UserTaskTimeoutMs), +// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), +// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), +// ), +// }, +// { +// Config: taskConfig(stepTwo), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepTwo.RootTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepTwo.ChildTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepTwo.ChildTask.Comment), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepTwo.RootTask.Schedule), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepTwo.ChildTask.Schedule), +// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepTwo.RootTask.UserTaskTimeoutMs), +// checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepTwo.SoloTask.UserTaskTimeoutMs), +// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), +// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), +// ), +// }, +// { +// Config: taskConfig(stepThree), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "false"), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), +// resource.TestCheckResourceAttr("snowflake_task.child_task", 
"fully_qualified_name", childId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepThree.RootTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepThree.ChildTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepThree.ChildTask.Comment), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepThree.RootTask.Schedule), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepThree.ChildTask.Schedule), +// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepThree.RootTask.UserTaskTimeoutMs), +// checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepThree.SoloTask.UserTaskTimeoutMs), +// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 2000), +// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT"), +// checkInt64("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT", 5), +// ), +// }, +// { +// Config: taskConfig(initialState), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", initialState.RootTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", initialState.ChildTask.SQL), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", initialState.ChildTask.Comment), +// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepOne.RootTask.UserTaskTimeoutMs), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", initialState.RootTask.Schedule), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", initialState.ChildTask.Schedule), +// // Terraform SDK is not able to differentiate if the +// // attribute has deleted or set to zero value. +// // ResourceData.GetChange returns the zero value of defined +// // type in schema as new the value. Provider handles 0 for +// // `user_task_timeout_ms` by unsetting the +// // USER_TASK_TIMEOUT_MS session variable. 
+// checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", initialState.ChildTask.UserTaskTimeoutMs), +// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), +// resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), +// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), +// ), +// }, +// }, +// }) +//} + +//func taskConfig(settings *AccTaskTestSettings) string { //nolint +// config, err := template.New("task_acceptance_test_config").Parse(` +//resource "snowflake_warehouse" "wh" { +// name = "{{ .WarehouseName }}-{{ .RootTask.Name }}" +//} +//resource "snowflake_task" "root_task" { +// name = "{{ .RootTask.Name }}" +// database = "{{ .DatabaseName }}" +// schema = "{{ .RootTask.Schema }}" +// warehouse = "${snowflake_warehouse.wh.name}" +// sql_statement = "{{ .RootTask.SQL }}" +// enabled = {{ .RootTask.Enabled }} +// schedule = "{{ .RootTask.Schedule }}" +// {{ if .RootTask.UserTaskTimeoutMs }} +// user_task_timeout_ms = {{ .RootTask.UserTaskTimeoutMs }} +// {{- end }} +// +// {{ if .RootTask.SessionParams }} +// session_parameters = { +// {{ range $key, $value := .RootTask.SessionParams}} +// {{ $key }} = "{{ $value }}", +// {{- end }} +// } +// {{- end }} +//} +//resource "snowflake_task" "child_task" { +// name = "{{ .ChildTask.Name }}" +// database = snowflake_task.root_task.database +// schema = snowflake_task.root_task.schema +// warehouse = snowflake_task.root_task.warehouse +// sql_statement = "{{ .ChildTask.SQL }}" +// enabled = {{ .ChildTask.Enabled }} +// after = [snowflake_task.root_task.name] +// comment = "{{ .ChildTask.Comment }}" +// {{ if .ChildTask.UserTaskTimeoutMs }} +// user_task_timeout_ms = {{ .ChildTask.UserTaskTimeoutMs }} +// {{- end }} +// +// {{ if .ChildTask.SessionParams }} +// session_parameters = { +// {{ range $key, $value := .ChildTask.SessionParams}} +// {{ $key }} = "{{ $value }}", +// {{- end }} +// } +// {{- end }} +//} +//resource "snowflake_task" "solo_task" { +// name = "{{ .SoloTask.Name }}" +// database = "{{ .DatabaseName }}" +// schema = "{{ .SoloTask.Schema }}" +// warehouse = "{{ .WarehouseName }}" +// sql_statement = "{{ .SoloTask.SQL }}" +// enabled = {{ .SoloTask.Enabled }} +// when = "{{ .SoloTask.When }}" +// {{ if .SoloTask.Schedule }} +// schedule = "{{ .SoloTask.Schedule }}" +// {{- end }} +// +// {{ if .SoloTask.UserTaskTimeoutMs }} +// user_task_timeout_ms = {{ .SoloTask.UserTaskTimeoutMs }} +// {{- end }} +// +// {{ if .SoloTask.SessionParams }} +// session_parameters = { +// {{ range $key, $value := .SoloTask.SessionParams}} +// {{ $key }} = "{{ $value }}", +// {{- end }} +// } +// {{- end }} +//} +// `) +// if err != nil { +// fmt.Println(err) +// } +// +// var result bytes.Buffer +// config.Execute(&result, settings) //nolint +// +// return result.String() +//} /* todo: this test is failing due to error message below. 
Need to figure out why this is happening @@ -692,105 +756,105 @@ func checkInt64(name, key string, value int64) func(*terraform.State) error { } } -func TestAcc_Task_issue2207(t *testing.T) { - prefix := acc.TestClient().Ids.Alpha() - rootName := prefix + "_root_task" - childName := prefix + "_child_task" - - m := func() map[string]config.Variable { - return map[string]config.Variable{ - "root_name": config.StringVariable(rootName), - "database": config.StringVariable(acc.TestDatabaseName), - "schema": config.StringVariable(acc.TestSchemaName), - "warehouse": config.StringVariable(acc.TestWarehouseName), - "child_name": config.StringVariable(childName), - "comment": config.StringVariable("abc"), - } - } - m2 := m() - m2["comment"] = config.StringVariable("def") - - resource.Test(t, resource.TestCase{ - ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, - PreCheck: func() { acc.TestAccPreCheck(t) }, - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.RequireAbove(tfversion.Version1_5_0), - }, - CheckDestroy: acc.CheckDestroy(t, resources.Task), - Steps: []resource.TestStep{ - { - ConfigDirectory: config.TestStepDirectory(), - ConfigVariables: m(), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectEmptyPlan(), - }, - }, - }, - // change comment - { - ConfigDirectory: acc.ConfigurationSameAsStepN(1), - ConfigVariables: m2, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), - ), - }, - }, - }) -} - -func TestAcc_Task_issue2036(t *testing.T) { - name := acc.TestClient().Ids.Alpha() - - m := func() map[string]config.Variable { - return map[string]config.Variable{ - "name": config.StringVariable(name), - "database": config.StringVariable(acc.TestDatabaseName), - "schema": config.StringVariable(acc.TestSchemaName), - "warehouse": config.StringVariable(acc.TestWarehouseName), - } - } - - resource.Test(t, resource.TestCase{ - ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, - PreCheck: func() { acc.TestAccPreCheck(t) }, - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.RequireAbove(tfversion.Version1_5_0), - }, - CheckDestroy: acc.CheckDestroy(t, resources.Task), - Steps: []resource.TestStep{ - // create without when - { - ConfigDirectory: config.TestStepDirectory(), - ConfigVariables: m(), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "when", ""), - ), - }, - // add when - { - ConfigDirectory: config.TestStepDirectory(), - ConfigVariables: m(), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "when", "TRUE"), - ), - }, - // remove when - { - ConfigDirectory: acc.ConfigurationSameAsStepN(1), - ConfigVariables: m(), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "when", ""), - ), - }, - }, - }) -} +//func 
TestAcc_Task_issue2207(t *testing.T) { +// prefix := acc.TestClient().Ids.Alpha() +// rootName := prefix + "_root_task" +// childName := prefix + "_child_task" +// +// m := func() map[string]config.Variable { +// return map[string]config.Variable{ +// "root_name": config.StringVariable(rootName), +// "database": config.StringVariable(acc.TestDatabaseName), +// "schema": config.StringVariable(acc.TestSchemaName), +// "warehouse": config.StringVariable(acc.TestWarehouseName), +// "child_name": config.StringVariable(childName), +// "comment": config.StringVariable("abc"), +// } +// } +// m2 := m() +// m2["comment"] = config.StringVariable("def") +// +// resource.Test(t, resource.TestCase{ +// ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, +// PreCheck: func() { acc.TestAccPreCheck(t) }, +// TerraformVersionChecks: []tfversion.TerraformVersionCheck{ +// tfversion.RequireAbove(tfversion.Version1_5_0), +// }, +// CheckDestroy: acc.CheckDestroy(t, resources.Task), +// Steps: []resource.TestStep{ +// { +// ConfigDirectory: config.TestStepDirectory(), +// ConfigVariables: m(), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), +// ), +// ConfigPlanChecks: resource.ConfigPlanChecks{ +// PostApplyPostRefresh: []plancheck.PlanCheck{ +// plancheck.ExpectEmptyPlan(), +// }, +// }, +// }, +// // change comment +// { +// ConfigDirectory: acc.ConfigurationSameAsStepN(1), +// ConfigVariables: m2, +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), +// ), +// }, +// }, +// }) +//} +// +//func TestAcc_Task_issue2036(t *testing.T) { +// name := acc.TestClient().Ids.Alpha() +// +// m := func() map[string]config.Variable { +// return map[string]config.Variable{ +// "name": config.StringVariable(name), +// "database": config.StringVariable(acc.TestDatabaseName), +// "schema": config.StringVariable(acc.TestSchemaName), +// "warehouse": config.StringVariable(acc.TestWarehouseName), +// } +// } +// +// resource.Test(t, resource.TestCase{ +// ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, +// PreCheck: func() { acc.TestAccPreCheck(t) }, +// TerraformVersionChecks: []tfversion.TerraformVersionCheck{ +// tfversion.RequireAbove(tfversion.Version1_5_0), +// }, +// CheckDestroy: acc.CheckDestroy(t, resources.Task), +// Steps: []resource.TestStep{ +// // create without when +// { +// ConfigDirectory: config.TestStepDirectory(), +// ConfigVariables: m(), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.test_task", "when", ""), +// ), +// }, +// // add when +// { +// ConfigDirectory: config.TestStepDirectory(), +// ConfigVariables: m(), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.test_task", "when", "TRUE"), +// ), +// }, +// // remove when +// { +// ConfigDirectory: acc.ConfigurationSameAsStepN(1), +// ConfigVariables: m(), +// Check: resource.ComposeTestCheckFunc( +// resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), +// resource.TestCheckResourceAttr("snowflake_task.test_task", "when", ""), +// ), 
+// }, +// }, +// }) +//} From 6843e03bbd707d300aecfbee3cb13a3ce1bf359c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Tue, 1 Oct 2024 12:42:23 +0200 Subject: [PATCH 04/12] wip --- .../assert/objectassert/task_snowflake_gen.go | 2 +- .../resourceassert/task_resource_ext.go | 11 + .../resourceassert/task_resource_gen.go | 247 ++++++ .../task_resource_parameters_ext.go | 68 ++ .../task_show_output_ext.go | 36 + .../task_show_output_gen.go | 151 ++++ .../notification_integration_client.go | 60 ++ pkg/acceptance/helpers/test_client.go | 2 + pkg/resources/task.go | 313 ++++--- pkg/resources/task_acceptance_test.go | 386 ++++++++- pkg/resources/task_parameters.go | 786 +++++++++--------- pkg/schemas/task_parameters.go | 66 +- pkg/sdk/tasks_dto_builders_gen.go | 5 + pkg/sdk/tasks_dto_gen.go | 1 + pkg/sdk/tasks_gen.go | 3 +- pkg/sdk/tasks_impl_gen.go | 10 +- pkg/sdk/testint/tasks_gen_integration_test.go | 25 +- 17 files changed, 1578 insertions(+), 594 deletions(-) create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go create mode 100644 pkg/acceptance/helpers/notification_integration_client.go diff --git a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go index c0180747ab..938969f272 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go @@ -110,7 +110,7 @@ func (t *TaskAssert) HasComment(expected string) *TaskAssert { func (t *TaskAssert) HasWarehouse(expected string) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() - if o.Warehouse != expected { + if o.Warehouse != nil && o.Warehouse.FullyQualifiedName() != expected { return fmt.Errorf("expected warehouse: %v; got: %v", expected, o.Warehouse) } return nil diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go new file mode 100644 index 0000000000..6ae80d8689 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go @@ -0,0 +1,11 @@ +package resourceassert + +import ( + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "strconv" +) + +func (t *TaskResourceAssert) HasAfterLen(len int) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("after.#", strconv.FormatInt(int64(len), 10))) + return t +} diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go new file mode 100644 index 0000000000..543134a4ca --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go @@ -0,0 +1,247 @@ +// Code generated by assertions generator; DO NOT EDIT. 
+ +package resourceassert + +import ( + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" +) + +type TaskResourceAssert struct { + *assert.ResourceAssert +} + +func TaskResource(t *testing.T, name string) *TaskResourceAssert { + t.Helper() + + return &TaskResourceAssert{ + ResourceAssert: assert.NewResourceAssert(name, "resource"), + } +} + +func ImportedTaskResource(t *testing.T, id string) *TaskResourceAssert { + t.Helper() + + return &TaskResourceAssert{ + ResourceAssert: assert.NewImportedResourceAssert(id, "imported resource"), + } +} + +/////////////////////////////////// +// Attribute value string checks // +/////////////////////////////////// + +func (t *TaskResourceAssert) HasAfterString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("after", expected)) + return t +} + +func (t *TaskResourceAssert) HasAllowOverlappingExecutionString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("allow_overlapping_execution", expected)) + return t +} + +func (t *TaskResourceAssert) HasCommentString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("comment", expected)) + return t +} + +func (t *TaskResourceAssert) HasConfigString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("config", expected)) + return t +} + +func (t *TaskResourceAssert) HasDatabaseString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("database", expected)) + return t +} + +func (t *TaskResourceAssert) HasEnabledString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("enabled", expected)) + return t +} + +func (t *TaskResourceAssert) HasErrorIntegrationString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("error_integration", expected)) + return t +} + +func (t *TaskResourceAssert) HasFinalizeString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("finalize", expected)) + return t +} + +func (t *TaskResourceAssert) HasFullyQualifiedNameString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("fully_qualified_name", expected)) + return t +} + +func (t *TaskResourceAssert) HasNameString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("name", expected)) + return t +} + +func (t *TaskResourceAssert) HasScheduleString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("schedule", expected)) + return t +} + +func (t *TaskResourceAssert) HasSchemaString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("schema", expected)) + return t +} + +func (t *TaskResourceAssert) HasSessionParametersString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("session_parameters", expected)) + return t +} + +func (t *TaskResourceAssert) HasSqlStatementString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("sql_statement", expected)) + return t +} + +func (t *TaskResourceAssert) HasSuspendTaskAfterNumFailuresString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("suspend_task_after_num_failures", expected)) + return t +} + +func (t *TaskResourceAssert) HasTaskAutoRetryAttemptsString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("task_auto_retry_attempts", expected)) + return t +} + +func (t *TaskResourceAssert) HasUserTaskManagedInitialWarehouseSizeString(expected string) *TaskResourceAssert { + 
t.AddAssertion(assert.ValueSet("user_task_managed_initial_warehouse_size", expected)) + return t +} + +func (t *TaskResourceAssert) HasUserTaskMinimumTriggerIntervalInSecondsString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("user_task_minimum_trigger_interval_in_seconds", expected)) + return t +} + +func (t *TaskResourceAssert) HasUserTaskTimeoutMsString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("user_task_timeout_ms", expected)) + return t +} + +func (t *TaskResourceAssert) HasWarehouseString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("warehouse", expected)) + return t +} + +func (t *TaskResourceAssert) HasWhenString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("when", expected)) + return t +} + +//////////////////////////// +// Attribute empty checks // +//////////////////////////// + +func (t *TaskResourceAssert) HasNoAfter() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("after")) + return t +} + +func (t *TaskResourceAssert) HasNoAllowOverlappingExecution() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("allow_overlapping_execution")) + return t +} + +func (t *TaskResourceAssert) HasNoComment() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("comment")) + return t +} + +func (t *TaskResourceAssert) HasNoConfig() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("config")) + return t +} + +func (t *TaskResourceAssert) HasNoDatabase() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("database")) + return t +} + +func (t *TaskResourceAssert) HasNoEnabled() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("enabled")) + return t +} + +func (t *TaskResourceAssert) HasNoErrorIntegration() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("error_integration")) + return t +} + +func (t *TaskResourceAssert) HasNoFinalize() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("finalize")) + return t +} + +func (t *TaskResourceAssert) HasNoFullyQualifiedName() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("fully_qualified_name")) + return t +} + +func (t *TaskResourceAssert) HasNoName() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("name")) + return t +} + +func (t *TaskResourceAssert) HasNoSchedule() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("schedule")) + return t +} + +func (t *TaskResourceAssert) HasNoSchema() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("schema")) + return t +} + +func (t *TaskResourceAssert) HasNoSessionParameters() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("session_parameters")) + return t +} + +func (t *TaskResourceAssert) HasNoSqlStatement() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("sql_statement")) + return t +} + +func (t *TaskResourceAssert) HasNoSuspendTaskAfterNumFailures() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("suspend_task_after_num_failures")) + return t +} + +func (t *TaskResourceAssert) HasNoTaskAutoRetryAttempts() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("task_auto_retry_attempts")) + return t +} + +func (t *TaskResourceAssert) HasNoUserTaskManagedInitialWarehouseSize() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("user_task_managed_initial_warehouse_size")) + return t +} + +func (t *TaskResourceAssert) HasNoUserTaskMinimumTriggerIntervalInSeconds() *TaskResourceAssert { + 
t.AddAssertion(assert.ValueNotSet("user_task_minimum_trigger_interval_in_seconds")) + return t +} + +func (t *TaskResourceAssert) HasNoUserTaskTimeoutMs() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("user_task_timeout_ms")) + return t +} + +func (t *TaskResourceAssert) HasNoWarehouse() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("warehouse")) + return t +} + +func (t *TaskResourceAssert) HasNoWhen() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("when")) + return t +} diff --git a/pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go new file mode 100644 index 0000000000..463151941b --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go @@ -0,0 +1,68 @@ +package resourceparametersassert + +import ( + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "strings" +) + +func (u *TaskResourceParametersAssert) HasAllDefaults() *TaskResourceParametersAssert { + return u. + HasSuspendTaskAfterNumFailures(10). + HasTaskAutoRetryAttempts(0). + HasUserTaskManagedInitialWarehouseSize("Medium"). + HasUserTaskMinimumTriggerIntervalInSeconds(30). + HasUserTaskTimeoutMs(3600000). + HasAbortDetachedQuery(false). + HasAutocommit(true). + HasBinaryInputFormat(sdk.BinaryInputFormatHex). + HasBinaryOutputFormat(sdk.BinaryOutputFormatHex). + HasClientMemoryLimit(1536). + HasClientMetadataRequestUseConnectionCtx(false). + HasClientPrefetchThreads(4). + HasClientResultChunkSize(160). + HasClientResultColumnCaseInsensitive(false). + HasClientSessionKeepAlive(false). + HasClientSessionKeepAliveHeartbeatFrequency(3600). + HasClientTimestampTypeMapping(sdk.ClientTimestampTypeMappingLtz). + HasDateInputFormat("AUTO"). + HasDateOutputFormat("YYYY-MM-DD"). + HasEnableUnloadPhysicalTypeOptimization(true). + HasErrorOnNondeterministicMerge(true). + HasErrorOnNondeterministicUpdate(false). + HasGeographyOutputFormat(sdk.GeographyOutputFormatGeoJSON). + HasGeometryOutputFormat(sdk.GeometryOutputFormatGeoJSON). + HasJdbcTreatTimestampNtzAsUtc(false). + HasJdbcUseSessionTimezone(true). + HasJsonIndent(2). + HasLockTimeout(43200). + HasLogLevel(sdk.LogLevelOff). + HasMultiStatementCount(1). + HasNoorderSequenceAsDefault(true). + HasOdbcTreatDecimalAsInt(false). + HasQueryTag(""). + HasQuotedIdentifiersIgnoreCase(false). + HasRowsPerResultset(0). + HasS3StageVpceDnsName(""). + HasSearchPath("$current, $public"). + HasStatementQueuedTimeoutInSeconds(0). + HasStatementTimeoutInSeconds(172800). + HasStrictJsonOutput(false). + HasTimestampDayIsAlways24h(false). + HasTimestampInputFormat("AUTO"). + HasTimestampLtzOutputFormat(""). + HasTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS.FF3"). + HasTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS.FF3 TZHTZM"). + HasTimestampTypeMapping(sdk.TimestampTypeMappingNtz). + HasTimestampTzOutputFormat(""). + HasTimezone("America/Los_Angeles"). + HasTimeInputFormat("AUTO"). + HasTimeOutputFormat("HH24:MI:SS"). + HasTraceLevel(sdk.TraceLevelOff). + HasTransactionAbortOnError(false). + HasTransactionDefaultIsolationLevel(sdk.TransactionDefaultIsolationLevelReadCommitted). + HasTwoDigitCenturyStart(1970). + HasUnsupportedDdlAction(sdk.UnsupportedDDLAction(strings.ToLower(string(sdk.UnsupportedDDLActionIgnore)))). + HasUseCachedResult(true). + HasWeekOfYearPolicy(0). 
+ HasWeekStart(0) +} diff --git a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go new file mode 100644 index 0000000000..6d99220a58 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go @@ -0,0 +1,36 @@ +package resourceshowoutputassert + +import ( + "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "strconv" +) + +func (t *TaskShowOutputAssert) HasCreatedOnNotEmpty() *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValuePresent("created_on")) + return t +} + +func (t *TaskShowOutputAssert) HasIdNotEmpty() *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValuePresent("id")) + return t +} + +func (t *TaskShowOutputAssert) HasLastCommittedOnNotEmpty() *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValuePresent("last_committed_on")) + return t +} + +func (t *TaskShowOutputAssert) HasLastSuspendedOnNotEmpty() *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValuePresent("last_suspended_on")) + return t +} + +func (t *TaskShowOutputAssert) HasPredecessors(predecessors ...sdk.SchemaObjectIdentifier) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("predecessors.#", strconv.Itoa(len(predecessors)))) + for i, predecessor := range predecessors { + t.AddAssertion(assert.ResourceShowOutputValueSet(fmt.Sprintf("predecessors.%d", i), predecessor.FullyQualifiedName())) + } + return t +} diff --git a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go new file mode 100644 index 0000000000..a150ce6084 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go @@ -0,0 +1,151 @@ +//Code generated by assertions generator; DO NOT EDIT. 
+ +package resourceshowoutputassert + +import ( + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" +) + +// to ensure sdk package is used +var _ = sdk.Object{} + +type TaskShowOutputAssert struct { + *assert.ResourceAssert +} + +func TaskShowOutput(t *testing.T, name string) *TaskShowOutputAssert { + t.Helper() + + task := TaskShowOutputAssert{ + ResourceAssert: assert.NewResourceAssert(name, "show_output"), + } + task.AddAssertion(assert.ValueSet("show_output.#", "1")) + return &task +} + +func ImportedTaskShowOutput(t *testing.T, id string) *TaskShowOutputAssert { + t.Helper() + + task := TaskShowOutputAssert{ + ResourceAssert: assert.NewImportedResourceAssert(id, "show_output"), + } + task.AddAssertion(assert.ValueSet("show_output.#", "1")) + return &task +} + +//////////////////////////// +// Attribute value checks // +//////////////////////////// + +func (t *TaskShowOutputAssert) HasCreatedOn(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("created_on", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasName(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("name", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasId(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("id", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasDatabaseName(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("database_name", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasSchemaName(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("schema_name", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasOwner(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("owner", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasComment(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("comment", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasWarehouse(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("warehouse", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasSchedule(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("schedule", expected)) + return t +} + +//func (t *TaskShowOutputAssert) HasPredecessors(expected []sdk.SchemaObjectIdentifier) *TaskShowOutputAssert { +// t.AddAssertion(assert.ResourceShowOutputValueSet("predecessors", collections.Map(expected, sdk.SchemaObjectIdentifier.FullyQualifiedName))) +// return t +//} + +func (t *TaskShowOutputAssert) HasState(expected sdk.TaskState) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("state", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasDefinition(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("definition", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasCondition(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("condition", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasAllowOverlappingExecution(expected bool) *TaskShowOutputAssert { + 
t.AddAssertion(assert.ResourceShowOutputBoolValueSet("allow_overlapping_execution", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasErrorIntegration(expected sdk.AccountObjectIdentifier) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("error_integration", expected.Name())) + return t +} + +func (t *TaskShowOutputAssert) HasLastCommittedOn(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("last_committed_on", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasLastSuspendedOn(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("last_suspended_on", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasOwnerRoleType(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("owner_role_type", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasConfig(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("config", expected)) + return t +} + +func (t *TaskShowOutputAssert) HasBudget(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("budget", expected)) + return t +} + +//func (t *TaskShowOutputAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskShowOutputAssert { +// t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations", expected)) +// return t +//} + +func (t *TaskShowOutputAssert) HasLastSuspendedReason(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputValueSet("last_suspended_reason", expected)) + return t +} diff --git a/pkg/acceptance/helpers/notification_integration_client.go b/pkg/acceptance/helpers/notification_integration_client.go new file mode 100644 index 0000000000..e22f498140 --- /dev/null +++ b/pkg/acceptance/helpers/notification_integration_client.go @@ -0,0 +1,60 @@ +package helpers + +import ( + "context" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/stretchr/testify/require" +) + +// TODO [SNOW-1017580]: replace with real value +const gcpPubsubSubscriptionName = "projects/project-1234/subscriptions/sub2" + +type NotificationIntegrationClient struct { + context *TestClientContext + ids *IdsGenerator +} + +func NewNotificationIntegrationClient(context *TestClientContext, idsGenerator *IdsGenerator) *NotificationIntegrationClient { + return &NotificationIntegrationClient{ + context: context, + ids: idsGenerator, + } +} + +func (c *NotificationIntegrationClient) client() sdk.NotificationIntegrations { + return c.context.client.NotificationIntegrations +} + +func (c *NotificationIntegrationClient) Create(t *testing.T) (*sdk.NotificationIntegration, func()) { + t.Helper() + return c.CreateWithRequest(t, sdk.NewCreateNotificationIntegrationRequest(c.ids.RandomAccountObjectIdentifier(), true). + WithAutomatedDataLoadsParams(sdk.NewAutomatedDataLoadsParamsRequest(). 
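			// Descriptive note: this builds a GCP Pub/Sub (auto-ingest) notification integration
			// request using the placeholder subscription name declared above (see the
			// TODO [SNOW-1017580] about replacing it with a real value).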
+ WithGoogleAutoParams(sdk.NewGoogleAutoParamsRequest(gcpPubsubSubscriptionName)), + ), + ) +} + +func (c *NotificationIntegrationClient) CreateWithRequest(t *testing.T, request *sdk.CreateNotificationIntegrationRequest) (*sdk.NotificationIntegration, func()) { + t.Helper() + ctx := context.Background() + + err := c.client().Create(ctx, request) + require.NoError(t, err) + + networkRule, err := c.client().ShowByID(ctx, request.GetName()) + require.NoError(t, err) + + return networkRule, c.DropFunc(t, request.GetName()) +} + +func (c *NotificationIntegrationClient) DropFunc(t *testing.T, id sdk.AccountObjectIdentifier) func() { + t.Helper() + ctx := context.Background() + + return func() { + err := c.client().Drop(ctx, sdk.NewDropNotificationIntegrationRequest(id).WithIfExists(sdk.Bool(true))) + require.NoError(t, err) + } +} diff --git a/pkg/acceptance/helpers/test_client.go b/pkg/acceptance/helpers/test_client.go index 975c74a061..3dc2390f9b 100644 --- a/pkg/acceptance/helpers/test_client.go +++ b/pkg/acceptance/helpers/test_client.go @@ -37,6 +37,7 @@ type TestClient struct { MaterializedView *MaterializedViewClient NetworkPolicy *NetworkPolicyClient NetworkRule *NetworkRuleClient + NotificationIntegration *NotificationIntegrationClient Parameter *ParameterClient PasswordPolicy *PasswordPolicyClient Pipe *PipeClient @@ -102,6 +103,7 @@ func NewTestClient(c *sdk.Client, database string, schema string, warehouse stri MaterializedView: NewMaterializedViewClient(context, idsGenerator), NetworkPolicy: NewNetworkPolicyClient(context, idsGenerator), NetworkRule: NewNetworkRuleClient(context, idsGenerator), + NotificationIntegration: NewNotificationIntegrationClient(context, idsGenerator), Parameter: NewParameterClient(context), PasswordPolicy: NewPasswordPolicyClient(context, idsGenerator), Pipe: NewPipeClient(context, idsGenerator), diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 980644a0ac..0fcf1f2da4 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -5,7 +5,9 @@ import ( "errors" "fmt" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "log" "strings" "time" @@ -59,17 +61,17 @@ var taskSchema = map[string]*schema.Schema{ Description: "The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)", ConflictsWith: []string{"user_task_managed_initial_warehouse_size"}, }, - "user_task_managed_initial_warehouse_size": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: sdkValidation(sdk.ToWarehouseSize), - DiffSuppressFunc: SuppressIfAny( - NormalizeAndCompare(sdk.ToWarehouseSize), - IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskManagedInitialWarehouseSize))), - ), - Description: fmt.Sprintf("Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. 
(Conflicts with warehouse)", possibleValuesListed(sdk.ValidWarehouseSizesString)), - ConflictsWith: []string{"warehouse"}, - }, + //"user_task_managed_initial_warehouse_size": { + // Type: schema.TypeString, + // Optional: true, + // ValidateDiagFunc: sdkValidation(sdk.ToWarehouseSize), + // DiffSuppressFunc: SuppressIfAny( + // NormalizeAndCompare(sdk.ToWarehouseSize), + // IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskManagedInitialWarehouseSize))), + // ), + // Description: fmt.Sprintf("Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse)", possibleValuesListed(sdk.ValidWarehouseSizesString)), + // ConflictsWith: []string{"warehouse"}, + //}, "schedule": { Type: schema.TypeString, Optional: true, @@ -78,50 +80,43 @@ var taskSchema = map[string]*schema.Schema{ ConflictsWith: []string{"finalize", "after"}, }, "config": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("config"), + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: SuppressIfAny( + IgnoreChangeToCurrentSnowflakeValueInShow("config"), + func(k, oldValue, newValue string, d *schema.ResourceData) bool { + // TODO: Trim left and right instead of replace all + extract + return strings.ReplaceAll(oldValue, "$", "") == strings.ReplaceAll(newValue, "$", "") + }, + ), // TODO: it could be retrieved with system function and show/desc (which should be used?) // TODO: Doc request: there's no schema for JSON config format Description: "Specifies a string representation of key value pairs that can be accessed by all tasks in the task graph. Must be in JSON format.", }, "allow_overlapping_execution": { - Type: schema.TypeBool, + Type: schema.TypeString, Optional: true, Default: BooleanDefault, ValidateDiagFunc: validateBooleanString, DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("allow_overlapping_execution"), Description: booleanStringFieldDescription("By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap."), }, - "session_parameters": { - // TODO: Description and validation - Type: schema.TypeList, // TODO: make it actual schema (check user) - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "a": {}, - // TODO: - }, - }, - Optional: true, - Description: "Specifies session parameters to set for the session when the task runs. 
A task supports all session parameters.", - }, - "user_task_timeout_ms": { - Type: schema.TypeInt, - Optional: true, - Default: IntDefault, - ValidateFunc: validation.IntAtLeast(0), - DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskTimeoutMs))), - Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds).", - }, - "suspend_task_after_num_failures": { - Type: schema.TypeInt, - Optional: true, - Default: IntDefault, - ValidateFunc: validation.IntAtLeast(0), - DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterSuspendTaskAfterNumFailures))), - Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension).", - }, + //"user_task_timeout_ms": { + // Type: schema.TypeInt, + // Optional: true, + // Default: IntDefault, + // ValidateFunc: validation.IntAtLeast(0), + // DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskTimeoutMs))+".0.value"), + // Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds).", + //}, + //"suspend_task_after_num_failures": { + // Type: schema.TypeInt, + // Optional: true, + // Default: IntDefault, + // ValidateFunc: validation.IntAtLeast(0), + // DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterSuspendTaskAfterNumFailures))+".0.value"), + // Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension).", + //}, "error_integration": { Type: schema.TypeString, Optional: true, @@ -136,6 +131,7 @@ var taskSchema = map[string]*schema.Schema{ }, "finalize": { Optional: true, + Type: schema.TypeString, ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), DiffSuppressFunc: SuppressIfAny( suppressIdentifierQuoting, @@ -143,20 +139,20 @@ var taskSchema = map[string]*schema.Schema{ ), ConflictsWith: []string{"schedule", "after"}, }, - "task_auto_retry_attempts": { - Type: schema.TypeInt, - Optional: true, - Default: IntDefault, - ValidateFunc: validation.IntAtLeast(0), - DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterTaskAutoRetryAttempts))), - Description: "Specifies the number of automatic task graph retry attempts. If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed.", - }, + //"task_auto_retry_attempts": { + // Type: schema.TypeInt, + // Optional: true, + // Default: IntDefault, + // ValidateFunc: validation.IntAtLeast(0), + // DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterTaskAutoRetryAttempts))+".0.value"), + // Description: "Specifies the number of automatic task graph retry attempts. 
If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed.", + //}, "user_task_minimum_trigger_interval_in_seconds": { Type: schema.TypeInt, Optional: true, Default: IntDefault, ValidateFunc: validation.IntAtLeast(15), - DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds))), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds))+".0.value"), Description: "Defines how frequently a task can execute in seconds. If data changes occur more often than the specified minimum, changes will be grouped and processed together.", }, "after": { @@ -211,11 +207,46 @@ func Task() *schema.Resource { ReadContext: ReadTask(true), DeleteContext: DeleteTask, - Schema: taskSchema, + Schema: helpers.MergeMaps(taskSchema, taskParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, // TODO: Import + StateContext: ImportTask, }, + + CustomizeDiff: customdiff.All( + ComputedIfAnyAttributeChanged(taskSchema, ShowOutputAttributeName, "name", "enabled", "warehouse", "user_task_managed_initial_warehouse_size", "schedule", "config", "allow_overlapping_execution", "error_integration", "comment", "finalize", "after", "when"), + ComputedIfAnyAttributeChanged(taskParametersSchema, ParametersAttributeName, collections.Map(sdk.AsStringList(sdk.AllTaskParameters), strings.ToLower)...), + ComputedIfAnyAttributeChanged(taskSchema, FullyQualifiedNameAttributeName, "name"), + taskParametersCustomDiff, + ), + } +} + +func ImportTask(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + logging.DebugLogger.Printf("[DEBUG] Starting task import") + client := meta.(*provider.Context).Client + + id, err := sdk.ParseSchemaObjectIdentifier(d.Id()) + if err != nil { + return nil, err + } + + task, err := client.Tasks.ShowByID(ctx, id) + if err != nil { + return nil, err + } + + if _, err := ImportName[sdk.SchemaObjectIdentifier](context.Background(), d, nil); err != nil { + return nil, err + } + + if err = errors.Join( + d.Set("enabled", booleanStringFromBool(task.State == sdk.TaskStateStarted)), + d.Set("allow_overlapping_execution", booleanStringFromBool(task.AllowOverlappingExecution)), + ); err != nil { + return nil, err } + + return []*schema.ResourceData{d}, nil } func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { @@ -235,14 +266,6 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag req.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(warehouseId)) } - if v, ok := d.GetOk("user_task_managed_initial_warehouse_size"); ok { - size, err := sdk.ToWarehouseSize(v.(string)) - if err != nil { - return diag.FromErr(err) - } - req.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(size)) - } - if v, ok := d.GetOk("schedule"); ok { req.WithSchedule(v.(string)) // TODO: What about cron, how do we track changed (only through show) } @@ -251,25 +274,29 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag req.WithConfig(v.(string)) } - if v, ok := d.GetOk("allow_overlapping_execution"); ok { - req.WithAllowOverlappingExecution(v.(bool)) - } - - if v, ok := d.GetOk("session_parameters"); 
ok { - sessionParameters, err := sdk.GetSessionParametersFrom(v.(map[string]any)) + if v := d.Get("allow_overlapping_execution").(string); v != BooleanDefault { + parsedBool, err := booleanStringToBool(v) if err != nil { return diag.FromErr(err) } - req.WithSessionParameters(*sessionParameters) + req.WithAllowOverlappingExecution(parsedBool) } - if v := d.Get("user_task_timeout_ms"); v != IntDefault { - req.WithUserTaskTimeoutMs(v.(int)) - } + //if v, ok := d.GetOk("session_parameters"); ok { + // sessionParameters, err := sdk.GetSessionParametersFrom(v.(map[string]any)) + // if err != nil { + // return diag.FromErr(err) + // } + // req.WithSessionParameters(*sessionParameters) + //} - if v := d.Get("suspend_task_after_num_failures"); v != IntDefault { - req.WithSuspendTaskAfterNumFailures(v.(int)) - } + //if v := d.Get("user_task_timeout_ms"); v != IntDefault { + // req.WithUserTaskTimeoutMs(v.(int)) + //} + // + //if v := d.Get("suspend_task_after_num_failures"); v != IntDefault { + // req.WithSuspendTaskAfterNumFailures(v.(int)) + //} // TODO: Decide on name (error_notification_integration ?) if v, ok := d.GetOk("error_integration"); ok { @@ -292,13 +319,13 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag req.WithFinalize(rootTaskId) } - if v := d.Get("task_auto_retry_attempts"); v != IntDefault { - req.WithTaskAutoRetryAttempts(v.(int)) - } - - if v := d.Get("user_task_minimum_trigger_interval_in_seconds"); v != IntDefault { - req.WithUserTaskMinimumTriggerIntervalInSeconds(v.(int)) - } + //if v := d.Get("task_auto_retry_attempts"); v != IntDefault { + // req.WithTaskAutoRetryAttempts(v.(int)) + //} + // + //if v := d.Get("user_task_minimum_trigger_interval_in_seconds"); v != IntDefault { + // req.WithUserTaskMinimumTriggerIntervalInSeconds(v.(int)) + //} if v, ok := d.GetOk("after"); ok { // TODO: Should after take in task names or fully qualified names? 
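		// Possible direction for the TODO above (an assumption, not the current behavior):
		// if "after" accepted fully qualified names, each entry could be parsed first, e.g.
		//   parentId, err := sdk.ParseSchemaObjectIdentifier(`"db"."schema"."parent_task"`)
		// and the parsed identifiers, rather than raw strings, passed to the create request.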
after := expandStringList(v.([]interface{})) @@ -323,6 +350,10 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag req.WithWhen(v.(string)) } + if parameterCreateDiags := handleTaskParametersCreate(d, req); len(parameterCreateDiags) > 0 { + return parameterCreateDiags + } + if err := client.Tasks.Create(ctx, req); err != nil { return diag.FromErr(err) } @@ -367,62 +398,29 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag err = errors.Join( accountObjectIdentifierAttributeUpdate(d, "warehouse", &set.Warehouse, &unset.Warehouse), - accountObjectIdentifierAttributeUpdate(d, "error_integration", &set.ErrorNotificationIntegration, &unset.ErrorIntegration), // TODO: name inconsistency stringAttributeUpdate(d, "schedule", &set.Schedule, &unset.Schedule), - //stringAttributeUpdate(d, "user_task_managed_initial_warehouse_size", &set.UserTaskManagedInitialWarehouseSize, &unset.UserTaskManage)// TODO: Not in unsetUSER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE - intAttributeUpdate(d, "user_task_timeout_ms", &set.UserTaskTimeoutMs, &unset.UserTaskTimeoutMs), - intAttributeUpdate(d, "suspend_task_after_num_failures", &set.SuspendTaskAfterNumFailures, &unset.SuspendTaskAfterNumFailures), - stringAttributeUpdate(d, "comment", &set.Comment, &unset.Comment), + stringAttributeUpdate(d, "config", &set.Config, &unset.Config), booleanStringAttributeUpdate(d, "allow_overlapping_execution", &set.AllowOverlappingExecution, &unset.AllowOverlappingExecution), + accountObjectIdentifierAttributeUpdate(d, "error_integration", &set.ErrorNotificationIntegration, &unset.ErrorIntegration), // TODO: name inconsistency + stringAttributeUpdate(d, "comment", &set.Comment, &unset.Comment), ) if err != nil { return diag.FromErr(err) } - if d.HasChange("session_parameters") { - o, n := d.GetChange("session_parameters") - - if o == nil { - o = make(map[string]interface{}) - } - if n == nil { - n = make(map[string]interface{}) - } - os := o.(map[string]any) - ns := n.(map[string]any) - - remove := difference(os, ns) - add := difference(ns, os) - change := differentValue(os, ns) - - if len(remove) > 0 { - sessionParametersUnset, err := sdk.GetSessionParametersUnsetFrom(remove) - if err != nil { - return diag.FromErr(err) - } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithUnset(*sdk.NewTaskUnsetRequest().WithSessionParametersUnset(*sessionParametersUnset))); err != nil { - return diag.FromErr(fmt.Errorf("error removing session_parameters on task %v err = %w", d.Id(), err)) - } - } + if updateDiags := handleTaskParametersUpdate(d, set, unset); len(updateDiags) > 0 { + return updateDiags + } - if len(add) > 0 { - sessionParameters, err := sdk.GetSessionParametersFrom(add) - if err != nil { - return diag.FromErr(err) - } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { - return diag.FromErr(fmt.Errorf("error adding session_parameters to task %v err = %w", d.Id(), err)) - } + if *set != (sdk.TaskSetRequest{}) { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSet(*set)); err != nil { + return diag.FromErr(err) } + } - if len(change) > 0 { - sessionParameters, err := sdk.GetSessionParametersFrom(change) - if err != nil { - return diag.FromErr(err) - } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { - return 
diag.FromErr(fmt.Errorf("error updating session_parameters in task %v err = %w", d.Id(), err)) - } + if *unset != (sdk.TaskUnsetRequest{}) { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithUnset(*unset)); err != nil { + return diag.FromErr(err) } } @@ -535,44 +533,43 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { } if withExternalChangesMarking { - finalizedTaskId := "" - if task.TaskRelations.FinalizerTask != nil { - finalizedTaskId = task.TaskRelations.FinalizerTask.FullyQualifiedName() - } if err = handleExternalChangesToObjectInShow(d, - showMapping{"state", "enabled", task.State, task.State == sdk.TaskStateStarted, nil}, - showMapping{"warehouse", "warehouse", task.Warehouse, task.Warehouse, nil}, - showMapping{"schedule", "schedule", task.Schedule, task.Schedule, nil}, - showMapping{"config", "config", task.Config, task.Config, nil}, - showMapping{"allow_overlapping_execution", "allow_overlapping_execution", task.AllowOverlappingExecution, task.AllowOverlappingExecution, nil}, - showMapping{"error_integration", "error_integration", task.ErrorIntegration, task.ErrorIntegration, nil}, - showMapping{"comment", "comment", task.Comment, task.Comment, nil}, - showMapping{"task_relations.0.finalize", "finalize", finalizedTaskId, finalizedTaskId, nil}, - showMapping{"condition", "when", task.Condition, task.Condition, nil}, - showMapping{"definition", "sql_statement", task.Definition, task.Definition, nil}, + showMapping{"state", "enabled", string(task.State), booleanStringFromBool(task.State == sdk.TaskStateStarted), nil}, + showMapping{"allow_overlapping_execution", "allow_overlapping_execution", task.AllowOverlappingExecution, booleanStringFromBool(task.AllowOverlappingExecution), nil}, ); err != nil { return diag.FromErr(err) } - } else { - if err = setStateToValuesFromConfig(d, taskSchema, []string{ - "warehouse", - "schedule", - "config", - "allow_overlapping_execution", - "error_integration", - "comment", - "finalize", - "condition", - "sql_statement", - }); err != nil { - return diag.FromErr(err) - } + } + if err = setStateToValuesFromConfig(d, taskSchema, []string{ + "enabled", + "allow_overlapping_execution", + }); err != nil { + return diag.FromErr(err) + } + + errorIntegrationId := "" + if task.ErrorIntegration != nil { + errorIntegrationId = task.ErrorIntegration.Name() + } + + finalizedTaskId := "" + if task.TaskRelations.FinalizerTask != nil { + finalizedTaskId = task.TaskRelations.FinalizerTask.FullyQualifiedName() } if errs := errors.Join( // TODO: handleTaskParametersRead(d, taskParameters) - d.Set("enabled", task.State == sdk.TaskStateStarted), + // TODO: Reorder + d.Set("warehouse", task.Warehouse), + d.Set("schedule", task.Schedule), + d.Set("when", task.Condition), + d.Set("config", task.Config), + d.Set("error_integration", errorIntegrationId), + d.Set("comment", task.Comment), + d.Set("sql_statement", task.Definition), d.Set("after", collections.Map(task.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName)), + d.Set("finalize", finalizedTaskId), + handleTaskParameterRead(d, taskParameters), d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()), d.Set(ShowOutputAttributeName, []map[string]any{schemas.TaskToSchema(task)}), d.Set(ParametersAttributeName, []map[string]any{schemas.TaskParametersToSchema(taskParameters)}), diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 4d3975a617..0844eaa94c 100644 --- a/pkg/resources/task_acceptance_test.go +++ 
b/pkg/resources/task_acceptance_test.go @@ -5,11 +5,17 @@ import ( acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceparametersassert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config/model" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/testenvs" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" + r "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + configvariable "github.com/hashicorp/terraform-plugin-testing/config" + "strings" "testing" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" @@ -18,9 +24,15 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" ) +// TODO: More tests for complicated DAGs + func TestAcc_Task_Basic(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() - configModel := model.TaskWithId("test", id, "SELECT 1") + statement := "SELECT 1" + configModel := model.TaskWithId("test", id, statement) resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, @@ -28,59 +40,365 @@ func TestAcc_Task_Basic(t *testing.T) { TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.RequireAbove(tfversion.Version1_5_0), }, - CheckDestroy: acc.CheckDestroy(t, resources.ResourceMonitor), + CheckDestroy: acc.CheckDestroy(t, resources.Task), Steps: []resource.TestStep{ { Config: config.FromModel(t, configModel), Check: assert.AssertThat(t, - resourceassert.ResourceMonitorResource(t, "snowflake_resource_monitor.test"). - HasNameString(id.Name()). + resourceassert.TaskResource(t, "snowflake_task.test"). HasFullyQualifiedNameString(id.FullyQualifiedName()). - HasNoCreditQuota(). - HasNotifyUsersLen(0). - HasNoFrequency(). - HasNoStartTimestamp(). - HasNoEndTimestamp(). - HasNoNotifyTriggers(). - HasNoSuspendTrigger(). - HasNoSuspendImmediateTrigger(), - resourceshowoutputassert.ResourceMonitorShowOutput(t, "snowflake_resource_monitor.test"). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasEnabledString(r.BooleanDefault). + HasWarehouseString(""). + HasScheduleString(""). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanDefault). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasAfterLen(0). + HasWhenString(""). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). + HasCreatedOnNotEmpty(). HasName(id.Name()). - HasCreditQuota(0). - HasUsedCredits(0). - HasRemainingCredits(0). - HasLevel(""). - HasFrequency(sdk.FrequencyMonthly). - HasStartTimeNotEmpty(). - HasEndTime(""). - HasSuspendAt(0). - HasSuspendImmediateAt(0). 
+ HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasComment(""). + HasWarehouse(""). + HasSchedule(""). + HasPredecessors(). + HasState(sdk.TaskStateSuspended). + HasDefinition(statement). + HasCondition(""). + HasAllowOverlappingExecution(false). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). // TODO: *sdk.AOI + HasLastCommittedOn(""). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""), + //HasTaskRelations(sdk.TaskRelations{}). // TODO: + resourceparametersassert.TaskResourceParameters(t, "snowflake_task.test"). + HasAllDefaults(), + ), + }, + { + ResourceName: "snowflake_task.test", + ImportState: true, + ImportStateCheck: assert.AssertThatImport(t, + resourceassert.ImportedTaskResource(t, helpers.EncodeResourceIdentifier(id)). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasEnabledString(r.BooleanFalse). + HasWarehouseString(""). + HasScheduleString(""). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanFalse). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasNoAfter(). + HasWhenString(""). + HasSqlStatementString(statement), + ), + }, + }, + }) +} + +func TestAcc_Task_Complete(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.Create(t) + t.Cleanup(errorNotificationIntegrationCleanup) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + taskConfig := `$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$` + // We have to do three $ at the beginning because Terraform will remove one $. + // It's because `${` is a special pattern, and it's escaped by `$${`. + expectedTaskConfig := strings.ReplaceAll(taskConfig, "$", "") + taskConfigVariableValue := "$" + taskConfig + comment := random.Comment() + condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` + configModel := model.TaskWithId("test", id, statement). + WithEnabled(r.BooleanTrue). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithSchedule("10 MINUTES"). + WithConfigValue(configvariable.StringVariable(taskConfigVariableValue)). + WithAllowOverlappingExecution(true). + WithErrorIntegration(errorNotificationIntegration.ID().Name()). + WithComment(comment). + WithWhen(condition) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, "snowflake_task.test"). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasEnabledString(r.BooleanTrue). + HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). + HasScheduleString("10 MINUTES"). + HasConfigString(expectedTaskConfig). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). 
+ HasCommentString(comment). + HasFinalizeString(""). + HasNoAfter(). + HasWhenString(condition). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). HasCreatedOnNotEmpty(). - HasOwnerNotEmpty(). - HasComment(""), + HasName(id.Name()). + //HasId(id.FullyQualifiedName()). // TODO: not empty + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasComment(comment). + HasWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + HasSchedule("10 MINUTES"). + //HasPredecessors(nil). // TODO: + HasState(sdk.TaskStateStarted). + HasDefinition(statement). + HasCondition(condition). + HasAllowOverlappingExecution(true). + HasErrorIntegration(errorNotificationIntegration.ID()). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(expectedTaskConfig). + HasBudget(""), + //HasTaskRelations(sdk.TaskRelations{}). // TODO: + resourceparametersassert.TaskResourceParameters(t, "snowflake_task.test"). + HasAllDefaults(), ), }, { - ResourceName: "snowflake_resource_monitor.test", + ResourceName: "snowflake_task.test", ImportState: true, ImportStateCheck: assert.AssertThatImport(t, - resourceassert.ImportedResourceMonitorResource(t, helpers.EncodeResourceIdentifier(id)). + resourceassert.ImportedTaskResource(t, helpers.EncodeResourceIdentifier(id)). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasEnabledString(r.BooleanTrue). + HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). + HasScheduleString("10 MINUTES"). + HasConfigString(expectedTaskConfig). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). + HasCommentString(comment). + HasFinalizeString(""). + HasNoAfter(). + HasWhenString(condition). + HasSqlStatementString(statement), + ), + }, + }, + }) +} + +func TestAcc_Task_Updates(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + basicConfigModel := model.TaskWithId("test", id, statement) + + // TODO: Assert the rest of fields (e.g. parameters) + + errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.Create(t) + t.Cleanup(errorNotificationIntegrationCleanup) + + taskConfig := `$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$` + // We have to do three $ at the beginning because Terraform will remove one $. + // It's because `${` is a special pattern, and it's escaped by `$${`. + expectedTaskConfig := strings.ReplaceAll(taskConfig, "$", "") + taskConfigVariableValue := "$" + taskConfig + comment := random.Comment() + condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` + completeConfigModel := model.TaskWithId("test", id, statement). + WithEnabled(r.BooleanTrue). + // TODO: Warehouse cannot be set (error) + //WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithSchedule("10 MINUTES"). + WithConfigValue(configvariable.StringVariable(taskConfigVariableValue)). + WithAllowOverlappingExecution(true). + WithErrorIntegration(errorNotificationIntegration.ID().Name()). + WithComment(comment). 
+ WithWhen(condition) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, basicConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, "snowflake_task.test"). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasEnabledString(r.BooleanDefault). + HasWarehouseString(""). + HasScheduleString(""). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanDefault). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasAfterLen(0). + HasWhenString(""). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). + //HasCreatedOnNotEmpty(), + HasName(id.Name()). + //HasId(id.FullyQualifiedName()). // TODO: not empty + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasComment(""). + HasWarehouse(""). + HasSchedule(""). + //HasPredecessors(nil). // TODO: + HasState(sdk.TaskStateSuspended). + HasDefinition(statement). + HasCondition(""). + HasAllowOverlappingExecution(false). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). // TODO: *sdk.AOI + HasLastCommittedOn(""). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""), + //HasTaskRelations(sdk.TaskRelations{}). // TODO: + ), + }, + // Set + { + Config: config.FromModel(t, completeConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, "snowflake_task.test"). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). HasNameString(id.Name()). + HasEnabledString(r.BooleanTrue). + //HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). + HasScheduleString("10 MINUTES"). + HasConfigString(expectedTaskConfig). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). + HasCommentString(comment). + HasFinalizeString(""). + HasAfterLen(0). + HasWhenString(condition). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + //HasId(id.FullyQualifiedName()). // TODO: not empty + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasComment(comment). + //HasWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + HasSchedule("10 MINUTES"). + //HasPredecessors(nil). // TODO: + HasState(sdk.TaskStateStarted). + HasDefinition(statement). + HasCondition(condition). + HasAllowOverlappingExecution(true). + HasErrorIntegration(errorNotificationIntegration.ID()). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(expectedTaskConfig). + HasBudget(""), + //HasTaskRelations(sdk.TaskRelations{}). // TODO: + ), + }, + // Unset + { + Config: config.FromModel(t, basicConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, "snowflake_task.test"). HasFullyQualifiedNameString(id.FullyQualifiedName()). 
- HasCreditQuotaString("0"). - HasNotifyUsersLen(0). - HasFrequencyString(string(sdk.FrequencyMonthly)). - HasStartTimestampNotEmpty(). - HasEndTimestampString(""). - HasNoNotifyTriggers(). - HasSuspendTriggerString("0"). - HasSuspendImmediateTriggerString("0"), + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasEnabledString(r.BooleanDefault). + HasWarehouseString(""). + HasScheduleString(""). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanDefault). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasAfterLen(0). + HasWhenString(""). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). + //HasCreatedOnNotEmpty(), + HasName(id.Name()). + //HasId(id.FullyQualifiedName()). // TODO: not empty + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasComment(""). + HasWarehouse(""). + HasSchedule(""). + //HasPredecessors(nil). // TODO: + HasState(sdk.TaskStateSuspended). + HasDefinition(statement). + HasCondition(""). + HasAllowOverlappingExecution(false). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). // TODO: *sdk.AOI + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOnNotEmpty(). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""), + //HasTaskRelations(sdk.TaskRelations{}). // TODO: ), }, }, }) } +func TestAcc_Task_AllParameters(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) +} + +// TODO: Test other paths (alter finalize, after, itd) + //type ( // AccTaskTestSettings struct { // DatabaseName string diff --git a/pkg/resources/task_parameters.go b/pkg/resources/task_parameters.go index b5fe06023b..e326c10fbc 100644 --- a/pkg/resources/task_parameters.go +++ b/pkg/resources/task_parameters.go @@ -1,380 +1,410 @@ package resources -//var ( -// userParametersSchema = make(map[string]*schema.Schema) -// userParametersCustomDiff = ParametersCustomDiff( -// userParametersProvider, -// parameter[sdk.UserParameter]{sdk.UserParameterAbortDetachedQuery, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterAutocommit, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterBinaryInputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterBinaryOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterClientMemoryLimit, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterClientMetadataRequestUseConnectionCtx, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterClientPrefetchThreads, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterClientResultChunkSize, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterClientResultColumnCaseInsensitive, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterClientSessionKeepAlive, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterClientTimestampTypeMapping, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterDateInputFormat, 
valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterDateOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterEnableUnloadPhysicalTypeOptimization, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterErrorOnNondeterministicMerge, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterErrorOnNondeterministicUpdate, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterGeographyOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterGeometryOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterJdbcTreatDecimalAsInt, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterJdbcTreatTimestampNtzAsUtc, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterJdbcUseSessionTimezone, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterJsonIndent, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterLockTimeout, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterLogLevel, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterMultiStatementCount, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterNoorderSequenceAsDefault, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterOdbcTreatDecimalAsInt, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterQueryTag, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterQuotedIdentifiersIgnoreCase, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterRowsPerResultset, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterS3StageVpceDnsName, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterSearchPath, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterSimulatedDataSharingConsumer, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterStatementQueuedTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterStatementTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterStrictJsonOutput, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimestampDayIsAlways24h, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimestampInputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimestampLtzOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimestampNtzOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimestampOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimestampTypeMapping, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimestampTzOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// 
parameter[sdk.UserParameter]{sdk.UserParameterTimezone, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimeInputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTimeOutputFormat, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTraceLevel, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTransactionAbortOnError, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTransactionDefaultIsolationLevel, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterTwoDigitCenturyStart, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterUnsupportedDdlAction, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterUseCachedResult, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterWeekOfYearPolicy, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterWeekStart, valueTypeInt, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterEnableUnredactedQuerySyntaxError, valueTypeBool, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterNetworkPolicy, valueTypeString, sdk.ParameterTypeUser}, -// parameter[sdk.UserParameter]{sdk.UserParameterPreventUnloadToInternalStages, valueTypeBool, sdk.ParameterTypeUser}, -// ) -//) -// -//type parameterDef[T ~string] struct { -// Name T -// Type schema.ValueType -// Description string -// DiffSuppress schema.SchemaDiffSuppressFunc -// ValidateDiag schema.SchemaValidateDiagFunc -//} -// -//func init() { -// // TODO [SNOW-1645342]: move to the SDK -// userParameterFields := []parameterDef[sdk.UserParameter]{ -// // session params -// {Name: sdk.UserParameterAbortDetachedQuery, Type: schema.TypeBool, Description: "Specifies the action that Snowflake performs for in-progress queries if connectivity is lost due to abrupt termination of a session (e.g. network outage, browser termination, service interruption)."}, -// {Name: sdk.UserParameterAutocommit, Type: schema.TypeBool, Description: "Specifies whether autocommit is enabled for the session. Autocommit determines whether a DML statement, when executed without an active transaction, is automatically committed after the statement successfully completes. For more information, see [Transactions](https://docs.snowflake.com/en/sql-reference/transactions)."}, -// {Name: sdk.UserParameterBinaryInputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToBinaryInputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToBinaryInputFormat), Description: "The format of VARCHAR values passed as input to VARCHAR-to-BINARY conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output)."}, -// {Name: sdk.UserParameterBinaryOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToBinaryOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToBinaryOutputFormat), Description: "The format for VARCHAR values returned as output by BINARY-to-VARCHAR conversion functions. 
For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output)."}, -// {Name: sdk.UserParameterClientMemoryLimit, Type: schema.TypeInt, Description: "Parameter that specifies the maximum amount of memory the JDBC driver or ODBC driver should use for the result set from queries (in MB)."}, -// {Name: sdk.UserParameterClientMetadataRequestUseConnectionCtx, Type: schema.TypeBool, Description: "For specific ODBC functions and JDBC methods, this parameter can change the default search scope from all databases/schemas to the current database/schema. The narrower search typically returns fewer rows and executes more quickly."}, -// {Name: sdk.UserParameterClientPrefetchThreads, Type: schema.TypeInt, Description: "Parameter that specifies the number of threads used by the client to pre-fetch large result sets. The driver will attempt to honor the parameter value, but defines the minimum and maximum values (depending on your system’s resources) to improve performance."}, -// {Name: sdk.UserParameterClientResultChunkSize, Type: schema.TypeInt, Description: "Parameter that specifies the maximum size of each set (or chunk) of query results to download (in MB). The JDBC driver downloads query results in chunks."}, -// {Name: sdk.UserParameterClientResultColumnCaseInsensitive, Type: schema.TypeBool, Description: "Parameter that indicates whether to match column name case-insensitively in ResultSet.get* methods in JDBC."}, -// {Name: sdk.UserParameterClientSessionKeepAlive, Type: schema.TypeBool, Description: "Parameter that indicates whether to force a user to log in again after a period of inactivity in the session."}, -// {Name: sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency, Type: schema.TypeInt, Description: "Number of seconds in-between client attempts to update the token for the session."}, -// {Name: sdk.UserParameterClientTimestampTypeMapping, Type: schema.TypeString, Description: "Specifies the [TIMESTAMP_* variation](https://docs.snowflake.com/en/sql-reference/data-types-datetime.html#label-datatypes-timestamp-variations) to use when binding timestamp variables for JDBC or ODBC applications that use the bind API to load data."}, -// {Name: sdk.UserParameterDateInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, -// {Name: sdk.UserParameterDateOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, -// {Name: sdk.UserParameterEnableUnloadPhysicalTypeOptimization, Type: schema.TypeBool, Description: "Specifies whether to set the schema for unloaded Parquet files based on the logical column data types (i.e. the types in the unload SQL query or source table) or on the unloaded column values (i.e. 
the smallest data types and precision that support the values in the output columns of the unload SQL statement or source table)."}, -// {Name: sdk.UserParameterErrorOnNondeterministicMerge, Type: schema.TypeBool, Description: "Specifies whether to return an error when the [MERGE](https://docs.snowflake.com/en/sql-reference/sql/merge) command is used to update or delete a target row that joins multiple source rows and the system cannot determine the action to perform on the target row."}, -// {Name: sdk.UserParameterErrorOnNondeterministicUpdate, Type: schema.TypeBool, Description: "Specifies whether to return an error when the [UPDATE](https://docs.snowflake.com/en/sql-reference/sql/update) command is used to update a target row that joins multiple source rows and the system cannot determine the action to perform on the target row."}, -// {Name: sdk.UserParameterGeographyOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToGeographyOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToGeographyOutputFormat), Description: "Display format for [GEOGRAPHY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geography)."}, -// {Name: sdk.UserParameterGeometryOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToGeometryOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToGeometryOutputFormat), Description: "Display format for [GEOMETRY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geometry)."}, -// {Name: sdk.UserParameterJdbcTreatDecimalAsInt, Type: schema.TypeBool, Description: "Specifies how JDBC processes columns that have a scale of zero (0)."}, -// {Name: sdk.UserParameterJdbcTreatTimestampNtzAsUtc, Type: schema.TypeBool, Description: "Specifies how JDBC processes TIMESTAMP_NTZ values."}, -// {Name: sdk.UserParameterJdbcUseSessionTimezone, Type: schema.TypeBool, Description: "Specifies whether the JDBC Driver uses the time zone of the JVM or the time zone of the session (specified by the [TIMEZONE](https://docs.snowflake.com/en/sql-reference/parameters#label-timezone) parameter) for the getDate(), getTime(), and getTimestamp() methods of the ResultSet class."}, -// {Name: sdk.UserParameterJsonIndent, Type: schema.TypeInt, Description: "Specifies the number of blank spaces to indent each new element in JSON output in the session. Also specifies whether to insert newline characters after each element."}, -// {Name: sdk.UserParameterLockTimeout, Type: schema.TypeInt, Description: "Number of seconds to wait while trying to lock a resource, before timing out and aborting the statement."}, -// {Name: sdk.UserParameterLogLevel, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToLogLevel), DiffSuppress: NormalizeAndCompare(sdk.ToLogLevel), Description: "Specifies the severity level of messages that should be ingested and made available in the active event table. Messages at the specified level (and at more severe levels) are ingested. For more information about log levels, see [Setting log level](https://docs.snowflake.com/en/developer-guide/logging-tracing/logging-log-level)."}, -// {Name: sdk.UserParameterMultiStatementCount, Type: schema.TypeInt, Description: "Number of statements to execute when using the multi-statement capability."}, -// {Name: sdk.UserParameterNoorderSequenceAsDefault, Type: schema.TypeBool, Description: "Specifies whether the ORDER or NOORDER property is set by default when you create a new sequence or add a new table column. 
The ORDER and NOORDER properties determine whether or not the values are generated for the sequence or auto-incremented column in [increasing or decreasing order](https://docs.snowflake.com/en/user-guide/querying-sequences.html#label-querying-sequences-increasing-values)."}, -// {Name: sdk.UserParameterOdbcTreatDecimalAsInt, Type: schema.TypeBool, Description: "Specifies how ODBC processes columns that have a scale of zero (0)."}, -// {Name: sdk.UserParameterQueryTag, Type: schema.TypeString, Description: "Optional string that can be used to tag queries and other SQL statements executed within a session. The tags are displayed in the output of the [QUERY_HISTORY, QUERY_HISTORY_BY_*](https://docs.snowflake.com/en/sql-reference/functions/query_history) functions."}, -// {Name: sdk.UserParameterQuotedIdentifiersIgnoreCase, Type: schema.TypeBool, Description: "Specifies whether letters in double-quoted object identifiers are stored and resolved as uppercase letters. By default, Snowflake preserves the case of alphabetic characters when storing and resolving double-quoted identifiers (see [Identifier resolution](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing)). You can use this parameter in situations in which [third-party applications always use double quotes around identifiers](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing-parameter)."}, -// {Name: sdk.UserParameterRowsPerResultset, Type: schema.TypeInt, Description: "Specifies the maximum number of rows returned in a result set. A value of 0 specifies no maximum."}, -// {Name: sdk.UserParameterS3StageVpceDnsName, Type: schema.TypeString, Description: "Specifies the DNS name of an Amazon S3 interface endpoint. Requests sent to the internal stage of an account via [AWS PrivateLink for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html) use this endpoint to connect. For more information, see [Accessing Internal stages with dedicated interface endpoints](https://docs.snowflake.com/en/user-guide/private-internal-stages-aws.html#label-aws-privatelink-internal-stage-network-isolation)."}, -// {Name: sdk.UserParameterSearchPath, Type: schema.TypeString, Description: "Specifies the path to search to resolve unqualified object names in queries. For more information, see [Name resolution in queries](https://docs.snowflake.com/en/sql-reference/name-resolution.html#label-object-name-resolution-search-path). Comma-separated list of identifiers. An identifier can be a fully or partially qualified schema name."}, -// {Name: sdk.UserParameterSimulatedDataSharingConsumer, Type: schema.TypeString, Description: "Specifies the name of a consumer account to simulate for testing/validating shared data, particularly shared secure views. When this parameter is set in a session, shared views return rows as if executed in the specified consumer account rather than the provider account. For more information, see [Introduction to Secure Data Sharing](https://docs.snowflake.com/en/user-guide/data-sharing-intro) and [Working with shares](https://docs.snowflake.com/en/user-guide/data-sharing-provider)."}, -// {Name: sdk.UserParameterStatementQueuedTimeoutInSeconds, Type: schema.TypeInt, Description: "Amount of time, in seconds, a SQL statement (query, DDL, DML, etc.) remains queued for a warehouse before it is canceled by the system. 
This parameter can be used in conjunction with the [MAX_CONCURRENCY_LEVEL](https://docs.snowflake.com/en/sql-reference/parameters#label-max-concurrency-level) parameter to ensure a warehouse is never backlogged."}, -// {Name: sdk.UserParameterStatementTimeoutInSeconds, Type: schema.TypeInt, Description: "Amount of time, in seconds, after which a running SQL statement (query, DDL, DML, etc.) is canceled by the system."}, -// {Name: sdk.UserParameterStrictJsonOutput, Type: schema.TypeBool, Description: "This parameter specifies whether JSON output in a session is compatible with the general standard (as described by [http://json.org](http://json.org)). By design, Snowflake allows JSON input that contains non-standard values; however, these non-standard values might result in Snowflake outputting JSON that is incompatible with other platforms and languages. This parameter, when enabled, ensures that Snowflake outputs valid/compatible JSON."}, -// {Name: sdk.UserParameterTimestampDayIsAlways24h, Type: schema.TypeBool, Description: "Specifies whether the [DATEADD](https://docs.snowflake.com/en/sql-reference/functions/dateadd) function (and its aliases) always consider a day to be exactly 24 hours for expressions that span multiple days."}, -// {Name: sdk.UserParameterTimestampInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported timestamp format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of timestamps stored in the system during the session)."}, -// {Name: sdk.UserParameterTimestampLtzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_LTZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, -// {Name: sdk.UserParameterTimestampNtzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_NTZ data type."}, -// {Name: sdk.UserParameterTimestampOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, -// {Name: sdk.UserParameterTimestampTypeMapping, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToTimestampTypeMapping), DiffSuppress: NormalizeAndCompare(sdk.ToTimestampTypeMapping), Description: "Specifies the TIMESTAMP_* variation that the TIMESTAMP data type alias maps to."}, -// {Name: sdk.UserParameterTimestampTzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_TZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, -// {Name: sdk.UserParameterTimezone, Type: schema.TypeString, Description: "Specifies the time zone for the session. 
You can specify a [time zone name](https://data.iana.org/time-zones/tzdb-2021a/zone1970.tab) or a [link name](https://data.iana.org/time-zones/tzdb-2021a/backward) from release 2021a of the [IANA Time Zone Database](https://www.iana.org/time-zones) (e.g. America/Los_Angeles, Europe/London, UTC, Etc/GMT, etc.)."}, -// {Name: sdk.UserParameterTimeInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported time format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of times stored in the system during the session)."}, -// {Name: sdk.UserParameterTimeOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, -// {Name: sdk.UserParameterTraceLevel, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToTraceLevel), DiffSuppress: NormalizeAndCompare(sdk.ToTraceLevel), Description: "Controls how trace events are ingested into the event table. For more information about trace levels, see [Setting trace level](https://docs.snowflake.com/en/developer-guide/logging-tracing/tracing-trace-level)."}, -// {Name: sdk.UserParameterTransactionAbortOnError, Type: schema.TypeBool, Description: "Specifies the action to perform when a statement issued within a non-autocommit transaction returns with an error."}, -// {Name: sdk.UserParameterTransactionDefaultIsolationLevel, Type: schema.TypeString, Description: "Specifies the isolation level for transactions in the user session."}, -// {Name: sdk.UserParameterTwoDigitCenturyStart, Type: schema.TypeInt, Description: "Specifies the β€œcentury start” year for 2-digit years (i.e. the earliest year such dates can represent). This parameter prevents ambiguous dates when importing or converting data with the `YY` date format component (i.e. years represented as 2 digits)."}, -// {Name: sdk.UserParameterUnsupportedDdlAction, Type: schema.TypeString, Description: "Determines if an unsupported (i.e. non-default) value specified for a constraint property returns an error."}, -// {Name: sdk.UserParameterUseCachedResult, Type: schema.TypeBool, Description: "Specifies whether to reuse persisted query results, if available, when a matching query is submitted."}, -// {Name: sdk.UserParameterWeekOfYearPolicy, Type: schema.TypeInt, Description: "Specifies how the weeks in a given year are computed. `0`: The semantics used are equivalent to the ISO semantics, in which a week belongs to a given year if at least 4 days of that week are in that year. `1`: January 1 is included in the first week of the year and December 31 is included in the last week of the year."}, -// {Name: sdk.UserParameterWeekStart, Type: schema.TypeInt, Description: "Specifies the first day of the week (used by week-related date functions). `0`: Legacy Snowflake behavior is used (i.e. ISO-like semantics). `1` (Monday) to `7` (Sunday): All the week-related functions use weeks that start on the specified day of the week."}, -// {Name: sdk.UserParameterEnableUnredactedQuerySyntaxError, Type: schema.TypeBool, Description: "Controls whether query text is redacted if a SQL query fails due to a syntax or parsing error. 
If `FALSE`, the content of a failed query is redacted in the views, pages, and functions that provide a query history. Only users with a role that is granted or inherits the AUDIT privilege can set the ENABLE_UNREDACTED_QUERY_SYNTAX_ERROR parameter. When using the ALTER USER command to set the parameter to `TRUE` for a particular user, modify the user that you want to see the query text, not the user who executed the query (if those are different users)."}, -// {Name: sdk.UserParameterNetworkPolicy, Type: schema.TypeString, Description: "Specifies the network policy to enforce for your account. Network policies enable restricting access to your account based on users’ IP address. For more details, see [Controlling network traffic with network policies](https://docs.snowflake.com/en/user-guide/network-policies). Any existing network policy (created using [CREATE NETWORK POLICY](https://docs.snowflake.com/en/sql-reference/sql/create-network-policy))."}, -// {Name: sdk.UserParameterPreventUnloadToInternalStages, Type: schema.TypeBool, Description: "Specifies whether to prevent data unload operations to internal (Snowflake) stages using [COPY INTO ](https://docs.snowflake.com/en/sql-reference/sql/copy-into-location) statements."}, -// } -// -// // TODO [SNOW-1645342]: extract this method after moving to SDK -// for _, field := range userParameterFields { -// fieldName := strings.ToLower(string(field.Name)) -// -// userParametersSchema[fieldName] = &schema.Schema{ -// Type: field.Type, -// Description: enrichWithReferenceToParameterDocs(field.Name, field.Description), -// Computed: true, -// Optional: true, -// ValidateDiagFunc: field.ValidateDiag, -// DiffSuppressFunc: field.DiffSuppress, -// } -// } -//} -// -//func userParametersProvider(ctx context.Context, d ResourceIdProvider, meta any) ([]*sdk.Parameter, error) { -// return parametersProvider(ctx, d, meta.(*provider.Context), userParametersProviderFunc, sdk.ParseAccountObjectIdentifier) -//} -// -//func userParametersProviderFunc(c *sdk.Client) showParametersFunc[sdk.AccountObjectIdentifier] { -// return c.Users.ShowParameters -//} -// -//// TODO [SNOW-1645342]: make generic based on type definition -//func handleUserParameterRead(d *schema.ResourceData, warehouseParameters []*sdk.Parameter) error { -// for _, p := range warehouseParameters { -// switch p.Key { -// case -// string(sdk.UserParameterClientMemoryLimit), -// string(sdk.UserParameterClientPrefetchThreads), -// string(sdk.UserParameterClientResultChunkSize), -// string(sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency), -// string(sdk.UserParameterJsonIndent), -// string(sdk.UserParameterLockTimeout), -// string(sdk.UserParameterMultiStatementCount), -// string(sdk.UserParameterRowsPerResultset), -// string(sdk.UserParameterStatementQueuedTimeoutInSeconds), -// string(sdk.UserParameterStatementTimeoutInSeconds), -// string(sdk.UserParameterTwoDigitCenturyStart), -// string(sdk.UserParameterWeekOfYearPolicy), -// string(sdk.UserParameterWeekStart): -// value, err := strconv.Atoi(p.Value) -// if err != nil { -// return err -// } -// if err := d.Set(strings.ToLower(p.Key), value); err != nil { -// return err -// } -// case -// string(sdk.UserParameterBinaryInputFormat), -// string(sdk.UserParameterBinaryOutputFormat), -// string(sdk.UserParameterClientTimestampTypeMapping), -// string(sdk.UserParameterDateInputFormat), -// string(sdk.UserParameterDateOutputFormat), -// string(sdk.UserParameterGeographyOutputFormat), -// string(sdk.UserParameterGeometryOutputFormat), -// 
string(sdk.UserParameterLogLevel), -// string(sdk.UserParameterQueryTag), -// string(sdk.UserParameterS3StageVpceDnsName), -// string(sdk.UserParameterSearchPath), -// string(sdk.UserParameterSimulatedDataSharingConsumer), -// string(sdk.UserParameterTimestampInputFormat), -// string(sdk.UserParameterTimestampLtzOutputFormat), -// string(sdk.UserParameterTimestampNtzOutputFormat), -// string(sdk.UserParameterTimestampOutputFormat), -// string(sdk.UserParameterTimestampTypeMapping), -// string(sdk.UserParameterTimestampTzOutputFormat), -// string(sdk.UserParameterTimezone), -// string(sdk.UserParameterTimeInputFormat), -// string(sdk.UserParameterTimeOutputFormat), -// string(sdk.UserParameterTraceLevel), -// string(sdk.UserParameterTransactionDefaultIsolationLevel), -// string(sdk.UserParameterUnsupportedDdlAction), -// string(sdk.UserParameterNetworkPolicy): -// if err := d.Set(strings.ToLower(p.Key), p.Value); err != nil { -// return err -// } -// case -// string(sdk.UserParameterAbortDetachedQuery), -// string(sdk.UserParameterAutocommit), -// string(sdk.UserParameterClientMetadataRequestUseConnectionCtx), -// string(sdk.UserParameterClientResultColumnCaseInsensitive), -// string(sdk.UserParameterClientSessionKeepAlive), -// string(sdk.UserParameterEnableUnloadPhysicalTypeOptimization), -// string(sdk.UserParameterErrorOnNondeterministicMerge), -// string(sdk.UserParameterErrorOnNondeterministicUpdate), -// string(sdk.UserParameterJdbcTreatDecimalAsInt), -// string(sdk.UserParameterJdbcTreatTimestampNtzAsUtc), -// string(sdk.UserParameterJdbcUseSessionTimezone), -// string(sdk.UserParameterNoorderSequenceAsDefault), -// string(sdk.UserParameterOdbcTreatDecimalAsInt), -// string(sdk.UserParameterQuotedIdentifiersIgnoreCase), -// string(sdk.UserParameterStrictJsonOutput), -// string(sdk.UserParameterTimestampDayIsAlways24h), -// string(sdk.UserParameterTransactionAbortOnError), -// string(sdk.UserParameterUseCachedResult), -// string(sdk.UserParameterEnableUnredactedQuerySyntaxError), -// string(sdk.UserParameterPreventUnloadToInternalStages): -// value, err := strconv.ParseBool(p.Value) -// if err != nil { -// return err -// } -// if err := d.Set(strings.ToLower(p.Key), value); err != nil { -// return err -// } -// } -// } -// -// return nil -//} -// -//// TODO [SNOW-1348330]: consider using SessionParameters#setParam during parameters rework -//// (because currently setParam already is able to set the right parameter based on the string value input, -//// but GetConfigPropertyAsPointerAllowingZeroValue receives typed value, -//// so this would be unnecessary running in circles) -//// TODO [SNOW-1645342]: include mappers in the param definition (after moving it to the SDK: identity versus concrete) -//func handleUserParametersCreate(d *schema.ResourceData, createOpts *sdk.CreateUserOptions) diag.Diagnostics { -// return JoinDiags( -// handleParameterCreate(d, sdk.UserParameterAbortDetachedQuery, &createOpts.SessionParameters.AbortDetachedQuery), -// handleParameterCreate(d, sdk.UserParameterAutocommit, &createOpts.SessionParameters.Autocommit), -// handleParameterCreateWithMapping(d, sdk.UserParameterBinaryInputFormat, &createOpts.SessionParameters.BinaryInputFormat, stringToStringEnumProvider(sdk.ToBinaryInputFormat)), -// handleParameterCreateWithMapping(d, sdk.UserParameterBinaryOutputFormat, &createOpts.SessionParameters.BinaryOutputFormat, stringToStringEnumProvider(sdk.ToBinaryOutputFormat)), -// handleParameterCreate(d, sdk.UserParameterClientMemoryLimit, 
&createOpts.SessionParameters.ClientMemoryLimit), -// handleParameterCreate(d, sdk.UserParameterClientMetadataRequestUseConnectionCtx, &createOpts.SessionParameters.ClientMetadataRequestUseConnectionCtx), -// handleParameterCreate(d, sdk.UserParameterClientPrefetchThreads, &createOpts.SessionParameters.ClientPrefetchThreads), -// handleParameterCreate(d, sdk.UserParameterClientResultChunkSize, &createOpts.SessionParameters.ClientResultChunkSize), -// handleParameterCreate(d, sdk.UserParameterClientResultColumnCaseInsensitive, &createOpts.SessionParameters.ClientResultColumnCaseInsensitive), -// handleParameterCreate(d, sdk.UserParameterClientSessionKeepAlive, &createOpts.SessionParameters.ClientSessionKeepAlive), -// handleParameterCreate(d, sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency, &createOpts.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency), -// handleParameterCreateWithMapping(d, sdk.UserParameterClientTimestampTypeMapping, &createOpts.SessionParameters.ClientTimestampTypeMapping, stringToStringEnumProvider(sdk.ToClientTimestampTypeMapping)), -// handleParameterCreate(d, sdk.UserParameterDateInputFormat, &createOpts.SessionParameters.DateInputFormat), -// handleParameterCreate(d, sdk.UserParameterDateOutputFormat, &createOpts.SessionParameters.DateOutputFormat), -// handleParameterCreate(d, sdk.UserParameterEnableUnloadPhysicalTypeOptimization, &createOpts.SessionParameters.EnableUnloadPhysicalTypeOptimization), -// handleParameterCreate(d, sdk.UserParameterErrorOnNondeterministicMerge, &createOpts.SessionParameters.ErrorOnNondeterministicMerge), -// handleParameterCreate(d, sdk.UserParameterErrorOnNondeterministicUpdate, &createOpts.SessionParameters.ErrorOnNondeterministicUpdate), -// handleParameterCreateWithMapping(d, sdk.UserParameterGeographyOutputFormat, &createOpts.SessionParameters.GeographyOutputFormat, stringToStringEnumProvider(sdk.ToGeographyOutputFormat)), -// handleParameterCreateWithMapping(d, sdk.UserParameterGeometryOutputFormat, &createOpts.SessionParameters.GeometryOutputFormat, stringToStringEnumProvider(sdk.ToGeometryOutputFormat)), -// handleParameterCreate(d, sdk.UserParameterJdbcTreatDecimalAsInt, &createOpts.SessionParameters.JdbcTreatDecimalAsInt), -// handleParameterCreate(d, sdk.UserParameterJdbcTreatTimestampNtzAsUtc, &createOpts.SessionParameters.JdbcTreatTimestampNtzAsUtc), -// handleParameterCreate(d, sdk.UserParameterJdbcUseSessionTimezone, &createOpts.SessionParameters.JdbcUseSessionTimezone), -// handleParameterCreate(d, sdk.UserParameterJsonIndent, &createOpts.SessionParameters.JSONIndent), -// handleParameterCreate(d, sdk.UserParameterLockTimeout, &createOpts.SessionParameters.LockTimeout), -// handleParameterCreateWithMapping(d, sdk.UserParameterLogLevel, &createOpts.SessionParameters.LogLevel, stringToStringEnumProvider(sdk.ToLogLevel)), -// handleParameterCreate(d, sdk.UserParameterMultiStatementCount, &createOpts.SessionParameters.MultiStatementCount), -// handleParameterCreate(d, sdk.UserParameterNoorderSequenceAsDefault, &createOpts.SessionParameters.NoorderSequenceAsDefault), -// handleParameterCreate(d, sdk.UserParameterOdbcTreatDecimalAsInt, &createOpts.SessionParameters.OdbcTreatDecimalAsInt), -// handleParameterCreate(d, sdk.UserParameterQueryTag, &createOpts.SessionParameters.QueryTag), -// handleParameterCreate(d, sdk.UserParameterQuotedIdentifiersIgnoreCase, &createOpts.SessionParameters.QuotedIdentifiersIgnoreCase), -// handleParameterCreate(d, sdk.UserParameterRowsPerResultset, 
&createOpts.SessionParameters.RowsPerResultset), -// handleParameterCreate(d, sdk.UserParameterS3StageVpceDnsName, &createOpts.SessionParameters.S3StageVpceDnsName), -// handleParameterCreate(d, sdk.UserParameterSearchPath, &createOpts.SessionParameters.SearchPath), -// handleParameterCreate(d, sdk.UserParameterSimulatedDataSharingConsumer, &createOpts.SessionParameters.SimulatedDataSharingConsumer), -// handleParameterCreate(d, sdk.UserParameterStatementQueuedTimeoutInSeconds, &createOpts.SessionParameters.StatementQueuedTimeoutInSeconds), -// handleParameterCreate(d, sdk.UserParameterStatementTimeoutInSeconds, &createOpts.SessionParameters.StatementTimeoutInSeconds), -// handleParameterCreate(d, sdk.UserParameterStrictJsonOutput, &createOpts.SessionParameters.StrictJSONOutput), -// handleParameterCreate(d, sdk.UserParameterTimestampDayIsAlways24h, &createOpts.SessionParameters.TimestampDayIsAlways24h), -// handleParameterCreate(d, sdk.UserParameterTimestampInputFormat, &createOpts.SessionParameters.TimestampInputFormat), -// handleParameterCreate(d, sdk.UserParameterTimestampLtzOutputFormat, &createOpts.SessionParameters.TimestampLTZOutputFormat), -// handleParameterCreate(d, sdk.UserParameterTimestampNtzOutputFormat, &createOpts.SessionParameters.TimestampNTZOutputFormat), -// handleParameterCreate(d, sdk.UserParameterTimestampOutputFormat, &createOpts.SessionParameters.TimestampOutputFormat), -// handleParameterCreateWithMapping(d, sdk.UserParameterTimestampTypeMapping, &createOpts.SessionParameters.TimestampTypeMapping, stringToStringEnumProvider(sdk.ToTimestampTypeMapping)), -// handleParameterCreate(d, sdk.UserParameterTimestampTzOutputFormat, &createOpts.SessionParameters.TimestampTZOutputFormat), -// handleParameterCreate(d, sdk.UserParameterTimezone, &createOpts.SessionParameters.Timezone), -// handleParameterCreate(d, sdk.UserParameterTimeInputFormat, &createOpts.SessionParameters.TimeInputFormat), -// handleParameterCreate(d, sdk.UserParameterTimeOutputFormat, &createOpts.SessionParameters.TimeOutputFormat), -// handleParameterCreateWithMapping(d, sdk.UserParameterTraceLevel, &createOpts.SessionParameters.TraceLevel, stringToStringEnumProvider(sdk.ToTraceLevel)), -// handleParameterCreate(d, sdk.UserParameterTransactionAbortOnError, &createOpts.SessionParameters.TransactionAbortOnError), -// handleParameterCreateWithMapping(d, sdk.UserParameterTransactionDefaultIsolationLevel, &createOpts.SessionParameters.TransactionDefaultIsolationLevel, stringToStringEnumProvider(sdk.ToTransactionDefaultIsolationLevel)), -// handleParameterCreate(d, sdk.UserParameterTwoDigitCenturyStart, &createOpts.SessionParameters.TwoDigitCenturyStart), -// handleParameterCreateWithMapping(d, sdk.UserParameterUnsupportedDdlAction, &createOpts.SessionParameters.UnsupportedDDLAction, stringToStringEnumProvider(sdk.ToUnsupportedDDLAction)), -// handleParameterCreate(d, sdk.UserParameterUseCachedResult, &createOpts.SessionParameters.UseCachedResult), -// handleParameterCreate(d, sdk.UserParameterWeekOfYearPolicy, &createOpts.SessionParameters.WeekOfYearPolicy), -// handleParameterCreate(d, sdk.UserParameterWeekStart, &createOpts.SessionParameters.WeekStart), -// handleParameterCreate(d, sdk.UserParameterEnableUnredactedQuerySyntaxError, &createOpts.ObjectParameters.EnableUnredactedQuerySyntaxError), -// handleParameterCreateWithMapping(d, sdk.UserParameterNetworkPolicy, &createOpts.ObjectParameters.NetworkPolicy, stringToAccountObjectIdentifier), -// handleParameterCreate(d, 
sdk.UserParameterPreventUnloadToInternalStages, &createOpts.ObjectParameters.PreventUnloadToInternalStages), -// ) -//} -// -//func handleUserParametersUpdate(d *schema.ResourceData, set *sdk.UserSet, unset *sdk.UserUnset) diag.Diagnostics { -// return JoinDiags( -// handleParameterUpdate(d, sdk.UserParameterAbortDetachedQuery, &set.SessionParameters.AbortDetachedQuery, &unset.SessionParameters.AbortDetachedQuery), -// handleParameterUpdate(d, sdk.UserParameterAutocommit, &set.SessionParameters.Autocommit, &unset.SessionParameters.Autocommit), -// handleParameterUpdateWithMapping(d, sdk.UserParameterBinaryInputFormat, &set.SessionParameters.BinaryInputFormat, &unset.SessionParameters.BinaryInputFormat, stringToStringEnumProvider(sdk.ToBinaryInputFormat)), -// handleParameterUpdateWithMapping(d, sdk.UserParameterBinaryOutputFormat, &set.SessionParameters.BinaryOutputFormat, &unset.SessionParameters.BinaryOutputFormat, stringToStringEnumProvider(sdk.ToBinaryOutputFormat)), -// handleParameterUpdate(d, sdk.UserParameterClientMemoryLimit, &set.SessionParameters.ClientMemoryLimit, &unset.SessionParameters.ClientMemoryLimit), -// handleParameterUpdate(d, sdk.UserParameterClientMetadataRequestUseConnectionCtx, &set.SessionParameters.ClientMetadataRequestUseConnectionCtx, &unset.SessionParameters.ClientMetadataRequestUseConnectionCtx), -// handleParameterUpdate(d, sdk.UserParameterClientPrefetchThreads, &set.SessionParameters.ClientPrefetchThreads, &unset.SessionParameters.ClientPrefetchThreads), -// handleParameterUpdate(d, sdk.UserParameterClientResultChunkSize, &set.SessionParameters.ClientResultChunkSize, &unset.SessionParameters.ClientResultChunkSize), -// handleParameterUpdate(d, sdk.UserParameterClientResultColumnCaseInsensitive, &set.SessionParameters.ClientResultColumnCaseInsensitive, &unset.SessionParameters.ClientResultColumnCaseInsensitive), -// handleParameterUpdate(d, sdk.UserParameterClientSessionKeepAlive, &set.SessionParameters.ClientSessionKeepAlive, &unset.SessionParameters.ClientSessionKeepAlive), -// handleParameterUpdate(d, sdk.UserParameterClientSessionKeepAliveHeartbeatFrequency, &set.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency, &unset.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency), -// handleParameterUpdateWithMapping(d, sdk.UserParameterClientTimestampTypeMapping, &set.SessionParameters.ClientTimestampTypeMapping, &unset.SessionParameters.ClientTimestampTypeMapping, stringToStringEnumProvider(sdk.ToClientTimestampTypeMapping)), -// handleParameterUpdate(d, sdk.UserParameterDateInputFormat, &set.SessionParameters.DateInputFormat, &unset.SessionParameters.DateInputFormat), -// handleParameterUpdate(d, sdk.UserParameterDateOutputFormat, &set.SessionParameters.DateOutputFormat, &unset.SessionParameters.DateOutputFormat), -// handleParameterUpdate(d, sdk.UserParameterEnableUnloadPhysicalTypeOptimization, &set.SessionParameters.EnableUnloadPhysicalTypeOptimization, &unset.SessionParameters.EnableUnloadPhysicalTypeOptimization), -// handleParameterUpdate(d, sdk.UserParameterErrorOnNondeterministicMerge, &set.SessionParameters.ErrorOnNondeterministicMerge, &unset.SessionParameters.ErrorOnNondeterministicMerge), -// handleParameterUpdate(d, sdk.UserParameterErrorOnNondeterministicUpdate, &set.SessionParameters.ErrorOnNondeterministicUpdate, &unset.SessionParameters.ErrorOnNondeterministicUpdate), -// handleParameterUpdateWithMapping(d, sdk.UserParameterGeographyOutputFormat, &set.SessionParameters.GeographyOutputFormat, 
&unset.SessionParameters.GeographyOutputFormat, stringToStringEnumProvider(sdk.ToGeographyOutputFormat)), -// handleParameterUpdateWithMapping(d, sdk.UserParameterGeometryOutputFormat, &set.SessionParameters.GeometryOutputFormat, &unset.SessionParameters.GeometryOutputFormat, stringToStringEnumProvider(sdk.ToGeometryOutputFormat)), -// handleParameterUpdate(d, sdk.UserParameterJdbcTreatDecimalAsInt, &set.SessionParameters.JdbcTreatDecimalAsInt, &unset.SessionParameters.JdbcTreatDecimalAsInt), -// handleParameterUpdate(d, sdk.UserParameterJdbcTreatTimestampNtzAsUtc, &set.SessionParameters.JdbcTreatTimestampNtzAsUtc, &unset.SessionParameters.JdbcTreatTimestampNtzAsUtc), -// handleParameterUpdate(d, sdk.UserParameterJdbcUseSessionTimezone, &set.SessionParameters.JdbcUseSessionTimezone, &unset.SessionParameters.JdbcUseSessionTimezone), -// handleParameterUpdate(d, sdk.UserParameterJsonIndent, &set.SessionParameters.JSONIndent, &unset.SessionParameters.JSONIndent), -// handleParameterUpdate(d, sdk.UserParameterLockTimeout, &set.SessionParameters.LockTimeout, &unset.SessionParameters.LockTimeout), -// handleParameterUpdateWithMapping(d, sdk.UserParameterLogLevel, &set.SessionParameters.LogLevel, &unset.SessionParameters.LogLevel, stringToStringEnumProvider(sdk.ToLogLevel)), -// handleParameterUpdate(d, sdk.UserParameterMultiStatementCount, &set.SessionParameters.MultiStatementCount, &unset.SessionParameters.MultiStatementCount), -// handleParameterUpdate(d, sdk.UserParameterNoorderSequenceAsDefault, &set.SessionParameters.NoorderSequenceAsDefault, &unset.SessionParameters.NoorderSequenceAsDefault), -// handleParameterUpdate(d, sdk.UserParameterOdbcTreatDecimalAsInt, &set.SessionParameters.OdbcTreatDecimalAsInt, &unset.SessionParameters.OdbcTreatDecimalAsInt), -// handleParameterUpdate(d, sdk.UserParameterQueryTag, &set.SessionParameters.QueryTag, &unset.SessionParameters.QueryTag), -// handleParameterUpdate(d, sdk.UserParameterQuotedIdentifiersIgnoreCase, &set.SessionParameters.QuotedIdentifiersIgnoreCase, &unset.SessionParameters.QuotedIdentifiersIgnoreCase), -// handleParameterUpdate(d, sdk.UserParameterRowsPerResultset, &set.SessionParameters.RowsPerResultset, &unset.SessionParameters.RowsPerResultset), -// handleParameterUpdate(d, sdk.UserParameterS3StageVpceDnsName, &set.SessionParameters.S3StageVpceDnsName, &unset.SessionParameters.S3StageVpceDnsName), -// handleParameterUpdate(d, sdk.UserParameterSearchPath, &set.SessionParameters.SearchPath, &unset.SessionParameters.SearchPath), -// handleParameterUpdate(d, sdk.UserParameterSimulatedDataSharingConsumer, &set.SessionParameters.SimulatedDataSharingConsumer, &unset.SessionParameters.SimulatedDataSharingConsumer), -// handleParameterUpdate(d, sdk.UserParameterStatementQueuedTimeoutInSeconds, &set.SessionParameters.StatementQueuedTimeoutInSeconds, &unset.SessionParameters.StatementQueuedTimeoutInSeconds), -// handleParameterUpdate(d, sdk.UserParameterStatementTimeoutInSeconds, &set.SessionParameters.StatementTimeoutInSeconds, &unset.SessionParameters.StatementTimeoutInSeconds), -// handleParameterUpdate(d, sdk.UserParameterStrictJsonOutput, &set.SessionParameters.StrictJSONOutput, &unset.SessionParameters.StrictJSONOutput), -// handleParameterUpdate(d, sdk.UserParameterTimestampDayIsAlways24h, &set.SessionParameters.TimestampDayIsAlways24h, &unset.SessionParameters.TimestampDayIsAlways24h), -// handleParameterUpdate(d, sdk.UserParameterTimestampInputFormat, &set.SessionParameters.TimestampInputFormat, 
&unset.SessionParameters.TimestampInputFormat), -// handleParameterUpdate(d, sdk.UserParameterTimestampLtzOutputFormat, &set.SessionParameters.TimestampLTZOutputFormat, &unset.SessionParameters.TimestampLTZOutputFormat), -// handleParameterUpdate(d, sdk.UserParameterTimestampNtzOutputFormat, &set.SessionParameters.TimestampNTZOutputFormat, &unset.SessionParameters.TimestampNTZOutputFormat), -// handleParameterUpdate(d, sdk.UserParameterTimestampOutputFormat, &set.SessionParameters.TimestampOutputFormat, &unset.SessionParameters.TimestampOutputFormat), -// handleParameterUpdateWithMapping(d, sdk.UserParameterTimestampTypeMapping, &set.SessionParameters.TimestampTypeMapping, &unset.SessionParameters.TimestampTypeMapping, stringToStringEnumProvider(sdk.ToTimestampTypeMapping)), -// handleParameterUpdate(d, sdk.UserParameterTimestampTzOutputFormat, &set.SessionParameters.TimestampTZOutputFormat, &unset.SessionParameters.TimestampTZOutputFormat), -// handleParameterUpdate(d, sdk.UserParameterTimezone, &set.SessionParameters.Timezone, &unset.SessionParameters.Timezone), -// handleParameterUpdate(d, sdk.UserParameterTimeInputFormat, &set.SessionParameters.TimeInputFormat, &unset.SessionParameters.TimeInputFormat), -// handleParameterUpdate(d, sdk.UserParameterTimeOutputFormat, &set.SessionParameters.TimeOutputFormat, &unset.SessionParameters.TimeOutputFormat), -// handleParameterUpdateWithMapping(d, sdk.UserParameterTraceLevel, &set.SessionParameters.TraceLevel, &unset.SessionParameters.TraceLevel, stringToStringEnumProvider(sdk.ToTraceLevel)), -// handleParameterUpdate(d, sdk.UserParameterTransactionAbortOnError, &set.SessionParameters.TransactionAbortOnError, &unset.SessionParameters.TransactionAbortOnError), -// handleParameterUpdateWithMapping(d, sdk.UserParameterTransactionDefaultIsolationLevel, &set.SessionParameters.TransactionDefaultIsolationLevel, &unset.SessionParameters.TransactionDefaultIsolationLevel, stringToStringEnumProvider(sdk.ToTransactionDefaultIsolationLevel)), -// handleParameterUpdate(d, sdk.UserParameterTwoDigitCenturyStart, &set.SessionParameters.TwoDigitCenturyStart, &unset.SessionParameters.TwoDigitCenturyStart), -// handleParameterUpdateWithMapping(d, sdk.UserParameterUnsupportedDdlAction, &set.SessionParameters.UnsupportedDDLAction, &unset.SessionParameters.UnsupportedDDLAction, stringToStringEnumProvider(sdk.ToUnsupportedDDLAction)), -// handleParameterUpdate(d, sdk.UserParameterUseCachedResult, &set.SessionParameters.UseCachedResult, &unset.SessionParameters.UseCachedResult), -// handleParameterUpdate(d, sdk.UserParameterWeekOfYearPolicy, &set.SessionParameters.WeekOfYearPolicy, &unset.SessionParameters.WeekOfYearPolicy), -// handleParameterUpdate(d, sdk.UserParameterWeekStart, &set.SessionParameters.WeekStart, &unset.SessionParameters.WeekStart), -// handleParameterUpdate(d, sdk.UserParameterEnableUnredactedQuerySyntaxError, &set.ObjectParameters.EnableUnredactedQuerySyntaxError, &unset.ObjectParameters.EnableUnredactedQuerySyntaxError), -// handleParameterUpdateWithMapping(d, sdk.UserParameterNetworkPolicy, &set.ObjectParameters.NetworkPolicy, &unset.ObjectParameters.NetworkPolicy, stringToAccountObjectIdentifier), -// handleParameterUpdate(d, sdk.UserParameterPreventUnloadToInternalStages, &set.ObjectParameters.PreventUnloadToInternalStages, &unset.ObjectParameters.PreventUnloadToInternalStages), -// ) -//} +import ( + "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "strconv" + "strings" +) + +var ( + taskParametersSchema = make(map[string]*schema.Schema) + taskParametersCustomDiff = ParametersCustomDiff( + taskParametersProvider, + // task parameters + parameter[sdk.TaskParameter]{sdk.TaskParameterSuspendTaskAfterNumFailures, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTaskAutoRetryAttempts, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUserTaskManagedInitialWarehouseSize, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUserTaskTimeoutMs, valueTypeInt, sdk.ParameterTypeTask}, + // session parameters + parameter[sdk.TaskParameter]{sdk.TaskParameterAbortDetachedQuery, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterAutocommit, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterBinaryInputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterBinaryOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientMemoryLimit, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientMetadataRequestUseConnectionCtx, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientPrefetchThreads, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientResultChunkSize, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientResultColumnCaseInsensitive, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientSessionKeepAlive, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientTimestampTypeMapping, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterDateInputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterDateOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterErrorOnNondeterministicMerge, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterErrorOnNondeterministicUpdate, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterGeographyOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterGeometryOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterJdbcUseSessionTimezone, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterJsonIndent, valueTypeInt, sdk.ParameterTypeTask}, + 
parameter[sdk.TaskParameter]{sdk.TaskParameterLockTimeout, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterLogLevel, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterMultiStatementCount, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterNoorderSequenceAsDefault, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterOdbcTreatDecimalAsInt, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterQueryTag, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterQuotedIdentifiersIgnoreCase, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterRowsPerResultset, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterS3StageVpceDnsName, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterSearchPath, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterStatementQueuedTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterStatementTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterStrictJsonOutput, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampDayIsAlways24h, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampInputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampLtzOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampNtzOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampTypeMapping, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampTzOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimezone, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimeInputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimeOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTraceLevel, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTransactionAbortOnError, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTransactionDefaultIsolationLevel, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTwoDigitCenturyStart, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUnsupportedDdlAction, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUseCachedResult, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterWeekOfYearPolicy, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterWeekStart, valueTypeInt, sdk.ParameterTypeTask}, + ) +) + +func init() { + // TODO [SNOW-1645342]: move to the SDK + TaskParameterFields := []parameterDef[sdk.TaskParameter]{ + // task parameters + {Name: 
sdk.TaskParameterSuspendTaskAfterNumFailures, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension)."}, + {Name: sdk.TaskParameterTaskAutoRetryAttempts, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the number of automatic task graph retry attempts. If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed."}, + {Name: sdk.TaskParameterUserTaskManagedInitialWarehouseSize, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToWarehouseSize), DiffSuppress: NormalizeAndCompare(sdk.ToWarehouseSize), Description: "Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse)"}, + {Name: sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Minimum amount of time between Triggered Task executions in seconds"}, + {Name: sdk.TaskParameterUserTaskTimeoutMs, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds)."}, + // session params + {Name: sdk.TaskParameterAbortDetachedQuery, Type: schema.TypeBool, Description: "Specifies the action that Snowflake performs for in-progress queries if connectivity is lost due to abrupt termination of a session (e.g. network outage, browser termination, service interruption)."}, + {Name: sdk.TaskParameterAutocommit, Type: schema.TypeBool, Description: "Specifies whether autocommit is enabled for the session. Autocommit determines whether a DML statement, when executed without an active transaction, is automatically committed after the statement successfully completes. For more information, see [Transactions](https://docs.snowflake.com/en/sql-reference/transactions)."}, + {Name: sdk.TaskParameterBinaryInputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToBinaryInputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToBinaryInputFormat), Description: "The format of VARCHAR values passed as input to VARCHAR-to-BINARY conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output)."}, + {Name: sdk.TaskParameterBinaryOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToBinaryOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToBinaryOutputFormat), Description: "The format for VARCHAR values returned as output by BINARY-to-VARCHAR conversion functions. 
For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output)."}, + {Name: sdk.TaskParameterClientMemoryLimit, Type: schema.TypeInt, Description: "Parameter that specifies the maximum amount of memory the JDBC driver or ODBC driver should use for the result set from queries (in MB)."}, + {Name: sdk.TaskParameterClientMetadataRequestUseConnectionCtx, Type: schema.TypeBool, Description: "For specific ODBC functions and JDBC methods, this parameter can change the default search scope from all databases/schemas to the current database/schema. The narrower search typically returns fewer rows and executes more quickly."}, + {Name: sdk.TaskParameterClientPrefetchThreads, Type: schema.TypeInt, Description: "Parameter that specifies the number of threads used by the client to pre-fetch large result sets. The driver will attempt to honor the parameter value, but defines the minimum and maximum values (depending on your system’s resources) to improve performance."}, + {Name: sdk.TaskParameterClientResultChunkSize, Type: schema.TypeInt, Description: "Parameter that specifies the maximum size of each set (or chunk) of query results to download (in MB). The JDBC driver downloads query results in chunks."}, + {Name: sdk.TaskParameterClientResultColumnCaseInsensitive, Type: schema.TypeBool, Description: "Parameter that indicates whether to match column name case-insensitively in ResultSet.get* methods in JDBC."}, + {Name: sdk.TaskParameterClientSessionKeepAlive, Type: schema.TypeBool, Description: "Parameter that indicates whether to force a user to log in again after a period of inactivity in the session."}, + {Name: sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, Type: schema.TypeInt, Description: "Number of seconds in-between client attempts to update the token for the session."}, + {Name: sdk.TaskParameterClientTimestampTypeMapping, Type: schema.TypeString, Description: "Specifies the [TIMESTAMP_* variation](https://docs.snowflake.com/en/sql-reference/data-types-datetime.html#label-datatypes-timestamp-variations) to use when binding timestamp variables for JDBC or ODBC applications that use the bind API to load data."}, + {Name: sdk.TaskParameterDateInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterDateOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, Type: schema.TypeBool, Description: "Specifies whether to set the schema for unloaded Parquet files based on the logical column data types (i.e. the types in the unload SQL query or source table) or on the unloaded column values (i.e. 
the smallest data types and precision that support the values in the output columns of the unload SQL statement or source table)."}, + {Name: sdk.TaskParameterErrorOnNondeterministicMerge, Type: schema.TypeBool, Description: "Specifies whether to return an error when the [MERGE](https://docs.snowflake.com/en/sql-reference/sql/merge) command is used to update or delete a target row that joins multiple source rows and the system cannot determine the action to perform on the target row."}, + {Name: sdk.TaskParameterErrorOnNondeterministicUpdate, Type: schema.TypeBool, Description: "Specifies whether to return an error when the [UPDATE](https://docs.snowflake.com/en/sql-reference/sql/update) command is used to update a target row that joins multiple source rows and the system cannot determine the action to perform on the target row."}, + {Name: sdk.TaskParameterGeographyOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToGeographyOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToGeographyOutputFormat), Description: "Display format for [GEOGRAPHY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geography)."}, + {Name: sdk.TaskParameterGeometryOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToGeometryOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToGeometryOutputFormat), Description: "Display format for [GEOMETRY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geometry)."}, + {Name: sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, Type: schema.TypeBool, Description: "Specifies how JDBC processes TIMESTAMP_NTZ values."}, + {Name: sdk.TaskParameterJdbcUseSessionTimezone, Type: schema.TypeBool, Description: "Specifies whether the JDBC Driver uses the time zone of the JVM or the time zone of the session (specified by the [TIMEZONE](https://docs.snowflake.com/en/sql-reference/parameters#label-timezone) parameter) for the getDate(), getTime(), and getTimestamp() methods of the ResultSet class."}, + {Name: sdk.TaskParameterJsonIndent, Type: schema.TypeInt, Description: "Specifies the number of blank spaces to indent each new element in JSON output in the session. Also specifies whether to insert newline characters after each element."}, + {Name: sdk.TaskParameterLockTimeout, Type: schema.TypeInt, Description: "Number of seconds to wait while trying to lock a resource, before timing out and aborting the statement."}, + {Name: sdk.TaskParameterLogLevel, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToLogLevel), DiffSuppress: NormalizeAndCompare(sdk.ToLogLevel), Description: "Specifies the severity level of messages that should be ingested and made available in the active event table. Messages at the specified level (and at more severe levels) are ingested. For more information about log levels, see [Setting log level](https://docs.snowflake.com/en/developer-guide/logging-tracing/logging-log-level)."}, + {Name: sdk.TaskParameterMultiStatementCount, Type: schema.TypeInt, Description: "Number of statements to execute when using the multi-statement capability."}, + {Name: sdk.TaskParameterNoorderSequenceAsDefault, Type: schema.TypeBool, Description: "Specifies whether the ORDER or NOORDER property is set by default when you create a new sequence or add a new table column. 
The ORDER and NOORDER properties determine whether or not the values are generated for the sequence or auto-incremented column in [increasing or decreasing order](https://docs.snowflake.com/en/user-guide/querying-sequences.html#label-querying-sequences-increasing-values)."}, + {Name: sdk.TaskParameterOdbcTreatDecimalAsInt, Type: schema.TypeBool, Description: "Specifies how ODBC processes columns that have a scale of zero (0)."}, + {Name: sdk.TaskParameterQueryTag, Type: schema.TypeString, Description: "Optional string that can be used to tag queries and other SQL statements executed within a session. The tags are displayed in the output of the [QUERY_HISTORY, QUERY_HISTORY_BY_*](https://docs.snowflake.com/en/sql-reference/functions/query_history) functions."}, + {Name: sdk.TaskParameterQuotedIdentifiersIgnoreCase, Type: schema.TypeBool, Description: "Specifies whether letters in double-quoted object identifiers are stored and resolved as uppercase letters. By default, Snowflake preserves the case of alphabetic characters when storing and resolving double-quoted identifiers (see [Identifier resolution](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing)). You can use this parameter in situations in which [third-party applications always use double quotes around identifiers](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing-parameter)."}, + {Name: sdk.TaskParameterRowsPerResultset, Type: schema.TypeInt, Description: "Specifies the maximum number of rows returned in a result set. A value of 0 specifies no maximum."}, + {Name: sdk.TaskParameterS3StageVpceDnsName, Type: schema.TypeString, Description: "Specifies the DNS name of an Amazon S3 interface endpoint. Requests sent to the internal stage of an account via [AWS PrivateLink for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html) use this endpoint to connect. For more information, see [Accessing Internal stages with dedicated interface endpoints](https://docs.snowflake.com/en/user-guide/private-internal-stages-aws.html#label-aws-privatelink-internal-stage-network-isolation)."}, + {Name: sdk.TaskParameterSearchPath, Type: schema.TypeString, Description: "Specifies the path to search to resolve unqualified object names in queries. For more information, see [Name resolution in queries](https://docs.snowflake.com/en/sql-reference/name-resolution.html#label-object-name-resolution-search-path). Comma-separated list of identifiers. An identifier can be a fully or partially qualified schema name."}, + {Name: sdk.TaskParameterStatementQueuedTimeoutInSeconds, Type: schema.TypeInt, Description: "Amount of time, in seconds, a SQL statement (query, DDL, DML, etc.) remains queued for a warehouse before it is canceled by the system. This parameter can be used in conjunction with the [MAX_CONCURRENCY_LEVEL](https://docs.snowflake.com/en/sql-reference/parameters#label-max-concurrency-level) parameter to ensure a warehouse is never backlogged."}, + {Name: sdk.TaskParameterStatementTimeoutInSeconds, Type: schema.TypeInt, Description: "Amount of time, in seconds, after which a running SQL statement (query, DDL, DML, etc.) is canceled by the system."}, + {Name: sdk.TaskParameterStrictJsonOutput, Type: schema.TypeBool, Description: "This parameter specifies whether JSON output in a session is compatible with the general standard (as described by [http://json.org](http://json.org)). 
By design, Snowflake allows JSON input that contains non-standard values; however, these non-standard values might result in Snowflake outputting JSON that is incompatible with other platforms and languages. This parameter, when enabled, ensures that Snowflake outputs valid/compatible JSON."}, + {Name: sdk.TaskParameterTimestampDayIsAlways24h, Type: schema.TypeBool, Description: "Specifies whether the [DATEADD](https://docs.snowflake.com/en/sql-reference/functions/dateadd) function (and its aliases) always consider a day to be exactly 24 hours for expressions that span multiple days."}, + {Name: sdk.TaskParameterTimestampInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported timestamp format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of timestamps stored in the system during the session)."}, + {Name: sdk.TaskParameterTimestampLtzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_LTZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterTimestampNtzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_NTZ data type."}, + {Name: sdk.TaskParameterTimestampOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterTimestampTypeMapping, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToTimestampTypeMapping), DiffSuppress: NormalizeAndCompare(sdk.ToTimestampTypeMapping), Description: "Specifies the TIMESTAMP_* variation that the TIMESTAMP data type alias maps to."}, + {Name: sdk.TaskParameterTimestampTzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_TZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterTimezone, Type: schema.TypeString, Description: "Specifies the time zone for the session. You can specify a [time zone name](https://data.iana.org/time-zones/tzdb-2021a/zone1970.tab) or a [link name](https://data.iana.org/time-zones/tzdb-2021a/backward) from release 2021a of the [IANA Time Zone Database](https://www.iana.org/time-zones) (e.g. America/Los_Angeles, Europe/London, UTC, Etc/GMT, etc.)."}, + {Name: sdk.TaskParameterTimeInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). 
Any valid, supported time format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of times stored in the system during the session)."}, + {Name: sdk.TaskParameterTimeOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterTraceLevel, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToTraceLevel), DiffSuppress: NormalizeAndCompare(sdk.ToTraceLevel), Description: "Controls how trace events are ingested into the event table. For more information about trace levels, see [Setting trace level](https://docs.snowflake.com/en/developer-guide/logging-tracing/tracing-trace-level)."}, + {Name: sdk.TaskParameterTransactionAbortOnError, Type: schema.TypeBool, Description: "Specifies the action to perform when a statement issued within a non-autocommit transaction returns with an error."}, + {Name: sdk.TaskParameterTransactionDefaultIsolationLevel, Type: schema.TypeString, Description: "Specifies the isolation level for transactions in the user session."}, + {Name: sdk.TaskParameterTwoDigitCenturyStart, Type: schema.TypeInt, Description: "Specifies the β€œcentury start” year for 2-digit years (i.e. the earliest year such dates can represent). This parameter prevents ambiguous dates when importing or converting data with the `YY` date format component (i.e. years represented as 2 digits)."}, + {Name: sdk.TaskParameterUnsupportedDdlAction, Type: schema.TypeString, Description: "Determines if an unsupported (i.e. non-default) value specified for a constraint property returns an error."}, + {Name: sdk.TaskParameterUseCachedResult, Type: schema.TypeBool, Description: "Specifies whether to reuse persisted query results, if available, when a matching query is submitted."}, + {Name: sdk.TaskParameterWeekOfYearPolicy, Type: schema.TypeInt, Description: "Specifies how the weeks in a given year are computed. `0`: The semantics used are equivalent to the ISO semantics, in which a week belongs to a given year if at least 4 days of that week are in that year. `1`: January 1 is included in the first week of the year and December 31 is included in the last week of the year."}, + {Name: sdk.TaskParameterWeekStart, Type: schema.TypeInt, Description: "Specifies the first day of the week (used by week-related date functions). `0`: Legacy Snowflake behavior is used (i.e. ISO-like semantics). 
`1` (Monday) to `7` (Sunday): All the week-related functions use weeks that start on the specified day of the week."}, + } + + // TODO [SNOW-1645342]: extract this method after moving to SDK + for _, field := range TaskParameterFields { + fieldName := strings.ToLower(string(field.Name)) + + taskParametersSchema[fieldName] = &schema.Schema{ + Type: field.Type, + Description: enrichWithReferenceToParameterDocs(field.Name, field.Description), + Computed: true, + Optional: true, + ValidateDiagFunc: field.ValidateDiag, + DiffSuppressFunc: field.DiffSuppress, + } + } +} + +func taskParametersProvider(ctx context.Context, d ResourceIdProvider, meta any) ([]*sdk.Parameter, error) { + return parametersProvider(ctx, d, meta.(*provider.Context), taskParametersProviderFunc, sdk.ParseSchemaObjectIdentifier) +} + +func taskParametersProviderFunc(c *sdk.Client) showParametersFunc[sdk.SchemaObjectIdentifier] { + return c.Tasks.ShowParameters +} + +// TODO [SNOW-1645342]: make generic based on type definition +func handleTaskParameterRead(d *schema.ResourceData, taskParameters []*sdk.Parameter) error { + for _, p := range taskParameters { + switch p.Key { + case + string(sdk.TaskParameterSuspendTaskAfterNumFailures), + string(sdk.TaskParameterTaskAutoRetryAttempts), + string(sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds), + string(sdk.TaskParameterUserTaskTimeoutMs), + string(sdk.TaskParameterClientMemoryLimit), + string(sdk.TaskParameterClientPrefetchThreads), + string(sdk.TaskParameterClientResultChunkSize), + string(sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency), + string(sdk.TaskParameterJsonIndent), + string(sdk.TaskParameterLockTimeout), + string(sdk.TaskParameterMultiStatementCount), + string(sdk.TaskParameterRowsPerResultset), + string(sdk.TaskParameterStatementQueuedTimeoutInSeconds), + string(sdk.TaskParameterStatementTimeoutInSeconds), + string(sdk.TaskParameterTwoDigitCenturyStart), + string(sdk.TaskParameterWeekOfYearPolicy), + string(sdk.TaskParameterWeekStart): + value, err := strconv.Atoi(p.Value) + if err != nil { + return err + } + if err := d.Set(strings.ToLower(p.Key), value); err != nil { + return err + } + case + string(sdk.TaskParameterUserTaskManagedInitialWarehouseSize), + string(sdk.TaskParameterBinaryInputFormat), + string(sdk.TaskParameterBinaryOutputFormat), + string(sdk.TaskParameterClientTimestampTypeMapping), + string(sdk.TaskParameterDateInputFormat), + string(sdk.TaskParameterDateOutputFormat), + string(sdk.TaskParameterGeographyOutputFormat), + string(sdk.TaskParameterGeometryOutputFormat), + string(sdk.TaskParameterLogLevel), + string(sdk.TaskParameterQueryTag), + string(sdk.TaskParameterS3StageVpceDnsName), + string(sdk.TaskParameterSearchPath), + string(sdk.TaskParameterTimestampInputFormat), + string(sdk.TaskParameterTimestampLtzOutputFormat), + string(sdk.TaskParameterTimestampNtzOutputFormat), + string(sdk.TaskParameterTimestampOutputFormat), + string(sdk.TaskParameterTimestampTypeMapping), + string(sdk.TaskParameterTimestampTzOutputFormat), + string(sdk.TaskParameterTimezone), + string(sdk.TaskParameterTimeInputFormat), + string(sdk.TaskParameterTimeOutputFormat), + string(sdk.TaskParameterTraceLevel), + string(sdk.TaskParameterTransactionDefaultIsolationLevel), + string(sdk.TaskParameterUnsupportedDdlAction): + if err := d.Set(strings.ToLower(p.Key), p.Value); err != nil { + return err + } + case + string(sdk.TaskParameterAbortDetachedQuery), + string(sdk.TaskParameterAutocommit), + 
string(sdk.TaskParameterClientMetadataRequestUseConnectionCtx), + string(sdk.TaskParameterClientResultColumnCaseInsensitive), + string(sdk.TaskParameterClientSessionKeepAlive), + string(sdk.TaskParameterEnableUnloadPhysicalTypeOptimization), + string(sdk.TaskParameterErrorOnNondeterministicMerge), + string(sdk.TaskParameterErrorOnNondeterministicUpdate), + string(sdk.TaskParameterJdbcTreatTimestampNtzAsUtc), + string(sdk.TaskParameterJdbcUseSessionTimezone), + string(sdk.TaskParameterNoorderSequenceAsDefault), + string(sdk.TaskParameterOdbcTreatDecimalAsInt), + string(sdk.TaskParameterQuotedIdentifiersIgnoreCase), + string(sdk.TaskParameterStrictJsonOutput), + string(sdk.TaskParameterTimestampDayIsAlways24h), + string(sdk.TaskParameterTransactionAbortOnError), + string(sdk.TaskParameterUseCachedResult): + value, err := strconv.ParseBool(p.Value) + if err != nil { + return err + } + if err := d.Set(strings.ToLower(p.Key), value); err != nil { + return err + } + } + } + + return nil +} + +// TODO [SNOW-1348330]: consider using SessionParameters#setParam during parameters rework +// (because currently setParam already is able to set the right parameter based on the string value input, +// but GetConfigPropertyAsPointerAllowingZeroValue receives typed value, +// so this would be unnecessary running in circles) +// TODO [SNOW-1645342]: include mappers in the param definition (after moving it to the SDK: identity versus concrete) +func handleTaskParametersCreate(d *schema.ResourceData, createOpts *sdk.CreateTaskRequest) diag.Diagnostics { + createOpts.WithSessionParameters(sdk.SessionParameters{}) + if v, ok := d.GetOk("user_task_managed_initial_warehouse_size"); ok { + size, err := sdk.ToWarehouseSize(v.(string)) + if err != nil { + return diag.FromErr(err) + } + createOpts.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(size)) + } + diags := JoinDiags( + // task parameters + handleParameterCreate(d, sdk.TaskParameterUserTaskTimeoutMs, &createOpts.UserTaskTimeoutMs), + handleParameterCreate(d, sdk.TaskParameterSuspendTaskAfterNumFailures, &createOpts.SuspendTaskAfterNumFailures), + handleParameterCreate(d, sdk.TaskParameterTaskAutoRetryAttempts, &createOpts.TaskAutoRetryAttempts), + handleParameterCreate(d, sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, &createOpts.UserTaskMinimumTriggerIntervalInSeconds), + // session parameters + handleParameterCreate(d, sdk.TaskParameterAbortDetachedQuery, &createOpts.SessionParameters.AbortDetachedQuery), + handleParameterCreate(d, sdk.TaskParameterAutocommit, &createOpts.SessionParameters.Autocommit), + handleParameterCreateWithMapping(d, sdk.TaskParameterBinaryInputFormat, &createOpts.SessionParameters.BinaryInputFormat, stringToStringEnumProvider(sdk.ToBinaryInputFormat)), + handleParameterCreateWithMapping(d, sdk.TaskParameterBinaryOutputFormat, &createOpts.SessionParameters.BinaryOutputFormat, stringToStringEnumProvider(sdk.ToBinaryOutputFormat)), + handleParameterCreate(d, sdk.TaskParameterClientMemoryLimit, &createOpts.SessionParameters.ClientMemoryLimit), + handleParameterCreate(d, sdk.TaskParameterClientMetadataRequestUseConnectionCtx, &createOpts.SessionParameters.ClientMetadataRequestUseConnectionCtx), + handleParameterCreate(d, sdk.TaskParameterClientPrefetchThreads, &createOpts.SessionParameters.ClientPrefetchThreads), + handleParameterCreate(d, sdk.TaskParameterClientResultChunkSize, &createOpts.SessionParameters.ClientResultChunkSize), + handleParameterCreate(d, 
sdk.TaskParameterClientResultColumnCaseInsensitive, &createOpts.SessionParameters.ClientResultColumnCaseInsensitive), + handleParameterCreate(d, sdk.TaskParameterClientSessionKeepAlive, &createOpts.SessionParameters.ClientSessionKeepAlive), + handleParameterCreate(d, sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, &createOpts.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency), + handleParameterCreateWithMapping(d, sdk.TaskParameterClientTimestampTypeMapping, &createOpts.SessionParameters.ClientTimestampTypeMapping, stringToStringEnumProvider(sdk.ToClientTimestampTypeMapping)), + handleParameterCreate(d, sdk.TaskParameterDateInputFormat, &createOpts.SessionParameters.DateInputFormat), + handleParameterCreate(d, sdk.TaskParameterDateOutputFormat, &createOpts.SessionParameters.DateOutputFormat), + handleParameterCreate(d, sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, &createOpts.SessionParameters.EnableUnloadPhysicalTypeOptimization), + handleParameterCreate(d, sdk.TaskParameterErrorOnNondeterministicMerge, &createOpts.SessionParameters.ErrorOnNondeterministicMerge), + handleParameterCreate(d, sdk.TaskParameterErrorOnNondeterministicUpdate, &createOpts.SessionParameters.ErrorOnNondeterministicUpdate), + handleParameterCreateWithMapping(d, sdk.TaskParameterGeographyOutputFormat, &createOpts.SessionParameters.GeographyOutputFormat, stringToStringEnumProvider(sdk.ToGeographyOutputFormat)), + handleParameterCreateWithMapping(d, sdk.TaskParameterGeometryOutputFormat, &createOpts.SessionParameters.GeometryOutputFormat, stringToStringEnumProvider(sdk.ToGeometryOutputFormat)), + handleParameterCreate(d, sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, &createOpts.SessionParameters.JdbcTreatTimestampNtzAsUtc), + handleParameterCreate(d, sdk.TaskParameterJdbcUseSessionTimezone, &createOpts.SessionParameters.JdbcUseSessionTimezone), + handleParameterCreate(d, sdk.TaskParameterJsonIndent, &createOpts.SessionParameters.JSONIndent), + handleParameterCreate(d, sdk.TaskParameterLockTimeout, &createOpts.SessionParameters.LockTimeout), + handleParameterCreateWithMapping(d, sdk.TaskParameterLogLevel, &createOpts.SessionParameters.LogLevel, stringToStringEnumProvider(sdk.ToLogLevel)), + handleParameterCreate(d, sdk.TaskParameterMultiStatementCount, &createOpts.SessionParameters.MultiStatementCount), + handleParameterCreate(d, sdk.TaskParameterNoorderSequenceAsDefault, &createOpts.SessionParameters.NoorderSequenceAsDefault), + handleParameterCreate(d, sdk.TaskParameterOdbcTreatDecimalAsInt, &createOpts.SessionParameters.OdbcTreatDecimalAsInt), + handleParameterCreate(d, sdk.TaskParameterQueryTag, &createOpts.SessionParameters.QueryTag), + handleParameterCreate(d, sdk.TaskParameterQuotedIdentifiersIgnoreCase, &createOpts.SessionParameters.QuotedIdentifiersIgnoreCase), + handleParameterCreate(d, sdk.TaskParameterRowsPerResultset, &createOpts.SessionParameters.RowsPerResultset), + handleParameterCreate(d, sdk.TaskParameterS3StageVpceDnsName, &createOpts.SessionParameters.S3StageVpceDnsName), + handleParameterCreate(d, sdk.TaskParameterSearchPath, &createOpts.SessionParameters.SearchPath), + handleParameterCreate(d, sdk.TaskParameterStatementQueuedTimeoutInSeconds, &createOpts.SessionParameters.StatementQueuedTimeoutInSeconds), + handleParameterCreate(d, sdk.TaskParameterStatementTimeoutInSeconds, &createOpts.SessionParameters.StatementTimeoutInSeconds), + handleParameterCreate(d, sdk.TaskParameterStrictJsonOutput, &createOpts.SessionParameters.StrictJSONOutput), + handleParameterCreate(d, 
sdk.TaskParameterTimestampDayIsAlways24h, &createOpts.SessionParameters.TimestampDayIsAlways24h), + handleParameterCreate(d, sdk.TaskParameterTimestampInputFormat, &createOpts.SessionParameters.TimestampInputFormat), + handleParameterCreate(d, sdk.TaskParameterTimestampLtzOutputFormat, &createOpts.SessionParameters.TimestampLTZOutputFormat), + handleParameterCreate(d, sdk.TaskParameterTimestampNtzOutputFormat, &createOpts.SessionParameters.TimestampNTZOutputFormat), + handleParameterCreate(d, sdk.TaskParameterTimestampOutputFormat, &createOpts.SessionParameters.TimestampOutputFormat), + handleParameterCreateWithMapping(d, sdk.TaskParameterTimestampTypeMapping, &createOpts.SessionParameters.TimestampTypeMapping, stringToStringEnumProvider(sdk.ToTimestampTypeMapping)), + handleParameterCreate(d, sdk.TaskParameterTimestampTzOutputFormat, &createOpts.SessionParameters.TimestampTZOutputFormat), + handleParameterCreate(d, sdk.TaskParameterTimezone, &createOpts.SessionParameters.Timezone), + handleParameterCreate(d, sdk.TaskParameterTimeInputFormat, &createOpts.SessionParameters.TimeInputFormat), + handleParameterCreate(d, sdk.TaskParameterTimeOutputFormat, &createOpts.SessionParameters.TimeOutputFormat), + handleParameterCreateWithMapping(d, sdk.TaskParameterTraceLevel, &createOpts.SessionParameters.TraceLevel, stringToStringEnumProvider(sdk.ToTraceLevel)), + handleParameterCreate(d, sdk.TaskParameterTransactionAbortOnError, &createOpts.SessionParameters.TransactionAbortOnError), + handleParameterCreateWithMapping(d, sdk.TaskParameterTransactionDefaultIsolationLevel, &createOpts.SessionParameters.TransactionDefaultIsolationLevel, stringToStringEnumProvider(sdk.ToTransactionDefaultIsolationLevel)), + handleParameterCreate(d, sdk.TaskParameterTwoDigitCenturyStart, &createOpts.SessionParameters.TwoDigitCenturyStart), + handleParameterCreateWithMapping(d, sdk.TaskParameterUnsupportedDdlAction, &createOpts.SessionParameters.UnsupportedDDLAction, stringToStringEnumProvider(sdk.ToUnsupportedDDLAction)), + handleParameterCreate(d, sdk.TaskParameterUseCachedResult, &createOpts.SessionParameters.UseCachedResult), + handleParameterCreate(d, sdk.TaskParameterWeekOfYearPolicy, &createOpts.SessionParameters.WeekOfYearPolicy), + handleParameterCreate(d, sdk.TaskParameterWeekStart, &createOpts.SessionParameters.WeekStart), + ) + if *createOpts.SessionParameters == (sdk.SessionParameters{}) { + createOpts.SessionParameters = nil + } + return diags +} + +func handleTaskParametersUpdate(d *schema.ResourceData, set *sdk.TaskSetRequest, unset *sdk.TaskUnsetRequest) diag.Diagnostics { + set.WithSessionParameters(sdk.SessionParameters{}) + unset.WithSessionParametersUnset(sdk.SessionParametersUnset{}) + diags := JoinDiags( + // task parameters + handleParameterUpdateWithMapping(d, sdk.TaskParameterUserTaskManagedInitialWarehouseSize, &set.UserTaskManagedInitialWarehouseSize, &unset.UserTaskManagedInitialWarehouseSize, stringToStringEnumProvider(sdk.ToWarehouseSize)), + handleParameterUpdate(d, sdk.TaskParameterUserTaskTimeoutMs, &set.UserTaskTimeoutMs, &unset.UserTaskTimeoutMs), + handleParameterUpdate(d, sdk.TaskParameterSuspendTaskAfterNumFailures, &set.SuspendTaskAfterNumFailures, &unset.SuspendTaskAfterNumFailures), + handleParameterUpdate(d, sdk.TaskParameterTaskAutoRetryAttempts, &set.TaskAutoRetryAttempts, &unset.TaskAutoRetryAttempts), + handleParameterUpdate(d, sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, &set.UserTaskMinimumTriggerIntervalInSeconds, 
&unset.UserTaskMinimumTriggerIntervalInSeconds), + // session parameters + handleParameterUpdate(d, sdk.TaskParameterAbortDetachedQuery, &set.SessionParameters.AbortDetachedQuery, &unset.SessionParametersUnset.AbortDetachedQuery), + handleParameterUpdate(d, sdk.TaskParameterAutocommit, &set.SessionParameters.Autocommit, &unset.SessionParametersUnset.Autocommit), + handleParameterUpdateWithMapping(d, sdk.TaskParameterBinaryInputFormat, &set.SessionParameters.BinaryInputFormat, &unset.SessionParametersUnset.BinaryInputFormat, stringToStringEnumProvider(sdk.ToBinaryInputFormat)), + handleParameterUpdateWithMapping(d, sdk.TaskParameterBinaryOutputFormat, &set.SessionParameters.BinaryOutputFormat, &unset.SessionParametersUnset.BinaryOutputFormat, stringToStringEnumProvider(sdk.ToBinaryOutputFormat)), + handleParameterUpdate(d, sdk.TaskParameterClientMemoryLimit, &set.SessionParameters.ClientMemoryLimit, &unset.SessionParametersUnset.ClientMemoryLimit), + handleParameterUpdate(d, sdk.TaskParameterClientMetadataRequestUseConnectionCtx, &set.SessionParameters.ClientMetadataRequestUseConnectionCtx, &unset.SessionParametersUnset.ClientMetadataRequestUseConnectionCtx), + handleParameterUpdate(d, sdk.TaskParameterClientPrefetchThreads, &set.SessionParameters.ClientPrefetchThreads, &unset.SessionParametersUnset.ClientPrefetchThreads), + handleParameterUpdate(d, sdk.TaskParameterClientResultChunkSize, &set.SessionParameters.ClientResultChunkSize, &unset.SessionParametersUnset.ClientResultChunkSize), + handleParameterUpdate(d, sdk.TaskParameterClientResultColumnCaseInsensitive, &set.SessionParameters.ClientResultColumnCaseInsensitive, &unset.SessionParametersUnset.ClientResultColumnCaseInsensitive), + handleParameterUpdate(d, sdk.TaskParameterClientSessionKeepAlive, &set.SessionParameters.ClientSessionKeepAlive, &unset.SessionParametersUnset.ClientSessionKeepAlive), + handleParameterUpdate(d, sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, &set.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency, &unset.SessionParametersUnset.ClientSessionKeepAliveHeartbeatFrequency), + handleParameterUpdateWithMapping(d, sdk.TaskParameterClientTimestampTypeMapping, &set.SessionParameters.ClientTimestampTypeMapping, &unset.SessionParametersUnset.ClientTimestampTypeMapping, stringToStringEnumProvider(sdk.ToClientTimestampTypeMapping)), + handleParameterUpdate(d, sdk.TaskParameterDateInputFormat, &set.SessionParameters.DateInputFormat, &unset.SessionParametersUnset.DateInputFormat), + handleParameterUpdate(d, sdk.TaskParameterDateOutputFormat, &set.SessionParameters.DateOutputFormat, &unset.SessionParametersUnset.DateOutputFormat), + handleParameterUpdate(d, sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, &set.SessionParameters.EnableUnloadPhysicalTypeOptimization, &unset.SessionParametersUnset.EnableUnloadPhysicalTypeOptimization), + handleParameterUpdate(d, sdk.TaskParameterErrorOnNondeterministicMerge, &set.SessionParameters.ErrorOnNondeterministicMerge, &unset.SessionParametersUnset.ErrorOnNondeterministicMerge), + handleParameterUpdate(d, sdk.TaskParameterErrorOnNondeterministicUpdate, &set.SessionParameters.ErrorOnNondeterministicUpdate, &unset.SessionParametersUnset.ErrorOnNondeterministicUpdate), + handleParameterUpdateWithMapping(d, sdk.TaskParameterGeographyOutputFormat, &set.SessionParameters.GeographyOutputFormat, &unset.SessionParametersUnset.GeographyOutputFormat, stringToStringEnumProvider(sdk.ToGeographyOutputFormat)), + handleParameterUpdateWithMapping(d, 
sdk.TaskParameterGeometryOutputFormat, &set.SessionParameters.GeometryOutputFormat, &unset.SessionParametersUnset.GeometryOutputFormat, stringToStringEnumProvider(sdk.ToGeometryOutputFormat)), + handleParameterUpdate(d, sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, &set.SessionParameters.JdbcTreatTimestampNtzAsUtc, &unset.SessionParametersUnset.JdbcTreatTimestampNtzAsUtc), + handleParameterUpdate(d, sdk.TaskParameterJdbcUseSessionTimezone, &set.SessionParameters.JdbcUseSessionTimezone, &unset.SessionParametersUnset.JdbcUseSessionTimezone), + handleParameterUpdate(d, sdk.TaskParameterJsonIndent, &set.SessionParameters.JSONIndent, &unset.SessionParametersUnset.JSONIndent), + handleParameterUpdate(d, sdk.TaskParameterLockTimeout, &set.SessionParameters.LockTimeout, &unset.SessionParametersUnset.LockTimeout), + handleParameterUpdateWithMapping(d, sdk.TaskParameterLogLevel, &set.SessionParameters.LogLevel, &unset.SessionParametersUnset.LogLevel, stringToStringEnumProvider(sdk.ToLogLevel)), + handleParameterUpdate(d, sdk.TaskParameterMultiStatementCount, &set.SessionParameters.MultiStatementCount, &unset.SessionParametersUnset.MultiStatementCount), + handleParameterUpdate(d, sdk.TaskParameterNoorderSequenceAsDefault, &set.SessionParameters.NoorderSequenceAsDefault, &unset.SessionParametersUnset.NoorderSequenceAsDefault), + handleParameterUpdate(d, sdk.TaskParameterOdbcTreatDecimalAsInt, &set.SessionParameters.OdbcTreatDecimalAsInt, &unset.SessionParametersUnset.OdbcTreatDecimalAsInt), + handleParameterUpdate(d, sdk.TaskParameterQueryTag, &set.SessionParameters.QueryTag, &unset.SessionParametersUnset.QueryTag), + handleParameterUpdate(d, sdk.TaskParameterQuotedIdentifiersIgnoreCase, &set.SessionParameters.QuotedIdentifiersIgnoreCase, &unset.SessionParametersUnset.QuotedIdentifiersIgnoreCase), + handleParameterUpdate(d, sdk.TaskParameterRowsPerResultset, &set.SessionParameters.RowsPerResultset, &unset.SessionParametersUnset.RowsPerResultset), + handleParameterUpdate(d, sdk.TaskParameterS3StageVpceDnsName, &set.SessionParameters.S3StageVpceDnsName, &unset.SessionParametersUnset.S3StageVpceDnsName), + handleParameterUpdate(d, sdk.TaskParameterSearchPath, &set.SessionParameters.SearchPath, &unset.SessionParametersUnset.SearchPath), + handleParameterUpdate(d, sdk.TaskParameterStatementQueuedTimeoutInSeconds, &set.SessionParameters.StatementQueuedTimeoutInSeconds, &unset.SessionParametersUnset.StatementQueuedTimeoutInSeconds), + handleParameterUpdate(d, sdk.TaskParameterStatementTimeoutInSeconds, &set.SessionParameters.StatementTimeoutInSeconds, &unset.SessionParametersUnset.StatementTimeoutInSeconds), + handleParameterUpdate(d, sdk.TaskParameterStrictJsonOutput, &set.SessionParameters.StrictJSONOutput, &unset.SessionParametersUnset.StrictJSONOutput), + handleParameterUpdate(d, sdk.TaskParameterTimestampDayIsAlways24h, &set.SessionParameters.TimestampDayIsAlways24h, &unset.SessionParametersUnset.TimestampDayIsAlways24h), + handleParameterUpdate(d, sdk.TaskParameterTimestampInputFormat, &set.SessionParameters.TimestampInputFormat, &unset.SessionParametersUnset.TimestampInputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimestampLtzOutputFormat, &set.SessionParameters.TimestampLTZOutputFormat, &unset.SessionParametersUnset.TimestampLTZOutputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimestampNtzOutputFormat, &set.SessionParameters.TimestampNTZOutputFormat, &unset.SessionParametersUnset.TimestampNTZOutputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimestampOutputFormat, 
&set.SessionParameters.TimestampOutputFormat, &unset.SessionParametersUnset.TimestampOutputFormat), + handleParameterUpdateWithMapping(d, sdk.TaskParameterTimestampTypeMapping, &set.SessionParameters.TimestampTypeMapping, &unset.SessionParametersUnset.TimestampTypeMapping, stringToStringEnumProvider(sdk.ToTimestampTypeMapping)), + handleParameterUpdate(d, sdk.TaskParameterTimestampTzOutputFormat, &set.SessionParameters.TimestampTZOutputFormat, &unset.SessionParametersUnset.TimestampTZOutputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimezone, &set.SessionParameters.Timezone, &unset.SessionParametersUnset.Timezone), + handleParameterUpdate(d, sdk.TaskParameterTimeInputFormat, &set.SessionParameters.TimeInputFormat, &unset.SessionParametersUnset.TimeInputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimeOutputFormat, &set.SessionParameters.TimeOutputFormat, &unset.SessionParametersUnset.TimeOutputFormat), + handleParameterUpdateWithMapping(d, sdk.TaskParameterTraceLevel, &set.SessionParameters.TraceLevel, &unset.SessionParametersUnset.TraceLevel, stringToStringEnumProvider(sdk.ToTraceLevel)), + handleParameterUpdate(d, sdk.TaskParameterTransactionAbortOnError, &set.SessionParameters.TransactionAbortOnError, &unset.SessionParametersUnset.TransactionAbortOnError), + handleParameterUpdateWithMapping(d, sdk.TaskParameterTransactionDefaultIsolationLevel, &set.SessionParameters.TransactionDefaultIsolationLevel, &unset.SessionParametersUnset.TransactionDefaultIsolationLevel, stringToStringEnumProvider(sdk.ToTransactionDefaultIsolationLevel)), + handleParameterUpdate(d, sdk.TaskParameterTwoDigitCenturyStart, &set.SessionParameters.TwoDigitCenturyStart, &unset.SessionParametersUnset.TwoDigitCenturyStart), + handleParameterUpdateWithMapping(d, sdk.TaskParameterUnsupportedDdlAction, &set.SessionParameters.UnsupportedDDLAction, &unset.SessionParametersUnset.UnsupportedDDLAction, stringToStringEnumProvider(sdk.ToUnsupportedDDLAction)), + handleParameterUpdate(d, sdk.TaskParameterUseCachedResult, &set.SessionParameters.UseCachedResult, &unset.SessionParametersUnset.UseCachedResult), + handleParameterUpdate(d, sdk.TaskParameterWeekOfYearPolicy, &set.SessionParameters.WeekOfYearPolicy, &unset.SessionParametersUnset.WeekOfYearPolicy), + handleParameterUpdate(d, sdk.TaskParameterWeekStart, &set.SessionParameters.WeekStart, &unset.SessionParametersUnset.WeekStart), + ) + if *set.SessionParameters == (sdk.SessionParameters{}) { + set.SessionParameters = nil + } + if *unset.SessionParametersUnset == (sdk.SessionParametersUnset{}) { + unset.SessionParametersUnset = nil + } + return diags +} diff --git a/pkg/schemas/task_parameters.go b/pkg/schemas/task_parameters.go index 1288e1b08c..7ab3b47882 100644 --- a/pkg/schemas/task_parameters.go +++ b/pkg/schemas/task_parameters.go @@ -10,10 +10,72 @@ import ( var ( ShowTaskParametersSchema = make(map[string]*schema.Schema) + taskParameters = []sdk.TaskParameter{ + // task parameters + sdk.TaskParameterSuspendTaskAfterNumFailures, + sdk.TaskParameterTaskAutoRetryAttempts, + sdk.TaskParameterUserTaskManagedInitialWarehouseSize, + sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, + sdk.TaskParameterUserTaskTimeoutMs, + // session parameters + sdk.TaskParameterAbortDetachedQuery, + sdk.TaskParameterAutocommit, + sdk.TaskParameterBinaryInputFormat, + sdk.TaskParameterBinaryOutputFormat, + sdk.TaskParameterClientMemoryLimit, + sdk.TaskParameterClientMetadataRequestUseConnectionCtx, + sdk.TaskParameterClientPrefetchThreads, + 
sdk.TaskParameterClientResultChunkSize, + sdk.TaskParameterClientResultColumnCaseInsensitive, + sdk.TaskParameterClientSessionKeepAlive, + sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, + sdk.TaskParameterClientTimestampTypeMapping, + sdk.TaskParameterDateInputFormat, + sdk.TaskParameterDateOutputFormat, + sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, + sdk.TaskParameterErrorOnNondeterministicMerge, + sdk.TaskParameterErrorOnNondeterministicUpdate, + sdk.TaskParameterGeographyOutputFormat, + sdk.TaskParameterGeometryOutputFormat, + sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, + sdk.TaskParameterJdbcUseSessionTimezone, + sdk.TaskParameterJsonIndent, + sdk.TaskParameterLockTimeout, + sdk.TaskParameterLogLevel, + sdk.TaskParameterMultiStatementCount, + sdk.TaskParameterNoorderSequenceAsDefault, + sdk.TaskParameterOdbcTreatDecimalAsInt, + sdk.TaskParameterQueryTag, + sdk.TaskParameterQuotedIdentifiersIgnoreCase, + sdk.TaskParameterRowsPerResultset, + sdk.TaskParameterS3StageVpceDnsName, + sdk.TaskParameterSearchPath, + sdk.TaskParameterStatementQueuedTimeoutInSeconds, + sdk.TaskParameterStatementTimeoutInSeconds, + sdk.TaskParameterStrictJsonOutput, + sdk.TaskParameterTimestampDayIsAlways24h, + sdk.TaskParameterTimestampInputFormat, + sdk.TaskParameterTimestampLtzOutputFormat, + sdk.TaskParameterTimestampNtzOutputFormat, + sdk.TaskParameterTimestampOutputFormat, + sdk.TaskParameterTimestampTypeMapping, + sdk.TaskParameterTimestampTzOutputFormat, + sdk.TaskParameterTimezone, + sdk.TaskParameterTimeInputFormat, + sdk.TaskParameterTimeOutputFormat, + sdk.TaskParameterTraceLevel, + sdk.TaskParameterTransactionAbortOnError, + sdk.TaskParameterTransactionDefaultIsolationLevel, + sdk.TaskParameterTwoDigitCenturyStart, + sdk.TaskParameterUnsupportedDdlAction, + sdk.TaskParameterUseCachedResult, + sdk.TaskParameterWeekOfYearPolicy, + sdk.TaskParameterWeekStart, + } ) func init() { - for _, param := range sdk.AllTaskParameters { + for _, param := range taskParameters { ShowTaskParametersSchema[strings.ToLower(string(param))] = ParameterListSchema } } @@ -21,7 +83,7 @@ func init() { func TaskParametersToSchema(parameters []*sdk.Parameter) map[string]any { taskParametersValue := make(map[string]any) for _, param := range parameters { - if slices.Contains(userParameters, sdk.UserParameter(param.Key)) { + if slices.Contains(taskParameters, sdk.TaskParameter(param.Key)) { taskParametersValue[strings.ToLower(param.Key)] = []map[string]any{ParameterToSchema(param)} } } diff --git a/pkg/sdk/tasks_dto_builders_gen.go b/pkg/sdk/tasks_dto_builders_gen.go index 7a0397bca2..1a371b8a71 100644 --- a/pkg/sdk/tasks_dto_builders_gen.go +++ b/pkg/sdk/tasks_dto_builders_gen.go @@ -359,6 +359,11 @@ func (s *TaskUnsetRequest) WithWarehouse(Warehouse bool) *TaskUnsetRequest { return s } +func (s *TaskUnsetRequest) WithUserTaskManagedInitialWarehouseSize(UserTaskManagedInitialWarehouseSize bool) *TaskUnsetRequest { + s.UserTaskManagedInitialWarehouseSize = &UserTaskManagedInitialWarehouseSize + return s +} + func (s *TaskUnsetRequest) WithSchedule(Schedule bool) *TaskUnsetRequest { s.Schedule = &Schedule return s diff --git a/pkg/sdk/tasks_dto_gen.go b/pkg/sdk/tasks_dto_gen.go index e6a2726b4e..f10cbf0051 100644 --- a/pkg/sdk/tasks_dto_gen.go +++ b/pkg/sdk/tasks_dto_gen.go @@ -112,6 +112,7 @@ type TaskSetRequest struct { type TaskUnsetRequest struct { Warehouse *bool + UserTaskManagedInitialWarehouseSize *bool Schedule *bool Config *bool AllowOverlappingExecution *bool diff --git a/pkg/sdk/tasks_gen.go 
b/pkg/sdk/tasks_gen.go index 589f834984..a98e9eac46 100644 --- a/pkg/sdk/tasks_gen.go +++ b/pkg/sdk/tasks_gen.go @@ -122,6 +122,7 @@ type TaskSet struct { type TaskUnset struct { Warehouse *bool `ddl:"keyword" sql:"WAREHOUSE"` + UserTaskManagedInitialWarehouseSize *bool `ddl:"keyword" sql:"USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE"` Schedule *bool `ddl:"keyword" sql:"SCHEDULE"` Config *bool `ddl:"keyword" sql:"CONFIG"` AllowOverlappingExecution *bool `ddl:"keyword" sql:"ALLOW_OVERLAPPING_EXECUTION"` @@ -187,7 +188,7 @@ type Task struct { SchemaName string Owner string Comment string - Warehouse string // TODO: *AccountObjectIdentifier + Warehouse *AccountObjectIdentifier Schedule string Predecessors []SchemaObjectIdentifier State TaskState diff --git a/pkg/sdk/tasks_impl_gen.go b/pkg/sdk/tasks_impl_gen.go index 88a95c9e93..0502a0f9f5 100644 --- a/pkg/sdk/tasks_impl_gen.go +++ b/pkg/sdk/tasks_impl_gen.go @@ -269,6 +269,7 @@ func (r *AlterTaskRequest) toOpts() *AlterTaskOptions { if r.Unset != nil { opts.Unset = &TaskUnset{ Warehouse: r.Unset.Warehouse, + UserTaskManagedInitialWarehouseSize: r.Unset.UserTaskManagedInitialWarehouseSize, Schedule: r.Unset.Schedule, Config: r.Unset.Config, AllowOverlappingExecution: r.Unset.AllowOverlappingExecution, @@ -325,8 +326,13 @@ func (r taskDBRow) convert() *Task { if r.Comment.Valid { task.Comment = r.Comment.String } - if r.Warehouse.Valid { - task.Warehouse = r.Warehouse.String + if r.Warehouse.Valid && r.Warehouse.String != "null" { + id, err := ParseAccountObjectIdentifier(r.Warehouse.String) + if err != nil { + log.Printf("[DEBUG] failed to parse warehouse: %v", err) + } else { + task.Warehouse = &id + } } if r.Schedule.Valid { task.Schedule = r.Schedule.String diff --git a/pkg/sdk/testint/tasks_gen_integration_test.go b/pkg/sdk/testint/tasks_gen_integration_test.go index 206aa77404..b89bdd2d43 100644 --- a/pkg/sdk/testint/tasks_gen_integration_test.go +++ b/pkg/sdk/testint/tasks_gen_integration_test.go @@ -21,19 +21,8 @@ func TestInt_Tasks(t *testing.T) { ctx := testContext(t) sql := "SELECT CURRENT_TIMESTAMP" - // TODO [SNOW-1017580]: replace with real value - const gcpPubsubSubscriptionName = "projects/project-1234/subscriptions/sub2" - errorIntegrationId := testClientHelper().Ids.RandomAccountObjectIdentifier() - err := client.NotificationIntegrations.Create(ctx, - sdk.NewCreateNotificationIntegrationRequest(errorIntegrationId, true). - WithAutomatedDataLoadsParams(sdk.NewAutomatedDataLoadsParamsRequest(). - WithGoogleAutoParams(sdk.NewGoogleAutoParamsRequest(gcpPubsubSubscriptionName)), - ), - ) - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, client.NotificationIntegrations.Drop(ctx, sdk.NewDropNotificationIntegrationRequest(errorIntegrationId).WithIfExists(sdk.Bool(true)))) - }) + errorNotificationIntegration, errorNotificationIntegrationCleanup := testClientHelper().NotificationIntegration.Create(t) + t.Cleanup(errorNotificationIntegrationCleanup) assertTask := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, warehouseName string) { t.Helper() @@ -278,10 +267,10 @@ func TestInt_Tasks(t *testing.T) { t.Run("create task: complete case", func(t *testing.T) { id := testClientHelper().Ids.RandomSchemaObjectIdentifier() - err = testClient(t).Tasks.Create(ctx, sdk.NewCreateTaskRequest(id, sql). + err := testClient(t).Tasks.Create(ctx, sdk.NewCreateTaskRequest(id, sql). WithOrReplace(true). WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(testClientHelper().Ids.WarehouseId())). 
- WithErrorNotificationIntegration(errorIntegrationId). + WithErrorNotificationIntegration(errorNotificationIntegration.ID()). WithSchedule("10 MINUTE"). WithConfig(`$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$`). WithAllowOverlappingExecution(true). @@ -298,7 +287,7 @@ func TestInt_Tasks(t *testing.T) { task, err := testClientHelper().Task.Show(t, id) require.NoError(t, err) - assertTaskWithOptions(t, task, id, "some comment", testClientHelper().Ids.WarehouseId().Name(), "10 MINUTE", `SYSTEM$STREAM_HAS_DATA('MYSTREAM')`, true, `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`, nil, &errorIntegrationId) + assertTaskWithOptions(t, task, id, "some comment", testClientHelper().Ids.WarehouseId().Name(), "10 MINUTE", `SYSTEM$STREAM_HAS_DATA('MYSTREAM')`, true, `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`, nil, sdk.Pointer(errorNotificationIntegration.ID())) assertions.AssertThat(t, objectparametersassert.TaskParameters(t, id). HasJsonIndent(4). HasUserTaskTimeoutMs(500). @@ -618,7 +607,7 @@ func TestInt_Tasks(t *testing.T) { err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(task.ID()).WithSet(*sdk.NewTaskSetRequest(). // TODO(SNOW-1348116): Cannot set warehouse due to Snowflake error // WithWarehouse(testClientHelper().Ids.WarehouseId()). - WithErrorNotificationIntegration(errorIntegrationId). + WithErrorNotificationIntegration(errorNotificationIntegration.ID()). WithSessionParameters(sessionParametersSet). WithSchedule("10 MINUTE"). WithConfig(`$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$`). @@ -633,7 +622,7 @@ func TestInt_Tasks(t *testing.T) { assertions.AssertThat(t, objectassert.Task(t, task.ID()). // HasWarehouse(testClientHelper().Ids.WarehouseId().Name()). - HasErrorIntegration(sdk.Pointer(errorIntegrationId)). + HasErrorIntegration(sdk.Pointer(errorNotificationIntegration.ID())). HasSchedule("10 MINUTE"). HasConfig(`{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`). HasAllowOverlappingExecution(true). 
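The parameter handling introduced in this patch follows one pattern throughout: SHOW PARAMETERS IN TASK returns every value as a string, so the read path converts each value to the type of the matching schema field (strconv.Atoi for numeric parameters, strconv.ParseBool for boolean ones, pass-through for the rest) before calling d.Set on the lower-cased parameter name. The sketch below is a minimal, self-contained illustration of that mapping; `paramKind` and `readParameterValue` are illustrative names used only here, not provider or SDK APIs.

```go
// Illustrative sketch only: mirrors the read-side conversion that the task
// resource performs on SHOW PARAMETERS output, using stand-alone names.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type paramKind int

const (
	kindString paramKind = iota
	kindInt
	kindBool
)

// readParameterValue converts the raw string value of a parameter into the Go
// type expected by the corresponding (lower-cased) schema field.
func readParameterValue(key, raw string, kind paramKind) (string, any, error) {
	field := strings.ToLower(key) // schema fields use lower-cased parameter names
	switch kind {
	case kindInt:
		v, err := strconv.Atoi(raw)
		return field, v, err
	case kindBool:
		v, err := strconv.ParseBool(raw)
		return field, v, err
	default:
		return field, raw, nil
	}
}

func main() {
	// Example values in the shape SHOW PARAMETERS would return them.
	examples := []struct {
		key  string
		raw  string
		kind paramKind
	}{
		{"USER_TASK_TIMEOUT_MS", "500", kindInt},
		{"AUTOCOMMIT", "true", kindBool},
		{"LOG_LEVEL", "INFO", kindString},
	}
	for _, e := range examples {
		field, value, err := readParameterValue(e.key, e.raw, e.kind)
		if err != nil {
			fmt.Println("conversion error:", err)
			continue
		}
		// Stands in for schema.ResourceData.Set(field, value).
		fmt.Printf("d.Set(%q, %v)\n", field, value)
	}
}
```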
From 12eadb0930f736ef938205bed091c65df402507a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Thu, 3 Oct 2024 15:39:54 +0200 Subject: [PATCH 05/12] wip --- docs/resources/resource_monitor.md | 4 +- docs/resources/task.md | 884 ++++++++- pkg/acceptance/bettertestspoc/README.md | 2 + .../assert/objectassert/task_snowflake_gen.go | 46 +- .../task_parameters_snowflake_gen.go | 2 +- .../resourceassert/task_resource_ext.go | 3 +- .../resourceassert/task_resource_gen.go | 528 ++++- .../task_resource_parameters_ext.go | 3 +- .../task_show_output_ext.go | 3 +- .../task_show_output_gen.go | 33 +- .../config/model/task_model_ext.go | 56 + .../config/model/task_model_gen.go | 632 +++++- pkg/acceptance/helpers/grant_client.go | 28 + pkg/resources/resource_monitor.go | 1 + pkg/resources/task.go | 377 ++-- pkg/resources/task_acceptance_test.go | 1711 +++++++++-------- pkg/resources/task_parameters.go | 8 +- .../testdata/TestAcc_Task_issue2036/1/test.tf | 9 - .../TestAcc_Task_issue2036/1/variables.tf | 15 - .../testdata/TestAcc_Task_issue2036/2/test.tf | 10 - .../TestAcc_Task_issue2036/2/variables.tf | 15 - .../testdata/TestAcc_Task_issue2207/1/test.tf | 20 - .../TestAcc_Task_issue2207/1/variables.tf | 23 - pkg/resources/user_parameters.go | 12 +- pkg/schemas/task_gen.go | 23 +- pkg/sdk/grants_impl.go | 6 +- pkg/sdk/sql_builder.go | 9 + pkg/sdk/tasks_def.go | 18 +- pkg/sdk/tasks_gen.go | 2 +- pkg/sdk/tasks_gen_test.go | 2 +- pkg/sdk/tasks_impl_gen.go | 5 + pkg/sdk/testint/tasks_gen_integration_test.go | 29 +- 32 files changed, 3353 insertions(+), 1166 deletions(-) delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2036/1/test.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2036/1/variables.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2036/2/test.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2036/2/variables.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2207/1/test.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2207/1/variables.tf diff --git a/docs/resources/resource_monitor.md b/docs/resources/resource_monitor.md index dea0fa29b3..b603e6816a 100644 --- a/docs/resources/resource_monitor.md +++ b/docs/resources/resource_monitor.md @@ -2,7 +2,7 @@ page_title: "snowflake_resource_monitor Resource - terraform-provider-snowflake" subcategory: "" description: |- - + Resource used to manage resource monitor objects. For more information, check resource monitor documentation https://docs.snowflake.com/en/user-guide/resource-monitors. --- !> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0950--v0960) to use it. @@ -15,7 +15,7 @@ description: |- # snowflake_resource_monitor (Resource) - +Resource used to manage resource monitor objects. For more information, check [resource monitor documentation](https://docs.snowflake.com/en/user-guide/resource-monitors). 
## Example Usage diff --git a/docs/resources/task.md b/docs/resources/task.md index 8dd8acb62a..fccf477168 100644 --- a/docs/resources/task.md +++ b/docs/resources/task.md @@ -2,12 +2,12 @@ page_title: "snowflake_task Resource - terraform-provider-snowflake" subcategory: "" description: |- - + Resource used to manage task objects. For more information, check task documentation https://docs.snowflake.com/en/user-guide/tasks-intro. --- # snowflake_task (Resource) - +Resource used to manage task objects. For more information, check [task documentation](https://docs.snowflake.com/en/user-guide/tasks-intro). ## Example Usage @@ -76,30 +76,886 @@ resource "snowflake_task" "test_task" { ### Required -- `database` (String) The database in which to create the task. -- `name` (String) Specifies the identifier for the task; must be unique for the database and schema in which the task is created. -- `schema` (String) The schema in which to create the task. +- `database` (String) The database in which to create the task. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` +- `name` (String) Specifies the identifier for the task; must be unique for the database and schema in which the task is created. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` +- `schema` (String) The schema in which to create the task. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` - `sql_statement` (String) Any single SQL statement, or a call to a stored procedure, executed when the task runs. ### Optional -- `after` (List of String) Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies. -- `allow_overlapping_execution` (Boolean) By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap. +- `abort_detached_query` (Boolean) Specifies the action that Snowflake performs for in-progress queries if connectivity is lost due to abrupt termination of a session (e.g. network outage, browser termination, service interruption). For more information, check [ABORT_DETACHED_QUERY docs](https://docs.snowflake.com/en/sql-reference/parameters#abort-detached-query). +- `after` (Set of String) Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies. 
Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` +- `allow_overlapping_execution` (String) By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap. Available options are: "true" or "false". When the value is not set in the configuration the provider will put "default" there which means to use the Snowflake default for this value. +- `autocommit` (Boolean) Specifies whether autocommit is enabled for the session. Autocommit determines whether a DML statement, when executed without an active transaction, is automatically committed after the statement successfully completes. For more information, see [Transactions](https://docs.snowflake.com/en/sql-reference/transactions). For more information, check [AUTOCOMMIT docs](https://docs.snowflake.com/en/sql-reference/parameters#autocommit). +- `binary_input_format` (String) The format of VARCHAR values passed as input to VARCHAR-to-BINARY conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output). For more information, check [BINARY_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#binary-input-format). +- `binary_output_format` (String) The format for VARCHAR values returned as output by BINARY-to-VARCHAR conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output). For more information, check [BINARY_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#binary-output-format). +- `client_memory_limit` (Number) Parameter that specifies the maximum amount of memory the JDBC driver or ODBC driver should use for the result set from queries (in MB). For more information, check [CLIENT_MEMORY_LIMIT docs](https://docs.snowflake.com/en/sql-reference/parameters#client-memory-limit). +- `client_metadata_request_use_connection_ctx` (Boolean) For specific ODBC functions and JDBC methods, this parameter can change the default search scope from all databases/schemas to the current database/schema. The narrower search typically returns fewer rows and executes more quickly. For more information, check [CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX docs](https://docs.snowflake.com/en/sql-reference/parameters#client-metadata-request-use-connection-ctx). +- `client_prefetch_threads` (Number) Parameter that specifies the number of threads used by the client to pre-fetch large result sets. The driver will attempt to honor the parameter value, but defines the minimum and maximum values (depending on your system's resources) to improve performance. For more information, check [CLIENT_PREFETCH_THREADS docs](https://docs.snowflake.com/en/sql-reference/parameters#client-prefetch-threads). +- `client_result_chunk_size` (Number) Parameter that specifies the maximum size of each set (or chunk) of query results to download (in MB). The JDBC driver downloads query results in chunks. For more information, check [CLIENT_RESULT_CHUNK_SIZE docs](https://docs.snowflake.com/en/sql-reference/parameters#client-result-chunk-size).
+- `client_result_column_case_insensitive` (Boolean) Parameter that indicates whether to match column name case-insensitively in ResultSet.get* methods in JDBC. For more information, check [CLIENT_RESULT_COLUMN_CASE_INSENSITIVE docs](https://docs.snowflake.com/en/sql-reference/parameters#client-result-column-case-insensitive). +- `client_session_keep_alive` (Boolean) Parameter that indicates whether to force a user to log in again after a period of inactivity in the session. For more information, check [CLIENT_SESSION_KEEP_ALIVE docs](https://docs.snowflake.com/en/sql-reference/parameters#client-session-keep-alive). +- `client_session_keep_alive_heartbeat_frequency` (Number) Number of seconds in-between client attempts to update the token for the session. For more information, check [CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY docs](https://docs.snowflake.com/en/sql-reference/parameters#client-session-keep-alive-heartbeat-frequency). +- `client_timestamp_type_mapping` (String) Specifies the [TIMESTAMP_* variation](https://docs.snowflake.com/en/sql-reference/data-types-datetime.html#label-datatypes-timestamp-variations) to use when binding timestamp variables for JDBC or ODBC applications that use the bind API to load data. For more information, check [CLIENT_TIMESTAMP_TYPE_MAPPING docs](https://docs.snowflake.com/en/sql-reference/parameters#client-timestamp-type-mapping). - `comment` (String) Specifies a comment for the task. -- `enabled` (Boolean) Specifies if the task should be started (enabled) after creation or should remain suspended (default). -- `error_integration` (String) Specifies the name of the notification integration used for error notifications. -- `schedule` (String) The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with after) -- `session_parameters` (Map of String) Specifies session parameters to set for the session when the task runs. A task supports all session parameters. -- `suspend_task_after_num_failures` (Number) Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension). -- `user_task_managed_initial_warehouse_size` (String) Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. (Conflicts with warehouse) -- `user_task_timeout_ms` (Number) Specifies the time limit on a single run of the task before it times out (in milliseconds). +- `config` (String) Specifies a string representation of key value pairs that can be accessed by all tasks in the task graph. Must be in JSON format. +- `date_input_format` (String) Specifies the input format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [DATE_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#date-input-format). +- `date_output_format` (String) Specifies the display format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [DATE_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#date-output-format). 
+- `enable_unload_physical_type_optimization` (Boolean) Specifies whether to set the schema for unloaded Parquet files based on the logical column data types (i.e. the types in the unload SQL query or source table) or on the unloaded column values (i.e. the smallest data types and precision that support the values in the output columns of the unload SQL statement or source table). For more information, check [ENABLE_UNLOAD_PHYSICAL_TYPE_OPTIMIZATION docs](https://docs.snowflake.com/en/sql-reference/parameters#enable-unload-physical-type-optimization). +- `enabled` (String) Specifies if the task should be started (enabled) after creation or should remain suspended (default). Available options are: "true" or "false". When the value is not set in the configuration the provider will put "default" there which means to use the Snowflake default for this value. +- `error_integration` (String) Specifies the name of the notification integration used for error notifications. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` +- `error_on_nondeterministic_merge` (Boolean) Specifies whether to return an error when the [MERGE](https://docs.snowflake.com/en/sql-reference/sql/merge) command is used to update or delete a target row that joins multiple source rows and the system cannot determine the action to perform on the target row. For more information, check [ERROR_ON_NONDETERMINISTIC_MERGE docs](https://docs.snowflake.com/en/sql-reference/parameters#error-on-nondeterministic-merge). +- `error_on_nondeterministic_update` (Boolean) Specifies whether to return an error when the [UPDATE](https://docs.snowflake.com/en/sql-reference/sql/update) command is used to update a target row that joins multiple source rows and the system cannot determine the action to perform on the target row. For more information, check [ERROR_ON_NONDETERMINISTIC_UPDATE docs](https://docs.snowflake.com/en/sql-reference/parameters#error-on-nondeterministic-update). +- `finalize` (String) TODO Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` +- `geography_output_format` (String) Display format for [GEOGRAPHY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geography). For more information, check [GEOGRAPHY_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#geography-output-format). +- `geometry_output_format` (String) Display format for [GEOMETRY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geometry). For more information, check [GEOMETRY_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#geometry-output-format). +- `jdbc_treat_timestamp_ntz_as_utc` (Boolean) Specifies how JDBC processes TIMESTAMP_NTZ values. For more information, check [JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC docs](https://docs.snowflake.com/en/sql-reference/parameters#jdbc-treat-timestamp-ntz-as-utc). 
+- `jdbc_use_session_timezone` (Boolean) Specifies whether the JDBC Driver uses the time zone of the JVM or the time zone of the session (specified by the [TIMEZONE](https://docs.snowflake.com/en/sql-reference/parameters#label-timezone) parameter) for the getDate(), getTime(), and getTimestamp() methods of the ResultSet class. For more information, check [JDBC_USE_SESSION_TIMEZONE docs](https://docs.snowflake.com/en/sql-reference/parameters#jdbc-use-session-timezone). +- `json_indent` (Number) Specifies the number of blank spaces to indent each new element in JSON output in the session. Also specifies whether to insert newline characters after each element. For more information, check [JSON_INDENT docs](https://docs.snowflake.com/en/sql-reference/parameters#json-indent). +- `lock_timeout` (Number) Number of seconds to wait while trying to lock a resource, before timing out and aborting the statement. For more information, check [LOCK_TIMEOUT docs](https://docs.snowflake.com/en/sql-reference/parameters#lock-timeout). +- `log_level` (String) Specifies the severity level of messages that should be ingested and made available in the active event table. Messages at the specified level (and at more severe levels) are ingested. For more information about log levels, see [Setting log level](https://docs.snowflake.com/en/developer-guide/logging-tracing/logging-log-level). For more information, check [LOG_LEVEL docs](https://docs.snowflake.com/en/sql-reference/parameters#log-level). +- `multi_statement_count` (Number) Number of statements to execute when using the multi-statement capability. For more information, check [MULTI_STATEMENT_COUNT docs](https://docs.snowflake.com/en/sql-reference/parameters#multi-statement-count). +- `noorder_sequence_as_default` (Boolean) Specifies whether the ORDER or NOORDER property is set by default when you create a new sequence or add a new table column. The ORDER and NOORDER properties determine whether or not the values are generated for the sequence or auto-incremented column in [increasing or decreasing order](https://docs.snowflake.com/en/user-guide/querying-sequences.html#label-querying-sequences-increasing-values). For more information, check [NOORDER_SEQUENCE_AS_DEFAULT docs](https://docs.snowflake.com/en/sql-reference/parameters#noorder-sequence-as-default). +- `odbc_treat_decimal_as_int` (Boolean) Specifies how ODBC processes columns that have a scale of zero (0). For more information, check [ODBC_TREAT_DECIMAL_AS_INT docs](https://docs.snowflake.com/en/sql-reference/parameters#odbc-treat-decimal-as-int). +- `query_tag` (String) Optional string that can be used to tag queries and other SQL statements executed within a session. The tags are displayed in the output of the [QUERY_HISTORY, QUERY_HISTORY_BY_*](https://docs.snowflake.com/en/sql-reference/functions/query_history) functions. For more information, check [QUERY_TAG docs](https://docs.snowflake.com/en/sql-reference/parameters#query-tag). +- `quoted_identifiers_ignore_case` (Boolean) Specifies whether letters in double-quoted object identifiers are stored and resolved as uppercase letters. By default, Snowflake preserves the case of alphabetic characters when storing and resolving double-quoted identifiers (see [Identifier resolution](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing)). 
You can use this parameter in situations in which [third-party applications always use double quotes around identifiers](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing-parameter). For more information, check [QUOTED_IDENTIFIERS_IGNORE_CASE docs](https://docs.snowflake.com/en/sql-reference/parameters#quoted-identifiers-ignore-case). +- `rows_per_resultset` (Number) Specifies the maximum number of rows returned in a result set. A value of 0 specifies no maximum. For more information, check [ROWS_PER_RESULTSET docs](https://docs.snowflake.com/en/sql-reference/parameters#rows-per-resultset). +- `s3_stage_vpce_dns_name` (String) Specifies the DNS name of an Amazon S3 interface endpoint. Requests sent to the internal stage of an account via [AWS PrivateLink for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html) use this endpoint to connect. For more information, see [Accessing Internal stages with dedicated interface endpoints](https://docs.snowflake.com/en/user-guide/private-internal-stages-aws.html#label-aws-privatelink-internal-stage-network-isolation). For more information, check [S3_STAGE_VPCE_DNS_NAME docs](https://docs.snowflake.com/en/sql-reference/parameters#s3-stage-vpce-dns-name). +- `schedule` (String) The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with finalize and after) +- `search_path` (String) Specifies the path to search to resolve unqualified object names in queries. For more information, see [Name resolution in queries](https://docs.snowflake.com/en/sql-reference/name-resolution.html#label-object-name-resolution-search-path). Comma-separated list of identifiers. An identifier can be a fully or partially qualified schema name. For more information, check [SEARCH_PATH docs](https://docs.snowflake.com/en/sql-reference/parameters#search-path). +- `statement_queued_timeout_in_seconds` (Number) Amount of time, in seconds, a SQL statement (query, DDL, DML, etc.) remains queued for a warehouse before it is canceled by the system. This parameter can be used in conjunction with the [MAX_CONCURRENCY_LEVEL](https://docs.snowflake.com/en/sql-reference/parameters#label-max-concurrency-level) parameter to ensure a warehouse is never backlogged. For more information, check [STATEMENT_QUEUED_TIMEOUT_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#statement-queued-timeout-in-seconds). +- `statement_timeout_in_seconds` (Number) Amount of time, in seconds, after which a running SQL statement (query, DDL, DML, etc.) is canceled by the system. For more information, check [STATEMENT_TIMEOUT_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#statement-timeout-in-seconds). +- `strict_json_output` (Boolean) This parameter specifies whether JSON output in a session is compatible with the general standard (as described by [http://json.org](http://json.org)). By design, Snowflake allows JSON input that contains non-standard values; however, these non-standard values might result in Snowflake outputting JSON that is incompatible with other platforms and languages. This parameter, when enabled, ensures that Snowflake outputs valid/compatible JSON. For more information, check [STRICT_JSON_OUTPUT docs](https://docs.snowflake.com/en/sql-reference/parameters#strict-json-output). 
+- `suspend_task_after_num_failures` (Number) Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension). For more information, check [SUSPEND_TASK_AFTER_NUM_FAILURES docs](https://docs.snowflake.com/en/sql-reference/parameters#suspend-task-after-num-failures). +- `task_auto_retry_attempts` (Number) Specifies the number of automatic task graph retry attempts. If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed. For more information, check [TASK_AUTO_RETRY_ATTEMPTS docs](https://docs.snowflake.com/en/sql-reference/parameters#task-auto-retry-attempts). +- `time_input_format` (String) Specifies the input format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported time format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of times stored in the system during the session). For more information, check [TIME_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#time-input-format). +- `time_output_format` (String) Specifies the display format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [TIME_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#time-output-format). +- `timestamp_day_is_always_24h` (Boolean) Specifies whether the [DATEADD](https://docs.snowflake.com/en/sql-reference/functions/dateadd) function (and its aliases) always consider a day to be exactly 24 hours for expressions that span multiple days. For more information, check [TIMESTAMP_DAY_IS_ALWAYS_24H docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-day-is-always-24h). +- `timestamp_input_format` (String) Specifies the input format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported timestamp format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of timestamps stored in the system during the session). For more information, check [TIMESTAMP_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-input-format). +- `timestamp_ltz_output_format` (String) Specifies the display format for the TIMESTAMP_LTZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [TIMESTAMP_LTZ_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-ltz-output-format). +- `timestamp_ntz_output_format` (String) Specifies the display format for the TIMESTAMP_NTZ data type. For more information, check [TIMESTAMP_NTZ_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-ntz-output-format). +- `timestamp_output_format` (String) Specifies the display format for the TIMESTAMP data type alias. 
For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [TIMESTAMP_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-output-format). +- `timestamp_type_mapping` (String) Specifies the TIMESTAMP_* variation that the TIMESTAMP data type alias maps to. For more information, check [TIMESTAMP_TYPE_MAPPING docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-type-mapping). +- `timestamp_tz_output_format` (String) Specifies the display format for the TIMESTAMP_TZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [TIMESTAMP_TZ_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-tz-output-format). +- `timezone` (String) Specifies the time zone for the session. You can specify a [time zone name](https://data.iana.org/time-zones/tzdb-2021a/zone1970.tab) or a [link name](https://data.iana.org/time-zones/tzdb-2021a/backward) from release 2021a of the [IANA Time Zone Database](https://www.iana.org/time-zones) (e.g. America/Los_Angeles, Europe/London, UTC, Etc/GMT, etc.). For more information, check [TIMEZONE docs](https://docs.snowflake.com/en/sql-reference/parameters#timezone). +- `trace_level` (String) Controls how trace events are ingested into the event table. For more information about trace levels, see [Setting trace level](https://docs.snowflake.com/en/developer-guide/logging-tracing/tracing-trace-level). For more information, check [TRACE_LEVEL docs](https://docs.snowflake.com/en/sql-reference/parameters#trace-level). +- `transaction_abort_on_error` (Boolean) Specifies the action to perform when a statement issued within a non-autocommit transaction returns with an error. For more information, check [TRANSACTION_ABORT_ON_ERROR docs](https://docs.snowflake.com/en/sql-reference/parameters#transaction-abort-on-error). +- `transaction_default_isolation_level` (String) Specifies the isolation level for transactions in the user session. For more information, check [TRANSACTION_DEFAULT_ISOLATION_LEVEL docs](https://docs.snowflake.com/en/sql-reference/parameters#transaction-default-isolation-level). +- `two_digit_century_start` (Number) Specifies the “century start” year for 2-digit years (i.e. the earliest year such dates can represent). This parameter prevents ambiguous dates when importing or converting data with the `YY` date format component (i.e. years represented as 2 digits). For more information, check [TWO_DIGIT_CENTURY_START docs](https://docs.snowflake.com/en/sql-reference/parameters#two-digit-century-start). +- `unsupported_ddl_action` (String) Determines if an unsupported (i.e. non-default) value specified for a constraint property returns an error. For more information, check [UNSUPPORTED_DDL_ACTION docs](https://docs.snowflake.com/en/sql-reference/parameters#unsupported-ddl-action). +- `use_cached_result` (Boolean) Specifies whether to reuse persisted query results, if available, when a matching query is submitted. For more information, check [USE_CACHED_RESULT docs](https://docs.snowflake.com/en/sql-reference/parameters#use-cached-result).
+- `user_task_managed_initial_warehouse_size` (String) Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse) For more information, check [USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-managed-initial-warehouse-size). +- `user_task_minimum_trigger_interval_in_seconds` (Number) Minimum amount of time between Triggered Task executions in seconds For more information, check [USER_TASK_MINIMUM_TRIGGER_INTERVAL_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-minimum-trigger-interval-in-seconds). +- `user_task_timeout_ms` (Number) Specifies the time limit on a single run of the task before it times out (in milliseconds). For more information, check [USER_TASK_TIMEOUT_MS docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-timeout-ms). - `warehouse` (String) The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size) +- `week_of_year_policy` (Number) Specifies how the weeks in a given year are computed. `0`: The semantics used are equivalent to the ISO semantics, in which a week belongs to a given year if at least 4 days of that week are in that year. `1`: January 1 is included in the first week of the year and December 31 is included in the last week of the year. For more information, check [WEEK_OF_YEAR_POLICY docs](https://docs.snowflake.com/en/sql-reference/parameters#week-of-year-policy). +- `week_start` (Number) Specifies the first day of the week (used by week-related date functions). `0`: Legacy Snowflake behavior is used (i.e. ISO-like semantics). `1` (Monday) to `7` (Sunday): All the week-related functions use weeks that start on the specified day of the week. For more information, check [WEEK_START docs](https://docs.snowflake.com/en/sql-reference/parameters#week-start). - `when` (String) Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported. ### Read-Only - `fully_qualified_name` (String) Fully qualified name of the resource. For more information, see [object name resolution](https://docs.snowflake.com/en/sql-reference/name-resolution). - `id` (String) The ID of this resource. +- `parameters` (List of Object) Outputs the result of `SHOW PARAMETERS IN TASK` for the given task. (see [below for nested schema](#nestedatt--parameters)) +- `show_output` (List of Object) Outputs the result of `SHOW TASKS` for the given task. 
(see [below for nested schema](#nestedatt--show_output)) + + +### Nested Schema for `parameters` + +Read-Only: + +- `abort_detached_query` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--abort_detached_query)) +- `autocommit` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--autocommit)) +- `binary_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--binary_input_format)) +- `binary_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--binary_output_format)) +- `client_memory_limit` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_memory_limit)) +- `client_metadata_request_use_connection_ctx` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_metadata_request_use_connection_ctx)) +- `client_prefetch_threads` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_prefetch_threads)) +- `client_result_chunk_size` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_result_chunk_size)) +- `client_result_column_case_insensitive` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_result_column_case_insensitive)) +- `client_session_keep_alive` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_session_keep_alive)) +- `client_session_keep_alive_heartbeat_frequency` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_session_keep_alive_heartbeat_frequency)) +- `client_timestamp_type_mapping` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_timestamp_type_mapping)) +- `date_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--date_input_format)) +- `date_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--date_output_format)) +- `enable_unload_physical_type_optimization` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--enable_unload_physical_type_optimization)) +- `error_on_nondeterministic_merge` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--error_on_nondeterministic_merge)) +- `error_on_nondeterministic_update` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--error_on_nondeterministic_update)) +- `geography_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--geography_output_format)) +- `geometry_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--geometry_output_format)) +- `jdbc_treat_timestamp_ntz_as_utc` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--jdbc_treat_timestamp_ntz_as_utc)) +- `jdbc_use_session_timezone` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--jdbc_use_session_timezone)) +- `json_indent` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--json_indent)) +- `lock_timeout` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--lock_timeout)) +- `log_level` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--log_level)) +- `multi_statement_count` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--multi_statement_count)) +- `noorder_sequence_as_default` (List of Object) (see [below for nested 
schema](#nestedobjatt--parameters--noorder_sequence_as_default)) +- `odbc_treat_decimal_as_int` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--odbc_treat_decimal_as_int)) +- `query_tag` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--query_tag)) +- `quoted_identifiers_ignore_case` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--quoted_identifiers_ignore_case)) +- `rows_per_resultset` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--rows_per_resultset)) +- `s3_stage_vpce_dns_name` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--s3_stage_vpce_dns_name)) +- `search_path` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--search_path)) +- `statement_queued_timeout_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--statement_queued_timeout_in_seconds)) +- `statement_timeout_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--statement_timeout_in_seconds)) +- `strict_json_output` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--strict_json_output)) +- `suspend_task_after_num_failures` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--suspend_task_after_num_failures)) +- `task_auto_retry_attempts` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--task_auto_retry_attempts)) +- `time_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--time_input_format)) +- `time_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--time_output_format)) +- `timestamp_day_is_always_24h` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_day_is_always_24h)) +- `timestamp_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_input_format)) +- `timestamp_ltz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_ltz_output_format)) +- `timestamp_ntz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_ntz_output_format)) +- `timestamp_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_output_format)) +- `timestamp_type_mapping` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_type_mapping)) +- `timestamp_tz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_tz_output_format)) +- `timezone` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timezone)) +- `trace_level` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--trace_level)) +- `transaction_abort_on_error` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--transaction_abort_on_error)) +- `transaction_default_isolation_level` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--transaction_default_isolation_level)) +- `two_digit_century_start` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--two_digit_century_start)) +- `unsupported_ddl_action` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--unsupported_ddl_action)) +- `use_cached_result` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--use_cached_result)) +- 
`user_task_managed_initial_warehouse_size` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--user_task_managed_initial_warehouse_size)) +- `user_task_minimum_trigger_interval_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--user_task_minimum_trigger_interval_in_seconds)) +- `user_task_timeout_ms` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--user_task_timeout_ms)) +- `week_of_year_policy` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--week_of_year_policy)) +- `week_start` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--week_start)) + + +### Nested Schema for `parameters.abort_detached_query` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.autocommit` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.binary_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.binary_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_memory_limit` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_metadata_request_use_connection_ctx` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_prefetch_threads` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_result_chunk_size` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_result_column_case_insensitive` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_session_keep_alive` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_session_keep_alive_heartbeat_frequency` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_timestamp_type_mapping` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.date_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.date_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.enable_unload_physical_type_optimization` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + 
+### Nested Schema for `parameters.error_on_nondeterministic_merge` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.error_on_nondeterministic_update` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.geography_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.geometry_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.jdbc_treat_timestamp_ntz_as_utc` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.jdbc_use_session_timezone` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.json_indent` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.lock_timeout` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.log_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.multi_statement_count` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.noorder_sequence_as_default` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.odbc_treat_decimal_as_int` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.query_tag` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.quoted_identifiers_ignore_case` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.rows_per_resultset` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.s3_stage_vpce_dns_name` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.search_path` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.statement_queued_timeout_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.statement_timeout_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + 
+ +### Nested Schema for `parameters.strict_json_output` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.suspend_task_after_num_failures` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.task_auto_retry_attempts` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.time_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.time_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_day_is_always_24h` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_ltz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_ntz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_type_mapping` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_tz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timezone` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.trace_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.transaction_abort_on_error` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.transaction_default_isolation_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.two_digit_century_start` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.unsupported_ddl_action` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.use_cached_result` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) 
+ + + +### Nested Schema for `parameters.user_task_managed_initial_warehouse_size` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.user_task_minimum_trigger_interval_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.user_task_timeout_ms` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.week_of_year_policy` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.week_start` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + + +### Nested Schema for `show_output` + +Read-Only: + +- `allow_overlapping_execution` (Boolean) +- `budget` (String) +- `comment` (String) +- `condition` (String) +- `config` (String) +- `created_on` (String) +- `database_name` (String) +- `definition` (String) +- `error_integration` (String) +- `id` (String) +- `last_committed_on` (String) +- `last_suspended_on` (String) +- `last_suspended_reason` (String) +- `name` (String) +- `owner` (String) +- `owner_role_type` (String) +- `predecessors` (Set of String) +- `schedule` (String) +- `schema_name` (String) +- `state` (String) +- `task_relations` (List of Object) (see [below for nested schema](#nestedobjatt--show_output--task_relations)) +- `warehouse` (String) + + +### Nested Schema for `show_output.task_relations` + +Read-Only: + +- `finalized_root_task` (String) +- `finalizer` (String) +- `predecessors` (List of String) ## Import diff --git a/pkg/acceptance/bettertestspoc/README.md b/pkg/acceptance/bettertestspoc/README.md index a97717c232..587d745c4f 100644 --- a/pkg/acceptance/bettertestspoc/README.md +++ b/pkg/acceptance/bettertestspoc/README.md @@ -349,3 +349,5 @@ func (w *WarehouseDatasourceShowOutputAssert) IsEmpty() { The error is a result of both things: 1. Lists of objects are partially generated, and only parameter name is generated in some functions (the type has to be added manually). 2. `testing` is a package name that makes Go think that we want to have unnamed parameter there, but we just didn't generate the type for that field in the function argument. 
+- Additional asserts for sets and lists that wouldn't rely on the order of items saved to the state (SNOW-1706544) + \ No newline at end of file diff --git a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go index 938969f272..cbabb0ca66 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go @@ -107,11 +107,17 @@ func (t *TaskAssert) HasComment(expected string) *TaskAssert { return t } -func (t *TaskAssert) HasWarehouse(expected string) *TaskAssert { +func (t *TaskAssert) HasWarehouse(expected *sdk.AccountObjectIdentifier) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() - if o.Warehouse != nil && o.Warehouse.FullyQualifiedName() != expected { - return fmt.Errorf("expected warehouse: %v; got: %v", expected, o.Warehouse) + if o.Warehouse == nil && expected != nil { + return fmt.Errorf("expected warehouse to have value; got: nil") + } + if o.Warehouse != nil && expected == nil { + return fmt.Errorf("expected warehouse to have no value; got: %s", o.Warehouse.Name()) + } + if o.Warehouse != nil && expected != nil && o.Warehouse.Name() != expected.Name() { + return fmt.Errorf("expected warehouse: %v; got: %v", expected.Name(), o.Warehouse.Name()) } return nil }) @@ -129,6 +135,18 @@ func (t *TaskAssert) HasSchedule(expected string) *TaskAssert { return t } +// TODO: +//func (t *TaskAssert) HasPredecessors(expected []sdk.SchemaObjectIdentifier) *TaskAssert { +// t.AddAssertion(func(t *testing.T, o *sdk.Task) error { +// t.Helper() +// if o.Predecessors != expected { +// return fmt.Errorf("expected predecessors: %v; got: %v", expected, o.Predecessors) +// } +// return nil +// }) +// return t +//} + func (t *TaskAssert) HasState(expected sdk.TaskState) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() @@ -176,8 +194,14 @@ func (t *TaskAssert) HasAllowOverlappingExecution(expected bool) *TaskAssert { func (t *TaskAssert) HasErrorIntegration(expected *sdk.AccountObjectIdentifier) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() - if o.ErrorIntegration != expected { - return fmt.Errorf("expected error integration: %v; got: %v", expected, o.ErrorIntegration) + if o.ErrorIntegration == nil && expected != nil { + return fmt.Errorf("expected error integration to have value; got: nil") + } + if o.ErrorIntegration != nil && expected == nil { + return fmt.Errorf("expected error integration to have no value; got: %s", o.ErrorIntegration.Name()) + } + if o.ErrorIntegration != nil && expected != nil && o.ErrorIntegration.Name() != expected.Name() { + return fmt.Errorf("expected error integration: %v; got: %v", expected.Name(), o.ErrorIntegration.Name()) } return nil }) @@ -239,6 +263,18 @@ func (t *TaskAssert) HasBudget(expected string) *TaskAssert { return t } +// TODO: +//func (t *TaskAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskAssert { +// t.AddAssertion(func(t *testing.T, o *sdk.Task) error { +// t.Helper() +// if o.TaskRelations != expected { +// return fmt.Errorf("expected task relations: %v; got: %v", expected, o.TaskRelations) +// } +// return nil +// }) +// return t +//} + func (t *TaskAssert) HasLastSuspendedReason(expected string) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() diff --git 
a/pkg/acceptance/bettertestspoc/assert/objectparametersassert/task_parameters_snowflake_gen.go b/pkg/acceptance/bettertestspoc/assert/objectparametersassert/task_parameters_snowflake_gen.go index 24ac1f78bd..b5c571149d 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectparametersassert/task_parameters_snowflake_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/objectparametersassert/task_parameters_snowflake_gen.go @@ -1027,7 +1027,7 @@ func (t *TaskParametersAssert) HasDefaultTaskAutoRetryAttemptsValueExplicit() *T } func (t *TaskParametersAssert) HasDefaultUserTaskManagedInitialWarehouseSizeValueExplicit() *TaskParametersAssert { - return t.HasUserTaskManagedInitialWarehouseSize("Medium") + return t.HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSize("Medium")) } func (t *TaskParametersAssert) HasDefaultUserTaskMinimumTriggerIntervalInSecondsValueExplicit() *TaskParametersAssert { diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go index 6ae80d8689..5e4ff905b0 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go @@ -1,8 +1,9 @@ package resourceassert import ( - "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" "strconv" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" ) func (t *TaskResourceAssert) HasAfterLen(len int) *TaskResourceAssert { diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go index 543134a4ca..dfe0369762 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go @@ -32,6 +32,11 @@ func ImportedTaskResource(t *testing.T, id string) *TaskResourceAssert { // Attribute value string checks // /////////////////////////////////// +func (t *TaskResourceAssert) HasAbortDetachedQueryString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("abort_detached_query", expected)) + return t +} + func (t *TaskResourceAssert) HasAfterString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("after", expected)) return t @@ -42,6 +47,61 @@ func (t *TaskResourceAssert) HasAllowOverlappingExecutionString(expected string) return t } +func (t *TaskResourceAssert) HasAutocommitString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("autocommit", expected)) + return t +} + +func (t *TaskResourceAssert) HasBinaryInputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("binary_input_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasBinaryOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("binary_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientMemoryLimitString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_memory_limit", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientMetadataRequestUseConnectionCtxString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_metadata_request_use_connection_ctx", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientPrefetchThreadsString(expected string) 
*TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_prefetch_threads", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientResultChunkSizeString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_result_chunk_size", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientResultColumnCaseInsensitiveString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_result_column_case_insensitive", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientSessionKeepAliveString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_session_keep_alive", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientSessionKeepAliveHeartbeatFrequencyString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_session_keep_alive_heartbeat_frequency", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientTimestampTypeMappingString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_timestamp_type_mapping", expected)) + return t +} + func (t *TaskResourceAssert) HasCommentString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("comment", expected)) return t @@ -57,6 +117,21 @@ func (t *TaskResourceAssert) HasDatabaseString(expected string) *TaskResourceAss return t } +func (t *TaskResourceAssert) HasDateInputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("date_input_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasDateOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("date_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasEnableUnloadPhysicalTypeOptimizationString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("enable_unload_physical_type_optimization", expected)) + return t +} + func (t *TaskResourceAssert) HasEnabledString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("enabled", expected)) return t @@ -67,6 +142,16 @@ func (t *TaskResourceAssert) HasErrorIntegrationString(expected string) *TaskRes return t } +func (t *TaskResourceAssert) HasErrorOnNondeterministicMergeString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("error_on_nondeterministic_merge", expected)) + return t +} + +func (t *TaskResourceAssert) HasErrorOnNondeterministicUpdateString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("error_on_nondeterministic_update", expected)) + return t +} + func (t *TaskResourceAssert) HasFinalizeString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("finalize", expected)) return t @@ -77,11 +162,81 @@ func (t *TaskResourceAssert) HasFullyQualifiedNameString(expected string) *TaskR return t } +func (t *TaskResourceAssert) HasGeographyOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("geography_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasGeometryOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("geometry_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasJdbcTreatTimestampNtzAsUtcString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("jdbc_treat_timestamp_ntz_as_utc", expected)) + return t +} + +func (t *TaskResourceAssert) HasJdbcUseSessionTimezoneString(expected string) *TaskResourceAssert 
{ + t.AddAssertion(assert.ValueSet("jdbc_use_session_timezone", expected)) + return t +} + +func (t *TaskResourceAssert) HasJsonIndentString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("json_indent", expected)) + return t +} + +func (t *TaskResourceAssert) HasLockTimeoutString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("lock_timeout", expected)) + return t +} + +func (t *TaskResourceAssert) HasLogLevelString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("log_level", expected)) + return t +} + +func (t *TaskResourceAssert) HasMultiStatementCountString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("multi_statement_count", expected)) + return t +} + func (t *TaskResourceAssert) HasNameString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("name", expected)) return t } +func (t *TaskResourceAssert) HasNoorderSequenceAsDefaultString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("noorder_sequence_as_default", expected)) + return t +} + +func (t *TaskResourceAssert) HasOdbcTreatDecimalAsIntString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("odbc_treat_decimal_as_int", expected)) + return t +} + +func (t *TaskResourceAssert) HasQueryTagString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("query_tag", expected)) + return t +} + +func (t *TaskResourceAssert) HasQuotedIdentifiersIgnoreCaseString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("quoted_identifiers_ignore_case", expected)) + return t +} + +func (t *TaskResourceAssert) HasRowsPerResultsetString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("rows_per_resultset", expected)) + return t +} + +func (t *TaskResourceAssert) HasS3StageVpceDnsNameString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("s3_stage_vpce_dns_name", expected)) + return t +} + func (t *TaskResourceAssert) HasScheduleString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("schedule", expected)) return t @@ -92,8 +247,8 @@ func (t *TaskResourceAssert) HasSchemaString(expected string) *TaskResourceAsser return t } -func (t *TaskResourceAssert) HasSessionParametersString(expected string) *TaskResourceAssert { - t.AddAssertion(assert.ValueSet("session_parameters", expected)) +func (t *TaskResourceAssert) HasSearchPathString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("search_path", expected)) return t } @@ -102,6 +257,21 @@ func (t *TaskResourceAssert) HasSqlStatementString(expected string) *TaskResourc return t } +func (t *TaskResourceAssert) HasStatementQueuedTimeoutInSecondsString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("statement_queued_timeout_in_seconds", expected)) + return t +} + +func (t *TaskResourceAssert) HasStatementTimeoutInSecondsString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("statement_timeout_in_seconds", expected)) + return t +} + +func (t *TaskResourceAssert) HasStrictJsonOutputString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("strict_json_output", expected)) + return t +} + func (t *TaskResourceAssert) HasSuspendTaskAfterNumFailuresString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("suspend_task_after_num_failures", expected)) return t @@ -112,6 +282,86 @@ func (t *TaskResourceAssert) 
HasTaskAutoRetryAttemptsString(expected string) *Ta return t } +func (t *TaskResourceAssert) HasTimeInputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("time_input_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimeOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("time_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimestampDayIsAlways24hString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("timestamp_day_is_always_24h", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimestampInputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("timestamp_input_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimestampLtzOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("timestamp_ltz_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimestampNtzOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("timestamp_ntz_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimestampOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("timestamp_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimestampTypeMappingString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("timestamp_type_mapping", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimestampTzOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("timestamp_tz_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasTimezoneString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("timezone", expected)) + return t +} + +func (t *TaskResourceAssert) HasTraceLevelString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("trace_level", expected)) + return t +} + +func (t *TaskResourceAssert) HasTransactionAbortOnErrorString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("transaction_abort_on_error", expected)) + return t +} + +func (t *TaskResourceAssert) HasTransactionDefaultIsolationLevelString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("transaction_default_isolation_level", expected)) + return t +} + +func (t *TaskResourceAssert) HasTwoDigitCenturyStartString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("two_digit_century_start", expected)) + return t +} + +func (t *TaskResourceAssert) HasUnsupportedDdlActionString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("unsupported_ddl_action", expected)) + return t +} + +func (t *TaskResourceAssert) HasUseCachedResultString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("use_cached_result", expected)) + return t +} + func (t *TaskResourceAssert) HasUserTaskManagedInitialWarehouseSizeString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("user_task_managed_initial_warehouse_size", expected)) return t @@ -132,6 +382,16 @@ func (t *TaskResourceAssert) HasWarehouseString(expected string) *TaskResourceAs return t } +func (t *TaskResourceAssert) HasWeekOfYearPolicyString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("week_of_year_policy", expected)) + return t +} + +func (t *TaskResourceAssert) 
HasWeekStartString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("week_start", expected)) + return t +} + func (t *TaskResourceAssert) HasWhenString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("when", expected)) return t @@ -141,6 +401,11 @@ func (t *TaskResourceAssert) HasWhenString(expected string) *TaskResourceAssert // Attribute empty checks // //////////////////////////// +func (t *TaskResourceAssert) HasNoAbortDetachedQuery() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("abort_detached_query")) + return t +} + func (t *TaskResourceAssert) HasNoAfter() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("after")) return t @@ -151,6 +416,61 @@ func (t *TaskResourceAssert) HasNoAllowOverlappingExecution() *TaskResourceAsser return t } +func (t *TaskResourceAssert) HasNoAutocommit() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("autocommit")) + return t +} + +func (t *TaskResourceAssert) HasNoBinaryInputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("binary_input_format")) + return t +} + +func (t *TaskResourceAssert) HasNoBinaryOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("binary_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoClientMemoryLimit() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("client_memory_limit")) + return t +} + +func (t *TaskResourceAssert) HasNoClientMetadataRequestUseConnectionCtx() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("client_metadata_request_use_connection_ctx")) + return t +} + +func (t *TaskResourceAssert) HasNoClientPrefetchThreads() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("client_prefetch_threads")) + return t +} + +func (t *TaskResourceAssert) HasNoClientResultChunkSize() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("client_result_chunk_size")) + return t +} + +func (t *TaskResourceAssert) HasNoClientResultColumnCaseInsensitive() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("client_result_column_case_insensitive")) + return t +} + +func (t *TaskResourceAssert) HasNoClientSessionKeepAlive() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("client_session_keep_alive")) + return t +} + +func (t *TaskResourceAssert) HasNoClientSessionKeepAliveHeartbeatFrequency() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("client_session_keep_alive_heartbeat_frequency")) + return t +} + +func (t *TaskResourceAssert) HasNoClientTimestampTypeMapping() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("client_timestamp_type_mapping")) + return t +} + func (t *TaskResourceAssert) HasNoComment() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("comment")) return t @@ -166,6 +486,21 @@ func (t *TaskResourceAssert) HasNoDatabase() *TaskResourceAssert { return t } +func (t *TaskResourceAssert) HasNoDateInputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("date_input_format")) + return t +} + +func (t *TaskResourceAssert) HasNoDateOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("date_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoEnableUnloadPhysicalTypeOptimization() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("enable_unload_physical_type_optimization")) + return t +} + func (t *TaskResourceAssert) HasNoEnabled() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("enabled")) return t @@ -176,6 +511,16 @@ func (t *TaskResourceAssert) 
HasNoErrorIntegration() *TaskResourceAssert { return t } +func (t *TaskResourceAssert) HasNoErrorOnNondeterministicMerge() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("error_on_nondeterministic_merge")) + return t +} + +func (t *TaskResourceAssert) HasNoErrorOnNondeterministicUpdate() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("error_on_nondeterministic_update")) + return t +} + func (t *TaskResourceAssert) HasNoFinalize() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("finalize")) return t @@ -186,11 +531,81 @@ func (t *TaskResourceAssert) HasNoFullyQualifiedName() *TaskResourceAssert { return t } +func (t *TaskResourceAssert) HasNoGeographyOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("geography_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoGeometryOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("geometry_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoJdbcTreatTimestampNtzAsUtc() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("jdbc_treat_timestamp_ntz_as_utc")) + return t +} + +func (t *TaskResourceAssert) HasNoJdbcUseSessionTimezone() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("jdbc_use_session_timezone")) + return t +} + +func (t *TaskResourceAssert) HasNoJsonIndent() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("json_indent")) + return t +} + +func (t *TaskResourceAssert) HasNoLockTimeout() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("lock_timeout")) + return t +} + +func (t *TaskResourceAssert) HasNoLogLevel() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("log_level")) + return t +} + +func (t *TaskResourceAssert) HasNoMultiStatementCount() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("multi_statement_count")) + return t +} + func (t *TaskResourceAssert) HasNoName() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("name")) return t } +func (t *TaskResourceAssert) HasNoNoorderSequenceAsDefault() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("noorder_sequence_as_default")) + return t +} + +func (t *TaskResourceAssert) HasNoOdbcTreatDecimalAsInt() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("odbc_treat_decimal_as_int")) + return t +} + +func (t *TaskResourceAssert) HasNoQueryTag() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("query_tag")) + return t +} + +func (t *TaskResourceAssert) HasNoQuotedIdentifiersIgnoreCase() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("quoted_identifiers_ignore_case")) + return t +} + +func (t *TaskResourceAssert) HasNoRowsPerResultset() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("rows_per_resultset")) + return t +} + +func (t *TaskResourceAssert) HasNoS3StageVpceDnsName() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("s3_stage_vpce_dns_name")) + return t +} + func (t *TaskResourceAssert) HasNoSchedule() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("schedule")) return t @@ -201,8 +616,8 @@ func (t *TaskResourceAssert) HasNoSchema() *TaskResourceAssert { return t } -func (t *TaskResourceAssert) HasNoSessionParameters() *TaskResourceAssert { - t.AddAssertion(assert.ValueNotSet("session_parameters")) +func (t *TaskResourceAssert) HasNoSearchPath() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("search_path")) return t } @@ -211,6 +626,21 @@ func (t *TaskResourceAssert) HasNoSqlStatement() *TaskResourceAssert { return t } +func (t 
*TaskResourceAssert) HasNoStatementQueuedTimeoutInSeconds() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("statement_queued_timeout_in_seconds")) + return t +} + +func (t *TaskResourceAssert) HasNoStatementTimeoutInSeconds() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("statement_timeout_in_seconds")) + return t +} + +func (t *TaskResourceAssert) HasNoStrictJsonOutput() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("strict_json_output")) + return t +} + func (t *TaskResourceAssert) HasNoSuspendTaskAfterNumFailures() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("suspend_task_after_num_failures")) return t @@ -221,6 +651,86 @@ func (t *TaskResourceAssert) HasNoTaskAutoRetryAttempts() *TaskResourceAssert { return t } +func (t *TaskResourceAssert) HasNoTimeInputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("time_input_format")) + return t +} + +func (t *TaskResourceAssert) HasNoTimeOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("time_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoTimestampDayIsAlways24h() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("timestamp_day_is_always_24h")) + return t +} + +func (t *TaskResourceAssert) HasNoTimestampInputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("timestamp_input_format")) + return t +} + +func (t *TaskResourceAssert) HasNoTimestampLtzOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("timestamp_ltz_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoTimestampNtzOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("timestamp_ntz_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoTimestampOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("timestamp_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoTimestampTypeMapping() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("timestamp_type_mapping")) + return t +} + +func (t *TaskResourceAssert) HasNoTimestampTzOutputFormat() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("timestamp_tz_output_format")) + return t +} + +func (t *TaskResourceAssert) HasNoTimezone() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("timezone")) + return t +} + +func (t *TaskResourceAssert) HasNoTraceLevel() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("trace_level")) + return t +} + +func (t *TaskResourceAssert) HasNoTransactionAbortOnError() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("transaction_abort_on_error")) + return t +} + +func (t *TaskResourceAssert) HasNoTransactionDefaultIsolationLevel() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("transaction_default_isolation_level")) + return t +} + +func (t *TaskResourceAssert) HasNoTwoDigitCenturyStart() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("two_digit_century_start")) + return t +} + +func (t *TaskResourceAssert) HasNoUnsupportedDdlAction() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("unsupported_ddl_action")) + return t +} + +func (t *TaskResourceAssert) HasNoUseCachedResult() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("use_cached_result")) + return t +} + func (t *TaskResourceAssert) HasNoUserTaskManagedInitialWarehouseSize() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("user_task_managed_initial_warehouse_size")) return t @@ -241,6 +751,16 @@ func (t *TaskResourceAssert) 
HasNoWarehouse() *TaskResourceAssert { return t } +func (t *TaskResourceAssert) HasNoWeekOfYearPolicy() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("week_of_year_policy")) + return t +} + +func (t *TaskResourceAssert) HasNoWeekStart() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("week_start")) + return t +} + func (t *TaskResourceAssert) HasNoWhen() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("when")) return t diff --git a/pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go index 463151941b..45a2def403 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go @@ -1,8 +1,9 @@ package resourceparametersassert import ( - "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "strings" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" ) func (u *TaskResourceParametersAssert) HasAllDefaults() *TaskResourceParametersAssert { diff --git a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go index 6d99220a58..6cb2885b1c 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go @@ -2,9 +2,10 @@ package resourceshowoutputassert import ( "fmt" + "strconv" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" - "strconv" ) func (t *TaskShowOutputAssert) HasCreatedOnNotEmpty() *TaskShowOutputAssert { diff --git a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go index a150ce6084..b11e40af69 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go @@ -1,8 +1,10 @@ -//Code generated by assertions generator; DO NOT EDIT. +// Code generated by assertions generator; DO NOT EDIT. 
package resourceshowoutputassert import ( + "fmt" + "strconv" "testing" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" @@ -76,7 +78,7 @@ func (t *TaskShowOutputAssert) HasComment(expected string) *TaskShowOutputAssert } func (t *TaskShowOutputAssert) HasWarehouse(expected string) *TaskShowOutputAssert { - t.AddAssertion(assert.ResourceShowOutputValueSet("warehouse", expected)) + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("warehouse", expected)) return t } @@ -85,11 +87,6 @@ func (t *TaskShowOutputAssert) HasSchedule(expected string) *TaskShowOutputAsser return t } -//func (t *TaskShowOutputAssert) HasPredecessors(expected []sdk.SchemaObjectIdentifier) *TaskShowOutputAssert { -// t.AddAssertion(assert.ResourceShowOutputValueSet("predecessors", collections.Map(expected, sdk.SchemaObjectIdentifier.FullyQualifiedName))) -// return t -//} - func (t *TaskShowOutputAssert) HasState(expected sdk.TaskState) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("state", expected)) return t @@ -110,8 +107,8 @@ func (t *TaskShowOutputAssert) HasAllowOverlappingExecution(expected bool) *Task return t } -func (t *TaskShowOutputAssert) HasErrorIntegration(expected sdk.AccountObjectIdentifier) *TaskShowOutputAssert { - t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("error_integration", expected.Name())) +func (t *TaskShowOutputAssert) HasErrorIntegration(expected string) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("error_integration", expected)) return t } @@ -140,10 +137,20 @@ func (t *TaskShowOutputAssert) HasBudget(expected string) *TaskShowOutputAssert return t } -//func (t *TaskShowOutputAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskShowOutputAssert { -// t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations", expected)) -// return t -//} +func (t *TaskShowOutputAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.#", "1")) + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.predecessors.#", strconv.Itoa(len(expected.Predecessors)))) + for i, predecessor := range expected.Predecessors { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet(fmt.Sprintf("task_relations.0.predecessors.%d", i), predecessor.FullyQualifiedName())) + } + if expected.FinalizerTask != nil && len(expected.FinalizerTask.Name()) > 0 { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.finalizer", expected.FinalizerTask.FullyQualifiedName())) + } + if expected.FinalizedRootTask != nil && len(expected.FinalizedRootTask.Name()) > 0 { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.finalized_root_task", expected.FinalizedRootTask.FullyQualifiedName())) + } + return t +} func (t *TaskShowOutputAssert) HasLastSuspendedReason(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("last_suspended_reason", expected)) diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go index 34dce36081..37560fb839 100644 --- a/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go @@ -4,6 +4,7 @@ import ( 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + tfconfig "github.com/hashicorp/terraform-plugin-testing/config" ) func TaskWithId(resourceName string, id sdk.SchemaObjectIdentifier, sqlStatement string) *TaskModel { @@ -14,3 +15,58 @@ func TaskWithId(resourceName string, id sdk.SchemaObjectIdentifier, sqlStatement t.WithSqlStatement(sqlStatement) return t } + +func (t *TaskModel) WithBinaryInputFormatEnum(binaryInputFormat sdk.BinaryInputFormat) *TaskModel { + t.BinaryInputFormat = tfconfig.StringVariable(string(binaryInputFormat)) + return t +} + +func (t *TaskModel) WithBinaryOutputFormatEnum(binaryOutputFormat sdk.BinaryOutputFormat) *TaskModel { + t.BinaryOutputFormat = tfconfig.StringVariable(string(binaryOutputFormat)) + return t +} + +func (t *TaskModel) WithClientTimestampTypeMappingEnum(clientTimestampTypeMapping sdk.ClientTimestampTypeMapping) *TaskModel { + t.ClientTimestampTypeMapping = tfconfig.StringVariable(string(clientTimestampTypeMapping)) + return t +} + +func (t *TaskModel) WithGeographyOutputFormatEnum(geographyOutputFormat sdk.GeographyOutputFormat) *TaskModel { + t.GeographyOutputFormat = tfconfig.StringVariable(string(geographyOutputFormat)) + return t +} + +func (t *TaskModel) WithGeometryOutputFormatEnum(geometryOutputFormat sdk.GeometryOutputFormat) *TaskModel { + t.GeometryOutputFormat = tfconfig.StringVariable(string(geometryOutputFormat)) + return t +} + +func (t *TaskModel) WithLogLevelEnum(logLevel sdk.LogLevel) *TaskModel { + t.LogLevel = tfconfig.StringVariable(string(logLevel)) + return t +} + +func (t *TaskModel) WithTimestampTypeMappingEnum(timestampTypeMapping sdk.TimestampTypeMapping) *TaskModel { + t.TimestampTypeMapping = tfconfig.StringVariable(string(timestampTypeMapping)) + return t +} + +func (t *TaskModel) WithTraceLevelEnum(traceLevel sdk.TraceLevel) *TaskModel { + t.TraceLevel = tfconfig.StringVariable(string(traceLevel)) + return t +} + +func (t *TaskModel) WithTransactionDefaultIsolationLevelEnum(transactionDefaultIsolationLevel sdk.TransactionDefaultIsolationLevel) *TaskModel { + t.TransactionDefaultIsolationLevel = tfconfig.StringVariable(string(transactionDefaultIsolationLevel)) + return t +} + +func (t *TaskModel) WithUnsupportedDdlActionEnum(unsupportedDdlAction sdk.UnsupportedDDLAction) *TaskModel { + t.UnsupportedDdlAction = tfconfig.StringVariable(string(unsupportedDdlAction)) + return t +} + +func (t *TaskModel) WithUserTaskManagedInitialWarehouseSizeEnum(warehouseSize sdk.WarehouseSize) *TaskModel { + t.UserTaskManagedInitialWarehouseSize = tfconfig.StringVariable(string(warehouseSize)) + return t +} diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go index ec5ab9477d..4cbc0f8c8d 100644 --- a/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go @@ -10,27 +10,79 @@ import ( ) type TaskModel struct { - After tfconfig.Variable `json:"after,omitempty"` - AllowOverlappingExecution tfconfig.Variable `json:"allow_overlapping_execution,omitempty"` - Comment tfconfig.Variable `json:"comment,omitempty"` - Config tfconfig.Variable `json:"config,omitempty"` - Database tfconfig.Variable `json:"database,omitempty"` - Enabled tfconfig.Variable `json:"enabled,omitempty"` - ErrorIntegration 
tfconfig.Variable `json:"error_integration,omitempty"` - Finalize tfconfig.Variable `json:"finalize,omitempty"` - FullyQualifiedName tfconfig.Variable `json:"fully_qualified_name,omitempty"` - Name tfconfig.Variable `json:"name,omitempty"` - Schedule tfconfig.Variable `json:"schedule,omitempty"` - Schema tfconfig.Variable `json:"schema,omitempty"` - SessionParameters tfconfig.Variable `json:"session_parameters,omitempty"` - SqlStatement tfconfig.Variable `json:"sql_statement,omitempty"` - SuspendTaskAfterNumFailures tfconfig.Variable `json:"suspend_task_after_num_failures,omitempty"` - TaskAutoRetryAttempts tfconfig.Variable `json:"task_auto_retry_attempts,omitempty"` - UserTaskManagedInitialWarehouseSize tfconfig.Variable `json:"user_task_managed_initial_warehouse_size,omitempty"` - UserTaskMinimumTriggerIntervalInSeconds tfconfig.Variable `json:"user_task_minimum_trigger_interval_in_seconds,omitempty"` - UserTaskTimeoutMs tfconfig.Variable `json:"user_task_timeout_ms,omitempty"` - Warehouse tfconfig.Variable `json:"warehouse,omitempty"` - When tfconfig.Variable `json:"when,omitempty"` + AbortDetachedQuery tfconfig.Variable `json:"abort_detached_query,omitempty"` + After tfconfig.Variable `json:"after,omitempty"` + AllowOverlappingExecution tfconfig.Variable `json:"allow_overlapping_execution,omitempty"` + Autocommit tfconfig.Variable `json:"autocommit,omitempty"` + BinaryInputFormat tfconfig.Variable `json:"binary_input_format,omitempty"` + BinaryOutputFormat tfconfig.Variable `json:"binary_output_format,omitempty"` + ClientMemoryLimit tfconfig.Variable `json:"client_memory_limit,omitempty"` + ClientMetadataRequestUseConnectionCtx tfconfig.Variable `json:"client_metadata_request_use_connection_ctx,omitempty"` + ClientPrefetchThreads tfconfig.Variable `json:"client_prefetch_threads,omitempty"` + ClientResultChunkSize tfconfig.Variable `json:"client_result_chunk_size,omitempty"` + ClientResultColumnCaseInsensitive tfconfig.Variable `json:"client_result_column_case_insensitive,omitempty"` + ClientSessionKeepAlive tfconfig.Variable `json:"client_session_keep_alive,omitempty"` + ClientSessionKeepAliveHeartbeatFrequency tfconfig.Variable `json:"client_session_keep_alive_heartbeat_frequency,omitempty"` + ClientTimestampTypeMapping tfconfig.Variable `json:"client_timestamp_type_mapping,omitempty"` + Comment tfconfig.Variable `json:"comment,omitempty"` + Config tfconfig.Variable `json:"config,omitempty"` + Database tfconfig.Variable `json:"database,omitempty"` + DateInputFormat tfconfig.Variable `json:"date_input_format,omitempty"` + DateOutputFormat tfconfig.Variable `json:"date_output_format,omitempty"` + EnableUnloadPhysicalTypeOptimization tfconfig.Variable `json:"enable_unload_physical_type_optimization,omitempty"` + Enabled tfconfig.Variable `json:"enabled,omitempty"` + ErrorIntegration tfconfig.Variable `json:"error_integration,omitempty"` + ErrorOnNondeterministicMerge tfconfig.Variable `json:"error_on_nondeterministic_merge,omitempty"` + ErrorOnNondeterministicUpdate tfconfig.Variable `json:"error_on_nondeterministic_update,omitempty"` + Finalize tfconfig.Variable `json:"finalize,omitempty"` + FullyQualifiedName tfconfig.Variable `json:"fully_qualified_name,omitempty"` + GeographyOutputFormat tfconfig.Variable `json:"geography_output_format,omitempty"` + GeometryOutputFormat tfconfig.Variable `json:"geometry_output_format,omitempty"` + JdbcTreatTimestampNtzAsUtc tfconfig.Variable `json:"jdbc_treat_timestamp_ntz_as_utc,omitempty"` + JdbcUseSessionTimezone tfconfig.Variable 
`json:"jdbc_use_session_timezone,omitempty"` + JsonIndent tfconfig.Variable `json:"json_indent,omitempty"` + LockTimeout tfconfig.Variable `json:"lock_timeout,omitempty"` + LogLevel tfconfig.Variable `json:"log_level,omitempty"` + MultiStatementCount tfconfig.Variable `json:"multi_statement_count,omitempty"` + Name tfconfig.Variable `json:"name,omitempty"` + NoorderSequenceAsDefault tfconfig.Variable `json:"noorder_sequence_as_default,omitempty"` + OdbcTreatDecimalAsInt tfconfig.Variable `json:"odbc_treat_decimal_as_int,omitempty"` + QueryTag tfconfig.Variable `json:"query_tag,omitempty"` + QuotedIdentifiersIgnoreCase tfconfig.Variable `json:"quoted_identifiers_ignore_case,omitempty"` + RowsPerResultset tfconfig.Variable `json:"rows_per_resultset,omitempty"` + S3StageVpceDnsName tfconfig.Variable `json:"s3_stage_vpce_dns_name,omitempty"` + Schedule tfconfig.Variable `json:"schedule,omitempty"` + Schema tfconfig.Variable `json:"schema,omitempty"` + SearchPath tfconfig.Variable `json:"search_path,omitempty"` + SqlStatement tfconfig.Variable `json:"sql_statement,omitempty"` + StatementQueuedTimeoutInSeconds tfconfig.Variable `json:"statement_queued_timeout_in_seconds,omitempty"` + StatementTimeoutInSeconds tfconfig.Variable `json:"statement_timeout_in_seconds,omitempty"` + StrictJsonOutput tfconfig.Variable `json:"strict_json_output,omitempty"` + SuspendTaskAfterNumFailures tfconfig.Variable `json:"suspend_task_after_num_failures,omitempty"` + TaskAutoRetryAttempts tfconfig.Variable `json:"task_auto_retry_attempts,omitempty"` + TimeInputFormat tfconfig.Variable `json:"time_input_format,omitempty"` + TimeOutputFormat tfconfig.Variable `json:"time_output_format,omitempty"` + TimestampDayIsAlways24h tfconfig.Variable `json:"timestamp_day_is_always_24h,omitempty"` + TimestampInputFormat tfconfig.Variable `json:"timestamp_input_format,omitempty"` + TimestampLtzOutputFormat tfconfig.Variable `json:"timestamp_ltz_output_format,omitempty"` + TimestampNtzOutputFormat tfconfig.Variable `json:"timestamp_ntz_output_format,omitempty"` + TimestampOutputFormat tfconfig.Variable `json:"timestamp_output_format,omitempty"` + TimestampTypeMapping tfconfig.Variable `json:"timestamp_type_mapping,omitempty"` + TimestampTzOutputFormat tfconfig.Variable `json:"timestamp_tz_output_format,omitempty"` + Timezone tfconfig.Variable `json:"timezone,omitempty"` + TraceLevel tfconfig.Variable `json:"trace_level,omitempty"` + TransactionAbortOnError tfconfig.Variable `json:"transaction_abort_on_error,omitempty"` + TransactionDefaultIsolationLevel tfconfig.Variable `json:"transaction_default_isolation_level,omitempty"` + TwoDigitCenturyStart tfconfig.Variable `json:"two_digit_century_start,omitempty"` + UnsupportedDdlAction tfconfig.Variable `json:"unsupported_ddl_action,omitempty"` + UseCachedResult tfconfig.Variable `json:"use_cached_result,omitempty"` + UserTaskManagedInitialWarehouseSize tfconfig.Variable `json:"user_task_managed_initial_warehouse_size,omitempty"` + UserTaskMinimumTriggerIntervalInSeconds tfconfig.Variable `json:"user_task_minimum_trigger_interval_in_seconds,omitempty"` + UserTaskTimeoutMs tfconfig.Variable `json:"user_task_timeout_ms,omitempty"` + Warehouse tfconfig.Variable `json:"warehouse,omitempty"` + WeekOfYearPolicy tfconfig.Variable `json:"week_of_year_policy,omitempty"` + WeekStart tfconfig.Variable `json:"week_start,omitempty"` + When tfconfig.Variable `json:"when,omitempty"` *config.ResourceModelMeta } @@ -72,10 +124,70 @@ func TaskWithDefaultMeta( // below all the proper values // 
///////////////////////////////// +func (t *TaskModel) WithAbortDetachedQuery(abortDetachedQuery bool) *TaskModel { + t.AbortDetachedQuery = tfconfig.BoolVariable(abortDetachedQuery) + return t +} + // after attribute type is not yet supported, so WithAfter can't be generated -func (t *TaskModel) WithAllowOverlappingExecution(allowOverlappingExecution bool) *TaskModel { - t.AllowOverlappingExecution = tfconfig.BoolVariable(allowOverlappingExecution) +func (t *TaskModel) WithAllowOverlappingExecution(allowOverlappingExecution string) *TaskModel { + t.AllowOverlappingExecution = tfconfig.StringVariable(allowOverlappingExecution) + return t +} + +func (t *TaskModel) WithAutocommit(autocommit bool) *TaskModel { + t.Autocommit = tfconfig.BoolVariable(autocommit) + return t +} + +func (t *TaskModel) WithBinaryInputFormat(binaryInputFormat string) *TaskModel { + t.BinaryInputFormat = tfconfig.StringVariable(binaryInputFormat) + return t +} + +func (t *TaskModel) WithBinaryOutputFormat(binaryOutputFormat string) *TaskModel { + t.BinaryOutputFormat = tfconfig.StringVariable(binaryOutputFormat) + return t +} + +func (t *TaskModel) WithClientMemoryLimit(clientMemoryLimit int) *TaskModel { + t.ClientMemoryLimit = tfconfig.IntegerVariable(clientMemoryLimit) + return t +} + +func (t *TaskModel) WithClientMetadataRequestUseConnectionCtx(clientMetadataRequestUseConnectionCtx bool) *TaskModel { + t.ClientMetadataRequestUseConnectionCtx = tfconfig.BoolVariable(clientMetadataRequestUseConnectionCtx) + return t +} + +func (t *TaskModel) WithClientPrefetchThreads(clientPrefetchThreads int) *TaskModel { + t.ClientPrefetchThreads = tfconfig.IntegerVariable(clientPrefetchThreads) + return t +} + +func (t *TaskModel) WithClientResultChunkSize(clientResultChunkSize int) *TaskModel { + t.ClientResultChunkSize = tfconfig.IntegerVariable(clientResultChunkSize) + return t +} + +func (t *TaskModel) WithClientResultColumnCaseInsensitive(clientResultColumnCaseInsensitive bool) *TaskModel { + t.ClientResultColumnCaseInsensitive = tfconfig.BoolVariable(clientResultColumnCaseInsensitive) + return t +} + +func (t *TaskModel) WithClientSessionKeepAlive(clientSessionKeepAlive bool) *TaskModel { + t.ClientSessionKeepAlive = tfconfig.BoolVariable(clientSessionKeepAlive) + return t +} + +func (t *TaskModel) WithClientSessionKeepAliveHeartbeatFrequency(clientSessionKeepAliveHeartbeatFrequency int) *TaskModel { + t.ClientSessionKeepAliveHeartbeatFrequency = tfconfig.IntegerVariable(clientSessionKeepAliveHeartbeatFrequency) + return t +} + +func (t *TaskModel) WithClientTimestampTypeMapping(clientTimestampTypeMapping string) *TaskModel { + t.ClientTimestampTypeMapping = tfconfig.StringVariable(clientTimestampTypeMapping) return t } @@ -94,6 +206,21 @@ func (t *TaskModel) WithDatabase(database string) *TaskModel { return t } +func (t *TaskModel) WithDateInputFormat(dateInputFormat string) *TaskModel { + t.DateInputFormat = tfconfig.StringVariable(dateInputFormat) + return t +} + +func (t *TaskModel) WithDateOutputFormat(dateOutputFormat string) *TaskModel { + t.DateOutputFormat = tfconfig.StringVariable(dateOutputFormat) + return t +} + +func (t *TaskModel) WithEnableUnloadPhysicalTypeOptimization(enableUnloadPhysicalTypeOptimization bool) *TaskModel { + t.EnableUnloadPhysicalTypeOptimization = tfconfig.BoolVariable(enableUnloadPhysicalTypeOptimization) + return t +} + func (t *TaskModel) WithEnabled(enabled string) *TaskModel { t.Enabled = tfconfig.StringVariable(enabled) return t @@ -104,18 +231,101 @@ func (t *TaskModel) 
WithErrorIntegration(errorIntegration string) *TaskModel { return t } -// finalize attribute type is not yet supported, so WithFinalize can't be generated +func (t *TaskModel) WithErrorOnNondeterministicMerge(errorOnNondeterministicMerge bool) *TaskModel { + t.ErrorOnNondeterministicMerge = tfconfig.BoolVariable(errorOnNondeterministicMerge) + return t +} + +func (t *TaskModel) WithErrorOnNondeterministicUpdate(errorOnNondeterministicUpdate bool) *TaskModel { + t.ErrorOnNondeterministicUpdate = tfconfig.BoolVariable(errorOnNondeterministicUpdate) + return t +} + +func (t *TaskModel) WithFinalize(finalize string) *TaskModel { + t.Finalize = tfconfig.StringVariable(finalize) + return t +} func (t *TaskModel) WithFullyQualifiedName(fullyQualifiedName string) *TaskModel { t.FullyQualifiedName = tfconfig.StringVariable(fullyQualifiedName) return t } +func (t *TaskModel) WithGeographyOutputFormat(geographyOutputFormat string) *TaskModel { + t.GeographyOutputFormat = tfconfig.StringVariable(geographyOutputFormat) + return t +} + +func (t *TaskModel) WithGeometryOutputFormat(geometryOutputFormat string) *TaskModel { + t.GeometryOutputFormat = tfconfig.StringVariable(geometryOutputFormat) + return t +} + +func (t *TaskModel) WithJdbcTreatTimestampNtzAsUtc(jdbcTreatTimestampNtzAsUtc bool) *TaskModel { + t.JdbcTreatTimestampNtzAsUtc = tfconfig.BoolVariable(jdbcTreatTimestampNtzAsUtc) + return t +} + +func (t *TaskModel) WithJdbcUseSessionTimezone(jdbcUseSessionTimezone bool) *TaskModel { + t.JdbcUseSessionTimezone = tfconfig.BoolVariable(jdbcUseSessionTimezone) + return t +} + +func (t *TaskModel) WithJsonIndent(jsonIndent int) *TaskModel { + t.JsonIndent = tfconfig.IntegerVariable(jsonIndent) + return t +} + +func (t *TaskModel) WithLockTimeout(lockTimeout int) *TaskModel { + t.LockTimeout = tfconfig.IntegerVariable(lockTimeout) + return t +} + +func (t *TaskModel) WithLogLevel(logLevel string) *TaskModel { + t.LogLevel = tfconfig.StringVariable(logLevel) + return t +} + +func (t *TaskModel) WithMultiStatementCount(multiStatementCount int) *TaskModel { + t.MultiStatementCount = tfconfig.IntegerVariable(multiStatementCount) + return t +} + func (t *TaskModel) WithName(name string) *TaskModel { t.Name = tfconfig.StringVariable(name) return t } +func (t *TaskModel) WithNoorderSequenceAsDefault(noorderSequenceAsDefault bool) *TaskModel { + t.NoorderSequenceAsDefault = tfconfig.BoolVariable(noorderSequenceAsDefault) + return t +} + +func (t *TaskModel) WithOdbcTreatDecimalAsInt(odbcTreatDecimalAsInt bool) *TaskModel { + t.OdbcTreatDecimalAsInt = tfconfig.BoolVariable(odbcTreatDecimalAsInt) + return t +} + +func (t *TaskModel) WithQueryTag(queryTag string) *TaskModel { + t.QueryTag = tfconfig.StringVariable(queryTag) + return t +} + +func (t *TaskModel) WithQuotedIdentifiersIgnoreCase(quotedIdentifiersIgnoreCase bool) *TaskModel { + t.QuotedIdentifiersIgnoreCase = tfconfig.BoolVariable(quotedIdentifiersIgnoreCase) + return t +} + +func (t *TaskModel) WithRowsPerResultset(rowsPerResultset int) *TaskModel { + t.RowsPerResultset = tfconfig.IntegerVariable(rowsPerResultset) + return t +} + +func (t *TaskModel) WithS3StageVpceDnsName(s3StageVpceDnsName string) *TaskModel { + t.S3StageVpceDnsName = tfconfig.StringVariable(s3StageVpceDnsName) + return t +} + func (t *TaskModel) WithSchedule(schedule string) *TaskModel { t.Schedule = tfconfig.StringVariable(schedule) return t @@ -126,13 +336,31 @@ func (t *TaskModel) WithSchema(schema string) *TaskModel { return t } -// session_parameters attribute type is not 
yet supported, so WithSessionParameters can't be generated +func (t *TaskModel) WithSearchPath(searchPath string) *TaskModel { + t.SearchPath = tfconfig.StringVariable(searchPath) + return t +} func (t *TaskModel) WithSqlStatement(sqlStatement string) *TaskModel { t.SqlStatement = tfconfig.StringVariable(sqlStatement) return t } +func (t *TaskModel) WithStatementQueuedTimeoutInSeconds(statementQueuedTimeoutInSeconds int) *TaskModel { + t.StatementQueuedTimeoutInSeconds = tfconfig.IntegerVariable(statementQueuedTimeoutInSeconds) + return t +} + +func (t *TaskModel) WithStatementTimeoutInSeconds(statementTimeoutInSeconds int) *TaskModel { + t.StatementTimeoutInSeconds = tfconfig.IntegerVariable(statementTimeoutInSeconds) + return t +} + +func (t *TaskModel) WithStrictJsonOutput(strictJsonOutput bool) *TaskModel { + t.StrictJsonOutput = tfconfig.BoolVariable(strictJsonOutput) + return t +} + func (t *TaskModel) WithSuspendTaskAfterNumFailures(suspendTaskAfterNumFailures int) *TaskModel { t.SuspendTaskAfterNumFailures = tfconfig.IntegerVariable(suspendTaskAfterNumFailures) return t @@ -143,6 +371,86 @@ func (t *TaskModel) WithTaskAutoRetryAttempts(taskAutoRetryAttempts int) *TaskMo return t } +func (t *TaskModel) WithTimeInputFormat(timeInputFormat string) *TaskModel { + t.TimeInputFormat = tfconfig.StringVariable(timeInputFormat) + return t +} + +func (t *TaskModel) WithTimeOutputFormat(timeOutputFormat string) *TaskModel { + t.TimeOutputFormat = tfconfig.StringVariable(timeOutputFormat) + return t +} + +func (t *TaskModel) WithTimestampDayIsAlways24h(timestampDayIsAlways24h bool) *TaskModel { + t.TimestampDayIsAlways24h = tfconfig.BoolVariable(timestampDayIsAlways24h) + return t +} + +func (t *TaskModel) WithTimestampInputFormat(timestampInputFormat string) *TaskModel { + t.TimestampInputFormat = tfconfig.StringVariable(timestampInputFormat) + return t +} + +func (t *TaskModel) WithTimestampLtzOutputFormat(timestampLtzOutputFormat string) *TaskModel { + t.TimestampLtzOutputFormat = tfconfig.StringVariable(timestampLtzOutputFormat) + return t +} + +func (t *TaskModel) WithTimestampNtzOutputFormat(timestampNtzOutputFormat string) *TaskModel { + t.TimestampNtzOutputFormat = tfconfig.StringVariable(timestampNtzOutputFormat) + return t +} + +func (t *TaskModel) WithTimestampOutputFormat(timestampOutputFormat string) *TaskModel { + t.TimestampOutputFormat = tfconfig.StringVariable(timestampOutputFormat) + return t +} + +func (t *TaskModel) WithTimestampTypeMapping(timestampTypeMapping string) *TaskModel { + t.TimestampTypeMapping = tfconfig.StringVariable(timestampTypeMapping) + return t +} + +func (t *TaskModel) WithTimestampTzOutputFormat(timestampTzOutputFormat string) *TaskModel { + t.TimestampTzOutputFormat = tfconfig.StringVariable(timestampTzOutputFormat) + return t +} + +func (t *TaskModel) WithTimezone(timezone string) *TaskModel { + t.Timezone = tfconfig.StringVariable(timezone) + return t +} + +func (t *TaskModel) WithTraceLevel(traceLevel string) *TaskModel { + t.TraceLevel = tfconfig.StringVariable(traceLevel) + return t +} + +func (t *TaskModel) WithTransactionAbortOnError(transactionAbortOnError bool) *TaskModel { + t.TransactionAbortOnError = tfconfig.BoolVariable(transactionAbortOnError) + return t +} + +func (t *TaskModel) WithTransactionDefaultIsolationLevel(transactionDefaultIsolationLevel string) *TaskModel { + t.TransactionDefaultIsolationLevel = tfconfig.StringVariable(transactionDefaultIsolationLevel) + return t +} + +func (t *TaskModel) 
WithTwoDigitCenturyStart(twoDigitCenturyStart int) *TaskModel { + t.TwoDigitCenturyStart = tfconfig.IntegerVariable(twoDigitCenturyStart) + return t +} + +func (t *TaskModel) WithUnsupportedDdlAction(unsupportedDdlAction string) *TaskModel { + t.UnsupportedDdlAction = tfconfig.StringVariable(unsupportedDdlAction) + return t +} + +func (t *TaskModel) WithUseCachedResult(useCachedResult bool) *TaskModel { + t.UseCachedResult = tfconfig.BoolVariable(useCachedResult) + return t +} + func (t *TaskModel) WithUserTaskManagedInitialWarehouseSize(userTaskManagedInitialWarehouseSize string) *TaskModel { t.UserTaskManagedInitialWarehouseSize = tfconfig.StringVariable(userTaskManagedInitialWarehouseSize) return t @@ -163,6 +471,16 @@ func (t *TaskModel) WithWarehouse(warehouse string) *TaskModel { return t } +func (t *TaskModel) WithWeekOfYearPolicy(weekOfYearPolicy int) *TaskModel { + t.WeekOfYearPolicy = tfconfig.IntegerVariable(weekOfYearPolicy) + return t +} + +func (t *TaskModel) WithWeekStart(weekStart int) *TaskModel { + t.WeekStart = tfconfig.IntegerVariable(weekStart) + return t +} + func (t *TaskModel) WithWhen(when string) *TaskModel { t.When = tfconfig.StringVariable(when) return t @@ -172,6 +490,11 @@ func (t *TaskModel) WithWhen(when string) *TaskModel { // below it's possible to set any value // ////////////////////////////////////////// +func (t *TaskModel) WithAbortDetachedQueryValue(value tfconfig.Variable) *TaskModel { + t.AbortDetachedQuery = value + return t +} + func (t *TaskModel) WithAfterValue(value tfconfig.Variable) *TaskModel { t.After = value return t @@ -182,6 +505,61 @@ func (t *TaskModel) WithAllowOverlappingExecutionValue(value tfconfig.Variable) return t } +func (t *TaskModel) WithAutocommitValue(value tfconfig.Variable) *TaskModel { + t.Autocommit = value + return t +} + +func (t *TaskModel) WithBinaryInputFormatValue(value tfconfig.Variable) *TaskModel { + t.BinaryInputFormat = value + return t +} + +func (t *TaskModel) WithBinaryOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.BinaryOutputFormat = value + return t +} + +func (t *TaskModel) WithClientMemoryLimitValue(value tfconfig.Variable) *TaskModel { + t.ClientMemoryLimit = value + return t +} + +func (t *TaskModel) WithClientMetadataRequestUseConnectionCtxValue(value tfconfig.Variable) *TaskModel { + t.ClientMetadataRequestUseConnectionCtx = value + return t +} + +func (t *TaskModel) WithClientPrefetchThreadsValue(value tfconfig.Variable) *TaskModel { + t.ClientPrefetchThreads = value + return t +} + +func (t *TaskModel) WithClientResultChunkSizeValue(value tfconfig.Variable) *TaskModel { + t.ClientResultChunkSize = value + return t +} + +func (t *TaskModel) WithClientResultColumnCaseInsensitiveValue(value tfconfig.Variable) *TaskModel { + t.ClientResultColumnCaseInsensitive = value + return t +} + +func (t *TaskModel) WithClientSessionKeepAliveValue(value tfconfig.Variable) *TaskModel { + t.ClientSessionKeepAlive = value + return t +} + +func (t *TaskModel) WithClientSessionKeepAliveHeartbeatFrequencyValue(value tfconfig.Variable) *TaskModel { + t.ClientSessionKeepAliveHeartbeatFrequency = value + return t +} + +func (t *TaskModel) WithClientTimestampTypeMappingValue(value tfconfig.Variable) *TaskModel { + t.ClientTimestampTypeMapping = value + return t +} + func (t *TaskModel) WithCommentValue(value tfconfig.Variable) *TaskModel { t.Comment = value return t @@ -197,6 +575,21 @@ func (t *TaskModel) WithDatabaseValue(value tfconfig.Variable) *TaskModel { return t } +func (t *TaskModel) 
WithDateInputFormatValue(value tfconfig.Variable) *TaskModel { + t.DateInputFormat = value + return t +} + +func (t *TaskModel) WithDateOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.DateOutputFormat = value + return t +} + +func (t *TaskModel) WithEnableUnloadPhysicalTypeOptimizationValue(value tfconfig.Variable) *TaskModel { + t.EnableUnloadPhysicalTypeOptimization = value + return t +} + func (t *TaskModel) WithEnabledValue(value tfconfig.Variable) *TaskModel { t.Enabled = value return t @@ -207,6 +600,16 @@ func (t *TaskModel) WithErrorIntegrationValue(value tfconfig.Variable) *TaskMode return t } +func (t *TaskModel) WithErrorOnNondeterministicMergeValue(value tfconfig.Variable) *TaskModel { + t.ErrorOnNondeterministicMerge = value + return t +} + +func (t *TaskModel) WithErrorOnNondeterministicUpdateValue(value tfconfig.Variable) *TaskModel { + t.ErrorOnNondeterministicUpdate = value + return t +} + func (t *TaskModel) WithFinalizeValue(value tfconfig.Variable) *TaskModel { t.Finalize = value return t @@ -217,11 +620,81 @@ func (t *TaskModel) WithFullyQualifiedNameValue(value tfconfig.Variable) *TaskMo return t } +func (t *TaskModel) WithGeographyOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.GeographyOutputFormat = value + return t +} + +func (t *TaskModel) WithGeometryOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.GeometryOutputFormat = value + return t +} + +func (t *TaskModel) WithJdbcTreatTimestampNtzAsUtcValue(value tfconfig.Variable) *TaskModel { + t.JdbcTreatTimestampNtzAsUtc = value + return t +} + +func (t *TaskModel) WithJdbcUseSessionTimezoneValue(value tfconfig.Variable) *TaskModel { + t.JdbcUseSessionTimezone = value + return t +} + +func (t *TaskModel) WithJsonIndentValue(value tfconfig.Variable) *TaskModel { + t.JsonIndent = value + return t +} + +func (t *TaskModel) WithLockTimeoutValue(value tfconfig.Variable) *TaskModel { + t.LockTimeout = value + return t +} + +func (t *TaskModel) WithLogLevelValue(value tfconfig.Variable) *TaskModel { + t.LogLevel = value + return t +} + +func (t *TaskModel) WithMultiStatementCountValue(value tfconfig.Variable) *TaskModel { + t.MultiStatementCount = value + return t +} + func (t *TaskModel) WithNameValue(value tfconfig.Variable) *TaskModel { t.Name = value return t } +func (t *TaskModel) WithNoorderSequenceAsDefaultValue(value tfconfig.Variable) *TaskModel { + t.NoorderSequenceAsDefault = value + return t +} + +func (t *TaskModel) WithOdbcTreatDecimalAsIntValue(value tfconfig.Variable) *TaskModel { + t.OdbcTreatDecimalAsInt = value + return t +} + +func (t *TaskModel) WithQueryTagValue(value tfconfig.Variable) *TaskModel { + t.QueryTag = value + return t +} + +func (t *TaskModel) WithQuotedIdentifiersIgnoreCaseValue(value tfconfig.Variable) *TaskModel { + t.QuotedIdentifiersIgnoreCase = value + return t +} + +func (t *TaskModel) WithRowsPerResultsetValue(value tfconfig.Variable) *TaskModel { + t.RowsPerResultset = value + return t +} + +func (t *TaskModel) WithS3StageVpceDnsNameValue(value tfconfig.Variable) *TaskModel { + t.S3StageVpceDnsName = value + return t +} + func (t *TaskModel) WithScheduleValue(value tfconfig.Variable) *TaskModel { t.Schedule = value return t @@ -232,8 +705,8 @@ func (t *TaskModel) WithSchemaValue(value tfconfig.Variable) *TaskModel { return t } -func (t *TaskModel) WithSessionParametersValue(value tfconfig.Variable) *TaskModel { - t.SessionParameters = value +func (t *TaskModel) WithSearchPathValue(value tfconfig.Variable) *TaskModel { + t.SearchPath = value 
return t } @@ -242,6 +715,21 @@ func (t *TaskModel) WithSqlStatementValue(value tfconfig.Variable) *TaskModel { return t } +func (t *TaskModel) WithStatementQueuedTimeoutInSecondsValue(value tfconfig.Variable) *TaskModel { + t.StatementQueuedTimeoutInSeconds = value + return t +} + +func (t *TaskModel) WithStatementTimeoutInSecondsValue(value tfconfig.Variable) *TaskModel { + t.StatementTimeoutInSeconds = value + return t +} + +func (t *TaskModel) WithStrictJsonOutputValue(value tfconfig.Variable) *TaskModel { + t.StrictJsonOutput = value + return t +} + func (t *TaskModel) WithSuspendTaskAfterNumFailuresValue(value tfconfig.Variable) *TaskModel { t.SuspendTaskAfterNumFailures = value return t @@ -252,6 +740,86 @@ func (t *TaskModel) WithTaskAutoRetryAttemptsValue(value tfconfig.Variable) *Tas return t } +func (t *TaskModel) WithTimeInputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimeInputFormat = value + return t +} + +func (t *TaskModel) WithTimeOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimeOutputFormat = value + return t +} + +func (t *TaskModel) WithTimestampDayIsAlways24hValue(value tfconfig.Variable) *TaskModel { + t.TimestampDayIsAlways24h = value + return t +} + +func (t *TaskModel) WithTimestampInputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampInputFormat = value + return t +} + +func (t *TaskModel) WithTimestampLtzOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampLtzOutputFormat = value + return t +} + +func (t *TaskModel) WithTimestampNtzOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampNtzOutputFormat = value + return t +} + +func (t *TaskModel) WithTimestampOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampOutputFormat = value + return t +} + +func (t *TaskModel) WithTimestampTypeMappingValue(value tfconfig.Variable) *TaskModel { + t.TimestampTypeMapping = value + return t +} + +func (t *TaskModel) WithTimestampTzOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampTzOutputFormat = value + return t +} + +func (t *TaskModel) WithTimezoneValue(value tfconfig.Variable) *TaskModel { + t.Timezone = value + return t +} + +func (t *TaskModel) WithTraceLevelValue(value tfconfig.Variable) *TaskModel { + t.TraceLevel = value + return t +} + +func (t *TaskModel) WithTransactionAbortOnErrorValue(value tfconfig.Variable) *TaskModel { + t.TransactionAbortOnError = value + return t +} + +func (t *TaskModel) WithTransactionDefaultIsolationLevelValue(value tfconfig.Variable) *TaskModel { + t.TransactionDefaultIsolationLevel = value + return t +} + +func (t *TaskModel) WithTwoDigitCenturyStartValue(value tfconfig.Variable) *TaskModel { + t.TwoDigitCenturyStart = value + return t +} + +func (t *TaskModel) WithUnsupportedDdlActionValue(value tfconfig.Variable) *TaskModel { + t.UnsupportedDdlAction = value + return t +} + +func (t *TaskModel) WithUseCachedResultValue(value tfconfig.Variable) *TaskModel { + t.UseCachedResult = value + return t +} + func (t *TaskModel) WithUserTaskManagedInitialWarehouseSizeValue(value tfconfig.Variable) *TaskModel { t.UserTaskManagedInitialWarehouseSize = value return t @@ -272,6 +840,16 @@ func (t *TaskModel) WithWarehouseValue(value tfconfig.Variable) *TaskModel { return t } +func (t *TaskModel) WithWeekOfYearPolicyValue(value tfconfig.Variable) *TaskModel { + t.WeekOfYearPolicy = value + return t +} + +func (t *TaskModel) WithWeekStartValue(value tfconfig.Variable) *TaskModel { + t.WeekStart = value + return t +} + func (t *TaskModel) 
WithWhenValue(value tfconfig.Variable) *TaskModel { t.When = value return t diff --git a/pkg/acceptance/helpers/grant_client.go b/pkg/acceptance/helpers/grant_client.go index 4fd7791bfb..9ca7f6bcbc 100644 --- a/pkg/acceptance/helpers/grant_client.go +++ b/pkg/acceptance/helpers/grant_client.go @@ -74,6 +74,34 @@ func (c *GrantClient) RevokePrivilegesOnSchemaObjectFromAccountRole( require.NoError(t, err) } +func (c *GrantClient) GrantPrivilegesOnWarehouseToAccountRole( + t *testing.T, + accountRoleId sdk.AccountObjectIdentifier, + warehouseId sdk.AccountObjectIdentifier, + privileges []sdk.AccountObjectPrivilege, + withGrantOption bool, +) { + t.Helper() + ctx := context.Background() + + err := c.client().GrantPrivilegesToAccountRole( + ctx, + &sdk.AccountRoleGrantPrivileges{ + AccountObjectPrivileges: privileges, + }, + &sdk.AccountRoleGrantOn{ + AccountObject: &sdk.GrantOnAccountObject{ + Warehouse: &warehouseId, + }, + }, + accountRoleId, + &sdk.GrantPrivilegesToAccountRoleOptions{ + WithGrantOption: sdk.Bool(withGrantOption), + }, + ) + require.NoError(t, err) +} + func (c *GrantClient) GrantPrivilegesOnSchemaObjectToAccountRole( t *testing.T, accountRoleId sdk.AccountObjectIdentifier, diff --git a/pkg/resources/resource_monitor.go b/pkg/resources/resource_monitor.go index f2b8d63db0..b0a382ba4f 100644 --- a/pkg/resources/resource_monitor.go +++ b/pkg/resources/resource_monitor.go @@ -101,6 +101,7 @@ func ResourceMonitor() *schema.Resource { ReadContext: ReadResourceMonitor(true), UpdateContext: UpdateResourceMonitor, DeleteContext: DeleteResourceMonitor, + Description: "Resource used to manage resource monitor objects. For more information, check [resource monitor documentation](https://docs.snowflake.com/en/user-guide/resource-monitors).", Schema: resourceMonitorSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 0fcf1f2da4..2bcb967fe9 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -4,13 +4,15 @@ import ( "context" "errors" "fmt" + "log" + "slices" + "strings" + "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "log" - "strings" - "time" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -18,7 +20,6 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) // TODO: Go through descriptions @@ -46,12 +47,17 @@ var taskSchema = map[string]*schema.Schema{ Description: blocklistedCharactersFieldDescription("Specifies the identifier for the task; must be unique for the database and schema in which the task is created."), }, "enabled": { - Type: schema.TypeString, - Optional: true, - Default: BooleanDefault, - ValidateDiagFunc: validateBooleanString, - DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShowWithMapping("state", func(state any) any { return state.(string) == string(sdk.TaskStateStarted) }), - Description: booleanStringFieldDescription("Specifies if the task should be started (enabled) after creation or should remain suspended (default)."), + Type: 
schema.TypeBool, + Required: true, + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShowWithMapping("state", func(state any) any { + log.Printf("The value is diff suppress for state is: %v\n", state) + stateEnum, err := sdk.ToTaskState(state.(string)) + if err != nil { + return false + } + return stateEnum == sdk.TaskStateStarted + }), + Description: "Specifies if the task should be started (enabled) after creation or should remain suspended (default).", }, "warehouse": { Type: schema.TypeString, @@ -61,17 +67,6 @@ var taskSchema = map[string]*schema.Schema{ Description: "The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)", ConflictsWith: []string{"user_task_managed_initial_warehouse_size"}, }, - //"user_task_managed_initial_warehouse_size": { - // Type: schema.TypeString, - // Optional: true, - // ValidateDiagFunc: sdkValidation(sdk.ToWarehouseSize), - // DiffSuppressFunc: SuppressIfAny( - // NormalizeAndCompare(sdk.ToWarehouseSize), - // IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskManagedInitialWarehouseSize))), - // ), - // Description: fmt.Sprintf("Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse)", possibleValuesListed(sdk.ValidWarehouseSizesString)), - // ConflictsWith: []string{"warehouse"}, - //}, "schedule": { Type: schema.TypeString, Optional: true, @@ -85,8 +80,7 @@ var taskSchema = map[string]*schema.Schema{ DiffSuppressFunc: SuppressIfAny( IgnoreChangeToCurrentSnowflakeValueInShow("config"), func(k, oldValue, newValue string, d *schema.ResourceData) bool { - // TODO: Trim left and right instead of replace all + extract - return strings.ReplaceAll(oldValue, "$", "") == strings.ReplaceAll(newValue, "$", "") + return strings.Trim(oldValue, "$") == strings.Trim(newValue, "$") }, ), // TODO: it could be retrieved with system function and show/desc (which should be used?) 
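A minimal standalone sketch (not part of the patch) of how the "$"-trimming suppression above behaves; the helper name configValuesEqual and the sample config values are made up for illustration, under the assumption that one side of the comparison may still carry the surrounding $$ delimiters while the other does not:

package main

import (
	"fmt"
	"strings"
)

// configValuesEqual mirrors the DiffSuppressFunc above: it strips only the
// leading and trailing "$" characters, so a "$" inside the JSON payload
// survives (which the previous strings.ReplaceAll approach would have removed).
func configValuesEqual(oldValue, newValue string) bool {
	return strings.Trim(oldValue, "$") == strings.Trim(newValue, "$")
}

func main() {
	fmt.Println(configValuesEqual(`$${"output_dir": "/tmp/example/"}$$`, `{"output_dir": "/tmp/example/"}`)) // true
	fmt.Println(configValuesEqual(`{"key": "$value"}`, `{"key": "value"}`))                                   // false
}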
@@ -101,28 +95,12 @@ var taskSchema = map[string]*schema.Schema{ DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("allow_overlapping_execution"), Description: booleanStringFieldDescription("By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap."), }, - //"user_task_timeout_ms": { - // Type: schema.TypeInt, - // Optional: true, - // Default: IntDefault, - // ValidateFunc: validation.IntAtLeast(0), - // DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskTimeoutMs))+".0.value"), - // Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds).", - //}, - //"suspend_task_after_num_failures": { - // Type: schema.TypeInt, - // Optional: true, - // Default: IntDefault, - // ValidateFunc: validation.IntAtLeast(0), - // DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterSuspendTaskAfterNumFailures))+".0.value"), - // Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension).", - //}, "error_integration": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: IsValidIdentifier[sdk.AccountObjectIdentifier](), DiffSuppressFunc: SuppressIfAny(suppressIdentifierQuoting, IgnoreChangeToCurrentSnowflakeValueInShow("error_integration")), - Description: "Specifies the name of the notification integration used for error notifications.", + Description: blocklistedCharactersFieldDescription("Specifies the name of the notification integration used for error notifications."), }, "comment": { Type: schema.TypeString, @@ -135,26 +113,11 @@ var taskSchema = map[string]*schema.Schema{ ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), DiffSuppressFunc: SuppressIfAny( suppressIdentifierQuoting, - IgnoreChangeToCurrentSnowflakeValueInShow("task_relations.0.finalize"), + IgnoreChangeToCurrentSnowflakeValueInShow("task_relations.0.finalized_root_task"), ), + Description: blocklistedCharactersFieldDescription("TODO"), ConflictsWith: []string{"schedule", "after"}, }, - //"task_auto_retry_attempts": { - // Type: schema.TypeInt, - // Optional: true, - // Default: IntDefault, - // ValidateFunc: validation.IntAtLeast(0), - // DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterTaskAutoRetryAttempts))+".0.value"), - // Description: "Specifies the number of automatic task graph retry attempts. If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed.", - //}, - "user_task_minimum_trigger_interval_in_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: IntDefault, - ValidateFunc: validation.IntAtLeast(15), - DiffSuppressFunc: IgnoreChangeToCurrentSnowflakePlainValueInOutput(ParametersAttributeName, strings.ToLower(string(sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds))+".0.value"), - Description: "Defines how frequently a task can execute in seconds. 
If data changes occur more often than the specified minimum, changes will be grouped and processed together.", - }, "after": { Type: schema.TypeSet, Elem: &schema.Schema{ @@ -162,10 +125,8 @@ var taskSchema = map[string]*schema.Schema{ DiffSuppressFunc: suppressIdentifierQuoting, ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), }, - Optional: true, - // TODO: Check - // Cannot use IgnoreChangeToCurrentSnowflakeValueInShow because output from predecessors may be ordered - Description: "Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies.", + Optional: true, + Description: blocklistedCharactersFieldDescription("Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies."), ConflictsWith: []string{"schedule", "finalize"}, }, "when": { @@ -174,7 +135,7 @@ var taskSchema = map[string]*schema.Schema{ DiffSuppressFunc: SuppressIfAny(DiffSuppressStatement, IgnoreChangeToCurrentSnowflakeValueInShow("condition")), Description: "Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported.", }, - "sql_statement": { // TODO: Test all possibilities of this field (procedure, procedural logic, single sql statement) + "sql_statement": { Type: schema.TypeString, Required: true, ForceNew: false, @@ -206,6 +167,7 @@ func Task() *schema.Resource { UpdateContext: UpdateTask, ReadContext: ReadTask(true), DeleteContext: DeleteTask, + Description: "Resource used to manage task objects. 
For more information, check [task documentation](https://docs.snowflake.com/en/user-guide/tasks-intro).", Schema: helpers.MergeMaps(taskSchema, taskParametersSchema), Importer: &schema.ResourceImporter{ @@ -239,10 +201,7 @@ func ImportTask(ctx context.Context, d *schema.ResourceData, meta any) ([]*schem return nil, err } - if err = errors.Join( - d.Set("enabled", booleanStringFromBool(task.State == sdk.TaskStateStarted)), - d.Set("allow_overlapping_execution", booleanStringFromBool(task.AllowOverlappingExecution)), - ); err != nil { + if err := d.Set("allow_overlapping_execution", booleanStringFromBool(task.AllowOverlappingExecution)); err != nil { return nil, err } @@ -258,6 +217,8 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag id := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, name) req := sdk.NewCreateTaskRequest(id, d.Get("sql_statement").(string)) + tasksToResume := make([]sdk.SchemaObjectIdentifier, 0) + if v, ok := d.GetOk("warehouse"); ok { warehouseId, err := sdk.ParseAccountObjectIdentifier(v.(string)) if err != nil { @@ -282,22 +243,6 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag req.WithAllowOverlappingExecution(parsedBool) } - //if v, ok := d.GetOk("session_parameters"); ok { - // sessionParameters, err := sdk.GetSessionParametersFrom(v.(map[string]any)) - // if err != nil { - // return diag.FromErr(err) - // } - // req.WithSessionParameters(*sessionParameters) - //} - - //if v := d.Get("user_task_timeout_ms"); v != IntDefault { - // req.WithUserTaskTimeoutMs(v.(int)) - //} - // - //if v := d.Get("suspend_task_after_num_failures"); v != IntDefault { - // req.WithSuspendTaskAfterNumFailures(v.(int)) - //} - // TODO: Decide on name (error_notification_integration ?) if v, ok := d.GetOk("error_integration"); ok { notificationIntegrationId, err := sdk.ParseAccountObjectIdentifier(v.(string)) @@ -312,36 +257,41 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag } if v, ok := d.GetOk("finalize"); ok { + // TODO: Create with finalize rootTaskId, err := sdk.ParseSchemaObjectIdentifier(v.(string)) if err != nil { return diag.FromErr(err) } + + rootTask, err := client.Tasks.ShowByID(ctx, rootTaskId) + if err != nil { + return diag.FromErr(err) + } + + if rootTask.State == sdk.TaskStateStarted { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(rootTaskId).WithSuspend(true)); err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + tasksToResume = append(tasksToResume, rootTaskId) + } + req.WithFinalize(rootTaskId) } - //if v := d.Get("task_auto_retry_attempts"); v != IntDefault { - // req.WithTaskAutoRetryAttempts(v.(int)) - //} - // - //if v := d.Get("user_task_minimum_trigger_interval_in_seconds"); v != IntDefault { - // req.WithUserTaskMinimumTriggerIntervalInSeconds(v.(int)) - //} - if v, ok := d.GetOk("after"); ok { // TODO: Should after take in task names or fully qualified names? - after := expandStringList(v.([]interface{})) + after := expandStringList(v.(*schema.Set).List()) precedingTasks := make([]sdk.SchemaObjectIdentifier, 0) - for _, dep := range after { - precedingTaskId := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, dep) - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, precedingTaskId, id) // TODO: What if this fails and only half of the tasks are suspended? 
- defer func() { - if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { - log.Printf("[WARN] failed to resume tasks: %s", err) - } - }() + for _, parentTaskIdString := range after { + parentTaskId, err := sdk.ParseSchemaObjectIdentifier(parentTaskIdString) if err != nil { return diag.FromErr(err) } - precedingTasks = append(precedingTasks, precedingTaskId) + resumeTasks, err := client.Tasks.SuspendRootTasks(ctx, parentTaskId, id) // TODO: What if this fails and only half of the tasks are suspended? + tasksToResume = append(tasksToResume, resumeTasks...) + if err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + precedingTasks = append(precedingTasks, parentTaskId) } req.WithAfter(precedingTasks) } @@ -361,16 +311,22 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag // TODO: State upgrader for "id" d.SetId(helpers.EncodeResourceIdentifier(id)) - if v := d.Get("enabled").(string); v != BooleanDefault { - enabled, err := booleanStringToBool(v) - if err != nil { - return diag.FromErr(err) - } - if enabled { - if err := waitForTaskStart(ctx, client, id); err != nil { - log.Printf("[WARN] failed to resume task %s", name) + if d.Get("enabled").(bool) { + if err := waitForTaskStart(ctx, client, id); err != nil { + return diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: "Failed to start the task", + Detail: fmt.Sprintf("Id: %s, err: %s", id.FullyQualifiedName(), err), + }, } } + // TODO: Check documentation + // Tasks are created as suspended + } + + if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { + log.Printf("[WARN] failed to resume tasks: %s", err) } return ReadTask(false)(ctx, d, meta) @@ -383,18 +339,29 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag return diag.FromErr(err) } - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, id, id) - defer func() { - if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { - log.Printf("[WARN] failed to resume tasks: %s", err) - } - }() + // TODO: Fix the order of actions + // TODO: Move suspending etc. to SDK + + task, err := client.Tasks.ShowByID(ctx, id) if err != nil { return diag.FromErr(err) } - set := sdk.NewTaskSetRequest() + // TODO: Should it be defer ? 
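	// An illustrative answer to the TODO above (a sketch only, not what this change does):
	// with defer, the suspended root tasks would be resumed on every return path of
	// UpdateTask, including the early error returns; the deferred call would only run
	// after the function's return value (including the final ReadTask call) is evaluated.
	// The identifiers and helpers used here already appear elsewhere in this file.
	//
	//	tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, id, id)
	//	if err != nil {
	//		return diag.FromErr(sdk.JoinErrors(err))
	//	}
	//	defer func() {
	//		if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil {
	//			log.Printf("[WARN] failed to resume tasks: %s", err)
	//		}
	//	}()
	//
	// The change below instead collects the suspended tasks and resumes them explicitly
	// near the end of UpdateTask.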
+ tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, id, id) + if err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + + if task.State == sdk.TaskStateStarted { + log.Printf("Suspending the task in if") + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSuspend(true)); err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + } + unset := sdk.NewTaskUnsetRequest() + set := sdk.NewTaskSetRequest() err = errors.Join( accountObjectIdentifierAttributeUpdate(d, "warehouse", &set.Warehouse, &unset.Warehouse), @@ -412,12 +379,6 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag return updateDiags } - if *set != (sdk.TaskSetRequest{}) { - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSet(*set)); err != nil { - return diag.FromErr(err) - } - } - if *unset != (sdk.TaskUnsetRequest{}) { if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithUnset(*unset)); err != nil { return diag.FromErr(err) @@ -442,30 +403,85 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag } } - if d.HasChange("after") { - // TOOD: after - // Making changes to after require suspending the current task - // (the task will be brought up to the correct running state in the "enabled" check at the bottom of Update function). - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSuspend(true)); err != nil { - return diag.FromErr(err) + if d.HasChange("finalize") { + if v, ok := d.GetOk("finalize"); ok { + rootTaskId, err := sdk.ParseSchemaObjectIdentifier(v.(string)) + if err != nil { + return diag.FromErr(err) + } + + rootTask, err := client.Tasks.ShowByID(ctx, rootTaskId) + if err != nil { + return diag.FromErr(err) + } + + if rootTask.State == sdk.TaskStateStarted { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(rootTaskId).WithSuspend(true)); err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + } + + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSetFinalize(rootTaskId)); err != nil { + return diag.FromErr(err) + } + + if rootTask.State == sdk.TaskStateStarted && !slices.ContainsFunc(tasksToResume, func(identifier sdk.SchemaObjectIdentifier) bool { + return identifier.FullyQualifiedName() == rootTaskId.FullyQualifiedName() + }) { + tasksToResume = append(tasksToResume, rootTaskId) + } + } else { + if task.TaskRelations.FinalizedRootTask == nil { + return diag.Errorf("trying to remove the finalizer when it's already unset") + } + + rootTask, err := client.Tasks.ShowByID(ctx, *task.TaskRelations.FinalizedRootTask) + if err != nil { + return diag.FromErr(err) + } + + if rootTask.State == sdk.TaskStateStarted { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(rootTask.ID()).WithSuspend(true)); err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + } + + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithUnsetFinalize(true)); err != nil { + return diag.FromErr(err) + } + + if rootTask.State == sdk.TaskStateStarted && !slices.ContainsFunc(tasksToResume, func(identifier sdk.SchemaObjectIdentifier) bool { + return identifier.FullyQualifiedName() == rootTask.ID().FullyQualifiedName() + }) { + tasksToResume = append(tasksToResume, rootTask.ID()) + } } + } + if d.HasChange("after") { oldAfter, newAfter := d.GetChange("after") addedTasks, removedTasks := ListDiff( expandStringList(oldAfter.(*schema.Set).List()), expandStringList(newAfter.(*schema.Set).List()), ) - // Order of commands matters: - // The "after"s can only be 
added when the task doesn't have a "schedule". - // That's why this ALTER has to be below regular ALTER SET/UNSET commands. if len(addedTasks) > 0 { addedTaskIds, err := collections.MapErr(addedTasks, sdk.ParseSchemaObjectIdentifier) if err != nil { return diag.FromErr(err) } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithAddAfter(addedTaskIds)); err != nil { + for _, addedTaskId := range addedTaskIds { + // TODO: Look into suspend root tasks function + addedTasksToResume, err := client.Tasks.SuspendRootTasks(ctx, addedTaskId, sdk.NewSchemaObjectIdentifier("", "", "")) + tasksToResume = append(tasksToResume, addedTasksToResume...) + if err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + } + + err = client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithAddAfter(addedTaskIds)) + if err != nil { return diag.FromErr(err) } } @@ -475,32 +491,31 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag if err != nil { return diag.FromErr(err) } - - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithRemoveAfter(removedTaskIds)); err != nil { + err = client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithRemoveAfter(removedTaskIds)) + if err != nil { return diag.FromErr(err) } } } - if d.HasChange("enabled") { - if v := d.Get("enabled").(string); v != BooleanDefault { - enabled, err := booleanStringToBool(v) - if err != nil { - return diag.FromErr(err) - } + if *set != (sdk.TaskSetRequest{}) { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSet(*set)); err != nil { + return diag.FromErr(err) + } + } - if enabled { - if waitForTaskStart(ctx, client, id) != nil { - log.Printf("[WARN] failed to resume task %s", id.FullyQualifiedName()) - } - } else { - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSuspend(true)); err != nil { - return diag.FromErr(err) - } - } + if d.Get("enable").(bool) { + log.Printf("Resuming the task in handled update") + if err := waitForTaskStart(ctx, client, id); err != nil { + return diag.FromErr(fmt.Errorf("failed to resume task %s, err = %w", id.FullyQualifiedName(), err)) } } + log.Printf("Resuming the root tasks: %v", collections.Map(tasksToResume, sdk.SchemaObjectIdentifier.Name)) + if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { + log.Printf("[WARN] failed to resume tasks: %s", err) + } + return ReadTask(false)(ctx, d, meta) } @@ -534,17 +549,21 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { if withExternalChangesMarking { if err = handleExternalChangesToObjectInShow(d, - showMapping{"state", "enabled", string(task.State), booleanStringFromBool(task.State == sdk.TaskStateStarted), nil}, showMapping{"allow_overlapping_execution", "allow_overlapping_execution", task.AllowOverlappingExecution, booleanStringFromBool(task.AllowOverlappingExecution), nil}, ); err != nil { return diag.FromErr(err) } + } else { + if err = setStateToValuesFromConfig(d, taskSchema, []string{ + "allow_overlapping_execution", + }); err != nil { + return diag.FromErr(err) + } } - if err = setStateToValuesFromConfig(d, taskSchema, []string{ - "enabled", - "allow_overlapping_execution", - }); err != nil { - return diag.FromErr(err) + + warehouseId := "" + if task.Warehouse != nil { + warehouseId = task.Warehouse.Name() } errorIntegrationId := "" @@ -552,15 +571,14 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { errorIntegrationId = task.ErrorIntegration.Name() } - finalizedTaskId := "" - if 
task.TaskRelations.FinalizerTask != nil { - finalizedTaskId = task.TaskRelations.FinalizerTask.FullyQualifiedName() + finalizedRootTaskId := "" + if task.TaskRelations.FinalizedRootTask != nil { + finalizedRootTaskId = task.TaskRelations.FinalizedRootTask.FullyQualifiedName() } if errs := errors.Join( - // TODO: handleTaskParametersRead(d, taskParameters) - // TODO: Reorder - d.Set("warehouse", task.Warehouse), + d.Set("enable", task.State == sdk.TaskStateStarted), + d.Set("warehouse", warehouseId), d.Set("schedule", task.Schedule), d.Set("when", task.Condition), d.Set("config", task.Config), @@ -568,7 +586,7 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { d.Set("comment", task.Comment), d.Set("sql_statement", task.Definition), d.Set("after", collections.Map(task.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName)), - d.Set("finalize", finalizedTaskId), + d.Set("finalize", finalizedRootTaskId), handleTaskParameterRead(d, taskParameters), d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()), d.Set(ShowOutputAttributeName, []map[string]any{schemas.TaskToSchema(task)}), @@ -595,7 +613,7 @@ func DeleteTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag } }() if err != nil { - return diag.FromErr(err) + return diag.FromErr(sdk.JoinErrors(err)) } err = client.Tasks.Drop(ctx, sdk.NewDropTaskRequest(id).WithIfExists(true)) @@ -624,28 +642,19 @@ func waitForTaskStart(ctx context.Context, client *sdk.Client, id sdk.SchemaObje }) } -// TODO: Remove functions below - -// difference find keys in 'a' but not in 'b'. -func difference(a, b map[string]any) map[string]any { - diff := make(map[string]any) - for k := range a { - if _, ok := b[k]; !ok { - diff[k] = a[k] - } +func waitForTaskSuspend(ctx context.Context, client *sdk.Client, id sdk.SchemaObjectIdentifier) error { + err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSuspend(true)) + if err != nil { + return fmt.Errorf("error suspending task %s err = %w", id.FullyQualifiedName(), err) } - return diff -} - -// differentValue find keys present both in 'a' and 'b' but having different values. 
-func differentValue(a, b map[string]any) map[string]any { - diff := make(map[string]any) - for k, va := range a { - if vb, ok := b[k]; ok { - if vb != va { - diff[k] = vb - } + return util.Retry(5, 5*time.Second, func() (error, bool) { + task, err := client.Tasks.ShowByID(ctx, id) + if err != nil { + return fmt.Errorf("error suspending task %s err = %w", id.FullyQualifiedName(), err), false } - } - return diff + if task.State != sdk.TaskStateSuspended { + return nil, false + } + return nil, true + }) } diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 0844eaa94c..6d25ba2239 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -1,9 +1,12 @@ package resources_test import ( - "fmt" + "strings" + "testing" + acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/objectparametersassert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceassert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceparametersassert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert" @@ -15,21 +18,21 @@ import ( r "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" configvariable "github.com/hashicorp/terraform-plugin-testing/config" - "strings" - "testing" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfversion" ) -// TODO: More tests for complicated DAGs +// TODO(SNOW-1348116 - next pr): More tests for complicated DAGs func TestAcc_Task_Basic(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) acc.TestAccPreCheck(t) + currentRole := acc.TestClient().Context.CurrentRole(t) + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() statement := "SELECT 1" configModel := model.TaskWithId("test", id, statement) @@ -45,7 +48,7 @@ func TestAcc_Task_Basic(t *testing.T) { { Config: config.FromModel(t, configModel), Check: assert.AssertThat(t, - resourceassert.TaskResource(t, "snowflake_task.test"). + resourceassert.TaskResource(t, configModel.ResourceReference()). HasFullyQualifiedNameString(id.FullyQualifiedName()). HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). @@ -61,13 +64,13 @@ func TestAcc_Task_Basic(t *testing.T) { HasAfterLen(0). HasWhenString(""). HasSqlStatementString(statement), - resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). + resourceshowoutputassert.TaskShowOutput(t, configModel.ResourceReference()). HasCreatedOnNotEmpty(). HasName(id.Name()). HasIdNotEmpty(). HasDatabaseName(id.DatabaseName()). HasSchemaName(id.SchemaName()). - HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasOwner(currentRole.Name()). HasComment(""). HasWarehouse(""). HasSchedule(""). @@ -76,19 +79,19 @@ func TestAcc_Task_Basic(t *testing.T) { HasDefinition(statement). HasCondition(""). HasAllowOverlappingExecution(false). 
- HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). // TODO: *sdk.AOI + HasErrorIntegration(""). HasLastCommittedOn(""). HasLastSuspendedOn(""). HasOwnerRoleType("ROLE"). HasConfig(""). - HasBudget(""), - //HasTaskRelations(sdk.TaskRelations{}). // TODO: - resourceparametersassert.TaskResourceParameters(t, "snowflake_task.test"). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). HasAllDefaults(), ), }, { - ResourceName: "snowflake_task.test", + ResourceName: configModel.ResourceReference(), ImportState: true, ImportStateCheck: assert.AssertThatImport(t, resourceassert.ImportedTaskResource(t, helpers.EncodeResourceIdentifier(id)). @@ -117,6 +120,8 @@ func TestAcc_Task_Complete(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) acc.TestAccPreCheck(t) + currentRole := acc.TestClient().Context.CurrentRole(t) + errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.Create(t) t.Cleanup(errorNotificationIntegrationCleanup) @@ -134,7 +139,7 @@ func TestAcc_Task_Complete(t *testing.T) { WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). WithSchedule("10 MINUTES"). WithConfigValue(configvariable.StringVariable(taskConfigVariableValue)). - WithAllowOverlappingExecution(true). + WithAllowOverlappingExecution(r.BooleanTrue). WithErrorIntegration(errorNotificationIntegration.ID().Name()). WithComment(comment). WithWhen(condition) @@ -150,7 +155,7 @@ func TestAcc_Task_Complete(t *testing.T) { { Config: config.FromModel(t, configModel), Check: assert.AssertThat(t, - resourceassert.TaskResource(t, "snowflake_task.test"). + resourceassert.TaskResource(t, configModel.ResourceReference()). HasFullyQualifiedNameString(id.FullyQualifiedName()). HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). @@ -166,29 +171,29 @@ func TestAcc_Task_Complete(t *testing.T) { HasNoAfter(). HasWhenString(condition). HasSqlStatementString(statement), - resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). + resourceshowoutputassert.TaskShowOutput(t, configModel.ResourceReference()). HasCreatedOnNotEmpty(). HasName(id.Name()). - //HasId(id.FullyQualifiedName()). // TODO: not empty + HasIdNotEmpty(). HasDatabaseName(id.DatabaseName()). HasSchemaName(id.SchemaName()). - HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasOwner(currentRole.Name()). HasComment(comment). HasWarehouse(acc.TestClient().Ids.WarehouseId().Name()). HasSchedule("10 MINUTES"). - //HasPredecessors(nil). // TODO: + HasPredecessors(). HasState(sdk.TaskStateStarted). HasDefinition(statement). HasCondition(condition). HasAllowOverlappingExecution(true). - HasErrorIntegration(errorNotificationIntegration.ID()). + HasErrorIntegration(errorNotificationIntegration.ID().Name()). HasLastCommittedOnNotEmpty(). HasLastSuspendedOn(""). HasOwnerRoleType("ROLE"). HasConfig(expectedTaskConfig). - HasBudget(""), - //HasTaskRelations(sdk.TaskRelations{}). // TODO: - resourceparametersassert.TaskResourceParameters(t, "snowflake_task.test"). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). 
HasAllDefaults(), ), }, @@ -222,12 +227,12 @@ func TestAcc_Task_Updates(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) acc.TestAccPreCheck(t) + currentRole := acc.TestClient().Context.CurrentRole(t) + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() statement := "SELECT 1" basicConfigModel := model.TaskWithId("test", id, statement) - // TODO: Assert the rest of fields (e.g. parameters) - errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.Create(t) t.Cleanup(errorNotificationIntegrationCleanup) @@ -240,11 +245,12 @@ func TestAcc_Task_Updates(t *testing.T) { condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` completeConfigModel := model.TaskWithId("test", id, statement). WithEnabled(r.BooleanTrue). - // TODO: Warehouse cannot be set (error) - //WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithSchedule("10 MINUTES"). + // TODO(SNOW-1348116 - decide in next prs): This won't work because alter set warehouse is broken + // we could actually make it work by enabling only uppercased ids in the warehouse field until it's fixed. + // WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithSchedule("5 MINUTES"). WithConfigValue(configvariable.StringVariable(taskConfigVariableValue)). - WithAllowOverlappingExecution(true). + WithAllowOverlappingExecution(r.BooleanTrue). WithErrorIntegration(errorNotificationIntegration.ID().Name()). WithComment(comment). WithWhen(condition) @@ -260,7 +266,7 @@ func TestAcc_Task_Updates(t *testing.T) { { Config: config.FromModel(t, basicConfigModel), Check: assert.AssertThat(t, - resourceassert.TaskResource(t, "snowflake_task.test"). + resourceassert.TaskResource(t, basicConfigModel.ResourceReference()). HasFullyQualifiedNameString(id.FullyQualifiedName()). HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). @@ -276,42 +282,41 @@ func TestAcc_Task_Updates(t *testing.T) { HasAfterLen(0). HasWhenString(""). HasSqlStatementString(statement), - resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). - //HasCreatedOnNotEmpty(), + resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). HasName(id.Name()). - //HasId(id.FullyQualifiedName()). // TODO: not empty + HasIdNotEmpty(). HasDatabaseName(id.DatabaseName()). HasSchemaName(id.SchemaName()). - HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasOwner(currentRole.Name()). HasComment(""). HasWarehouse(""). HasSchedule(""). - //HasPredecessors(nil). // TODO: + HasPredecessors(). HasState(sdk.TaskStateSuspended). HasDefinition(statement). HasCondition(""). HasAllowOverlappingExecution(false). - HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). // TODO: *sdk.AOI + HasErrorIntegration(""). HasLastCommittedOn(""). HasLastSuspendedOn(""). HasOwnerRoleType("ROLE"). HasConfig(""). - HasBudget(""), - //HasTaskRelations(sdk.TaskRelations{}). // TODO: + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), ), }, // Set { Config: config.FromModel(t, completeConfigModel), Check: assert.AssertThat(t, - resourceassert.TaskResource(t, "snowflake_task.test"). + resourceassert.TaskResource(t, completeConfigModel.ResourceReference()). HasFullyQualifiedNameString(id.FullyQualifiedName()). HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). HasEnabledString(r.BooleanTrue). - //HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). - HasScheduleString("10 MINUTES"). 
+ HasScheduleString("5 MINUTES"). HasConfigString(expectedTaskConfig). HasAllowOverlappingExecutionString(r.BooleanTrue). HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). @@ -320,35 +325,34 @@ func TestAcc_Task_Updates(t *testing.T) { HasAfterLen(0). HasWhenString(condition). HasSqlStatementString(statement), - resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). + resourceshowoutputassert.TaskShowOutput(t, completeConfigModel.ResourceReference()). HasCreatedOnNotEmpty(). HasName(id.Name()). - //HasId(id.FullyQualifiedName()). // TODO: not empty + HasIdNotEmpty(). HasDatabaseName(id.DatabaseName()). HasSchemaName(id.SchemaName()). - HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasOwner(currentRole.Name()). HasComment(comment). - //HasWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - HasSchedule("10 MINUTES"). - //HasPredecessors(nil). // TODO: + HasSchedule("5 MINUTES"). + HasPredecessors(). HasState(sdk.TaskStateStarted). HasDefinition(statement). HasCondition(condition). HasAllowOverlappingExecution(true). - HasErrorIntegration(errorNotificationIntegration.ID()). + HasErrorIntegration(errorNotificationIntegration.ID().Name()). HasLastCommittedOnNotEmpty(). HasLastSuspendedOn(""). HasOwnerRoleType("ROLE"). HasConfig(expectedTaskConfig). - HasBudget(""), - //HasTaskRelations(sdk.TaskRelations{}). // TODO: + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), ), }, // Unset { Config: config.FromModel(t, basicConfigModel), Check: assert.AssertThat(t, - resourceassert.TaskResource(t, "snowflake_task.test"). + resourceassert.TaskResource(t, basicConfigModel.ResourceReference()). HasFullyQualifiedNameString(id.FullyQualifiedName()). HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). @@ -364,28 +368,28 @@ func TestAcc_Task_Updates(t *testing.T) { HasAfterLen(0). HasWhenString(""). HasSqlStatementString(statement), - resourceshowoutputassert.TaskShowOutput(t, "snowflake_task.test"). - //HasCreatedOnNotEmpty(), + resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). HasName(id.Name()). - //HasId(id.FullyQualifiedName()). // TODO: not empty + HasIdNotEmpty(). HasDatabaseName(id.DatabaseName()). HasSchemaName(id.SchemaName()). - HasOwner("ACCOUNTADMIN"). // TODO: Current role + HasOwner(currentRole.Name()). HasComment(""). HasWarehouse(""). HasSchedule(""). - //HasPredecessors(nil). // TODO: + HasPredecessors(). HasState(sdk.TaskStateSuspended). HasDefinition(statement). HasCondition(""). HasAllowOverlappingExecution(false). - HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). // TODO: *sdk.AOI + HasErrorIntegration(""). HasLastCommittedOnNotEmpty(). HasLastSuspendedOnNotEmpty(). HasOwnerRoleType("ROLE"). HasConfig(""). - HasBudget(""), - //HasTaskRelations(sdk.TaskRelations{}). // TODO: + HasBudget(""). 
+ HasTaskRelations(sdk.TaskRelations{}), ), }, }, @@ -395,552 +399,586 @@ func TestAcc_Task_Updates(t *testing.T) { func TestAcc_Task_AllParameters(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) acc.TestAccPreCheck(t) -} -// TODO: Test other paths (alter finalize, after, itd) - -//type ( -// AccTaskTestSettings struct { -// DatabaseName string -// WarehouseName string -// RootTask *TaskSettings -// ChildTask *TaskSettings -// SoloTask *TaskSettings -// } -// -// TaskSettings struct { -// Name string -// Enabled bool -// Schema string -// SQL string -// Schedule string -// Comment string -// When string -// SessionParams map[string]string -// UserTaskTimeoutMs int64 -// } -//) -// -//var ( -// rootname = acc.TestClient().Ids.AlphaContaining("_root_task") -// rootId = sdk.NewSchemaObjectIdentifier(acc.TestDatabaseName, acc.TestSchemaName, rootname) -// childname = acc.TestClient().Ids.AlphaContaining("_child_task") -// childId = sdk.NewSchemaObjectIdentifier(acc.TestDatabaseName, acc.TestSchemaName, childname) -// soloname = acc.TestClient().Ids.AlphaContaining("_standalone_task") -// -// initialState = &AccTaskTestSettings{ //nolint -// WarehouseName: acc.TestWarehouseName, -// DatabaseName: acc.TestDatabaseName, -// RootTask: &TaskSettings{ -// Name: rootname, -// Schema: acc.TestSchemaName, -// SQL: "SHOW FUNCTIONS", -// Enabled: true, -// Schedule: "5 MINUTE", -// UserTaskTimeoutMs: 1800000, -// SessionParams: map[string]string{ -// string(sdk.SessionParameterLockTimeout): "1000", -// string(sdk.SessionParameterStrictJSONOutput): "true", -// }, -// }, -// -// ChildTask: &TaskSettings{ -// Name: childname, -// SQL: "SELECT 1", -// Enabled: false, -// Comment: "initial state", -// }, -// -// SoloTask: &TaskSettings{ -// Name: soloname, -// Schema: acc.TestSchemaName, -// SQL: "SELECT 1", -// When: "TRUE", -// Enabled: false, -// }, -// } -// -// // Enables the Child and changes the SQL. -// stepOne = &AccTaskTestSettings{ //nolint -// WarehouseName: acc.TestWarehouseName, -// DatabaseName: acc.TestDatabaseName, -// RootTask: &TaskSettings{ -// Name: rootname, -// Schema: acc.TestSchemaName, -// SQL: "SHOW FUNCTIONS", -// Enabled: true, -// Schedule: "5 MINUTE", -// UserTaskTimeoutMs: 1800000, -// SessionParams: map[string]string{ -// string(sdk.SessionParameterLockTimeout): "1000", -// string(sdk.SessionParameterStrictJSONOutput): "true", -// }, -// }, -// -// ChildTask: &TaskSettings{ -// Name: childname, -// SQL: "SELECT *", -// Enabled: true, -// Comment: "secondary state", -// }, -// -// SoloTask: &TaskSettings{ -// Name: soloname, -// Schema: acc.TestSchemaName, -// SQL: "SELECT *", -// When: "TRUE", -// Enabled: true, -// SessionParams: map[string]string{ -// string(sdk.SessionParameterTimestampInputFormat): "YYYY-MM-DD HH24", -// }, -// Schedule: "5 MINUTE", -// UserTaskTimeoutMs: 1800000, -// }, -// } -// -// // Changes Root Schedule and SQL. 
-// stepTwo = &AccTaskTestSettings{ //nolint -// WarehouseName: acc.TestWarehouseName, -// DatabaseName: acc.TestDatabaseName, -// RootTask: &TaskSettings{ -// Name: rootname, -// Schema: acc.TestSchemaName, -// SQL: "SHOW TABLES", -// Enabled: true, -// Schedule: "15 MINUTE", -// UserTaskTimeoutMs: 1800000, -// SessionParams: map[string]string{ -// string(sdk.SessionParameterLockTimeout): "1000", -// string(sdk.SessionParameterStrictJSONOutput): "true", -// }, -// }, -// -// ChildTask: &TaskSettings{ -// Name: childname, -// SQL: "SELECT 1", -// Enabled: true, -// Comment: "third state", -// }, -// -// SoloTask: &TaskSettings{ -// Name: soloname, -// Schema: acc.TestSchemaName, -// SQL: "SELECT *", -// When: "FALSE", -// Enabled: true, -// Schedule: "15 MINUTE", -// UserTaskTimeoutMs: 900000, -// }, -// } -// -// stepThree = &AccTaskTestSettings{ //nolint -// WarehouseName: acc.TestWarehouseName, -// DatabaseName: acc.TestDatabaseName, -// -// RootTask: &TaskSettings{ -// Name: rootname, -// Schema: acc.TestSchemaName, -// SQL: "SHOW FUNCTIONS", -// Enabled: false, -// Schedule: "5 MINUTE", -// UserTaskTimeoutMs: 1800000, -// // Changes session params: one is updated, one is removed, one is added -// SessionParams: map[string]string{ -// string(sdk.SessionParameterLockTimeout): "2000", -// string(sdk.SessionParameterMultiStatementCount): "5", -// }, -// }, -// -// ChildTask: &TaskSettings{ -// Name: childname, -// SQL: "SELECT 1", -// Enabled: false, -// Comment: "reset", -// }, -// -// SoloTask: &TaskSettings{ -// Name: soloname, -// Schema: acc.TestSchemaName, -// SQL: "SELECT 1", -// When: "TRUE", -// Enabled: true, -// SessionParams: map[string]string{ -// string(sdk.SessionParameterTimestampInputFormat): "YYYY-MM-DD HH24", -// }, -// Schedule: "5 MINUTE", -// UserTaskTimeoutMs: 0, -// }, -// } -//) - -//func TestAcc_Task(t *testing.T) { -// resource.Test(t, resource.TestCase{ -// ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, -// TerraformVersionChecks: []tfversion.TerraformVersionCheck{ -// tfversion.RequireAbove(tfversion.Version1_5_0), -// }, -// PreCheck: func() { acc.TestAccPreCheck(t) }, -// CheckDestroy: acc.CheckDestroy(t, resources.Task), -// Steps: []resource.TestStep{ -// { -// Config: taskConfig(initialState), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", initialState.RootTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", initialState.ChildTask.SQL), -// 
resource.TestCheckResourceAttr("snowflake_task.child_task", "after.0", rootname), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", initialState.ChildTask.Comment), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", initialState.RootTask.Schedule), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", initialState.ChildTask.Schedule), -// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", initialState.RootTask.UserTaskTimeoutMs), -// resource.TestCheckNoResourceAttr("snowflake_task.solo_task", "user_task_timeout_ms"), -// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), -// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), -// ), -// }, -// { -// Config: taskConfig(stepOne), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepOne.RootTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepOne.ChildTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepOne.ChildTask.Comment), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepOne.RootTask.Schedule), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepOne.ChildTask.Schedule), -// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepOne.RootTask.UserTaskTimeoutMs), -// checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepOne.SoloTask.UserTaskTimeoutMs), -// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), -// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), -// ), -// }, -// { -// Config: taskConfig(stepTwo), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), -// 
resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepTwo.RootTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepTwo.ChildTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepTwo.ChildTask.Comment), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepTwo.RootTask.Schedule), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepTwo.ChildTask.Schedule), -// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepTwo.RootTask.UserTaskTimeoutMs), -// checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepTwo.SoloTask.UserTaskTimeoutMs), -// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), -// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), -// ), -// }, -// { -// Config: taskConfig(stepThree), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "false"), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepThree.RootTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepThree.ChildTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepThree.ChildTask.Comment), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepThree.RootTask.Schedule), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepThree.ChildTask.Schedule), -// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepThree.RootTask.UserTaskTimeoutMs), -// checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepThree.SoloTask.UserTaskTimeoutMs), -// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 2000), -// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT"), -// 
checkInt64("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT", 5), -// ), -// }, -// { -// Config: taskConfig(initialState), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", initialState.RootTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", initialState.ChildTask.SQL), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", initialState.ChildTask.Comment), -// checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepOne.RootTask.UserTaskTimeoutMs), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", initialState.RootTask.Schedule), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", initialState.ChildTask.Schedule), -// // Terraform SDK is not able to differentiate if the -// // attribute has deleted or set to zero value. -// // ResourceData.GetChange returns the zero value of defined -// // type in schema as new the value. Provider handles 0 for -// // `user_task_timeout_ms` by unsetting the -// // USER_TASK_TIMEOUT_MS session variable. 
-// checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", initialState.ChildTask.UserTaskTimeoutMs), -// checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), -// resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), -// resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), -// ), -// }, -// }, -// }) -//} - -//func taskConfig(settings *AccTaskTestSettings) string { //nolint -// config, err := template.New("task_acceptance_test_config").Parse(` -//resource "snowflake_warehouse" "wh" { -// name = "{{ .WarehouseName }}-{{ .RootTask.Name }}" -//} -//resource "snowflake_task" "root_task" { -// name = "{{ .RootTask.Name }}" -// database = "{{ .DatabaseName }}" -// schema = "{{ .RootTask.Schema }}" -// warehouse = "${snowflake_warehouse.wh.name}" -// sql_statement = "{{ .RootTask.SQL }}" -// enabled = {{ .RootTask.Enabled }} -// schedule = "{{ .RootTask.Schedule }}" -// {{ if .RootTask.UserTaskTimeoutMs }} -// user_task_timeout_ms = {{ .RootTask.UserTaskTimeoutMs }} -// {{- end }} -// -// {{ if .RootTask.SessionParams }} -// session_parameters = { -// {{ range $key, $value := .RootTask.SessionParams}} -// {{ $key }} = "{{ $value }}", -// {{- end }} -// } -// {{- end }} -//} -//resource "snowflake_task" "child_task" { -// name = "{{ .ChildTask.Name }}" -// database = snowflake_task.root_task.database -// schema = snowflake_task.root_task.schema -// warehouse = snowflake_task.root_task.warehouse -// sql_statement = "{{ .ChildTask.SQL }}" -// enabled = {{ .ChildTask.Enabled }} -// after = [snowflake_task.root_task.name] -// comment = "{{ .ChildTask.Comment }}" -// {{ if .ChildTask.UserTaskTimeoutMs }} -// user_task_timeout_ms = {{ .ChildTask.UserTaskTimeoutMs }} -// {{- end }} -// -// {{ if .ChildTask.SessionParams }} -// session_parameters = { -// {{ range $key, $value := .ChildTask.SessionParams}} -// {{ $key }} = "{{ $value }}", -// {{- end }} -// } -// {{- end }} -//} -//resource "snowflake_task" "solo_task" { -// name = "{{ .SoloTask.Name }}" -// database = "{{ .DatabaseName }}" -// schema = "{{ .SoloTask.Schema }}" -// warehouse = "{{ .WarehouseName }}" -// sql_statement = "{{ .SoloTask.SQL }}" -// enabled = {{ .SoloTask.Enabled }} -// when = "{{ .SoloTask.When }}" -// {{ if .SoloTask.Schedule }} -// schedule = "{{ .SoloTask.Schedule }}" -// {{- end }} -// -// {{ if .SoloTask.UserTaskTimeoutMs }} -// user_task_timeout_ms = {{ .SoloTask.UserTaskTimeoutMs }} -// {{- end }} -// -// {{ if .SoloTask.SessionParams }} -// session_parameters = { -// {{ range $key, $value := .SoloTask.SessionParams}} -// {{ $key }} = "{{ $value }}", -// {{- end }} -// } -// {{- end }} -//} -// `) -// if err != nil { -// fmt.Println(err) -// } -// -// var result bytes.Buffer -// config.Execute(&result, settings) //nolint -// -// return result.String() -//} - -/* -todo: this test is failing due to error message below. Need to figure out why this is happening -=== RUN TestAcc_Task_Managed - - task_acceptance_test.go:371: Step 2/4 error: Error running apply: exit status 1 - - Error: error updating warehouse on task "terraform_test_database"."terraform_test_schema"."tst-terraform-DBMPMESYJB" err = 091083 (42601): Nonexistent warehouse terraform_test_warehouse-tst-terraform-DBMPMESYJB was specified. 
- - with snowflake_task.managed_task, - on terraform_plugin_test.tf line 7, in resource "snowflake_task" "managed_task": - 7: resource "snowflake_task" "managed_task" { - - - func TestAcc_Task_Managed(t *testing.T) { - accName := acc.TestClient().Ids.Alpha() - resource.Test(t, resource.TestCase{ - ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + configModel := model.TaskWithId("test", id, statement) + configModelWithAllParametersSet := model.TaskWithId("test", id, statement). + WithSuspendTaskAfterNumFailures(15). + WithTaskAutoRetryAttempts(15). + WithUserTaskManagedInitialWarehouseSizeEnum(sdk.WarehouseSizeXSmall). + WithUserTaskMinimumTriggerIntervalInSeconds(30). + WithUserTaskTimeoutMs(1000). + WithAbortDetachedQuery(true). + WithAutocommit(false). + WithBinaryInputFormatEnum(sdk.BinaryInputFormatUTF8). + WithBinaryOutputFormatEnum(sdk.BinaryOutputFormatBase64). + WithClientMemoryLimit(1024). + WithClientMetadataRequestUseConnectionCtx(true). + WithClientPrefetchThreads(2). + WithClientResultChunkSize(48). + WithClientResultColumnCaseInsensitive(true). + WithClientSessionKeepAlive(true). + WithClientSessionKeepAliveHeartbeatFrequency(2400). + WithClientTimestampTypeMappingEnum(sdk.ClientTimestampTypeMappingNtz). + WithDateInputFormat("YYYY-MM-DD"). + WithDateOutputFormat("YY-MM-DD"). + WithEnableUnloadPhysicalTypeOptimization(false). + WithErrorOnNondeterministicMerge(false). + WithErrorOnNondeterministicUpdate(true). + WithGeographyOutputFormatEnum(sdk.GeographyOutputFormatWKB). + WithGeometryOutputFormatEnum(sdk.GeometryOutputFormatWKB). + WithJdbcUseSessionTimezone(false). + WithJsonIndent(4). + WithLockTimeout(21222). + WithLogLevelEnum(sdk.LogLevelError). + WithMultiStatementCount(0). + WithNoorderSequenceAsDefault(false). + WithOdbcTreatDecimalAsInt(true). + WithQueryTag("some_tag"). + WithQuotedIdentifiersIgnoreCase(true). + WithRowsPerResultset(2). + WithS3StageVpceDnsName("vpce-id.s3.region.vpce.amazonaws.com"). + WithSearchPath("$public, $current"). + WithStatementQueuedTimeoutInSeconds(10). + WithStatementTimeoutInSeconds(10). + WithStrictJsonOutput(true). + WithTimestampDayIsAlways24h(true). + WithTimestampInputFormat("YYYY-MM-DD"). + WithTimestampLtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + WithTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + WithTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS"). + WithTimestampTypeMappingEnum(sdk.TimestampTypeMappingLtz). + WithTimestampTzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + WithTimezone("Europe/Warsaw"). + WithTimeInputFormat("HH24:MI"). + WithTimeOutputFormat("HH24:MI"). + WithTraceLevelEnum(sdk.TraceLevelOnEvent). + WithTransactionAbortOnError(true). + WithTransactionDefaultIsolationLevelEnum(sdk.TransactionDefaultIsolationLevelReadCommitted). + WithTwoDigitCenturyStart(1980). + WithUnsupportedDdlActionEnum(sdk.UnsupportedDDLActionFail). + WithUseCachedResult(false). + WithWeekOfYearPolicy(1). 
+ WithWeekStart(1) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.RequireAbove(tfversion.Version1_5_0), }, - PreCheck: func() { acc.TestAccPreCheck(t) }, - CheckDestroy: acc.CheckDestroy(t, resources.Task), - Steps: []resource.TestStep{ - { - Config: taskConfigManaged1(accName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - checkBool("snowflake_task.managed_task", "enabled", true), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "user_task_managed_initial_warehouse_size", "XSMALL"), - resource.TestCheckResourceAttr("snowflake_task.managed_task_no_init", "user_task_managed_initial_warehouse_size", ""), - resource.TestCheckResourceAttr("snowflake_task.managed_task_no_init", "session_parameters.TIMESTAMP_INPUT_FORMAT", "YYYY-MM-DD HH24"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "warehouse", ""), - ), - }, - { - Config: taskConfigManaged2(accName, acc.TestDatabaseName, acc.TestSchemaName, acc.TestWarehouseName), - Check: resource.ComposeTestCheckFunc( - checkBool("snowflake_task.managed_task", "enabled", true), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "user_task_managed_initial_warehouse_size", ""), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "warehouse", fmt.Sprintf("%s-%s", acc.TestWarehouseName, accName)), - ), - }, - { - Config: taskConfigManaged1(accName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - checkBool("snowflake_task.managed_task", "enabled", true), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.managed_task_no_init", "session_parameters.TIMESTAMP_INPUT_FORMAT", "YYYY-MM-DD HH24"), - resource.TestCheckResourceAttr("snowflake_task.managed_task_no_init", "user_task_managed_initial_warehouse_size", ""), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "warehouse", ""), - ), - }, - { - Config: taskConfigManaged3(accName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - checkBool("snowflake_task.managed_task", "enabled", true), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schema", acc.TestSchemaName), - 
resource.TestCheckResourceAttr("snowflake_task.managed_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "user_task_managed_initial_warehouse_size", "SMALL"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "warehouse", ""), - ), - }, + PreCheck: func() { acc.TestAccPreCheck(t) }, + CheckDestroy: acc.CheckDestroy(t, resources.User), + Steps: []resource.TestStep{ + // create with default values for all the parameters + { + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + objectparametersassert.TaskParameters(t, id). + HasAllDefaults(). + HasAllDefaultsExplicit(), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). + HasAllDefaults(), + ), }, - }) - } -*/ -func taskConfigManaged1(name string, databaseName string, schemaName string) string { - s := ` -resource "snowflake_task" "managed_task" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - user_task_managed_initial_warehouse_size = "XSMALL" -} -resource "snowflake_task" "managed_task_no_init" { - name = "%s_no_init" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - session_parameters = { - TIMESTAMP_INPUT_FORMAT = "YYYY-MM-DD HH24", - } + // import when no parameter set + { + ResourceName: configModel.ResourceReference(), + ImportState: true, + ImportStateCheck: assert.AssertThatImport(t, + resourceparametersassert.ImportedTaskResourceParameters(t, helpers.EncodeResourceIdentifier(id)). + HasAllDefaults(), + ), + }, + // set all parameters + { + Config: config.FromModel(t, configModelWithAllParametersSet), + Check: assert.AssertThat(t, + objectparametersassert.TaskParameters(t, id). + HasSuspendTaskAfterNumFailures(15). + HasTaskAutoRetryAttempts(15). + HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall). + HasUserTaskMinimumTriggerIntervalInSeconds(30). + HasUserTaskTimeoutMs(1000). + HasAbortDetachedQuery(true). + HasAutocommit(false). + HasBinaryInputFormat(sdk.BinaryInputFormatUTF8). + HasBinaryOutputFormat(sdk.BinaryOutputFormatBase64). + HasClientMemoryLimit(1024). + HasClientMetadataRequestUseConnectionCtx(true). + HasClientPrefetchThreads(2). + HasClientResultChunkSize(48). + HasClientResultColumnCaseInsensitive(true). + HasClientSessionKeepAlive(true). + HasClientSessionKeepAliveHeartbeatFrequency(2400). + HasClientTimestampTypeMapping(sdk.ClientTimestampTypeMappingNtz). + HasDateInputFormat("YYYY-MM-DD"). + HasDateOutputFormat("YY-MM-DD"). + HasEnableUnloadPhysicalTypeOptimization(false). + HasErrorOnNondeterministicMerge(false). + HasErrorOnNondeterministicUpdate(true). + HasGeographyOutputFormat(sdk.GeographyOutputFormatWKB). + HasGeometryOutputFormat(sdk.GeometryOutputFormatWKB). + HasJdbcUseSessionTimezone(false). + HasJsonIndent(4). + HasLockTimeout(21222). + HasLogLevel(sdk.LogLevelError). + HasMultiStatementCount(0). + HasNoorderSequenceAsDefault(false). + HasOdbcTreatDecimalAsInt(true). + HasQueryTag("some_tag"). + HasQuotedIdentifiersIgnoreCase(true). + HasRowsPerResultset(2). + HasS3StageVpceDnsName("vpce-id.s3.region.vpce.amazonaws.com"). + HasSearchPath("$public, $current"). + HasStatementQueuedTimeoutInSeconds(10). + HasStatementTimeoutInSeconds(10). + HasStrictJsonOutput(true). + HasTimestampDayIsAlways24h(true). + HasTimestampInputFormat("YYYY-MM-DD"). 
+ HasTimestampLtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampTypeMapping(sdk.TimestampTypeMappingLtz). + HasTimestampTzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimezone("Europe/Warsaw"). + HasTimeInputFormat("HH24:MI"). + HasTimeOutputFormat("HH24:MI"). + HasTraceLevel(sdk.TraceLevelOnEvent). + HasTransactionAbortOnError(true). + HasTransactionDefaultIsolationLevel(sdk.TransactionDefaultIsolationLevelReadCommitted). + HasTwoDigitCenturyStart(1980). + HasUnsupportedDdlAction(sdk.UnsupportedDDLActionFail). + HasUseCachedResult(false). + HasWeekOfYearPolicy(1). + HasWeekStart(1), + resourceparametersassert.TaskResourceParameters(t, configModelWithAllParametersSet.ResourceReference()). + HasSuspendTaskAfterNumFailures(15). + HasTaskAutoRetryAttempts(15). + HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall). + HasUserTaskMinimumTriggerIntervalInSeconds(30). + HasUserTaskTimeoutMs(1000). + HasAbortDetachedQuery(true). + HasAutocommit(false). + HasBinaryInputFormat(sdk.BinaryInputFormatUTF8). + HasBinaryOutputFormat(sdk.BinaryOutputFormatBase64). + HasClientMemoryLimit(1024). + HasClientMetadataRequestUseConnectionCtx(true). + HasClientPrefetchThreads(2). + HasClientResultChunkSize(48). + HasClientResultColumnCaseInsensitive(true). + HasClientSessionKeepAlive(true). + HasClientSessionKeepAliveHeartbeatFrequency(2400). + HasClientTimestampTypeMapping(sdk.ClientTimestampTypeMappingNtz). + HasDateInputFormat("YYYY-MM-DD"). + HasDateOutputFormat("YY-MM-DD"). + HasEnableUnloadPhysicalTypeOptimization(false). + HasErrorOnNondeterministicMerge(false). + HasErrorOnNondeterministicUpdate(true). + HasGeographyOutputFormat(sdk.GeographyOutputFormatWKB). + HasGeometryOutputFormat(sdk.GeometryOutputFormatWKB). + HasJdbcUseSessionTimezone(false). + HasJsonIndent(4). + HasLockTimeout(21222). + HasLogLevel(sdk.LogLevelError). + HasMultiStatementCount(0). + HasNoorderSequenceAsDefault(false). + HasOdbcTreatDecimalAsInt(true). + HasQueryTag("some_tag"). + HasQuotedIdentifiersIgnoreCase(true). + HasRowsPerResultset(2). + HasS3StageVpceDnsName("vpce-id.s3.region.vpce.amazonaws.com"). + HasSearchPath("$public, $current"). + HasStatementQueuedTimeoutInSeconds(10). + HasStatementTimeoutInSeconds(10). + HasStrictJsonOutput(true). + HasTimestampDayIsAlways24h(true). + HasTimestampInputFormat("YYYY-MM-DD"). + HasTimestampLtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampTypeMapping(sdk.TimestampTypeMappingLtz). + HasTimestampTzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimezone("Europe/Warsaw"). + HasTimeInputFormat("HH24:MI"). + HasTimeOutputFormat("HH24:MI"). + HasTraceLevel(sdk.TraceLevelOnEvent). + HasTransactionAbortOnError(true). + HasTransactionDefaultIsolationLevel(sdk.TransactionDefaultIsolationLevelReadCommitted). + HasTwoDigitCenturyStart(1980). + HasUnsupportedDdlAction(sdk.UnsupportedDDLActionFail). + HasUseCachedResult(false). + HasWeekOfYearPolicy(1). + HasWeekStart(1), + ), + }, + // import when all parameters set + { + ResourceName: configModelWithAllParametersSet.ResourceReference(), + ImportState: true, + ImportStateCheck: assert.AssertThatImport(t, + resourceparametersassert.ImportedTaskResourceParameters(t, helpers.EncodeResourceIdentifier(id)). + HasSuspendTaskAfterNumFailures(15). + HasTaskAutoRetryAttempts(15). 
+ HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall). + HasUserTaskMinimumTriggerIntervalInSeconds(30). + HasUserTaskTimeoutMs(1000). + HasAbortDetachedQuery(true). + HasAutocommit(false). + HasBinaryInputFormat(sdk.BinaryInputFormatUTF8). + HasBinaryOutputFormat(sdk.BinaryOutputFormatBase64). + HasClientMemoryLimit(1024). + HasClientMetadataRequestUseConnectionCtx(true). + HasClientPrefetchThreads(2). + HasClientResultChunkSize(48). + HasClientResultColumnCaseInsensitive(true). + HasClientSessionKeepAlive(true). + HasClientSessionKeepAliveHeartbeatFrequency(2400). + HasClientTimestampTypeMapping(sdk.ClientTimestampTypeMappingNtz). + HasDateInputFormat("YYYY-MM-DD"). + HasDateOutputFormat("YY-MM-DD"). + HasEnableUnloadPhysicalTypeOptimization(false). + HasErrorOnNondeterministicMerge(false). + HasErrorOnNondeterministicUpdate(true). + HasGeographyOutputFormat(sdk.GeographyOutputFormatWKB). + HasGeometryOutputFormat(sdk.GeometryOutputFormatWKB). + HasJdbcUseSessionTimezone(false). + HasJsonIndent(4). + HasLockTimeout(21222). + HasLogLevel(sdk.LogLevelError). + HasMultiStatementCount(0). + HasNoorderSequenceAsDefault(false). + HasOdbcTreatDecimalAsInt(true). + HasQueryTag("some_tag"). + HasQuotedIdentifiersIgnoreCase(true). + HasRowsPerResultset(2). + HasS3StageVpceDnsName("vpce-id.s3.region.vpce.amazonaws.com"). + HasSearchPath("$public, $current"). + HasStatementQueuedTimeoutInSeconds(10). + HasStatementTimeoutInSeconds(10). + HasStrictJsonOutput(true). + HasTimestampDayIsAlways24h(true). + HasTimestampInputFormat("YYYY-MM-DD"). + HasTimestampLtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampTypeMapping(sdk.TimestampTypeMappingLtz). + HasTimestampTzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimezone("Europe/Warsaw"). + HasTimeInputFormat("HH24:MI"). + HasTimeOutputFormat("HH24:MI"). + HasTraceLevel(sdk.TraceLevelOnEvent). + HasTransactionAbortOnError(true). + HasTransactionDefaultIsolationLevel(sdk.TransactionDefaultIsolationLevelReadCommitted). + HasTwoDigitCenturyStart(1980). + HasUnsupportedDdlAction(sdk.UnsupportedDDLActionFail). + HasUseCachedResult(false). + HasWeekOfYearPolicy(1). + HasWeekStart(1), + ), + }, + // unset all the parameters + { + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + objectparametersassert.TaskParameters(t, id). + HasAllDefaults(). + HasAllDefaultsExplicit(), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). + HasAllDefaults(), + ), + }, + }, + }) } -` - return fmt.Sprintf(s, name, databaseName, schemaName, name, databaseName, schemaName) -} +func TestAcc_Task_Enabled(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) -func taskConfigManaged2(name, databaseName, schemaName, warehouseName string) string { - s := ` -resource "snowflake_warehouse" "wh" { - name = "%s-%s" -} + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + configModelEnabled := model.TaskWithId("test", id, statement). + WithSchedule("5 MINUTES"). + WithEnabled(r.BooleanTrue) + configModelDisabled := model.TaskWithId("test", id, statement). + WithSchedule("5 MINUTES"). 
+ WithEnabled(r.BooleanFalse) -resource "snowflake_task" "managed_task" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - warehouse = snowflake_warehouse.wh.name -} -` - return fmt.Sprintf(s, warehouseName, name, name, databaseName, schemaName) + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, configModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelDisabled.ResourceReference()). + HasEnabledString(r.BooleanFalse), + resourceshowoutputassert.TaskShowOutput(t, configModelDisabled.ResourceReference()). + HasState(sdk.TaskStateSuspended), + ), + }, + { + Config: config.FromModel(t, configModelEnabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelEnabled.ResourceReference()). + HasEnabledString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, configModelEnabled.ResourceReference()). + HasState(sdk.TaskStateStarted), + ), + }, + { + Config: config.FromModel(t, configModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelDisabled.ResourceReference()). + HasEnabledString(r.BooleanFalse), + resourceshowoutputassert.TaskShowOutput(t, configModelDisabled.ResourceReference()). + HasState(sdk.TaskStateSuspended), + ), + }, + }, + }) } -func taskConfigManaged3(name, databaseName, schemaName string) string { - s := ` -resource "snowflake_task" "managed_task" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - user_task_managed_initial_warehouse_size = "SMALL" +// TODO: This test may also be not deterministic and sometimes it fail when resuming a task while other task is modifying DAG (removing after) +func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + id2 := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + + firstTaskStandaloneModel := model.TaskWithId("main_task", id, statement). + WithSchedule(schedule). + WithEnabled(r.BooleanTrue). + WithSuspendTaskAfterNumFailures(1) + secondTaskStandaloneModel := model.TaskWithId("second_task", id2, statement). + WithSchedule(schedule). + WithEnabled(r.BooleanTrue) + + rootTaskModel := model.TaskWithId("main_task", id, statement). + WithSchedule(schedule). + WithEnabled(r.BooleanTrue). + WithSuspendTaskAfterNumFailures(2) + childTaskModel := model.TaskWithId("second_task", id2, statement). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(id.FullyQualifiedName()))). + WithEnabled(r.BooleanTrue) + childTaskModel.SetDependsOn([]string{rootTaskModel.ResourceReference()}) + + firstTaskStandaloneModelDisabled := model.TaskWithId("main_task", id, statement). + WithSchedule(schedule). + WithEnabled(r.BooleanFalse) + secondTaskStandaloneModelDisabled := model.TaskWithId("second_task", id2, statement). + WithSchedule(schedule). 
+ WithEnabled(r.BooleanFalse) + secondTaskStandaloneModelDisabled.SetDependsOn([]string{firstTaskStandaloneModelDisabled.ResourceReference()}) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, firstTaskStandaloneModel) + config.FromModel(t, secondTaskStandaloneModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, firstTaskStandaloneModel.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanTrue). + HasSuspendTaskAfterNumFailuresString("1"), + resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModel.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateStarted), + resourceassert.TaskResource(t, secondTaskStandaloneModel.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModel.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateStarted), + ), + }, + // Change the second task to run after the first one (creating a DAG) + { + Config: config.FromModel(t, rootTaskModel) + config.FromModel(t, childTaskModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskModel.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanTrue). + HasSuspendTaskAfterNumFailuresString("2"), + resourceshowoutputassert.TaskShowOutput(t, rootTaskModel.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateStarted), + resourceassert.TaskResource(t, childTaskModel.ResourceReference()). + HasAfterLen(1). + HasEnabledString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, childTaskModel.ResourceReference()). + HasPredecessors(id). + HasState(sdk.TaskStateStarted), + ), + }, + // Change tasks in DAG to standalone tasks (disabled to check if resuming/suspending works correctly) + { + Config: config.FromModel(t, firstTaskStandaloneModelDisabled) + config.FromModel(t, secondTaskStandaloneModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, firstTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanFalse). + HasSuspendTaskAfterNumFailuresString("10"), + resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModelDisabled.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateSuspended), + resourceassert.TaskResource(t, secondTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanFalse), + resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModelDisabled.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateSuspended), + ), + }, + }, + }) } -` - return fmt.Sprintf(s, name, databaseName, schemaName) + +func TestAcc_Task_ConvertStandaloneTaskToFinalizer(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootTaskId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + finalizerTaskId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + + firstTaskStandaloneModel := model.TaskWithId("main_task", rootTaskId, statement). + WithSchedule(schedule). 
+ WithEnabled(r.BooleanTrue). + WithSuspendTaskAfterNumFailures(1) + secondTaskStandaloneModel := model.TaskWithId("second_task", finalizerTaskId, statement). + WithSchedule(schedule). + WithEnabled(r.BooleanTrue) + + rootTaskModel := model.TaskWithId("main_task", rootTaskId, statement). + WithSchedule(schedule). + WithEnabled(r.BooleanTrue). + WithSuspendTaskAfterNumFailures(2) + childTaskModel := model.TaskWithId("second_task", finalizerTaskId, statement). + WithFinalize(rootTaskId.FullyQualifiedName()). + WithEnabled(r.BooleanTrue) + childTaskModel.SetDependsOn([]string{rootTaskModel.ResourceReference()}) + + firstTaskStandaloneModelDisabled := model.TaskWithId("main_task", rootTaskId, statement). + WithSchedule(schedule). + WithEnabled(r.BooleanFalse) + secondTaskStandaloneModelDisabled := model.TaskWithId("second_task", finalizerTaskId, statement). + WithSchedule(schedule). + WithEnabled(r.BooleanFalse) + secondTaskStandaloneModelDisabled.SetDependsOn([]string{firstTaskStandaloneModelDisabled.ResourceReference()}) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, firstTaskStandaloneModel) + config.FromModel(t, secondTaskStandaloneModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, firstTaskStandaloneModel.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanTrue). + HasSuspendTaskAfterNumFailuresString("1"), + resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModel.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateStarted), + resourceassert.TaskResource(t, secondTaskStandaloneModel.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModel.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateStarted), + ), + }, + // Change the second task to run after the first one (creating a DAG) + { + Config: config.FromModel(t, rootTaskModel) + config.FromModel(t, childTaskModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskModel.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanTrue). + HasSuspendTaskAfterNumFailuresString("2"), + resourceshowoutputassert.TaskShowOutput(t, rootTaskModel.ResourceReference()). + HasSchedule(schedule). + // HasTaskRelations(sdk.TaskRelations{FinalizerTask: &finalizerTaskId}). + HasState(sdk.TaskStateStarted), + resourceassert.TaskResource(t, childTaskModel.ResourceReference()). + HasEnabledString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, childTaskModel.ResourceReference()). + // HasTaskRelations(sdk.TaskRelations{FinalizedRootTask: &rootTaskId}). + HasState(sdk.TaskStateStarted), + ), + }, + // Change tasks in DAG to standalone tasks (disabled to check if resuming/suspending works correctly) + { + Config: config.FromModel(t, firstTaskStandaloneModelDisabled) + config.FromModel(t, secondTaskStandaloneModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, firstTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanFalse). 
+ HasSuspendTaskAfterNumFailuresString("10"), + resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModelDisabled.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateSuspended), + resourceassert.TaskResource(t, secondTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleString(schedule). + HasEnabledString(r.BooleanFalse), + resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModelDisabled.ResourceReference()). + HasSchedule(schedule). + HasState(sdk.TaskStateSuspended), + ), + }, + }, + }) } -func TestAcc_Task_SwitchScheduled(t *testing.T) { - accName := acc.TestClient().Ids.Alpha() - taskRootName := acc.TestClient().Ids.Alpha() +// TODO(SNOW-1348116 - analyse in next pr): This test is not deterministic and sometimes it fails when resuming a task while other task is modifying DAG (removing after) +func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + rootTaskConfigModel := model.TaskWithId("root", rootId, statement). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). + WithSuspendTaskAfterNumFailures(1) + childTaskConfigModel := model.TaskWithId("child", childId, statement). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule) + + rootTaskConfigModelAfterSuspendFailuresUpdate := model.TaskWithId("root", rootId, statement). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). + WithSuspendTaskAfterNumFailures(2) + childTaskConfigModelWithAfter := model.TaskWithId("child", childId, statement). + WithEnabled(r.BooleanTrue). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))) + childTaskConfigModelWithAfter.SetDependsOn([]string{rootTaskConfigModelAfterSuspendFailuresUpdate.ResourceReference()}) + + rootTaskConfigModelDisabled := model.TaskWithId("root", rootId, statement). + WithEnabled(r.BooleanFalse). + WithSchedule(schedule) + childTaskConfigModelDisabled := model.TaskWithId("child", childId, statement). + WithEnabled(r.BooleanFalse). + WithSchedule(schedule) + childTaskConfigModelDisabled.SetDependsOn([]string{rootTaskConfigModelDisabled.ResourceReference()}) resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, @@ -951,228 +989,319 @@ func TestAcc_Task_SwitchScheduled(t *testing.T) { CheckDestroy: acc.CheckDestroy(t, resources.Task), Steps: []resource.TestStep{ { - Config: taskConfigManagedScheduled(accName, taskRootName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.test_task_root", "suspend_task_after_num_failures", "1"), + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, "snowflake_task.child"). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule). + HasAfterLen(0). 
+ HasSuspendTaskAfterNumFailuresString("10"), + resourceassert.TaskResource(t, "snowflake_task.root"). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule). + HasSuspendTaskAfterNumFailuresString("1"), ), }, { - Config: taskConfigManagedScheduled2(accName, taskRootName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schedule", ""), - resource.TestCheckResourceAttr("snowflake_task.test_task_root", "suspend_task_after_num_failures", "2"), + Config: config.FromModel(t, rootTaskConfigModelAfterSuspendFailuresUpdate) + config.FromModel(t, childTaskConfigModelWithAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, "snowflake_task.child"). + HasEnabledString(r.BooleanTrue). + HasScheduleString(""). + HasAfterLen(1). + HasSuspendTaskAfterNumFailuresString("10"), + resourceassert.TaskResource(t, "snowflake_task.root"). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule). + HasSuspendTaskAfterNumFailuresString("2"), ), }, { - Config: taskConfigManagedScheduled(accName, taskRootName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.test_task_root", "suspend_task_after_num_failures", "1"), + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, "snowflake_task.child"). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule). + HasAfterLen(0). + HasSuspendTaskAfterNumFailuresString("10"), + resourceassert.TaskResource(t, "snowflake_task.root"). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule). + HasSuspendTaskAfterNumFailuresString("1"), ), }, { - Config: taskConfigManagedScheduled3(accName, taskRootName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schedule", ""), - resource.TestCheckResourceAttr("snowflake_task.test_task_root", "suspend_task_after_num_failures", "0"), + Config: config.FromModel(t, rootTaskConfigModelDisabled) + config.FromModel(t, childTaskConfigModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, "snowflake_task.child"). + HasEnabledString(r.BooleanFalse). 
+ HasScheduleString(schedule). + HasAfterLen(0). + HasSuspendTaskAfterNumFailuresString("10"), + resourceassert.TaskResource(t, "snowflake_task.root"). + HasEnabledString(r.BooleanFalse). + HasScheduleString(schedule). + HasSuspendTaskAfterNumFailuresString("10"), ), }, }, }) } -func taskConfigManagedScheduled(name string, taskRootName string, databaseName string, schemaName string) string { - return fmt.Sprintf(` -resource "snowflake_task" "test_task_root" { - name = "%[1]s" - database = "%[2]s" - schema = "%[3]s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - suspend_task_after_num_failures = 1 -} +func TestAcc_Task_WithAfter(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) -resource "snowflake_task" "test_task" { - depends_on = [snowflake_task.test_task_root] - name = "%[4]s" - database = "%[2]s" - schema = "%[3]s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" -} -`, taskRootName, databaseName, schemaName, name) -} + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" -func taskConfigManagedScheduled2(name string, taskRootName string, databaseName string, schemaName string) string { - return fmt.Sprintf(` -resource "snowflake_task" "test_task_root" { - name = "%[1]s" - database = "%[2]s" - schema = "%[3]s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - suspend_task_after_num_failures = 2 -} + rootTaskConfigModel := model.TaskWithId("root", rootId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). + WithSqlStatement(statement) -resource "snowflake_task" "test_task" { - name = "%[4]s" - database = "%[2]s" - schema = "%[3]s" - sql_statement = "SELECT 1" - enabled = true - after = [snowflake_task.test_task_root.name] -} -`, taskRootName, databaseName, schemaName, name) -} + childTaskConfigModelWithAfter := model.TaskWithId("child", childId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithSqlStatement(statement) + childTaskConfigModelWithAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) -func taskConfigManagedScheduled3(name string, taskRootName string, databaseName string, schemaName string) string { - s := ` -resource "snowflake_task" "test_task_root" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = false - schedule = "5 MINUTE" -} + childTaskConfigModelWithoutAfter := model.TaskWithId("child", childId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). 
+ WithSqlStatement(statement) + childTaskConfigModelWithoutAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) -resource "snowflake_task" "test_task" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = false - after = [snowflake_task.test_task_root.name] + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasAfterLen(1), + ), + }, + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasAfterLen(0), + ), + }, + }, + }) } -` - return fmt.Sprintf(s, taskRootName, databaseName, schemaName, name, databaseName, schemaName) + +func TestAcc_Task_WithFinalizer(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + + rootTaskConfigModel := model.TaskWithId("root", rootId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). + WithSqlStatement(statement) + + childTaskConfigModelWithFinalizer := model.TaskWithId("child", childId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithFinalize(rootId.FullyQualifiedName()). + WithSqlStatement(statement) + childTaskConfigModelWithFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + childTaskConfigModelWithoutFinalizer := model.TaskWithId("child", childId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). + WithSqlStatement(statement) + childTaskConfigModelWithoutFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasEnabledString(r.BooleanTrue). 
+ HasScheduleString(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasFinalizeString(rootId.FullyQualifiedName()), + ), + }, + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasFinalizeString(""), + ), + }, + }, + }) } -func checkInt64(name, key string, value int64) func(*terraform.State) error { - return func(state *terraform.State) error { - return resource.TestCheckResourceAttr(name, key, fmt.Sprintf("%v", value))(state) - } +func TestAcc_Task_issue2207(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + + rootTaskConfigModel := model.TaskWithId("root", rootId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). + WithSqlStatement(statement) + + childTaskConfigModel := model.TaskWithId("child", childId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithComment("abc"). + WithSqlStatement(statement) + childTaskConfigModel.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + childTaskConfigModelWithDifferentComment := model.TaskWithId("child", childId, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithEnabled(r.BooleanTrue). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithComment("def"). + WithSqlStatement(statement) + childTaskConfigModelWithDifferentComment.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule), + resourceassert.TaskResource(t, childTaskConfigModel.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasAfterLen(1). + HasCommentString("abc"), + ), + }, + // change comment + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(childTaskConfigModelWithDifferentComment.ResourceReference(), plancheck.ResourceActionUpdate), + }, + }, + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithDifferentComment), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). 
+ HasEnabledString(r.BooleanTrue). + HasScheduleString(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithDifferentComment.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasAfterLen(1). + HasCommentString("def"), + ), + }, + }, + }) } -//func TestAcc_Task_issue2207(t *testing.T) { -// prefix := acc.TestClient().Ids.Alpha() -// rootName := prefix + "_root_task" -// childName := prefix + "_child_task" -// -// m := func() map[string]config.Variable { -// return map[string]config.Variable{ -// "root_name": config.StringVariable(rootName), -// "database": config.StringVariable(acc.TestDatabaseName), -// "schema": config.StringVariable(acc.TestSchemaName), -// "warehouse": config.StringVariable(acc.TestWarehouseName), -// "child_name": config.StringVariable(childName), -// "comment": config.StringVariable("abc"), -// } -// } -// m2 := m() -// m2["comment"] = config.StringVariable("def") -// -// resource.Test(t, resource.TestCase{ -// ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, -// PreCheck: func() { acc.TestAccPreCheck(t) }, -// TerraformVersionChecks: []tfversion.TerraformVersionCheck{ -// tfversion.RequireAbove(tfversion.Version1_5_0), -// }, -// CheckDestroy: acc.CheckDestroy(t, resources.Task), -// Steps: []resource.TestStep{ -// { -// ConfigDirectory: config.TestStepDirectory(), -// ConfigVariables: m(), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), -// ), -// ConfigPlanChecks: resource.ConfigPlanChecks{ -// PostApplyPostRefresh: []plancheck.PlanCheck{ -// plancheck.ExpectEmptyPlan(), -// }, -// }, -// }, -// // change comment -// { -// ConfigDirectory: acc.ConfigurationSameAsStepN(1), -// ConfigVariables: m2, -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), -// ), -// }, -// }, -// }) -//} -// -//func TestAcc_Task_issue2036(t *testing.T) { -// name := acc.TestClient().Ids.Alpha() -// -// m := func() map[string]config.Variable { -// return map[string]config.Variable{ -// "name": config.StringVariable(name), -// "database": config.StringVariable(acc.TestDatabaseName), -// "schema": config.StringVariable(acc.TestSchemaName), -// "warehouse": config.StringVariable(acc.TestWarehouseName), -// } -// } -// -// resource.Test(t, resource.TestCase{ -// ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, -// PreCheck: func() { acc.TestAccPreCheck(t) }, -// TerraformVersionChecks: []tfversion.TerraformVersionCheck{ -// tfversion.RequireAbove(tfversion.Version1_5_0), -// }, -// CheckDestroy: acc.CheckDestroy(t, resources.Task), -// Steps: []resource.TestStep{ -// // create without when -// { -// ConfigDirectory: config.TestStepDirectory(), -// ConfigVariables: m(), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.test_task", "when", ""), -// ), -// }, -// // add when -// { -// ConfigDirectory: config.TestStepDirectory(), -// ConfigVariables: m(), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.test_task", "when", "TRUE"), -// ), -// }, -// // remove when -// { -// 
ConfigDirectory: acc.ConfigurationSameAsStepN(1), -// ConfigVariables: m(), -// Check: resource.ComposeTestCheckFunc( -// resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), -// resource.TestCheckResourceAttr("snowflake_task.test_task", "when", ""), -// ), -// }, -// }, -// }) -//} +func TestAcc_Task_issue2036(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + when := "TRUE" + + taskConfigModelWithoutWhen := model.TaskWithId("test", id, statement). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). + WithSqlStatement(statement) + + taskConfigModelWithWhen := model.TaskWithId("test", id, statement). + WithEnabled(r.BooleanTrue). + WithSchedule(schedule). + WithSqlStatement(statement). + WithWhen(when) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + // create without when + { + Config: config.FromModel(t, taskConfigModelWithoutWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasWhenString(""), + ), + }, + // add when + { + Config: config.FromModel(t, taskConfigModelWithWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithWhen.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasWhenString("TRUE"), + ), + }, + // remove when + { + Config: config.FromModel(t, taskConfigModelWithoutWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). + HasEnabledString(r.BooleanTrue). + HasWhenString(""), + ), + }, + }, + }) +} diff --git a/pkg/resources/task_parameters.go b/pkg/resources/task_parameters.go index e326c10fbc..0c6f24d66f 100644 --- a/pkg/resources/task_parameters.go +++ b/pkg/resources/task_parameters.go @@ -2,13 +2,14 @@ package resources import ( "context" + "strconv" + "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "strconv" - "strings" ) var ( @@ -84,7 +85,7 @@ func init() { // task parameters {Name: sdk.TaskParameterSuspendTaskAfterNumFailures, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension)."}, {Name: sdk.TaskParameterTaskAutoRetryAttempts, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the number of automatic task graph retry attempts. 
If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed."}, - {Name: sdk.TaskParameterUserTaskManagedInitialWarehouseSize, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToWarehouseSize), DiffSuppress: NormalizeAndCompare(sdk.ToWarehouseSize), Description: "Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse)"}, + {Name: sdk.TaskParameterUserTaskManagedInitialWarehouseSize, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToWarehouseSize), DiffSuppress: NormalizeAndCompare(sdk.ToWarehouseSize), ConflictsWith: []string{"warehouse"}, Description: "Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse)"}, {Name: sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Minimum amount of time between Triggered Task executions in seconds"}, {Name: sdk.TaskParameterUserTaskTimeoutMs, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds)."}, // session params @@ -154,6 +155,7 @@ func init() { Optional: true, ValidateDiagFunc: field.ValidateDiag, DiffSuppressFunc: field.DiffSuppress, + ConflictsWith: field.ConflictsWith, } } } diff --git a/pkg/resources/testdata/TestAcc_Task_issue2036/1/test.tf b/pkg/resources/testdata/TestAcc_Task_issue2036/1/test.tf deleted file mode 100644 index d095e18684..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2036/1/test.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "snowflake_task" "test_task" { - name = var.name - database = var.database - schema = var.schema - warehouse = var.warehouse - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" -} \ No newline at end of file diff --git a/pkg/resources/testdata/TestAcc_Task_issue2036/1/variables.tf b/pkg/resources/testdata/TestAcc_Task_issue2036/1/variables.tf deleted file mode 100644 index 01e8e1a797..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2036/1/variables.tf +++ /dev/null @@ -1,15 +0,0 @@ -variable "database" { - type = string -} - -variable "schema" { - type = string -} - -variable "warehouse" { - type = string -} - -variable "name" { - type = string -} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2036/2/test.tf b/pkg/resources/testdata/TestAcc_Task_issue2036/2/test.tf deleted file mode 100644 index 4c6e9d5521..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2036/2/test.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "snowflake_task" "test_task" { - name = var.name - database = var.database - schema = var.schema - warehouse = var.warehouse - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - when = "TRUE" -} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2036/2/variables.tf b/pkg/resources/testdata/TestAcc_Task_issue2036/2/variables.tf deleted file mode 100644 index 
01e8e1a797..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2036/2/variables.tf +++ /dev/null @@ -1,15 +0,0 @@ -variable "database" { - type = string -} - -variable "schema" { - type = string -} - -variable "warehouse" { - type = string -} - -variable "name" { - type = string -} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2207/1/test.tf b/pkg/resources/testdata/TestAcc_Task_issue2207/1/test.tf deleted file mode 100644 index 0d57ab1811..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2207/1/test.tf +++ /dev/null @@ -1,20 +0,0 @@ -resource "snowflake_task" "root_task" { - name = var.root_name - database = var.database - schema = var.schema - warehouse = var.warehouse - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" -} - -resource "snowflake_task" "child_task" { - name = var.child_name - database = snowflake_task.root_task.database - schema = snowflake_task.root_task.schema - warehouse = snowflake_task.root_task.warehouse - sql_statement = "SELECT 1" - enabled = true - after = [snowflake_task.root_task.name] - comment = var.comment -} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2207/1/variables.tf b/pkg/resources/testdata/TestAcc_Task_issue2207/1/variables.tf deleted file mode 100644 index fe59da5d99..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2207/1/variables.tf +++ /dev/null @@ -1,23 +0,0 @@ -variable "database" { - type = string -} - -variable "schema" { - type = string -} - -variable "warehouse" { - type = string -} - -variable "root_name" { - type = string -} - -variable "child_name" { - type = string -} - -variable "comment" { - type = string -} diff --git a/pkg/resources/user_parameters.go b/pkg/resources/user_parameters.go index 968f2c7c3c..05ecc15e0c 100644 --- a/pkg/resources/user_parameters.go +++ b/pkg/resources/user_parameters.go @@ -77,11 +77,12 @@ var ( ) type parameterDef[T ~string] struct { - Name T - Type schema.ValueType - Description string - DiffSuppress schema.SchemaDiffSuppressFunc - ValidateDiag schema.SchemaValidateDiagFunc + Name T + Type schema.ValueType + Description string + DiffSuppress schema.SchemaDiffSuppressFunc + ValidateDiag schema.SchemaValidateDiagFunc + ConflictsWith []string } func init() { @@ -159,6 +160,7 @@ func init() { Optional: true, ValidateDiagFunc: field.ValidateDiag, DiffSuppressFunc: field.DiffSuppress, + ConflictsWith: field.ConflictsWith, } } } diff --git a/pkg/schemas/task_gen.go b/pkg/schemas/task_gen.go index 6431bf93ff..97a80a0976 100644 --- a/pkg/schemas/task_gen.go +++ b/pkg/schemas/task_gen.go @@ -101,7 +101,11 @@ var ShowTaskSchema = map[string]*schema.Schema{ Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "finalize": { + "finalizer": { + Type: schema.TypeString, + Computed: true, + }, + "finalized_root_task": { Type: schema.TypeString, Computed: true, }, @@ -125,7 +129,9 @@ func TaskToSchema(task *sdk.Task) map[string]any { taskSchema["schema_name"] = task.SchemaName taskSchema["owner"] = task.Owner taskSchema["comment"] = task.Comment - taskSchema["warehouse"] = task.Warehouse + if task.Warehouse != nil { + taskSchema["warehouse"] = task.Warehouse.Name() + } taskSchema["schedule"] = task.Schedule taskSchema["predecessors"] = collections.Map(task.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName) taskSchema["state"] = string(task.State) @@ -141,10 +147,19 @@ func TaskToSchema(task *sdk.Task) map[string]any { taskSchema["config"] = task.Config taskSchema["budget"] = task.Budget taskSchema["last_suspended_reason"] = 
task.LastSuspendedReason + finalizer := "" + if task.TaskRelations.FinalizerTask != nil { + finalizer = task.TaskRelations.FinalizerTask.FullyQualifiedName() + } + finalizedRootTask := "" + if task.TaskRelations.FinalizedRootTask != nil { + finalizedRootTask = task.TaskRelations.FinalizedRootTask.FullyQualifiedName() + } taskSchema["task_relations"] = []any{ map[string]any{ - "predecessors": collections.Map(task.TaskRelations.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName), - "finalize": task.TaskRelations.FinalizerTask, + "predecessors": collections.Map(task.TaskRelations.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName), + "finalizer": finalizer, + "finalized_root_task": finalizedRootTask, }, } return taskSchema diff --git a/pkg/sdk/grants_impl.go b/pkg/sdk/grants_impl.go index d0399d605b..678f328bf9 100644 --- a/pkg/sdk/grants_impl.go +++ b/pkg/sdk/grants_impl.go @@ -403,11 +403,15 @@ func (v *grants) grantOwnershipOnTask(ctx context.Context, taskId SchemaObjectId return err } + if currentTask.Warehouse == nil { + return fmt.Errorf("no warehouse found to be attached to the task: %s", taskId.FullyQualifiedName()) + } + currentGrantsOnTaskWarehouse, err := v.client.Grants.Show(ctx, &ShowGrantOptions{ On: &ShowGrantsOn{ Object: &Object{ ObjectType: ObjectTypeWarehouse, - Name: NewAccountObjectIdentifier(currentTask.Warehouse), + Name: *currentTask.Warehouse, }, }, }) diff --git a/pkg/sdk/sql_builder.go b/pkg/sdk/sql_builder.go index bae8ef485c..79b27831fc 100644 --- a/pkg/sdk/sql_builder.go +++ b/pkg/sdk/sql_builder.go @@ -244,6 +244,7 @@ func (b sqlBuilder) parseInterface(v interface{}, tag reflect.StructTag) (sqlCla return sqlIdentifierClause{ key: sqlTag, value: v.(Identifier), + qm: b.getModifier(tag, "ddl", quoteModifierType, NoQuotes).(quoteModifier), em: b.getModifier(tag, "ddl", equalsModifierType, NoEquals).(equalsModifier), }, nil } @@ -330,6 +331,7 @@ func (b sqlBuilder) parseFieldStruct(field reflect.StructField, value reflect.Va return sqlIdentifierClause{ key: sqlTag, value: reflectedValue.(Identifier), + qm: b.getModifier(field.Tag, "ddl", quoteModifierType, NoQuotes).(quoteModifier), em: b.getModifier(field.Tag, "ddl", equalsModifierType, NoEquals).(equalsModifier), }, nil } @@ -400,6 +402,7 @@ func (b sqlBuilder) parseFieldSlice(field reflect.StructField, value reflect.Val if ok { listClauses = append(listClauses, sqlIdentifierClause{ value: identifier, + qm: b.getModifier(field.Tag, "ddl", quoteModifierType, NoQuotes).(quoteModifier), em: b.getModifier(field.Tag, "ddl", equalsModifierType, NoEquals).(equalsModifier), }) continue @@ -532,6 +535,7 @@ func (b sqlBuilder) parseField(field reflect.StructField, value reflect.Value) ( clause = sqlIdentifierClause{ key: sqlTag, value: reflectedValue.(Identifier), + qm: b.getModifier(field.Tag, "ddl", quoteModifierType, NoQuotes).(quoteModifier), em: b.getModifier(field.Tag, "ddl", equalsModifierType, NoEquals).(equalsModifier), } case "parameter": @@ -603,6 +607,7 @@ func (v sqlKeywordClause) String() string { type sqlIdentifierClause struct { key string value Identifier + qm quoteModifier em equalsModifier } @@ -616,6 +621,10 @@ func (v sqlIdentifierClause) String() string { } // else try to get the string value if v.key != "" { + if v.qm == SingleQuotes { + name = `'` + name + `'` + } + return v.em.Modify(v.key) + name } return name diff --git a/pkg/sdk/tasks_def.go b/pkg/sdk/tasks_def.go index 26214a521d..4329f295a8 100644 --- a/pkg/sdk/tasks_def.go +++ b/pkg/sdk/tasks_def.go @@ -27,8 +27,9 @@ func 
ToTaskState(s string) (TaskState, error) { } type TaskRelationsRepresentation struct { - Predecessors []string `json:"Predecessors"` - FinalizerTask string `json:"FinalizerTask"` + Predecessors []string `json:"Predecessors"` + FinalizerTask string `json:"FinalizerTask"` + FinalizedRootTask string `json:"FinalizedRootTask"` } func (r *TaskRelationsRepresentation) ToTaskRelations() (TaskRelations, error) { @@ -53,12 +54,21 @@ func (r *TaskRelationsRepresentation) ToTaskRelations() (TaskRelations, error) { taskRelations.FinalizerTask = &finalizerTask } + if len(r.FinalizedRootTask) > 0 { + finalizedRootTask, err := ParseSchemaObjectIdentifier(r.FinalizedRootTask) + if err != nil { + return TaskRelations{}, err + } + taskRelations.FinalizedRootTask = &finalizedRootTask + } + return taskRelations, nil } type TaskRelations struct { - Predecessors []SchemaObjectIdentifier - FinalizerTask *SchemaObjectIdentifier + Predecessors []SchemaObjectIdentifier + FinalizerTask *SchemaObjectIdentifier + FinalizedRootTask *SchemaObjectIdentifier } func ToTaskRelations(s string) (TaskRelations, error) { diff --git a/pkg/sdk/tasks_gen.go b/pkg/sdk/tasks_gen.go index a98e9eac46..f3b67f6330 100644 --- a/pkg/sdk/tasks_gen.go +++ b/pkg/sdk/tasks_gen.go @@ -106,7 +106,7 @@ type AlterTaskOptions struct { } type TaskSet struct { - Warehouse *AccountObjectIdentifier `ddl:"identifier,equals" sql:"WAREHOUSE"` + Warehouse *AccountObjectIdentifier `ddl:"identifier,equals,single_quotes" sql:"WAREHOUSE"` UserTaskManagedInitialWarehouseSize *WarehouseSize `ddl:"parameter,single_quotes" sql:"USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE"` Schedule *string `ddl:"parameter,single_quotes" sql:"SCHEDULE"` Config *string `ddl:"parameter,no_quotes" sql:"CONFIG"` diff --git a/pkg/sdk/tasks_gen_test.go b/pkg/sdk/tasks_gen_test.go index 53aa7d8ea4..c82690cc4d 100644 --- a/pkg/sdk/tasks_gen_test.go +++ b/pkg/sdk/tasks_gen_test.go @@ -328,7 +328,7 @@ func TestTasks_Alter(t *testing.T) { opts.Set = &TaskSet{ Warehouse: &warehouseId, } - assertOptsValidAndSQLEquals(t, opts, "ALTER TASK %s SET WAREHOUSE = %s", id.FullyQualifiedName(), warehouseId.FullyQualifiedName()) + assertOptsValidAndSQLEquals(t, opts, "ALTER TASK %s SET WAREHOUSE = '%s'", id.FullyQualifiedName(), warehouseId.FullyQualifiedName()) }) t.Run("alter set session parameter", func(t *testing.T) { diff --git a/pkg/sdk/tasks_impl_gen.go b/pkg/sdk/tasks_impl_gen.go index 0502a0f9f5..cb4bcd81f0 100644 --- a/pkg/sdk/tasks_impl_gen.go +++ b/pkg/sdk/tasks_impl_gen.go @@ -153,6 +153,11 @@ func GetRootTasks(v Tasks, ctx context.Context, id SchemaObjectIdentifier) ([]Ta return nil, err } + if task.TaskRelations.FinalizedRootTask != nil { + tasksToExamine.Push(*task.TaskRelations.FinalizedRootTask) + continue + } + predecessors := task.Predecessors if len(predecessors) == 0 { rootTasks = append(rootTasks, *task) diff --git a/pkg/sdk/testint/tasks_gen_integration_test.go b/pkg/sdk/testint/tasks_gen_integration_test.go index b89bdd2d43..de77c56c40 100644 --- a/pkg/sdk/testint/tasks_gen_integration_test.go +++ b/pkg/sdk/testint/tasks_gen_integration_test.go @@ -24,7 +24,7 @@ func TestInt_Tasks(t *testing.T) { errorNotificationIntegration, errorNotificationIntegrationCleanup := testClientHelper().NotificationIntegration.Create(t) t.Cleanup(errorNotificationIntegrationCleanup) - assertTask := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, warehouseName string) { + assertTask := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, warehouseId 
*sdk.AccountObjectIdentifier) { t.Helper() assertions.AssertThat(t, objectassert.TaskFromObject(t, task). HasNotEmptyCreatedOn(). @@ -34,7 +34,7 @@ func TestInt_Tasks(t *testing.T) { HasSchemaName(testClientHelper().Ids.SchemaId().Name()). HasOwner("ACCOUNTADMIN"). HasComment(""). - HasWarehouse(warehouseName). + HasWarehouse(warehouseId). HasSchedule(""). HasPredecessors(). HasState(sdk.TaskStateStarted). @@ -52,7 +52,7 @@ func TestInt_Tasks(t *testing.T) { ) } - assertTaskWithOptions := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, comment string, warehouse string, schedule string, condition string, allowOverlappingExecution bool, config string, predecessor *sdk.SchemaObjectIdentifier, errorIntegrationName *sdk.AccountObjectIdentifier) { + assertTaskWithOptions := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, comment string, warehouse *sdk.AccountObjectIdentifier, schedule string, condition string, allowOverlappingExecution bool, config string, predecessor *sdk.SchemaObjectIdentifier, errorIntegrationName *sdk.AccountObjectIdentifier) { t.Helper() asserts := objectassert.TaskFromObject(t, task). @@ -102,7 +102,7 @@ func TestInt_Tasks(t *testing.T) { HasId(""). HasOwner(""). HasComment(""). - HasWarehouse(""). + HasWarehouse(nil). HasPredecessors(). HasState(""). HasDefinition(""). @@ -242,7 +242,7 @@ func TestInt_Tasks(t *testing.T) { task, err := testClientHelper().Task.Show(t, id) require.NoError(t, err) - assertTask(t, task, id, "") + assertTask(t, task, id, nil) assertions.AssertThat(t, objectparametersassert.TaskParameters(t, id).HasAllDefaults()) }) @@ -261,7 +261,7 @@ func TestInt_Tasks(t *testing.T) { HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall), ) - assertTask(t, task, id, "") + assertTask(t, task, id, nil) }) t.Run("create task: complete case", func(t *testing.T) { @@ -287,7 +287,7 @@ func TestInt_Tasks(t *testing.T) { task, err := testClientHelper().Task.Show(t, id) require.NoError(t, err) - assertTaskWithOptions(t, task, id, "some comment", testClientHelper().Ids.WarehouseId().Name(), "10 MINUTE", `SYSTEM$STREAM_HAS_DATA('MYSTREAM')`, true, `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`, nil, sdk.Pointer(errorNotificationIntegration.ID())) + assertTaskWithOptions(t, task, id, "some comment", sdk.Pointer(testClientHelper().Ids.WarehouseId()), "10 MINUTE", `SYSTEM$STREAM_HAS_DATA('MYSTREAM')`, true, `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`, nil, sdk.Pointer(errorNotificationIntegration.ID())) assertions.AssertThat(t, objectparametersassert.TaskParameters(t, id). HasJsonIndent(4). HasUserTaskTimeoutMs(500). @@ -310,7 +310,7 @@ func TestInt_Tasks(t *testing.T) { task, err := testClientHelper().Task.Show(t, id) require.NoError(t, err) - assertTaskWithOptions(t, task, id, "", "", "", "", false, "", &rootTaskId, nil) + assertTaskWithOptions(t, task, id, "", nil, "", "", false, "", &rootTaskId, nil) }) t.Run("create task: with after and finalizer", func(t *testing.T) { @@ -336,6 +336,13 @@ func TestInt_Tasks(t *testing.T) { FinalizerTask: &finalizerId, }), ) + + assertions.AssertThat(t, objectassert.Task(t, finalizerId). + HasTaskRelations(sdk.TaskRelations{ + Predecessors: []sdk.SchemaObjectIdentifier{}, + FinalizedRootTask: &rootTaskId, + }), + ) }) // Tested graph @@ -544,7 +551,7 @@ func TestInt_Tasks(t *testing.T) { createdOn := task.CreatedOn assertions.AssertThat(t, objectassert.TaskFromObject(t, task). - HasWarehouse(testClientHelper().Ids.WarehouseId().Name()). 
+ HasWarehouse(sdk.Pointer(testClientHelper().Ids.WarehouseId())). HasSchedule("10 MINUTES"). HasConfig(`{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`). HasAllowOverlappingExecution(true). @@ -565,7 +572,7 @@ func TestInt_Tasks(t *testing.T) { require.NoError(t, err) assertions.AssertThat(t, objectassert.TaskFromObject(t, alteredTask). - HasWarehouse(""). + HasWarehouse(nil). HasSchedule(""). HasConfig(""). HasAllowOverlappingExecution(false). @@ -901,7 +908,7 @@ func TestInt_Tasks(t *testing.T) { returnedTask, err := client.Tasks.Describe(ctx, task.ID()) require.NoError(t, err) - assertTask(t, returnedTask, task.ID(), testClientHelper().Ids.WarehouseId().Name()) + assertTask(t, returnedTask, task.ID(), sdk.Pointer(testClientHelper().Ids.WarehouseId())) }) t.Run("execute task: default", func(t *testing.T) { From 28bf6a5cb6a95fa2d9cb42a558710bb4ad08ee58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Thu, 3 Oct 2024 16:40:03 +0200 Subject: [PATCH 06/12] wip --- .../assert/objectassert/task_snowflake_gen.go | 24 ---- .../config/model/task_model_ext.go | 3 +- .../config/model/task_model_gen.go | 8 +- pkg/resources/task.go | 7 +- pkg/resources/task_acceptance_test.go | 135 +++++++----------- 5 files changed, 64 insertions(+), 113 deletions(-) diff --git a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go index cbabb0ca66..2bfff8bb86 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go @@ -135,18 +135,6 @@ func (t *TaskAssert) HasSchedule(expected string) *TaskAssert { return t } -// TODO: -//func (t *TaskAssert) HasPredecessors(expected []sdk.SchemaObjectIdentifier) *TaskAssert { -// t.AddAssertion(func(t *testing.T, o *sdk.Task) error { -// t.Helper() -// if o.Predecessors != expected { -// return fmt.Errorf("expected predecessors: %v; got: %v", expected, o.Predecessors) -// } -// return nil -// }) -// return t -//} - func (t *TaskAssert) HasState(expected sdk.TaskState) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() @@ -263,18 +251,6 @@ func (t *TaskAssert) HasBudget(expected string) *TaskAssert { return t } -// TODO: -//func (t *TaskAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskAssert { -// t.AddAssertion(func(t *testing.T, o *sdk.Task) error { -// t.Helper() -// if o.TaskRelations != expected { -// return fmt.Errorf("expected task relations: %v; got: %v", expected, o.TaskRelations) -// } -// return nil -// }) -// return t -//} - func (t *TaskAssert) HasLastSuspendedReason(expected string) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go index 37560fb839..0ed435e295 100644 --- a/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go @@ -7,11 +7,12 @@ import ( tfconfig "github.com/hashicorp/terraform-plugin-testing/config" ) -func TaskWithId(resourceName string, id sdk.SchemaObjectIdentifier, sqlStatement string) *TaskModel { +func TaskWithId(resourceName string, id sdk.SchemaObjectIdentifier, enabled bool, sqlStatement string) *TaskModel { t := &TaskModel{ResourceModelMeta: config.Meta(resourceName, resources.Task)} t.WithDatabase(id.DatabaseName()) 
t.WithSchema(id.SchemaName()) t.WithName(id.Name()) + t.WithEnabled(enabled) t.WithSqlStatement(sqlStatement) return t } diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go index 4cbc0f8c8d..0274125b96 100644 --- a/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go @@ -94,12 +94,14 @@ type TaskModel struct { func Task( resourceName string, database string, + enabled bool, name string, schema string, sqlStatement string, ) *TaskModel { t := &TaskModel{ResourceModelMeta: config.Meta(resourceName, resources.Task)} t.WithDatabase(database) + t.WithEnabled(enabled) t.WithName(name) t.WithSchema(schema) t.WithSqlStatement(sqlStatement) @@ -108,12 +110,14 @@ func Task( func TaskWithDefaultMeta( database string, + enabled bool, name string, schema string, sqlStatement string, ) *TaskModel { t := &TaskModel{ResourceModelMeta: config.DefaultMeta(resources.Task)} t.WithDatabase(database) + t.WithEnabled(enabled) t.WithName(name) t.WithSchema(schema) t.WithSqlStatement(sqlStatement) @@ -221,8 +225,8 @@ func (t *TaskModel) WithEnableUnloadPhysicalTypeOptimization(enableUnloadPhysica return t } -func (t *TaskModel) WithEnabled(enabled string) *TaskModel { - t.Enabled = tfconfig.StringVariable(enabled) +func (t *TaskModel) WithEnabled(enabled bool) *TaskModel { + t.Enabled = tfconfig.BoolVariable(enabled) return t } diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 2bcb967fe9..c7eb482828 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -315,7 +315,7 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag if err := waitForTaskStart(ctx, client, id); err != nil { return diag.Diagnostics{ { - Severity: diag.Warning, + Severity: diag.Error, Summary: "Failed to start the task", Detail: fmt.Sprintf("Id: %s, err: %s", id.FullyQualifiedName(), err), }, @@ -504,12 +504,13 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag } } - if d.Get("enable").(bool) { + if d.Get("enabled").(bool) { log.Printf("Resuming the task in handled update") if err := waitForTaskStart(ctx, client, id); err != nil { return diag.FromErr(fmt.Errorf("failed to resume task %s, err = %w", id.FullyQualifiedName(), err)) } } + // We don't process the else case, because the task was already suspended at the beginning of the Update method. 
log.Printf("Resuming the root tasks: %v", collections.Map(tasksToResume, sdk.SchemaObjectIdentifier.Name)) if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { @@ -577,7 +578,7 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { } if errs := errors.Join( - d.Set("enable", task.State == sdk.TaskStateStarted), + d.Set("enabled", task.State == sdk.TaskStateStarted), d.Set("warehouse", warehouseId), d.Set("schedule", task.Schedule), d.Set("when", task.Condition), diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 6d25ba2239..4be98cdac7 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -35,7 +35,7 @@ func TestAcc_Task_Basic(t *testing.T) { id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() statement := "SELECT 1" - configModel := model.TaskWithId("test", id, statement) + configModel := model.TaskWithId("test", id, false, statement) resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, @@ -53,7 +53,7 @@ func TestAcc_Task_Basic(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanDefault). + HasEnabledString(r.BooleanFalse). HasWarehouseString(""). HasScheduleString(""). HasConfigString(""). @@ -134,8 +134,7 @@ func TestAcc_Task_Complete(t *testing.T) { taskConfigVariableValue := "$" + taskConfig comment := random.Comment() condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` - configModel := model.TaskWithId("test", id, statement). - WithEnabled(r.BooleanTrue). + configModel := model.TaskWithId("test", id, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). WithSchedule("10 MINUTES"). WithConfigValue(configvariable.StringVariable(taskConfigVariableValue)). @@ -231,7 +230,7 @@ func TestAcc_Task_Updates(t *testing.T) { id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() statement := "SELECT 1" - basicConfigModel := model.TaskWithId("test", id, statement) + basicConfigModel := model.TaskWithId("test", id, false, statement) errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.Create(t) t.Cleanup(errorNotificationIntegrationCleanup) @@ -243,8 +242,7 @@ func TestAcc_Task_Updates(t *testing.T) { taskConfigVariableValue := "$" + taskConfig comment := random.Comment() condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` - completeConfigModel := model.TaskWithId("test", id, statement). - WithEnabled(r.BooleanTrue). + completeConfigModel := model.TaskWithId("test", id, true, statement). // TODO(SNOW-1348116 - decide in next prs): This won't work because alter set warehouse is broken // we could actually make it work by enabling only uppercased ids in the warehouse field until it's fixed. // WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). @@ -271,7 +269,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanDefault). + HasEnabledString(r.BooleanFalse). HasWarehouseString(""). HasScheduleString(""). HasConfigString(""). @@ -357,7 +355,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanDefault). + HasEnabledString(r.BooleanFalse). HasWarehouseString(""). HasScheduleString(""). HasConfigString(""). 
@@ -402,8 +400,10 @@ func TestAcc_Task_AllParameters(t *testing.T) { id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() statement := "SELECT 1" - configModel := model.TaskWithId("test", id, statement) - configModelWithAllParametersSet := model.TaskWithId("test", id, statement). + configModel := model.TaskWithId("test", id, true, statement). + WithSchedule("5 MINUTES") + configModelWithAllParametersSet := model.TaskWithId("test", id, true, statement). + WithSchedule("5 MINUTES"). WithSuspendTaskAfterNumFailures(15). WithTaskAutoRetryAttempts(15). WithUserTaskManagedInitialWarehouseSizeEnum(sdk.WarehouseSizeXSmall). @@ -698,12 +698,10 @@ func TestAcc_Task_Enabled(t *testing.T) { id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() statement := "SELECT 1" - configModelEnabled := model.TaskWithId("test", id, statement). - WithSchedule("5 MINUTES"). - WithEnabled(r.BooleanTrue) - configModelDisabled := model.TaskWithId("test", id, statement). - WithSchedule("5 MINUTES"). - WithEnabled(r.BooleanFalse) + configModelEnabled := model.TaskWithId("test", id, true, statement). + WithSchedule("5 MINUTES") + configModelDisabled := model.TaskWithId("test", id, false, statement). + WithSchedule("5 MINUTES") resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, @@ -754,29 +752,23 @@ func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { statement := "SELECT 1" schedule := "5 MINUTES" - firstTaskStandaloneModel := model.TaskWithId("main_task", id, statement). + firstTaskStandaloneModel := model.TaskWithId("main_task", id, true, statement). WithSchedule(schedule). - WithEnabled(r.BooleanTrue). WithSuspendTaskAfterNumFailures(1) - secondTaskStandaloneModel := model.TaskWithId("second_task", id2, statement). - WithSchedule(schedule). - WithEnabled(r.BooleanTrue) + secondTaskStandaloneModel := model.TaskWithId("second_task", id2, true, statement). + WithSchedule(schedule) - rootTaskModel := model.TaskWithId("main_task", id, statement). + rootTaskModel := model.TaskWithId("main_task", id, true, statement). WithSchedule(schedule). - WithEnabled(r.BooleanTrue). WithSuspendTaskAfterNumFailures(2) - childTaskModel := model.TaskWithId("second_task", id2, statement). - WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(id.FullyQualifiedName()))). - WithEnabled(r.BooleanTrue) + childTaskModel := model.TaskWithId("second_task", id2, true, statement). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(id.FullyQualifiedName()))) childTaskModel.SetDependsOn([]string{rootTaskModel.ResourceReference()}) - firstTaskStandaloneModelDisabled := model.TaskWithId("main_task", id, statement). - WithSchedule(schedule). - WithEnabled(r.BooleanFalse) - secondTaskStandaloneModelDisabled := model.TaskWithId("second_task", id2, statement). - WithSchedule(schedule). - WithEnabled(r.BooleanFalse) + firstTaskStandaloneModelDisabled := model.TaskWithId("main_task", id, false, statement). + WithSchedule(schedule) + secondTaskStandaloneModelDisabled := model.TaskWithId("second_task", id2, false, statement). + WithSchedule(schedule) secondTaskStandaloneModelDisabled.SetDependsOn([]string{firstTaskStandaloneModelDisabled.ResourceReference()}) resource.Test(t, resource.TestCase{ @@ -856,29 +848,23 @@ func TestAcc_Task_ConvertStandaloneTaskToFinalizer(t *testing.T) { statement := "SELECT 1" schedule := "5 MINUTES" - firstTaskStandaloneModel := model.TaskWithId("main_task", rootTaskId, statement). 
+ firstTaskStandaloneModel := model.TaskWithId("main_task", rootTaskId, true, statement). WithSchedule(schedule). - WithEnabled(r.BooleanTrue). WithSuspendTaskAfterNumFailures(1) - secondTaskStandaloneModel := model.TaskWithId("second_task", finalizerTaskId, statement). - WithSchedule(schedule). - WithEnabled(r.BooleanTrue) + secondTaskStandaloneModel := model.TaskWithId("second_task", finalizerTaskId, true, statement). + WithSchedule(schedule) - rootTaskModel := model.TaskWithId("main_task", rootTaskId, statement). + rootTaskModel := model.TaskWithId("main_task", rootTaskId, true, statement). WithSchedule(schedule). - WithEnabled(r.BooleanTrue). WithSuspendTaskAfterNumFailures(2) - childTaskModel := model.TaskWithId("second_task", finalizerTaskId, statement). - WithFinalize(rootTaskId.FullyQualifiedName()). - WithEnabled(r.BooleanTrue) + childTaskModel := model.TaskWithId("second_task", finalizerTaskId, true, statement). + WithFinalize(rootTaskId.FullyQualifiedName()) childTaskModel.SetDependsOn([]string{rootTaskModel.ResourceReference()}) - firstTaskStandaloneModelDisabled := model.TaskWithId("main_task", rootTaskId, statement). - WithSchedule(schedule). - WithEnabled(r.BooleanFalse) - secondTaskStandaloneModelDisabled := model.TaskWithId("second_task", finalizerTaskId, statement). - WithSchedule(schedule). - WithEnabled(r.BooleanFalse) + firstTaskStandaloneModelDisabled := model.TaskWithId("main_task", rootTaskId, false, statement). + WithSchedule(schedule) + secondTaskStandaloneModelDisabled := model.TaskWithId("second_task", finalizerTaskId, false, statement). + WithSchedule(schedule) secondTaskStandaloneModelDisabled.SetDependsOn([]string{firstTaskStandaloneModelDisabled.ResourceReference()}) resource.Test(t, resource.TestCase{ @@ -955,28 +941,22 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() statement := "SELECT 1" schedule := "5 MINUTES" - rootTaskConfigModel := model.TaskWithId("root", rootId, statement). - WithEnabled(r.BooleanTrue). + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). WithSchedule(schedule). WithSuspendTaskAfterNumFailures(1) - childTaskConfigModel := model.TaskWithId("child", childId, statement). - WithEnabled(r.BooleanTrue). + childTaskConfigModel := model.TaskWithId("child", childId, true, statement). WithSchedule(schedule) - rootTaskConfigModelAfterSuspendFailuresUpdate := model.TaskWithId("root", rootId, statement). - WithEnabled(r.BooleanTrue). + rootTaskConfigModelAfterSuspendFailuresUpdate := model.TaskWithId("root", rootId, true, statement). WithSchedule(schedule). WithSuspendTaskAfterNumFailures(2) - childTaskConfigModelWithAfter := model.TaskWithId("child", childId, statement). - WithEnabled(r.BooleanTrue). + childTaskConfigModelWithAfter := model.TaskWithId("child", childId, true, statement). WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))) childTaskConfigModelWithAfter.SetDependsOn([]string{rootTaskConfigModelAfterSuspendFailuresUpdate.ResourceReference()}) - rootTaskConfigModelDisabled := model.TaskWithId("root", rootId, statement). - WithEnabled(r.BooleanFalse). + rootTaskConfigModelDisabled := model.TaskWithId("root", rootId, false, statement). WithSchedule(schedule) - childTaskConfigModelDisabled := model.TaskWithId("child", childId, statement). - WithEnabled(r.BooleanFalse). + childTaskConfigModelDisabled := model.TaskWithId("child", childId, false, statement). 
WithSchedule(schedule) childTaskConfigModelDisabled.SetDependsOn([]string{rootTaskConfigModelDisabled.ResourceReference()}) @@ -1057,22 +1037,19 @@ func TestAcc_Task_WithAfter(t *testing.T) { statement := "SELECT 1" schedule := "5 MINUTES" - rootTaskConfigModel := model.TaskWithId("root", rootId, statement). + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithSchedule(schedule). WithSqlStatement(statement) - childTaskConfigModelWithAfter := model.TaskWithId("child", childId, statement). + childTaskConfigModelWithAfter := model.TaskWithId("child", childId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). WithSqlStatement(statement) childTaskConfigModelWithAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) - childTaskConfigModelWithoutAfter := model.TaskWithId("child", childId, statement). + childTaskConfigModelWithoutAfter := model.TaskWithId("child", childId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithSchedule(schedule). WithSqlStatement(statement) childTaskConfigModelWithoutAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) @@ -1120,22 +1097,19 @@ func TestAcc_Task_WithFinalizer(t *testing.T) { statement := "SELECT 1" schedule := "5 MINUTES" - rootTaskConfigModel := model.TaskWithId("root", rootId, statement). + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithSchedule(schedule). WithSqlStatement(statement) - childTaskConfigModelWithFinalizer := model.TaskWithId("child", childId, statement). + childTaskConfigModelWithFinalizer := model.TaskWithId("child", childId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithFinalize(rootId.FullyQualifiedName()). WithSqlStatement(statement) childTaskConfigModelWithFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) - childTaskConfigModelWithoutFinalizer := model.TaskWithId("child", childId, statement). + childTaskConfigModelWithoutFinalizer := model.TaskWithId("child", childId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithSchedule(schedule). WithSqlStatement(statement) childTaskConfigModelWithoutFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) @@ -1183,23 +1157,20 @@ func TestAcc_Task_issue2207(t *testing.T) { statement := "SELECT 1" schedule := "5 MINUTES" - rootTaskConfigModel := model.TaskWithId("root", rootId, statement). + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithSchedule(schedule). WithSqlStatement(statement) - childTaskConfigModel := model.TaskWithId("child", childId, statement). + childTaskConfigModel := model.TaskWithId("child", childId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). WithComment("abc"). 
WithSqlStatement(statement) childTaskConfigModel.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) - childTaskConfigModelWithDifferentComment := model.TaskWithId("child", childId, statement). + childTaskConfigModelWithDifferentComment := model.TaskWithId("child", childId, true, statement). WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithEnabled(r.BooleanTrue). WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). WithComment("def"). WithSqlStatement(statement) @@ -1256,13 +1227,11 @@ func TestAcc_Task_issue2036(t *testing.T) { schedule := "5 MINUTES" when := "TRUE" - taskConfigModelWithoutWhen := model.TaskWithId("test", id, statement). - WithEnabled(r.BooleanTrue). + taskConfigModelWithoutWhen := model.TaskWithId("test", id, true, statement). WithSchedule(schedule). WithSqlStatement(statement) - taskConfigModelWithWhen := model.TaskWithId("test", id, statement). - WithEnabled(r.BooleanTrue). + taskConfigModelWithWhen := model.TaskWithId("test", id, true, statement). WithSchedule(schedule). WithSqlStatement(statement). WithWhen(when) From 62adccb771be11faeea226dd4b4b4c29fcea2833 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Fri, 4 Oct 2024 11:11:38 +0200 Subject: [PATCH 07/12] wip --- .../resourceassert/task_resource_ext.go | 9 +++- pkg/acceptance/helpers/grant_client.go | 28 ----------- .../collections/collection_helpers.go | 6 +-- .../collections/collection_helpers_test.go | 48 +++++++++++++++++++ pkg/resources/task.go | 38 +++++---------- pkg/resources/task_acceptance_test.go | 42 +++++++++------- pkg/sdk/sql_builder.go | 9 ---- pkg/sdk/tasks_def.go | 12 ++--- pkg/sdk/tasks_dto_builders_gen.go | 12 ++--- pkg/sdk/tasks_dto_gen.go | 34 ++++++------- pkg/sdk/tasks_gen.go | 42 ++++++++-------- pkg/sdk/tasks_gen_test.go | 6 +-- pkg/sdk/tasks_impl_gen.go | 32 ++++++------- pkg/sdk/tasks_validations_gen.go | 14 +++--- pkg/sdk/testint/tasks_gen_integration_test.go | 12 ++--- 15 files changed, 177 insertions(+), 167 deletions(-) diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go index 5e4ff905b0..ded3d9a83d 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go @@ -1,12 +1,17 @@ package resourceassert import ( + "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "strconv" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" ) -func (t *TaskResourceAssert) HasAfterLen(len int) *TaskResourceAssert { - t.AddAssertion(assert.ValueSet("after.#", strconv.FormatInt(int64(len), 10))) +func (t *TaskResourceAssert) HasAfterIds(ids ...sdk.SchemaObjectIdentifier) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("after.#", strconv.FormatInt(int64(len(ids)), 10))) + for i, id := range ids { + t.AddAssertion(assert.ValueSet(fmt.Sprintf("after.%d", i), id.FullyQualifiedName())) + } return t } diff --git a/pkg/acceptance/helpers/grant_client.go b/pkg/acceptance/helpers/grant_client.go index 9ca7f6bcbc..4fd7791bfb 100644 --- a/pkg/acceptance/helpers/grant_client.go +++ b/pkg/acceptance/helpers/grant_client.go @@ -74,34 +74,6 @@ func (c *GrantClient) RevokePrivilegesOnSchemaObjectFromAccountRole( require.NoError(t, err) } -func (c *GrantClient) GrantPrivilegesOnWarehouseToAccountRole( - t *testing.T, - accountRoleId 
sdk.AccountObjectIdentifier, - warehouseId sdk.AccountObjectIdentifier, - privileges []sdk.AccountObjectPrivilege, - withGrantOption bool, -) { - t.Helper() - ctx := context.Background() - - err := c.client().GrantPrivilegesToAccountRole( - ctx, - &sdk.AccountRoleGrantPrivileges{ - AccountObjectPrivileges: privileges, - }, - &sdk.AccountRoleGrantOn{ - AccountObject: &sdk.GrantOnAccountObject{ - Warehouse: &warehouseId, - }, - }, - accountRoleId, - &sdk.GrantPrivilegesToAccountRoleOptions{ - WithGrantOption: sdk.Bool(withGrantOption), - }, - ) - require.NoError(t, err) -} - func (c *GrantClient) GrantPrivilegesOnSchemaObjectToAccountRole( t *testing.T, accountRoleId sdk.AccountObjectIdentifier, diff --git a/pkg/internal/collections/collection_helpers.go b/pkg/internal/collections/collection_helpers.go index f0ed9be9aa..9f07b94fe5 100644 --- a/pkg/internal/collections/collection_helpers.go +++ b/pkg/internal/collections/collection_helpers.go @@ -23,15 +23,15 @@ func Map[T any, R any](collection []T, mapper func(T) R) []R { return result } -// TODO: Test func MapErr[T any, R any](collection []T, mapper func(T) (R, error)) ([]R, error) { result := make([]R, len(collection)) + errs := make([]error, 0) for i, elem := range collection { value, err := mapper(elem) if err != nil { - return nil, err + errs = append(errs, err) } result[i] = value } - return result, nil + return result, errors.Join(errs...) } diff --git a/pkg/internal/collections/collection_helpers_test.go b/pkg/internal/collections/collection_helpers_test.go index 87260e1113..ebebe35348 100644 --- a/pkg/internal/collections/collection_helpers_test.go +++ b/pkg/internal/collections/collection_helpers_test.go @@ -1,6 +1,9 @@ package collections import ( + "errors" + "fmt" + "github.com/stretchr/testify/assert" "strings" "testing" @@ -58,3 +61,48 @@ func Test_Map(t *testing.T) { }) }) } + +func Test_MapErr(t *testing.T) { + t.Run("basic mapping", func(t *testing.T) { + stringSlice := []string{"1", "22", "333"} + stringLenSlice, err := MapErr(stringSlice, func(s string) (int, error) { return len(s), nil }) + assert.NoError(t, err) + assert.Equal(t, stringLenSlice, []int{1, 2, 3}) + }) + + t.Run("basic mapping - multiple errors", func(t *testing.T) { + stringSlice := []string{"1", "22", "333"} + stringLenSlice, err := MapErr(stringSlice, func(s string) (int, error) { + if s == "1" { + return -1, fmt.Errorf("error: 1") + } + if s == "22" { + return -1, fmt.Errorf("error: 22") + } + return len(s), nil + }) + assert.Equal(t, stringLenSlice, []int{-1, -1, 3}) + assert.ErrorContains(t, err, errors.Join(fmt.Errorf("error: 1"), fmt.Errorf("error: 22")).Error()) + }) + + t.Run("validation: empty slice", func(t *testing.T) { + stringSlice := make([]string, 0) + stringLenSlice, err := MapErr(stringSlice, func(s string) (int, error) { return len(s), nil }) + assert.NoError(t, err) + assert.Equal(t, stringLenSlice, []int{}) + }) + + t.Run("validation: nil slice", func(t *testing.T) { + var stringSlice []string = nil + stringLenSlice, err := MapErr(stringSlice, func(s string) (int, error) { return len(s), nil }) + assert.NoError(t, err) + assert.Equal(t, stringLenSlice, []int{}) + }) + + t.Run("validation: nil mapping function", func(t *testing.T) { + assert.PanicsWithError(t, "runtime error: invalid memory address or nil pointer dereference", func() { + stringSlice := []string{"1", "22", "333"} + _, _ = MapErr[string, int](stringSlice, nil) + }) + }) +} diff --git a/pkg/resources/task.go b/pkg/resources/task.go index c7eb482828..6e2ed52a93 100644 --- 
a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -22,8 +22,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -// TODO: Go through descriptions - var taskSchema = map[string]*schema.Schema{ "database": { Type: schema.TypeString, @@ -50,28 +48,27 @@ var taskSchema = map[string]*schema.Schema{ Type: schema.TypeBool, Required: true, DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShowWithMapping("state", func(state any) any { - log.Printf("The value is diff suppress for state is: %v\n", state) stateEnum, err := sdk.ToTaskState(state.(string)) if err != nil { return false } return stateEnum == sdk.TaskStateStarted }), - Description: "Specifies if the task should be started (enabled) after creation or should remain suspended (default).", + Description: "Specifies if the task should be started or suspended.", }, "warehouse": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: IsValidIdentifier[sdk.AccountObjectIdentifier](), DiffSuppressFunc: suppressIdentifierQuoting, - Description: "The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)", + Description: "The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. Due to Snowflake limitations warehouse identifier can consist of only upper-cased letters. (Conflicts with user_task_managed_initial_warehouse_size)", ConflictsWith: []string{"user_task_managed_initial_warehouse_size"}, }, "schedule": { Type: schema.TypeString, Optional: true, DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("schedule"), - Description: "The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with finalize and after)", + Description: "The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflicts with finalize and after)", ConflictsWith: []string{"finalize", "after"}, }, "config": { @@ -79,12 +76,11 @@ var taskSchema = map[string]*schema.Schema{ Optional: true, DiffSuppressFunc: SuppressIfAny( IgnoreChangeToCurrentSnowflakeValueInShow("config"), + // TODO(SNOW-1348116 - next pr): Currently config has to be passed with $$ prefix and suffix. The best solution would be to put there only json, so it could be retrieved from file, etc. Move $$ adding to the SDK. func(k, oldValue, newValue string, d *schema.ResourceData) bool { return strings.Trim(oldValue, "$") == strings.Trim(newValue, "$") }, ), - // TODO: it could be retrieved with system function and show/desc (which should be used?) - // TODO: Doc request: there's no schema for JSON config format Description: "Specifies a string representation of key value pairs that can be accessed by all tasks in the task graph. Must be in JSON format.", }, "allow_overlapping_execution": { @@ -115,7 +111,7 @@ var taskSchema = map[string]*schema.Schema{ suppressIdentifierQuoting, IgnoreChangeToCurrentSnowflakeValueInShow("task_relations.0.finalized_root_task"), ), - Description: blocklistedCharactersFieldDescription("TODO"), + Description: blocklistedCharactersFieldDescription("Specifies the name of a root task that the finalizer task is associated with. Finalizer tasks run after all other tasks in the task graph run to completion. You can define the SQL of a finalizer task to handle notifications and the release and cleanup of resources that a task graph uses. 
For more information, see [Release and cleanup of task graphs](https://docs.snowflake.com/en/user-guide/tasks-graphs.html#label-finalizer-task)."), ConflictsWith: []string{"schedule", "after"}, }, "after": { @@ -126,7 +122,7 @@ var taskSchema = map[string]*schema.Schema{ ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), }, Optional: true, - Description: blocklistedCharactersFieldDescription("Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies."), + Description: blocklistedCharactersFieldDescription("Specifies one or more predecessor tasks for the current task. Use this option to [create a DAG](https://docs.snowflake.com/en/user-guide/tasks-graphs.html#label-task-dag) of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies."), ConflictsWith: []string{"schedule", "finalize"}, }, "when": { @@ -228,7 +224,7 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag } if v, ok := d.GetOk("schedule"); ok { - req.WithSchedule(v.(string)) // TODO: What about cron, how do we track changed (only through show) + req.WithSchedule(v.(string)) } if v, ok := d.GetOk("config"); ok { @@ -243,13 +239,12 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag req.WithAllowOverlappingExecution(parsedBool) } - // TODO: Decide on name (error_notification_integration ?) if v, ok := d.GetOk("error_integration"); ok { notificationIntegrationId, err := sdk.ParseAccountObjectIdentifier(v.(string)) if err != nil { return diag.FromErr(err) } - req.WithErrorNotificationIntegration(notificationIntegrationId) + req.WithErrorIntegration(notificationIntegrationId) } if v, ok := d.GetOk("comment"); ok { @@ -257,7 +252,6 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag } if v, ok := d.GetOk("finalize"); ok { - // TODO: Create with finalize rootTaskId, err := sdk.ParseSchemaObjectIdentifier(v.(string)) if err != nil { return diag.FromErr(err) @@ -278,7 +272,7 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag req.WithFinalize(rootTaskId) } - if v, ok := d.GetOk("after"); ok { // TODO: Should after take in task names or fully qualified names? + if v, ok := d.GetOk("after"); ok { after := expandStringList(v.(*schema.Set).List()) precedingTasks := make([]sdk.SchemaObjectIdentifier, 0) for _, parentTaskIdString := range after { @@ -286,7 +280,7 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag if err != nil { return diag.FromErr(err) } - resumeTasks, err := client.Tasks.SuspendRootTasks(ctx, parentTaskId, id) // TODO: What if this fails and only half of the tasks are suspended? + resumeTasks, err := client.Tasks.SuspendRootTasks(ctx, parentTaskId, id) tasksToResume = append(tasksToResume, resumeTasks...) 
if err != nil { return diag.FromErr(sdk.JoinErrors(err)) @@ -308,7 +302,7 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag return diag.FromErr(err) } - // TODO: State upgrader for "id" + // TODO(SNOW-1348116 - next pr): State upgrader for "id" (and potentially other fields) d.SetId(helpers.EncodeResourceIdentifier(id)) if d.Get("enabled").(bool) { @@ -321,8 +315,7 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag }, } } - // TODO: Check documentation - // Tasks are created as suspended + // Else case not handled, because tasks are created as suspended (https://docs.snowflake.com/en/sql-reference/sql/create-task; "important" section) } if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { @@ -339,15 +332,11 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag return diag.FromErr(err) } - // TODO: Fix the order of actions - // TODO: Move suspending etc. to SDK - task, err := client.Tasks.ShowByID(ctx, id) if err != nil { return diag.FromErr(err) } - // TODO: Should it be defer ? tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, id, id) if err != nil { return diag.FromErr(sdk.JoinErrors(err)) @@ -368,7 +357,7 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag stringAttributeUpdate(d, "schedule", &set.Schedule, &unset.Schedule), stringAttributeUpdate(d, "config", &set.Config, &unset.Config), booleanStringAttributeUpdate(d, "allow_overlapping_execution", &set.AllowOverlappingExecution, &unset.AllowOverlappingExecution), - accountObjectIdentifierAttributeUpdate(d, "error_integration", &set.ErrorNotificationIntegration, &unset.ErrorIntegration), // TODO: name inconsistency + accountObjectIdentifierAttributeUpdate(d, "error_integration", &set.ErrorIntegration, &unset.ErrorIntegration), stringAttributeUpdate(d, "comment", &set.Comment, &unset.Comment), ) if err != nil { @@ -472,7 +461,6 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag } for _, addedTaskId := range addedTaskIds { - // TODO: Look into suspend root tasks function addedTasksToResume, err := client.Tasks.SuspendRootTasks(ctx, addedTaskId, sdk.NewSchemaObjectIdentifier("", "", "")) tasksToResume = append(tasksToResume, addedTasksToResume...) if err != nil { diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 4be98cdac7..94fe877fa6 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -26,6 +26,8 @@ import ( ) // TODO(SNOW-1348116 - next pr): More tests for complicated DAGs +// TODO(SNOW-1348116 - next pr): Test for stored procedures passed to sql_statement (decide on name) +// TODO(SNOW-1348116 - next pr): Test with cron schedule func TestAcc_Task_Basic(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) @@ -61,7 +63,7 @@ func TestAcc_Task_Basic(t *testing.T) { HasErrorIntegrationString(""). HasCommentString(""). HasFinalizeString(""). - HasAfterLen(0). + HasAfterIds(). HasWhenString(""). HasSqlStatementString(statement), resourceshowoutputassert.TaskShowOutput(t, configModel.ResourceReference()). 
@@ -232,6 +234,10 @@ func TestAcc_Task_Updates(t *testing.T) { statement := "SELECT 1" basicConfigModel := model.TaskWithId("test", id, false, statement) + // New warehouse created, because the common one has lower-case letters that won't work + warehouse, warehouseCleanup := acc.TestClient().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) + errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.Create(t) t.Cleanup(errorNotificationIntegrationCleanup) @@ -243,9 +249,7 @@ func TestAcc_Task_Updates(t *testing.T) { comment := random.Comment() condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` completeConfigModel := model.TaskWithId("test", id, true, statement). - // TODO(SNOW-1348116 - decide in next prs): This won't work because alter set warehouse is broken - // we could actually make it work by enabling only uppercased ids in the warehouse field until it's fixed. - // WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithWarehouse(warehouse.ID().Name()). WithSchedule("5 MINUTES"). WithConfigValue(configvariable.StringVariable(taskConfigVariableValue)). WithAllowOverlappingExecution(r.BooleanTrue). @@ -277,7 +281,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasErrorIntegrationString(""). HasCommentString(""). HasFinalizeString(""). - HasAfterLen(0). + HasAfterIds(). HasWhenString(""). HasSqlStatementString(statement), resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). @@ -314,13 +318,14 @@ func TestAcc_Task_Updates(t *testing.T) { HasSchemaString(id.SchemaName()). HasNameString(id.Name()). HasEnabledString(r.BooleanTrue). + HasWarehouseString(warehouse.ID().Name()). HasScheduleString("5 MINUTES"). HasConfigString(expectedTaskConfig). HasAllowOverlappingExecutionString(r.BooleanTrue). HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). HasCommentString(comment). HasFinalizeString(""). - HasAfterLen(0). + HasAfterIds(). HasWhenString(condition). HasSqlStatementString(statement), resourceshowoutputassert.TaskShowOutput(t, completeConfigModel.ResourceReference()). @@ -330,6 +335,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasDatabaseName(id.DatabaseName()). HasSchemaName(id.SchemaName()). HasOwner(currentRole.Name()). + HasWarehouse(warehouse.ID().Name()). HasComment(comment). HasSchedule("5 MINUTES"). HasPredecessors(). @@ -363,7 +369,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasErrorIntegrationString(""). HasCommentString(""). HasFinalizeString(""). - HasAfterLen(0). + HasAfterIds(). HasWhenString(""). HasSqlStatementString(statement), resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). @@ -742,7 +748,7 @@ func TestAcc_Task_Enabled(t *testing.T) { }) } -// TODO: This test may also be not deterministic and sometimes it fail when resuming a task while other task is modifying DAG (removing after) +// TODO(SNOW-1348116 - analyze in next pr): This test may also be not deterministic and sometimes it fail when resuming a task while other task is modifying DAG (removing after) func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) acc.TestAccPreCheck(t) @@ -809,7 +815,7 @@ func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { HasSchedule(schedule). HasState(sdk.TaskStateStarted), resourceassert.TaskResource(t, childTaskModel.ResourceReference()). - HasAfterLen(1). + HasAfterIds(id). 
HasEnabledString(r.BooleanTrue), resourceshowoutputassert.TaskShowOutput(t, childTaskModel.ResourceReference()). HasPredecessors(id). @@ -935,7 +941,7 @@ func TestAcc_Task_ConvertStandaloneTaskToFinalizer(t *testing.T) { }) } -// TODO(SNOW-1348116 - analyse in next pr): This test is not deterministic and sometimes it fails when resuming a task while other task is modifying DAG (removing after) +// TODO(SNOW-1348116 - analyze in next pr): This test is not deterministic and sometimes it fails when resuming a task while other task is modifying DAG (removing after) func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() @@ -974,7 +980,7 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { resourceassert.TaskResource(t, "snowflake_task.child"). HasEnabledString(r.BooleanTrue). HasScheduleString(schedule). - HasAfterLen(0). + HasAfterIds(). HasSuspendTaskAfterNumFailuresString("10"), resourceassert.TaskResource(t, "snowflake_task.root"). HasEnabledString(r.BooleanTrue). @@ -988,7 +994,7 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { resourceassert.TaskResource(t, "snowflake_task.child"). HasEnabledString(r.BooleanTrue). HasScheduleString(""). - HasAfterLen(1). + HasAfterIds(rootId). HasSuspendTaskAfterNumFailuresString("10"), resourceassert.TaskResource(t, "snowflake_task.root"). HasEnabledString(r.BooleanTrue). @@ -1002,7 +1008,7 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { resourceassert.TaskResource(t, "snowflake_task.child"). HasEnabledString(r.BooleanTrue). HasScheduleString(schedule). - HasAfterLen(0). + HasAfterIds(). HasSuspendTaskAfterNumFailuresString("10"), resourceassert.TaskResource(t, "snowflake_task.root"). HasEnabledString(r.BooleanTrue). @@ -1016,7 +1022,7 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { resourceassert.TaskResource(t, "snowflake_task.child"). HasEnabledString(r.BooleanFalse). HasScheduleString(schedule). - HasAfterLen(0). + HasAfterIds(). HasSuspendTaskAfterNumFailuresString("10"), resourceassert.TaskResource(t, "snowflake_task.root"). HasEnabledString(r.BooleanFalse). @@ -1070,7 +1076,7 @@ func TestAcc_Task_WithAfter(t *testing.T) { HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). HasEnabledString(r.BooleanTrue). - HasAfterLen(1), + HasAfterIds(rootId), ), }, { @@ -1081,7 +1087,7 @@ func TestAcc_Task_WithAfter(t *testing.T) { HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModelWithoutAfter.ResourceReference()). HasEnabledString(r.BooleanTrue). - HasAfterLen(0), + HasAfterIds(), ), }, }, @@ -1192,7 +1198,7 @@ func TestAcc_Task_issue2207(t *testing.T) { HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModel.ResourceReference()). HasEnabledString(r.BooleanTrue). - HasAfterLen(1). + HasAfterIds(rootId). HasCommentString("abc"), ), }, @@ -1210,7 +1216,7 @@ func TestAcc_Task_issue2207(t *testing.T) { HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModelWithDifferentComment.ResourceReference()). HasEnabledString(r.BooleanTrue). - HasAfterLen(1). + HasAfterIds(rootId). 
HasCommentString("def"), ), }, diff --git a/pkg/sdk/sql_builder.go b/pkg/sdk/sql_builder.go index 79b27831fc..bae8ef485c 100644 --- a/pkg/sdk/sql_builder.go +++ b/pkg/sdk/sql_builder.go @@ -244,7 +244,6 @@ func (b sqlBuilder) parseInterface(v interface{}, tag reflect.StructTag) (sqlCla return sqlIdentifierClause{ key: sqlTag, value: v.(Identifier), - qm: b.getModifier(tag, "ddl", quoteModifierType, NoQuotes).(quoteModifier), em: b.getModifier(tag, "ddl", equalsModifierType, NoEquals).(equalsModifier), }, nil } @@ -331,7 +330,6 @@ func (b sqlBuilder) parseFieldStruct(field reflect.StructField, value reflect.Va return sqlIdentifierClause{ key: sqlTag, value: reflectedValue.(Identifier), - qm: b.getModifier(field.Tag, "ddl", quoteModifierType, NoQuotes).(quoteModifier), em: b.getModifier(field.Tag, "ddl", equalsModifierType, NoEquals).(equalsModifier), }, nil } @@ -402,7 +400,6 @@ func (b sqlBuilder) parseFieldSlice(field reflect.StructField, value reflect.Val if ok { listClauses = append(listClauses, sqlIdentifierClause{ value: identifier, - qm: b.getModifier(field.Tag, "ddl", quoteModifierType, NoQuotes).(quoteModifier), em: b.getModifier(field.Tag, "ddl", equalsModifierType, NoEquals).(equalsModifier), }) continue @@ -535,7 +532,6 @@ func (b sqlBuilder) parseField(field reflect.StructField, value reflect.Value) ( clause = sqlIdentifierClause{ key: sqlTag, value: reflectedValue.(Identifier), - qm: b.getModifier(field.Tag, "ddl", quoteModifierType, NoQuotes).(quoteModifier), em: b.getModifier(field.Tag, "ddl", equalsModifierType, NoEquals).(equalsModifier), } case "parameter": @@ -607,7 +603,6 @@ func (v sqlKeywordClause) String() string { type sqlIdentifierClause struct { key string value Identifier - qm quoteModifier em equalsModifier } @@ -621,10 +616,6 @@ func (v sqlIdentifierClause) String() string { } // else try to get the string value if v.key != "" { - if v.qm == SingleQuotes { - name = `'` + name + `'` - } - return v.em.Modify(v.key) + name } return name diff --git a/pkg/sdk/tasks_def.go b/pkg/sdk/tasks_def.go index 4329f295a8..19b9db8df3 100644 --- a/pkg/sdk/tasks_def.go +++ b/pkg/sdk/tasks_def.go @@ -156,7 +156,7 @@ var TasksDef = g.NewInterface( OptionalSessionParameters(). OptionalNumberAssignment("USER_TASK_TIMEOUT_MS", nil). OptionalNumberAssignment("SUSPEND_TASK_AFTER_NUM_FAILURES", nil). - OptionalIdentifier("ErrorNotificationIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). + OptionalIdentifier("ErrorIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). OptionalTextAssignment("COMMENT", g.ParameterOptions().SingleQuotes()). OptionalIdentifier("Finalize", g.KindOfT[SchemaObjectIdentifier](), g.IdentifierOptions().Equals().SQL("FINALIZE")). OptionalNumberAssignment("TASK_AUTO_RETRY_ATTEMPTS", g.ParameterOptions()). @@ -167,7 +167,7 @@ var TasksDef = g.NewInterface( SQL("AS"). Text("sql", g.KeywordOptions().NoQuotes().Required()). WithValidation(g.ValidIdentifier, "name"). - WithValidation(g.ValidIdentifierIfSet, "ErrorNotificationIntegration"). + WithValidation(g.ValidIdentifierIfSet, "ErrorIntegration"). WithValidation(g.ConflictingFields, "OrReplace", "IfNotExists"), taskCreateWarehouse, ). @@ -185,7 +185,7 @@ var TasksDef = g.NewInterface( OptionalNumberAssignment("USER_TASK_TIMEOUT_MS", nil). OptionalSessionParameters(). OptionalNumberAssignment("SUSPEND_TASK_AFTER_NUM_FAILURES", nil). 
- OptionalIdentifier("ErrorNotificationIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). + OptionalIdentifier("ErrorIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). OptionalTextAssignment("COMMENT", g.ParameterOptions().SingleQuotes()). OptionalIdentifier("Finalize", g.KindOfT[SchemaObjectIdentifier](), g.IdentifierOptions().Equals().SQL("FINALIZE")). OptionalNumberAssignment("TASK_AUTO_RETRY_ATTEMPTS", g.ParameterOptions()). @@ -194,7 +194,7 @@ var TasksDef = g.NewInterface( SQL("AS"). Text("sql", g.KeywordOptions().NoQuotes().Required()). WithValidation(g.ValidIdentifier, "name"). - WithValidation(g.ValidIdentifierIfSet, "ErrorNotificationIntegration"), + WithValidation(g.ValidIdentifierIfSet, "ErrorIntegration"), ). CustomOperation( "Clone", @@ -231,14 +231,14 @@ var TasksDef = g.NewInterface( OptionalBooleanAssignment("ALLOW_OVERLAPPING_EXECUTION", nil). OptionalNumberAssignment("USER_TASK_TIMEOUT_MS", nil). OptionalNumberAssignment("SUSPEND_TASK_AFTER_NUM_FAILURES", nil). - OptionalIdentifier("ErrorNotificationIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). + OptionalIdentifier("ErrorIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). OptionalTextAssignment("COMMENT", g.ParameterOptions().SingleQuotes()). OptionalSessionParameters(). OptionalNumberAssignment("TASK_AUTO_RETRY_ATTEMPTS", nil). OptionalNumberAssignment("USER_TASK_MINIMUM_TRIGGER_INTERVAL_IN_SECONDS", nil). WithValidation(g.AtLeastOneValueSet, "Warehouse", "UserTaskManagedInitialWarehouseSize", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParameters", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds"). WithValidation(g.ConflictingFields, "Warehouse", "UserTaskManagedInitialWarehouseSize"). - WithValidation(g.ValidIdentifierIfSet, "ErrorNotificationIntegration"), + WithValidation(g.ValidIdentifierIfSet, "ErrorIntegration"), g.ListOptions().SQL("SET"), ). 
OptionalQueryStructField( diff --git a/pkg/sdk/tasks_dto_builders_gen.go b/pkg/sdk/tasks_dto_builders_gen.go index 1a371b8a71..6f56cf41b6 100644 --- a/pkg/sdk/tasks_dto_builders_gen.go +++ b/pkg/sdk/tasks_dto_builders_gen.go @@ -59,8 +59,8 @@ func (s *CreateTaskRequest) WithSuspendTaskAfterNumFailures(SuspendTaskAfterNumF return s } -func (s *CreateTaskRequest) WithErrorNotificationIntegration(ErrorNotificationIntegration AccountObjectIdentifier) *CreateTaskRequest { - s.ErrorNotificationIntegration = &ErrorNotificationIntegration +func (s *CreateTaskRequest) WithErrorIntegration(ErrorIntegration AccountObjectIdentifier) *CreateTaskRequest { + s.ErrorIntegration = &ErrorIntegration return s } @@ -158,8 +158,8 @@ func (s *CreateOrAlterTaskRequest) WithSuspendTaskAfterNumFailures(SuspendTaskAf return s } -func (s *CreateOrAlterTaskRequest) WithErrorNotificationIntegration(ErrorNotificationIntegration AccountObjectIdentifier) *CreateOrAlterTaskRequest { - s.ErrorNotificationIntegration = &ErrorNotificationIntegration +func (s *CreateOrAlterTaskRequest) WithErrorIntegration(ErrorIntegration AccountObjectIdentifier) *CreateOrAlterTaskRequest { + s.ErrorIntegration = &ErrorIntegration return s } @@ -325,8 +325,8 @@ func (s *TaskSetRequest) WithSuspendTaskAfterNumFailures(SuspendTaskAfterNumFail return s } -func (s *TaskSetRequest) WithErrorNotificationIntegration(ErrorNotificationIntegration AccountObjectIdentifier) *TaskSetRequest { - s.ErrorNotificationIntegration = &ErrorNotificationIntegration +func (s *TaskSetRequest) WithErrorIntegration(ErrorIntegration AccountObjectIdentifier) *TaskSetRequest { + s.ErrorIntegration = &ErrorIntegration return s } diff --git a/pkg/sdk/tasks_dto_gen.go b/pkg/sdk/tasks_dto_gen.go index f10cbf0051..a6986c9ea2 100644 --- a/pkg/sdk/tasks_dto_gen.go +++ b/pkg/sdk/tasks_dto_gen.go @@ -24,7 +24,7 @@ type CreateTaskRequest struct { SessionParameters *SessionParameters UserTaskTimeoutMs *int SuspendTaskAfterNumFailures *int - ErrorNotificationIntegration *AccountObjectIdentifier + ErrorIntegration *AccountObjectIdentifier Comment *string Finalize *SchemaObjectIdentifier TaskAutoRetryAttempts *int @@ -45,21 +45,21 @@ func (r *CreateTaskRequest) GetName() SchemaObjectIdentifier { } type CreateOrAlterTaskRequest struct { - name SchemaObjectIdentifier // required - Warehouse *CreateTaskWarehouseRequest - Schedule *string - Config *string - AllowOverlappingExecution *bool - UserTaskTimeoutMs *int - SessionParameters *SessionParameters - SuspendTaskAfterNumFailures *int - ErrorNotificationIntegration *AccountObjectIdentifier - Comment *string - Finalize *SchemaObjectIdentifier - TaskAutoRetryAttempts *int - After []SchemaObjectIdentifier - When *string - sql string // required + name SchemaObjectIdentifier // required + Warehouse *CreateTaskWarehouseRequest + Schedule *string + Config *string + AllowOverlappingExecution *bool + UserTaskTimeoutMs *int + SessionParameters *SessionParameters + SuspendTaskAfterNumFailures *int + ErrorIntegration *AccountObjectIdentifier + Comment *string + Finalize *SchemaObjectIdentifier + TaskAutoRetryAttempts *int + After []SchemaObjectIdentifier + When *string + sql string // required } func (r *CreateOrAlterTaskRequest) GetName() SchemaObjectIdentifier { @@ -103,7 +103,7 @@ type TaskSetRequest struct { AllowOverlappingExecution *bool UserTaskTimeoutMs *int SuspendTaskAfterNumFailures *int - ErrorNotificationIntegration *AccountObjectIdentifier + ErrorIntegration *AccountObjectIdentifier Comment *string SessionParameters 
*SessionParameters TaskAutoRetryAttempts *int diff --git a/pkg/sdk/tasks_gen.go b/pkg/sdk/tasks_gen.go index f3b67f6330..3f8798351c 100644 --- a/pkg/sdk/tasks_gen.go +++ b/pkg/sdk/tasks_gen.go @@ -34,7 +34,7 @@ type CreateTaskOptions struct { SessionParameters *SessionParameters `ddl:"list,no_parentheses"` UserTaskTimeoutMs *int `ddl:"parameter" sql:"USER_TASK_TIMEOUT_MS"` SuspendTaskAfterNumFailures *int `ddl:"parameter" sql:"SUSPEND_TASK_AFTER_NUM_FAILURES"` - ErrorNotificationIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` + ErrorIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` Comment *string `ddl:"parameter,single_quotes" sql:"COMMENT"` Finalize *SchemaObjectIdentifier `ddl:"identifier,equals" sql:"FINALIZE"` TaskAutoRetryAttempts *int `ddl:"parameter" sql:"TASK_AUTO_RETRY_ATTEMPTS"` @@ -53,24 +53,24 @@ type CreateTaskWarehouse struct { // CreateOrAlterTaskOptions is based on https://docs.snowflake.com/en/sql-reference/sql/create-task#create-or-alter-task. type CreateOrAlterTaskOptions struct { - createOrAlter bool `ddl:"static" sql:"CREATE OR ALTER"` - task bool `ddl:"static" sql:"TASK"` - name SchemaObjectIdentifier `ddl:"identifier"` - Warehouse *CreateTaskWarehouse `ddl:"keyword"` - Schedule *string `ddl:"parameter,single_quotes" sql:"SCHEDULE"` - Config *string `ddl:"parameter,no_quotes" sql:"CONFIG"` - AllowOverlappingExecution *bool `ddl:"parameter" sql:"ALLOW_OVERLAPPING_EXECUTION"` - UserTaskTimeoutMs *int `ddl:"parameter" sql:"USER_TASK_TIMEOUT_MS"` - SessionParameters *SessionParameters `ddl:"list,no_parentheses"` - SuspendTaskAfterNumFailures *int `ddl:"parameter" sql:"SUSPEND_TASK_AFTER_NUM_FAILURES"` - ErrorNotificationIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` - Comment *string `ddl:"parameter,single_quotes" sql:"COMMENT"` - Finalize *SchemaObjectIdentifier `ddl:"identifier,equals" sql:"FINALIZE"` - TaskAutoRetryAttempts *int `ddl:"parameter" sql:"TASK_AUTO_RETRY_ATTEMPTS"` - After []SchemaObjectIdentifier `ddl:"parameter,no_equals" sql:"AFTER"` - When *string `ddl:"parameter,no_quotes,no_equals" sql:"WHEN"` - as bool `ddl:"static" sql:"AS"` - sql string `ddl:"keyword,no_quotes"` + createOrAlter bool `ddl:"static" sql:"CREATE OR ALTER"` + task bool `ddl:"static" sql:"TASK"` + name SchemaObjectIdentifier `ddl:"identifier"` + Warehouse *CreateTaskWarehouse `ddl:"keyword"` + Schedule *string `ddl:"parameter,single_quotes" sql:"SCHEDULE"` + Config *string `ddl:"parameter,no_quotes" sql:"CONFIG"` + AllowOverlappingExecution *bool `ddl:"parameter" sql:"ALLOW_OVERLAPPING_EXECUTION"` + UserTaskTimeoutMs *int `ddl:"parameter" sql:"USER_TASK_TIMEOUT_MS"` + SessionParameters *SessionParameters `ddl:"list,no_parentheses"` + SuspendTaskAfterNumFailures *int `ddl:"parameter" sql:"SUSPEND_TASK_AFTER_NUM_FAILURES"` + ErrorIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` + Comment *string `ddl:"parameter,single_quotes" sql:"COMMENT"` + Finalize *SchemaObjectIdentifier `ddl:"identifier,equals" sql:"FINALIZE"` + TaskAutoRetryAttempts *int `ddl:"parameter" sql:"TASK_AUTO_RETRY_ATTEMPTS"` + After []SchemaObjectIdentifier `ddl:"parameter,no_equals" sql:"AFTER"` + When *string `ddl:"parameter,no_quotes,no_equals" sql:"WHEN"` + as bool `ddl:"static" sql:"AS"` + sql string `ddl:"keyword,no_quotes"` } // CloneTaskOptions is based on https://docs.snowflake.com/en/sql-reference/sql/create-task#create-task-clone. 
@@ -106,14 +106,14 @@ type AlterTaskOptions struct { } type TaskSet struct { - Warehouse *AccountObjectIdentifier `ddl:"identifier,equals,single_quotes" sql:"WAREHOUSE"` + Warehouse *AccountObjectIdentifier `ddl:"identifier,equals" sql:"WAREHOUSE"` UserTaskManagedInitialWarehouseSize *WarehouseSize `ddl:"parameter,single_quotes" sql:"USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE"` Schedule *string `ddl:"parameter,single_quotes" sql:"SCHEDULE"` Config *string `ddl:"parameter,no_quotes" sql:"CONFIG"` AllowOverlappingExecution *bool `ddl:"parameter" sql:"ALLOW_OVERLAPPING_EXECUTION"` UserTaskTimeoutMs *int `ddl:"parameter" sql:"USER_TASK_TIMEOUT_MS"` SuspendTaskAfterNumFailures *int `ddl:"parameter" sql:"SUSPEND_TASK_AFTER_NUM_FAILURES"` - ErrorNotificationIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` + ErrorIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` Comment *string `ddl:"parameter,single_quotes" sql:"COMMENT"` SessionParameters *SessionParameters `ddl:"list,no_parentheses"` TaskAutoRetryAttempts *int `ddl:"parameter" sql:"TASK_AUTO_RETRY_ATTEMPTS"` diff --git a/pkg/sdk/tasks_gen_test.go b/pkg/sdk/tasks_gen_test.go index c82690cc4d..fa3e9edbbc 100644 --- a/pkg/sdk/tasks_gen_test.go +++ b/pkg/sdk/tasks_gen_test.go @@ -81,7 +81,7 @@ func TestTasks_Create(t *testing.T) { } opts.UserTaskTimeoutMs = Int(5) opts.SuspendTaskAfterNumFailures = Int(6) - opts.ErrorNotificationIntegration = Pointer(NewAccountObjectIdentifier("some_error_integration")) + opts.ErrorIntegration = Pointer(NewAccountObjectIdentifier("some_error_integration")) opts.Comment = String("some comment") opts.Finalize = &finalizerId opts.TaskAutoRetryAttempts = Int(10) @@ -157,7 +157,7 @@ func TestTasks_CreateOrAlter(t *testing.T) { LockTimeout: Int(5), } opts.SuspendTaskAfterNumFailures = Int(6) - opts.ErrorNotificationIntegration = Pointer(NewAccountObjectIdentifier("some_error_integration")) + opts.ErrorIntegration = Pointer(NewAccountObjectIdentifier("some_error_integration")) opts.Comment = String("some comment") opts.Finalize = &finalizerId opts.TaskAutoRetryAttempts = Int(10) @@ -328,7 +328,7 @@ func TestTasks_Alter(t *testing.T) { opts.Set = &TaskSet{ Warehouse: &warehouseId, } - assertOptsValidAndSQLEquals(t, opts, "ALTER TASK %s SET WAREHOUSE = '%s'", id.FullyQualifiedName(), warehouseId.FullyQualifiedName()) + assertOptsValidAndSQLEquals(t, opts, "ALTER TASK %s SET WAREHOUSE = %s", id.FullyQualifiedName(), warehouseId.FullyQualifiedName()) }) t.Run("alter set session parameter", func(t *testing.T) { diff --git a/pkg/sdk/tasks_impl_gen.go b/pkg/sdk/tasks_impl_gen.go index cb4bcd81f0..d3e10bcc86 100644 --- a/pkg/sdk/tasks_impl_gen.go +++ b/pkg/sdk/tasks_impl_gen.go @@ -183,7 +183,7 @@ func (r *CreateTaskRequest) toOpts() *CreateTaskOptions { SessionParameters: r.SessionParameters, UserTaskTimeoutMs: r.UserTaskTimeoutMs, SuspendTaskAfterNumFailures: r.SuspendTaskAfterNumFailures, - ErrorNotificationIntegration: r.ErrorNotificationIntegration, + ErrorIntegration: r.ErrorIntegration, Comment: r.Comment, Finalize: r.Finalize, TaskAutoRetryAttempts: r.TaskAutoRetryAttempts, @@ -204,20 +204,20 @@ func (r *CreateTaskRequest) toOpts() *CreateTaskOptions { func (r *CreateOrAlterTaskRequest) toOpts() *CreateOrAlterTaskOptions { opts := &CreateOrAlterTaskOptions{ - name: r.name, - Schedule: r.Schedule, - Config: r.Config, - AllowOverlappingExecution: r.AllowOverlappingExecution, - UserTaskTimeoutMs: r.UserTaskTimeoutMs, - SessionParameters: 
r.SessionParameters, - SuspendTaskAfterNumFailures: r.SuspendTaskAfterNumFailures, - ErrorNotificationIntegration: r.ErrorNotificationIntegration, - Comment: r.Comment, - Finalize: r.Finalize, - TaskAutoRetryAttempts: r.TaskAutoRetryAttempts, - After: r.After, - When: r.When, - sql: r.sql, + name: r.name, + Schedule: r.Schedule, + Config: r.Config, + AllowOverlappingExecution: r.AllowOverlappingExecution, + UserTaskTimeoutMs: r.UserTaskTimeoutMs, + SessionParameters: r.SessionParameters, + SuspendTaskAfterNumFailures: r.SuspendTaskAfterNumFailures, + ErrorIntegration: r.ErrorIntegration, + Comment: r.Comment, + Finalize: r.Finalize, + TaskAutoRetryAttempts: r.TaskAutoRetryAttempts, + After: r.After, + When: r.When, + sql: r.sql, } if r.Warehouse != nil { opts.Warehouse = &CreateTaskWarehouse{ @@ -264,7 +264,7 @@ func (r *AlterTaskRequest) toOpts() *AlterTaskOptions { AllowOverlappingExecution: r.Set.AllowOverlappingExecution, UserTaskTimeoutMs: r.Set.UserTaskTimeoutMs, SuspendTaskAfterNumFailures: r.Set.SuspendTaskAfterNumFailures, - ErrorNotificationIntegration: r.Set.ErrorNotificationIntegration, + ErrorIntegration: r.Set.ErrorIntegration, Comment: r.Set.Comment, SessionParameters: r.Set.SessionParameters, TaskAutoRetryAttempts: r.Set.TaskAutoRetryAttempts, diff --git a/pkg/sdk/tasks_validations_gen.go b/pkg/sdk/tasks_validations_gen.go index 6a5392457b..501b0f48e4 100644 --- a/pkg/sdk/tasks_validations_gen.go +++ b/pkg/sdk/tasks_validations_gen.go @@ -32,8 +32,8 @@ func (opts *CreateTaskOptions) validate() error { if everyValueSet(opts.OrReplace, opts.IfNotExists) { errs = append(errs, errOneOf("CreateTaskOptions", "OrReplace", "IfNotExists")) } - if opts.ErrorNotificationIntegration != nil && !ValidObjectIdentifier(opts.ErrorNotificationIntegration) { - errs = append(errs, errInvalidIdentifier("CreateTaskOptions", "ErrorNotificationIntegration")) + if opts.ErrorIntegration != nil && !ValidObjectIdentifier(opts.ErrorIntegration) { + errs = append(errs, errInvalidIdentifier("CreateTaskOptions", "ErrorIntegration")) } return JoinErrors(errs...) } @@ -56,8 +56,8 @@ func (opts *CreateOrAlterTaskOptions) validate() error { if !ValidObjectIdentifier(opts.name) { errs = append(errs, ErrInvalidObjectIdentifier) } - if opts.ErrorNotificationIntegration != nil && !ValidObjectIdentifier(opts.ErrorNotificationIntegration) { - errs = append(errs, errInvalidIdentifier("CreateOrAlterTaskOptions", "ErrorNotificationIntegration")) + if opts.ErrorIntegration != nil && !ValidObjectIdentifier(opts.ErrorIntegration) { + errs = append(errs, errInvalidIdentifier("CreateOrAlterTaskOptions", "ErrorIntegration")) } return JoinErrors(errs...) 
} @@ -93,14 +93,14 @@ func (opts *AlterTaskOptions) validate() error { errs = append(errs, err) } } - if !anyValueSet(opts.Set.Warehouse, opts.Set.UserTaskManagedInitialWarehouseSize, opts.Set.Schedule, opts.Set.Config, opts.Set.AllowOverlappingExecution, opts.Set.UserTaskTimeoutMs, opts.Set.SuspendTaskAfterNumFailures, opts.Set.ErrorNotificationIntegration, opts.Set.Comment, opts.Set.SessionParameters, opts.Set.TaskAutoRetryAttempts, opts.Set.UserTaskMinimumTriggerIntervalInSeconds) { + if !anyValueSet(opts.Set.Warehouse, opts.Set.UserTaskManagedInitialWarehouseSize, opts.Set.Schedule, opts.Set.Config, opts.Set.AllowOverlappingExecution, opts.Set.UserTaskTimeoutMs, opts.Set.SuspendTaskAfterNumFailures, opts.Set.ErrorIntegration, opts.Set.Comment, opts.Set.SessionParameters, opts.Set.TaskAutoRetryAttempts, opts.Set.UserTaskMinimumTriggerIntervalInSeconds) { errs = append(errs, errAtLeastOneOf("AlterTaskOptions.Set", "Warehouse", "UserTaskManagedInitialWarehouseSize", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParameters", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds")) } if everyValueSet(opts.Set.Warehouse, opts.Set.UserTaskManagedInitialWarehouseSize) { errs = append(errs, errOneOf("AlterTaskOptions.Set", "Warehouse", "UserTaskManagedInitialWarehouseSize")) } - if opts.Set.ErrorNotificationIntegration != nil && !ValidObjectIdentifier(opts.Set.ErrorNotificationIntegration) { - errs = append(errs, errInvalidIdentifier("AlterTaskOptions.Set", "ErrorNotificationIntegration")) + if opts.Set.ErrorIntegration != nil && !ValidObjectIdentifier(opts.Set.ErrorIntegration) { + errs = append(errs, errInvalidIdentifier("AlterTaskOptions.Set", "ErrorIntegration")) } } if valueSet(opts.Unset) { diff --git a/pkg/sdk/testint/tasks_gen_integration_test.go b/pkg/sdk/testint/tasks_gen_integration_test.go index de77c56c40..bc3da46a95 100644 --- a/pkg/sdk/testint/tasks_gen_integration_test.go +++ b/pkg/sdk/testint/tasks_gen_integration_test.go @@ -21,8 +21,8 @@ func TestInt_Tasks(t *testing.T) { ctx := testContext(t) sql := "SELECT CURRENT_TIMESTAMP" - errorNotificationIntegration, errorNotificationIntegrationCleanup := testClientHelper().NotificationIntegration.Create(t) - t.Cleanup(errorNotificationIntegrationCleanup) + errorIntegration, errorIntegrationCleanup := testClientHelper().NotificationIntegration.Create(t) + t.Cleanup(errorIntegrationCleanup) assertTask := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, warehouseId *sdk.AccountObjectIdentifier) { t.Helper() @@ -270,7 +270,7 @@ func TestInt_Tasks(t *testing.T) { err := testClient(t).Tasks.Create(ctx, sdk.NewCreateTaskRequest(id, sql). WithOrReplace(true). WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(testClientHelper().Ids.WarehouseId())). - WithErrorNotificationIntegration(errorNotificationIntegration.ID()). + WithErrorIntegration(errorIntegration.ID()). WithSchedule("10 MINUTE"). WithConfig(`$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$`). WithAllowOverlappingExecution(true). 
@@ -287,7 +287,7 @@ func TestInt_Tasks(t *testing.T) { task, err := testClientHelper().Task.Show(t, id) require.NoError(t, err) - assertTaskWithOptions(t, task, id, "some comment", sdk.Pointer(testClientHelper().Ids.WarehouseId()), "10 MINUTE", `SYSTEM$STREAM_HAS_DATA('MYSTREAM')`, true, `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`, nil, sdk.Pointer(errorNotificationIntegration.ID())) + assertTaskWithOptions(t, task, id, "some comment", sdk.Pointer(testClientHelper().Ids.WarehouseId()), "10 MINUTE", `SYSTEM$STREAM_HAS_DATA('MYSTREAM')`, true, `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`, nil, sdk.Pointer(errorIntegration.ID())) assertions.AssertThat(t, objectparametersassert.TaskParameters(t, id). HasJsonIndent(4). HasUserTaskTimeoutMs(500). @@ -614,7 +614,7 @@ func TestInt_Tasks(t *testing.T) { err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(task.ID()).WithSet(*sdk.NewTaskSetRequest(). // TODO(SNOW-1348116): Cannot set warehouse due to Snowflake error // WithWarehouse(testClientHelper().Ids.WarehouseId()). - WithErrorNotificationIntegration(errorNotificationIntegration.ID()). + WithErrorIntegration(errorIntegration.ID()). WithSessionParameters(sessionParametersSet). WithSchedule("10 MINUTE"). WithConfig(`$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$`). @@ -629,7 +629,7 @@ func TestInt_Tasks(t *testing.T) { assertions.AssertThat(t, objectassert.Task(t, task.ID()). // HasWarehouse(testClientHelper().Ids.WarehouseId().Name()). - HasErrorIntegration(sdk.Pointer(errorNotificationIntegration.ID())). + HasErrorIntegration(sdk.Pointer(errorIntegration.ID())). HasSchedule("10 MINUTE"). HasConfig(`{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`). HasAllowOverlappingExecution(true). From 80ca12e07e4e1b0fa5a0d6a8245c017496fa00cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Mon, 14 Oct 2024 12:48:52 +0200 Subject: [PATCH 08/12] wip --- docs/resources/task.md | 10 +- .../assert/objectassert/task_snowflake_ext.go | 34 ++ .../assert/objectassert/task_snowflake_gen.go | 34 -- .../resourceassert/task_resource_ext.go | 3 +- .../resourceassert/task_resource_gen.go | 20 +- .../task_show_output_ext.go | 15 + .../task_show_output_gen.go | 25 +- .../config/model/task_model_ext.go | 4 +- .../config/model/task_model_gen.go | 30 +- pkg/acceptance/helpers/task_client.go | 8 + .../collections/collection_helpers_test.go | 3 +- pkg/resources/task.go | 12 +- pkg/resources/task_acceptance_test.go | 342 +++++++++++++++--- pkg/schemas/gen/README.md | 1 + pkg/schemas/task_gen.go | 3 +- pkg/sdk/tasks_impl_gen.go | 1 + 16 files changed, 391 insertions(+), 154 deletions(-) diff --git a/docs/resources/task.md b/docs/resources/task.md index fccf477168..56540d3c95 100644 --- a/docs/resources/task.md +++ b/docs/resources/task.md @@ -77,6 +77,7 @@ resource "snowflake_task" "test_task" { ### Required - `database` (String) The database in which to create the task. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` +- `enabled` (Boolean) Specifies if the task should be started or suspended. - `name` (String) Specifies the identifier for the task; must be unique for the database and schema in which the task is created. 
Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` - `schema` (String) The schema in which to create the task. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` - `sql_statement` (String) Any single SQL statement, or a call to a stored procedure, executed when the task runs. @@ -84,7 +85,7 @@ resource "snowflake_task" "test_task" { ### Optional - `abort_detached_query` (Boolean) Specifies the action that Snowflake performs for in-progress queries if connectivity is lost due to abrupt termination of a session (e.g. network outage, browser termination, service interruption). For more information, check [ABORT_DETACHED_QUERY docs](https://docs.snowflake.com/en/sql-reference/parameters#abort-detached-query). -- `after` (Set of String) Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` +- `after` (Set of String) Specifies one or more predecessor tasks for the current task. Use this option to [create a DAG](https://docs.snowflake.com/en/user-guide/tasks-graphs.html#label-task-dag) of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` - `allow_overlapping_execution` (String) By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap. Available options are: "true" or "false". When the value is not set in the configuration the provider will put "default" there which means to use the Snowflake default for this value. - `autocommit` (Boolean) Specifies whether autocommit is enabled for the session. Autocommit determines whether a DML statement, when executed without an active transaction, is automatically committed after the statement successfully completes. For more information, see [Transactions](https://docs.snowflake.com/en/sql-reference/transactions). For more information, check [AUTOCOMMIT docs](https://docs.snowflake.com/en/sql-reference/parameters#autocommit). - `binary_input_format` (String) The format of VARCHAR values passed as input to VARCHAR-to-BINARY conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output). 
For more information, check [BINARY_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#binary-input-format). @@ -102,11 +103,10 @@ resource "snowflake_task" "test_task" { - `date_input_format` (String) Specifies the input format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [DATE_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#date-input-format). - `date_output_format` (String) Specifies the display format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [DATE_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#date-output-format). - `enable_unload_physical_type_optimization` (Boolean) Specifies whether to set the schema for unloaded Parquet files based on the logical column data types (i.e. the types in the unload SQL query or source table) or on the unloaded column values (i.e. the smallest data types and precision that support the values in the output columns of the unload SQL statement or source table). For more information, check [ENABLE_UNLOAD_PHYSICAL_TYPE_OPTIMIZATION docs](https://docs.snowflake.com/en/sql-reference/parameters#enable-unload-physical-type-optimization). -- `enabled` (String) Specifies if the task should be started (enabled) after creation or should remain suspended (default). Available options are: "true" or "false". When the value is not set in the configuration the provider will put "default" there which means to use the Snowflake default for this value. - `error_integration` (String) Specifies the name of the notification integration used for error notifications. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` - `error_on_nondeterministic_merge` (Boolean) Specifies whether to return an error when the [MERGE](https://docs.snowflake.com/en/sql-reference/sql/merge) command is used to update or delete a target row that joins multiple source rows and the system cannot determine the action to perform on the target row. For more information, check [ERROR_ON_NONDETERMINISTIC_MERGE docs](https://docs.snowflake.com/en/sql-reference/parameters#error-on-nondeterministic-merge). - `error_on_nondeterministic_update` (Boolean) Specifies whether to return an error when the [UPDATE](https://docs.snowflake.com/en/sql-reference/sql/update) command is used to update a target row that joins multiple source rows and the system cannot determine the action to perform on the target row. For more information, check [ERROR_ON_NONDETERMINISTIC_UPDATE docs](https://docs.snowflake.com/en/sql-reference/parameters#error-on-nondeterministic-update). -- `finalize` (String) TODO Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` +- `finalize` (String) Specifies the name of a root task that the finalizer task is associated with. 
Finalizer tasks run after all other tasks in the task graph run to completion. You can define the SQL of a finalizer task to handle notifications and the release and cleanup of resources that a task graph uses. For more information, see [Release and cleanup of task graphs](https://docs.snowflake.com/en/user-guide/tasks-graphs.html#label-finalizer-task). Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` - `geography_output_format` (String) Display format for [GEOGRAPHY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geography). For more information, check [GEOGRAPHY_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#geography-output-format). - `geometry_output_format` (String) Display format for [GEOMETRY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geometry). For more information, check [GEOMETRY_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#geometry-output-format). - `jdbc_treat_timestamp_ntz_as_utc` (Boolean) Specifies how JDBC processes TIMESTAMP_NTZ values. For more information, check [JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC docs](https://docs.snowflake.com/en/sql-reference/parameters#jdbc-treat-timestamp-ntz-as-utc). @@ -121,7 +121,7 @@ resource "snowflake_task" "test_task" { - `quoted_identifiers_ignore_case` (Boolean) Specifies whether letters in double-quoted object identifiers are stored and resolved as uppercase letters. By default, Snowflake preserves the case of alphabetic characters when storing and resolving double-quoted identifiers (see [Identifier resolution](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing)). You can use this parameter in situations in which [third-party applications always use double quotes around identifiers](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing-parameter). For more information, check [QUOTED_IDENTIFIERS_IGNORE_CASE docs](https://docs.snowflake.com/en/sql-reference/parameters#quoted-identifiers-ignore-case). - `rows_per_resultset` (Number) Specifies the maximum number of rows returned in a result set. A value of 0 specifies no maximum. For more information, check [ROWS_PER_RESULTSET docs](https://docs.snowflake.com/en/sql-reference/parameters#rows-per-resultset). - `s3_stage_vpce_dns_name` (String) Specifies the DNS name of an Amazon S3 interface endpoint. Requests sent to the internal stage of an account via [AWS PrivateLink for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html) use this endpoint to connect. For more information, see [Accessing Internal stages with dedicated interface endpoints](https://docs.snowflake.com/en/user-guide/private-internal-stages-aws.html#label-aws-privatelink-internal-stage-network-isolation). For more information, check [S3_STAGE_VPCE_DNS_NAME docs](https://docs.snowflake.com/en/sql-reference/parameters#s3-stage-vpce-dns-name). -- `schedule` (String) The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with finalize and after) +- `schedule` (String) The schedule for periodically running the task. This can be a cron or interval in minutes. 
(Conflicts with finalize and after) - `search_path` (String) Specifies the path to search to resolve unqualified object names in queries. For more information, see [Name resolution in queries](https://docs.snowflake.com/en/sql-reference/name-resolution.html#label-object-name-resolution-search-path). Comma-separated list of identifiers. An identifier can be a fully or partially qualified schema name. For more information, check [SEARCH_PATH docs](https://docs.snowflake.com/en/sql-reference/parameters#search-path). - `statement_queued_timeout_in_seconds` (Number) Amount of time, in seconds, a SQL statement (query, DDL, DML, etc.) remains queued for a warehouse before it is canceled by the system. This parameter can be used in conjunction with the [MAX_CONCURRENCY_LEVEL](https://docs.snowflake.com/en/sql-reference/parameters#label-max-concurrency-level) parameter to ensure a warehouse is never backlogged. For more information, check [STATEMENT_QUEUED_TIMEOUT_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#statement-queued-timeout-in-seconds). - `statement_timeout_in_seconds` (Number) Amount of time, in seconds, after which a running SQL statement (query, DDL, DML, etc.) is canceled by the system. For more information, check [STATEMENT_TIMEOUT_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#statement-timeout-in-seconds). @@ -147,7 +147,7 @@ resource "snowflake_task" "test_task" { - `user_task_managed_initial_warehouse_size` (String) Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse) For more information, check [USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-managed-initial-warehouse-size). - `user_task_minimum_trigger_interval_in_seconds` (Number) Minimum amount of time between Triggered Task executions in seconds For more information, check [USER_TASK_MINIMUM_TRIGGER_INTERVAL_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-minimum-trigger-interval-in-seconds). - `user_task_timeout_ms` (Number) Specifies the time limit on a single run of the task before it times out (in milliseconds). For more information, check [USER_TASK_TIMEOUT_MS docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-timeout-ms). -- `warehouse` (String) The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size) +- `warehouse` (String) The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. Due to Snowflake limitations warehouse identifier can consist of only upper-cased letters. (Conflicts with user_task_managed_initial_warehouse_size) - `week_of_year_policy` (Number) Specifies how the weeks in a given year are computed. `0`: The semantics used are equivalent to the ISO semantics, in which a week belongs to a given year if at least 4 days of that week are in that year. `1`: January 1 is included in the first week of the year and December 31 is included in the last week of the year. For more information, check [WEEK_OF_YEAR_POLICY docs](https://docs.snowflake.com/en/sql-reference/parameters#week-of-year-policy). 
- `week_start` (Number) Specifies the first day of the week (used by week-related date functions). `0`: Legacy Snowflake behavior is used (i.e. ISO-like semantics). `1` (Monday) to `7` (Sunday): All the week-related functions use weeks that start on the specified day of the week. For more information, check [WEEK_START docs](https://docs.snowflake.com/en/sql-reference/parameters#week-start). - `when` (String) Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported. diff --git a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_ext.go b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_ext.go index 40f5894ba3..e340d8c13a 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_ext.go @@ -72,3 +72,37 @@ func (t *TaskAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskAssert { }) return t } + +func (t *TaskAssert) HasWarehouse(expected *sdk.AccountObjectIdentifier) *TaskAssert { + t.AddAssertion(func(t *testing.T, o *sdk.Task) error { + t.Helper() + if o.Warehouse == nil && expected != nil { + return fmt.Errorf("expected warehouse to have value; got: nil") + } + if o.Warehouse != nil && expected == nil { + return fmt.Errorf("expected warehouse to have no value; got: %s", o.Warehouse.Name()) + } + if o.Warehouse != nil && expected != nil && o.Warehouse.Name() != expected.Name() { + return fmt.Errorf("expected warehouse: %v; got: %v", expected.Name(), o.Warehouse.Name()) + } + return nil + }) + return t +} + +func (t *TaskAssert) HasErrorIntegration(expected *sdk.AccountObjectIdentifier) *TaskAssert { + t.AddAssertion(func(t *testing.T, o *sdk.Task) error { + t.Helper() + if o.ErrorIntegration == nil && expected != nil { + return fmt.Errorf("expected error integration to have value; got: nil") + } + if o.ErrorIntegration != nil && expected == nil { + return fmt.Errorf("expected error integration to have no value; got: %s", o.ErrorIntegration.Name()) + } + if o.ErrorIntegration != nil && expected != nil && o.ErrorIntegration.Name() != expected.Name() { + return fmt.Errorf("expected error integration: %v; got: %v", expected.Name(), o.ErrorIntegration.Name()) + } + return nil + }) + return t +} diff --git a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go index 2bfff8bb86..0d2f32c3a0 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go @@ -107,23 +107,6 @@ func (t *TaskAssert) HasComment(expected string) *TaskAssert { return t } -func (t *TaskAssert) HasWarehouse(expected *sdk.AccountObjectIdentifier) *TaskAssert { - t.AddAssertion(func(t *testing.T, o *sdk.Task) error { - t.Helper() - if o.Warehouse == nil && expected != nil { - return fmt.Errorf("expected warehouse to have value; got: nil") - } - if o.Warehouse != nil && expected == nil { - return fmt.Errorf("expected warehouse to no have value; got: %s", o.Warehouse.Name()) - } - if o.Warehouse != nil && expected != nil && o.Warehouse.Name() != expected.Name() { - return fmt.Errorf("expected warehouse: %v; got: %v", expected.Name(), o.Warehouse.Name()) - } - return nil - }) - return t -} - func (t *TaskAssert) HasSchedule(expected string) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() @@ -179,23 +162,6 @@ func (t *TaskAssert) 
HasAllowOverlappingExecution(expected bool) *TaskAssert { return t } -func (t *TaskAssert) HasErrorIntegration(expected *sdk.AccountObjectIdentifier) *TaskAssert { - t.AddAssertion(func(t *testing.T, o *sdk.Task) error { - t.Helper() - if o.ErrorIntegration == nil && expected != nil { - return fmt.Errorf("expected error integration to have value; got: nil") - } - if o.ErrorIntegration != nil && expected == nil { - return fmt.Errorf("expected error integration to have no value; got: %s", o.ErrorIntegration.Name()) - } - if o.ErrorIntegration != nil && expected != nil && o.ErrorIntegration.Name() != expected.Name() { - return fmt.Errorf("expected error integration: %v; got: %v", expected.Name(), o.ErrorIntegration.Name()) - } - return nil - }) - return t -} - func (t *TaskAssert) HasLastCommittedOn(expected string) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go index ded3d9a83d..4dd36b2cf8 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go @@ -2,9 +2,10 @@ package resourceassert import ( "fmt" - "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "strconv" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" ) diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go index dfe0369762..27dd1a43ec 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go @@ -132,11 +132,6 @@ func (t *TaskResourceAssert) HasEnableUnloadPhysicalTypeOptimizationString(expec return t } -func (t *TaskResourceAssert) HasEnabledString(expected string) *TaskResourceAssert { - t.AddAssertion(assert.ValueSet("enabled", expected)) - return t -} - func (t *TaskResourceAssert) HasErrorIntegrationString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("error_integration", expected)) return t @@ -257,6 +252,11 @@ func (t *TaskResourceAssert) HasSqlStatementString(expected string) *TaskResourc return t } +func (t *TaskResourceAssert) HasStartedString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("started", expected)) + return t +} + func (t *TaskResourceAssert) HasStatementQueuedTimeoutInSecondsString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("statement_queued_timeout_in_seconds", expected)) return t @@ -501,11 +501,6 @@ func (t *TaskResourceAssert) HasNoEnableUnloadPhysicalTypeOptimization() *TaskRe return t } -func (t *TaskResourceAssert) HasNoEnabled() *TaskResourceAssert { - t.AddAssertion(assert.ValueNotSet("enabled")) - return t -} - func (t *TaskResourceAssert) HasNoErrorIntegration() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("error_integration")) return t @@ -626,6 +621,11 @@ func (t *TaskResourceAssert) HasNoSqlStatement() *TaskResourceAssert { return t } +func (t *TaskResourceAssert) HasNoStarted() *TaskResourceAssert { + t.AddAssertion(assert.ValueNotSet("started")) + return t +} + func (t *TaskResourceAssert) HasNoStatementQueuedTimeoutInSeconds() *TaskResourceAssert { 
t.AddAssertion(assert.ValueNotSet("statement_queued_timeout_in_seconds")) return t diff --git a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go index 6cb2885b1c..c058df020e 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go @@ -35,3 +35,18 @@ func (t *TaskShowOutputAssert) HasPredecessors(predecessors ...sdk.SchemaObjectI } return t } + +func (t *TaskShowOutputAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.#", "1")) + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.predecessors.#", strconv.Itoa(len(expected.Predecessors)))) + for i, predecessor := range expected.Predecessors { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet(fmt.Sprintf("task_relations.0.predecessors.%d", i), predecessor.FullyQualifiedName())) + } + if expected.FinalizerTask != nil && len(expected.FinalizerTask.Name()) > 0 { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.finalizer", expected.FinalizerTask.FullyQualifiedName())) + } + if expected.FinalizedRootTask != nil && len(expected.FinalizedRootTask.Name()) > 0 { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.finalized_root_task", expected.FinalizedRootTask.FullyQualifiedName())) + } + return t +} diff --git a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go index b11e40af69..2b09ca5def 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go @@ -3,8 +3,6 @@ package resourceshowoutputassert import ( - "fmt" - "strconv" "testing" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" @@ -77,8 +75,8 @@ func (t *TaskShowOutputAssert) HasComment(expected string) *TaskShowOutputAssert return t } -func (t *TaskShowOutputAssert) HasWarehouse(expected string) *TaskShowOutputAssert { - t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("warehouse", expected)) +func (t *TaskShowOutputAssert) HasWarehouse(expected sdk.AccountObjectIdentifier) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("warehouse", expected.Name())) return t } @@ -107,8 +105,8 @@ func (t *TaskShowOutputAssert) HasAllowOverlappingExecution(expected bool) *Task return t } -func (t *TaskShowOutputAssert) HasErrorIntegration(expected string) *TaskShowOutputAssert { - t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("error_integration", expected)) +func (t *TaskShowOutputAssert) HasErrorIntegration(expected sdk.AccountObjectIdentifier) *TaskShowOutputAssert { + t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("error_integration", expected.Name())) return t } @@ -137,21 +135,6 @@ func (t *TaskShowOutputAssert) HasBudget(expected string) *TaskShowOutputAssert return t } -func (t *TaskShowOutputAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskShowOutputAssert { - 
t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.#", "1")) - t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.predecessors.#", strconv.Itoa(len(expected.Predecessors)))) - for i, predecessor := range expected.Predecessors { - t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet(fmt.Sprintf("task_relations.0.predecessors.%d", i), predecessor.FullyQualifiedName())) - } - if expected.FinalizerTask != nil && len(expected.FinalizerTask.Name()) > 0 { - t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.finalizer", expected.FinalizerTask.FullyQualifiedName())) - } - if expected.FinalizedRootTask != nil && len(expected.FinalizedRootTask.Name()) > 0 { - t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.finalized_root_task", expected.FinalizedRootTask.FullyQualifiedName())) - } - return t -} - func (t *TaskShowOutputAssert) HasLastSuspendedReason(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("last_suspended_reason", expected)) return t diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go index 0ed435e295..4968d42ed0 100644 --- a/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go @@ -7,12 +7,12 @@ import ( tfconfig "github.com/hashicorp/terraform-plugin-testing/config" ) -func TaskWithId(resourceName string, id sdk.SchemaObjectIdentifier, enabled bool, sqlStatement string) *TaskModel { +func TaskWithId(resourceName string, id sdk.SchemaObjectIdentifier, started bool, sqlStatement string) *TaskModel { t := &TaskModel{ResourceModelMeta: config.Meta(resourceName, resources.Task)} t.WithDatabase(id.DatabaseName()) t.WithSchema(id.SchemaName()) t.WithName(id.Name()) - t.WithEnabled(enabled) + t.WithStarted(started) t.WithSqlStatement(sqlStatement) return t } diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go index 0274125b96..b6f96f259a 100644 --- a/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go @@ -30,7 +30,6 @@ type TaskModel struct { DateInputFormat tfconfig.Variable `json:"date_input_format,omitempty"` DateOutputFormat tfconfig.Variable `json:"date_output_format,omitempty"` EnableUnloadPhysicalTypeOptimization tfconfig.Variable `json:"enable_unload_physical_type_optimization,omitempty"` - Enabled tfconfig.Variable `json:"enabled,omitempty"` ErrorIntegration tfconfig.Variable `json:"error_integration,omitempty"` ErrorOnNondeterministicMerge tfconfig.Variable `json:"error_on_nondeterministic_merge,omitempty"` ErrorOnNondeterministicUpdate tfconfig.Variable `json:"error_on_nondeterministic_update,omitempty"` @@ -55,6 +54,7 @@ type TaskModel struct { Schema tfconfig.Variable `json:"schema,omitempty"` SearchPath tfconfig.Variable `json:"search_path,omitempty"` SqlStatement tfconfig.Variable `json:"sql_statement,omitempty"` + Started tfconfig.Variable `json:"started,omitempty"` StatementQueuedTimeoutInSeconds tfconfig.Variable `json:"statement_queued_timeout_in_seconds,omitempty"` StatementTimeoutInSeconds tfconfig.Variable `json:"statement_timeout_in_seconds,omitempty"` StrictJsonOutput tfconfig.Variable `json:"strict_json_output,omitempty"` @@ -94,33 +94,33 @@ type TaskModel struct { func Task( 
resourceName string, database string, - enabled bool, name string, schema string, sqlStatement string, + started bool, ) *TaskModel { t := &TaskModel{ResourceModelMeta: config.Meta(resourceName, resources.Task)} t.WithDatabase(database) - t.WithEnabled(enabled) t.WithName(name) t.WithSchema(schema) t.WithSqlStatement(sqlStatement) + t.WithStarted(started) return t } func TaskWithDefaultMeta( database string, - enabled bool, name string, schema string, sqlStatement string, + started bool, ) *TaskModel { t := &TaskModel{ResourceModelMeta: config.DefaultMeta(resources.Task)} t.WithDatabase(database) - t.WithEnabled(enabled) t.WithName(name) t.WithSchema(schema) t.WithSqlStatement(sqlStatement) + t.WithStarted(started) return t } @@ -225,11 +225,6 @@ func (t *TaskModel) WithEnableUnloadPhysicalTypeOptimization(enableUnloadPhysica return t } -func (t *TaskModel) WithEnabled(enabled bool) *TaskModel { - t.Enabled = tfconfig.BoolVariable(enabled) - return t -} - func (t *TaskModel) WithErrorIntegration(errorIntegration string) *TaskModel { t.ErrorIntegration = tfconfig.StringVariable(errorIntegration) return t @@ -350,6 +345,11 @@ func (t *TaskModel) WithSqlStatement(sqlStatement string) *TaskModel { return t } +func (t *TaskModel) WithStarted(started bool) *TaskModel { + t.Started = tfconfig.BoolVariable(started) + return t +} + func (t *TaskModel) WithStatementQueuedTimeoutInSeconds(statementQueuedTimeoutInSeconds int) *TaskModel { t.StatementQueuedTimeoutInSeconds = tfconfig.IntegerVariable(statementQueuedTimeoutInSeconds) return t @@ -594,11 +594,6 @@ func (t *TaskModel) WithEnableUnloadPhysicalTypeOptimizationValue(value tfconfig return t } -func (t *TaskModel) WithEnabledValue(value tfconfig.Variable) *TaskModel { - t.Enabled = value - return t -} - func (t *TaskModel) WithErrorIntegrationValue(value tfconfig.Variable) *TaskModel { t.ErrorIntegration = value return t @@ -719,6 +714,11 @@ func (t *TaskModel) WithSqlStatementValue(value tfconfig.Variable) *TaskModel { return t } +func (t *TaskModel) WithStartedValue(value tfconfig.Variable) *TaskModel { + t.Started = value + return t +} + func (t *TaskModel) WithStatementQueuedTimeoutInSecondsValue(value tfconfig.Variable) *TaskModel { t.StatementQueuedTimeoutInSeconds = value return t diff --git a/pkg/acceptance/helpers/task_client.go b/pkg/acceptance/helpers/task_client.go index 968662e436..98592b5eec 100644 --- a/pkg/acceptance/helpers/task_client.go +++ b/pkg/acceptance/helpers/task_client.go @@ -61,6 +61,14 @@ func (c *TaskClient) CreateWithRequest(t *testing.T, request *sdk.CreateTaskRequ return task, c.DropFunc(t, id) } +func (c *TaskClient) Alter(t *testing.T, req *sdk.AlterTaskRequest) { + t.Helper() + ctx := context.Background() + + err := c.client().Alter(ctx, req) + require.NoError(t, err) +} + func (c *TaskClient) DropFunc(t *testing.T, id sdk.SchemaObjectIdentifier) func() { t.Helper() ctx := context.Background() diff --git a/pkg/internal/collections/collection_helpers_test.go b/pkg/internal/collections/collection_helpers_test.go index ebebe35348..c9ff5c8b86 100644 --- a/pkg/internal/collections/collection_helpers_test.go +++ b/pkg/internal/collections/collection_helpers_test.go @@ -3,10 +3,11 @@ package collections import ( "errors" "fmt" - "github.com/stretchr/testify/assert" "strings" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 6e2ed52a93..708b4c6bdc 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ 
-44,7 +44,7 @@ var taskSchema = map[string]*schema.Schema{ DiffSuppressFunc: suppressIdentifierQuoting, Description: blocklistedCharactersFieldDescription("Specifies the identifier for the task; must be unique for the database and schema in which the task is created."), }, - "enabled": { + "started": { Type: schema.TypeBool, Required: true, DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShowWithMapping("state", func(state any) any { @@ -129,7 +129,7 @@ var taskSchema = map[string]*schema.Schema{ Type: schema.TypeString, Optional: true, DiffSuppressFunc: SuppressIfAny(DiffSuppressStatement, IgnoreChangeToCurrentSnowflakeValueInShow("condition")), - Description: "Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported.", + Description: "Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported. When a task is triggered (based on its SCHEDULE or AFTER setting), it validates the conditions of the expression to determine whether to execute. If the conditions of the expression are not met, then the task skips the current run. Any tasks that identify this task as a predecessor also don’t run.", }, "sql_statement": { Type: schema.TypeString, @@ -171,7 +171,7 @@ func Task() *schema.Resource { }, CustomizeDiff: customdiff.All( - ComputedIfAnyAttributeChanged(taskSchema, ShowOutputAttributeName, "name", "enabled", "warehouse", "user_task_managed_initial_warehouse_size", "schedule", "config", "allow_overlapping_execution", "error_integration", "comment", "finalize", "after", "when"), + ComputedIfAnyAttributeChanged(taskSchema, ShowOutputAttributeName, "name", "started", "warehouse", "user_task_managed_initial_warehouse_size", "schedule", "config", "allow_overlapping_execution", "error_integration", "comment", "finalize", "after", "when"), ComputedIfAnyAttributeChanged(taskParametersSchema, ParametersAttributeName, collections.Map(sdk.AsStringList(sdk.AllTaskParameters), strings.ToLower)...), ComputedIfAnyAttributeChanged(taskSchema, FullyQualifiedNameAttributeName, "name"), taskParametersCustomDiff, @@ -305,7 +305,7 @@ func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag // TODO(SNOW-1348116 - next pr): State upgrader for "id" (and potentially other fields) d.SetId(helpers.EncodeResourceIdentifier(id)) - if d.Get("enabled").(bool) { + if d.Get("started").(bool) { if err := waitForTaskStart(ctx, client, id); err != nil { return diag.Diagnostics{ { @@ -492,7 +492,7 @@ func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) diag.Diag } } - if d.Get("enabled").(bool) { + if d.Get("started").(bool) { log.Printf("Resuming the task in handled update") if err := waitForTaskStart(ctx, client, id); err != nil { return diag.FromErr(fmt.Errorf("failed to resume task %s, err = %w", id.FullyQualifiedName(), err)) @@ -566,7 +566,7 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { } if errs := errors.Join( - d.Set("enabled", task.State == sdk.TaskStateStarted), + d.Set("started", task.State == sdk.TaskStateStarted), d.Set("warehouse", warehouseId), d.Set("schedule", task.Schedule), d.Set("when", task.Condition), diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 94fe877fa6..6d09415520 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -28,6 +28,7 @@ import ( // TODO(SNOW-1348116 - next pr): More tests for complicated DAGs // TODO(SNOW-1348116 - next pr): Test for stored procedures passed 
to sql_statement (decide on name) // TODO(SNOW-1348116 - next pr): Test with cron schedule +// TODO(SNOW-1348116 - next pr): More test with external changes func TestAcc_Task_Basic(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) @@ -55,7 +56,7 @@ func TestAcc_Task_Basic(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanFalse). + HasStartedString(r.BooleanFalse). HasWarehouseString(""). HasScheduleString(""). HasConfigString(""). @@ -74,14 +75,14 @@ func TestAcc_Task_Basic(t *testing.T) { HasSchemaName(id.SchemaName()). HasOwner(currentRole.Name()). HasComment(""). - HasWarehouse(""). + HasWarehouse(sdk.NewAccountObjectIdentifier("")). HasSchedule(""). HasPredecessors(). HasState(sdk.TaskStateSuspended). HasDefinition(statement). HasCondition(""). HasAllowOverlappingExecution(false). - HasErrorIntegration(""). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). HasLastCommittedOn(""). HasLastSuspendedOn(""). HasOwnerRoleType("ROLE"). @@ -101,7 +102,7 @@ func TestAcc_Task_Basic(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanFalse). + HasStartedString(r.BooleanFalse). HasWarehouseString(""). HasScheduleString(""). HasConfigString(""). @@ -161,7 +162,7 @@ func TestAcc_Task_Complete(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). HasScheduleString("10 MINUTES"). HasConfigString(expectedTaskConfig). @@ -180,14 +181,14 @@ func TestAcc_Task_Complete(t *testing.T) { HasSchemaName(id.SchemaName()). HasOwner(currentRole.Name()). HasComment(comment). - HasWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + HasWarehouse(acc.TestClient().Ids.WarehouseId()). HasSchedule("10 MINUTES"). HasPredecessors(). HasState(sdk.TaskStateStarted). HasDefinition(statement). HasCondition(condition). HasAllowOverlappingExecution(true). - HasErrorIntegration(errorNotificationIntegration.ID().Name()). + HasErrorIntegration(errorNotificationIntegration.ID()). HasLastCommittedOnNotEmpty(). HasLastSuspendedOn(""). HasOwnerRoleType("ROLE"). @@ -207,7 +208,7 @@ func TestAcc_Task_Complete(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). HasScheduleString("10 MINUTES"). HasConfigString(expectedTaskConfig). @@ -234,7 +235,7 @@ func TestAcc_Task_Updates(t *testing.T) { statement := "SELECT 1" basicConfigModel := model.TaskWithId("test", id, false, statement) - // New warehouse created, because the common one has lower-case letters that won't work + // TODO(SNOW-1736173): New warehouse created, because the common one has lower-case letters that won't work warehouse, warehouseCleanup := acc.TestClient().Warehouse.CreateWarehouse(t) t.Cleanup(warehouseCleanup) @@ -273,7 +274,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanFalse). + HasStartedString(r.BooleanFalse). HasWarehouseString(""). HasScheduleString(""). HasConfigString(""). 
@@ -292,14 +293,14 @@ func TestAcc_Task_Updates(t *testing.T) { HasSchemaName(id.SchemaName()). HasOwner(currentRole.Name()). HasComment(""). - HasWarehouse(""). + HasWarehouse(sdk.NewAccountObjectIdentifier("")). HasSchedule(""). HasPredecessors(). HasState(sdk.TaskStateSuspended). HasDefinition(statement). HasCondition(""). HasAllowOverlappingExecution(false). - HasErrorIntegration(""). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). HasLastCommittedOn(""). HasLastSuspendedOn(""). HasOwnerRoleType("ROLE"). @@ -317,7 +318,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasWarehouseString(warehouse.ID().Name()). HasScheduleString("5 MINUTES"). HasConfigString(expectedTaskConfig). @@ -335,7 +336,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasDatabaseName(id.DatabaseName()). HasSchemaName(id.SchemaName()). HasOwner(currentRole.Name()). - HasWarehouse(warehouse.ID().Name()). + HasWarehouse(warehouse.ID()). HasComment(comment). HasSchedule("5 MINUTES"). HasPredecessors(). @@ -343,7 +344,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasDefinition(statement). HasCondition(condition). HasAllowOverlappingExecution(true). - HasErrorIntegration(errorNotificationIntegration.ID().Name()). + HasErrorIntegration(errorNotificationIntegration.ID()). HasLastCommittedOnNotEmpty(). HasLastSuspendedOn(""). HasOwnerRoleType("ROLE"). @@ -361,7 +362,7 @@ func TestAcc_Task_Updates(t *testing.T) { HasDatabaseString(id.DatabaseName()). HasSchemaString(id.SchemaName()). HasNameString(id.Name()). - HasEnabledString(r.BooleanFalse). + HasStartedString(r.BooleanFalse). HasWarehouseString(""). HasScheduleString(""). HasConfigString(""). @@ -380,14 +381,14 @@ func TestAcc_Task_Updates(t *testing.T) { HasSchemaName(id.SchemaName()). HasOwner(currentRole.Name()). HasComment(""). - HasWarehouse(""). + HasWarehouse(sdk.NewAccountObjectIdentifier("")). HasSchedule(""). HasPredecessors(). HasState(sdk.TaskStateSuspended). HasDefinition(statement). HasCondition(""). HasAllowOverlappingExecution(false). - HasErrorIntegration(""). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). HasLastCommittedOnNotEmpty(). HasLastSuspendedOnNotEmpty(). HasOwnerRoleType("ROLE"). @@ -721,7 +722,7 @@ func TestAcc_Task_Enabled(t *testing.T) { Config: config.FromModel(t, configModelDisabled), Check: assert.AssertThat(t, resourceassert.TaskResource(t, configModelDisabled.ResourceReference()). - HasEnabledString(r.BooleanFalse), + HasStartedString(r.BooleanFalse), resourceshowoutputassert.TaskShowOutput(t, configModelDisabled.ResourceReference()). HasState(sdk.TaskStateSuspended), ), @@ -730,7 +731,7 @@ func TestAcc_Task_Enabled(t *testing.T) { Config: config.FromModel(t, configModelEnabled), Check: assert.AssertThat(t, resourceassert.TaskResource(t, configModelEnabled.ResourceReference()). - HasEnabledString(r.BooleanTrue), + HasStartedString(r.BooleanTrue), resourceshowoutputassert.TaskShowOutput(t, configModelEnabled.ResourceReference()). HasState(sdk.TaskStateStarted), ), @@ -739,7 +740,7 @@ func TestAcc_Task_Enabled(t *testing.T) { Config: config.FromModel(t, configModelDisabled), Check: assert.AssertThat(t, resourceassert.TaskResource(t, configModelDisabled.ResourceReference()). - HasEnabledString(r.BooleanFalse), + HasStartedString(r.BooleanFalse), resourceshowoutputassert.TaskShowOutput(t, configModelDisabled.ResourceReference()). 
HasState(sdk.TaskStateSuspended), ), @@ -790,14 +791,14 @@ func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { Check: assert.AssertThat(t, resourceassert.TaskResource(t, firstTaskStandaloneModel.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasSuspendTaskAfterNumFailuresString("1"), resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModel.ResourceReference()). HasSchedule(schedule). HasState(sdk.TaskStateStarted), resourceassert.TaskResource(t, secondTaskStandaloneModel.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanTrue), + HasStartedString(r.BooleanTrue), resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModel.ResourceReference()). HasSchedule(schedule). HasState(sdk.TaskStateStarted), @@ -809,14 +810,14 @@ func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { Check: assert.AssertThat(t, resourceassert.TaskResource(t, rootTaskModel.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasSuspendTaskAfterNumFailuresString("2"), resourceshowoutputassert.TaskShowOutput(t, rootTaskModel.ResourceReference()). HasSchedule(schedule). HasState(sdk.TaskStateStarted), resourceassert.TaskResource(t, childTaskModel.ResourceReference()). HasAfterIds(id). - HasEnabledString(r.BooleanTrue), + HasStartedString(r.BooleanTrue), resourceshowoutputassert.TaskShowOutput(t, childTaskModel.ResourceReference()). HasPredecessors(id). HasState(sdk.TaskStateStarted), @@ -828,14 +829,14 @@ func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { Check: assert.AssertThat(t, resourceassert.TaskResource(t, firstTaskStandaloneModelDisabled.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanFalse). + HasStartedString(r.BooleanFalse). HasSuspendTaskAfterNumFailuresString("10"), resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModelDisabled.ResourceReference()). HasSchedule(schedule). HasState(sdk.TaskStateSuspended), resourceassert.TaskResource(t, secondTaskStandaloneModelDisabled.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanFalse), + HasStartedString(r.BooleanFalse), resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModelDisabled.ResourceReference()). HasSchedule(schedule). HasState(sdk.TaskStateSuspended), @@ -886,16 +887,18 @@ func TestAcc_Task_ConvertStandaloneTaskToFinalizer(t *testing.T) { Check: assert.AssertThat(t, resourceassert.TaskResource(t, firstTaskStandaloneModel.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasSuspendTaskAfterNumFailuresString("1"), resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModel.ResourceReference()). HasSchedule(schedule). + HasTaskRelations(sdk.TaskRelations{}). HasState(sdk.TaskStateStarted), resourceassert.TaskResource(t, secondTaskStandaloneModel.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanTrue), + HasStartedString(r.BooleanTrue), resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModel.ResourceReference()). HasSchedule(schedule). + HasTaskRelations(sdk.TaskRelations{}). HasState(sdk.TaskStateStarted), ), }, @@ -905,16 +908,17 @@ func TestAcc_Task_ConvertStandaloneTaskToFinalizer(t *testing.T) { Check: assert.AssertThat(t, resourceassert.TaskResource(t, rootTaskModel.ResourceReference()). HasScheduleString(schedule). 
- HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasSuspendTaskAfterNumFailuresString("2"), resourceshowoutputassert.TaskShowOutput(t, rootTaskModel.ResourceReference()). HasSchedule(schedule). + // TODO(SNOW-1348116 - next pr): See why finalizer task is not populated // HasTaskRelations(sdk.TaskRelations{FinalizerTask: &finalizerTaskId}). HasState(sdk.TaskStateStarted), resourceassert.TaskResource(t, childTaskModel.ResourceReference()). - HasEnabledString(r.BooleanTrue), + HasStartedString(r.BooleanTrue), resourceshowoutputassert.TaskShowOutput(t, childTaskModel.ResourceReference()). - // HasTaskRelations(sdk.TaskRelations{FinalizedRootTask: &rootTaskId}). + HasTaskRelations(sdk.TaskRelations{FinalizedRootTask: &rootTaskId}). HasState(sdk.TaskStateStarted), ), }, @@ -924,16 +928,18 @@ func TestAcc_Task_ConvertStandaloneTaskToFinalizer(t *testing.T) { Check: assert.AssertThat(t, resourceassert.TaskResource(t, firstTaskStandaloneModelDisabled.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanFalse). + HasStartedString(r.BooleanFalse). HasSuspendTaskAfterNumFailuresString("10"), resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModelDisabled.ResourceReference()). HasSchedule(schedule). + HasTaskRelations(sdk.TaskRelations{}). HasState(sdk.TaskStateSuspended), resourceassert.TaskResource(t, secondTaskStandaloneModelDisabled.ResourceReference()). HasScheduleString(schedule). - HasEnabledString(r.BooleanFalse), + HasStartedString(r.BooleanFalse), resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModelDisabled.ResourceReference()). HasSchedule(schedule). + HasTaskRelations(sdk.TaskRelations{}). HasState(sdk.TaskStateSuspended), ), }, @@ -978,12 +984,12 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModel), Check: assert.AssertThat(t, resourceassert.TaskResource(t, "snowflake_task.child"). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule). HasAfterIds(). HasSuspendTaskAfterNumFailuresString("10"), resourceassert.TaskResource(t, "snowflake_task.root"). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule). HasSuspendTaskAfterNumFailuresString("1"), ), @@ -992,12 +998,12 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModelAfterSuspendFailuresUpdate) + config.FromModel(t, childTaskConfigModelWithAfter), Check: assert.AssertThat(t, resourceassert.TaskResource(t, "snowflake_task.child"). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(""). HasAfterIds(rootId). HasSuspendTaskAfterNumFailuresString("10"), resourceassert.TaskResource(t, "snowflake_task.root"). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule). HasSuspendTaskAfterNumFailuresString("2"), ), @@ -1006,12 +1012,12 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModel), Check: assert.AssertThat(t, resourceassert.TaskResource(t, "snowflake_task.child"). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule). HasAfterIds(). HasSuspendTaskAfterNumFailuresString("10"), resourceassert.TaskResource(t, "snowflake_task.root"). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). 
HasScheduleString(schedule). HasSuspendTaskAfterNumFailuresString("1"), ), @@ -1020,12 +1026,12 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModelDisabled) + config.FromModel(t, childTaskConfigModelDisabled), Check: assert.AssertThat(t, resourceassert.TaskResource(t, "snowflake_task.child"). - HasEnabledString(r.BooleanFalse). + HasStartedString(r.BooleanFalse). HasScheduleString(schedule). HasAfterIds(). HasSuspendTaskAfterNumFailuresString("10"), resourceassert.TaskResource(t, "snowflake_task.root"). - HasEnabledString(r.BooleanFalse). + HasStartedString(r.BooleanFalse). HasScheduleString(schedule). HasSuspendTaskAfterNumFailuresString("10"), ), @@ -1072,10 +1078,10 @@ func TestAcc_Task_WithAfter(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithAfter), Check: assert.AssertThat(t, resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasAfterIds(rootId), ), }, @@ -1083,10 +1089,10 @@ func TestAcc_Task_WithAfter(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutAfter), Check: assert.AssertThat(t, resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModelWithoutAfter.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasAfterIds(), ), }, @@ -1132,10 +1138,10 @@ func TestAcc_Task_WithFinalizer(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithFinalizer), Check: assert.AssertThat(t, resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModelWithFinalizer.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasFinalizeString(rootId.FullyQualifiedName()), ), }, @@ -1143,10 +1149,10 @@ func TestAcc_Task_WithFinalizer(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutFinalizer), Check: assert.AssertThat(t, resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasFinalizeString(""), ), }, @@ -1194,10 +1200,10 @@ func TestAcc_Task_issue2207(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModel), Check: assert.AssertThat(t, resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModel.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasAfterIds(rootId). 
HasCommentString("abc"), ), @@ -1212,10 +1218,10 @@ func TestAcc_Task_issue2207(t *testing.T) { Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithDifferentComment), Check: assert.AssertThat(t, resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasScheduleString(schedule), resourceassert.TaskResource(t, childTaskConfigModelWithDifferentComment.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasAfterIds(rootId). HasCommentString("def"), ), @@ -1255,7 +1261,7 @@ func TestAcc_Task_issue2036(t *testing.T) { Config: config.FromModel(t, taskConfigModelWithoutWhen), Check: assert.AssertThat(t, resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasWhenString(""), ), }, @@ -1264,7 +1270,7 @@ func TestAcc_Task_issue2036(t *testing.T) { Config: config.FromModel(t, taskConfigModelWithWhen), Check: assert.AssertThat(t, resourceassert.TaskResource(t, taskConfigModelWithWhen.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasWhenString("TRUE"), ), }, @@ -1273,10 +1279,232 @@ func TestAcc_Task_issue2036(t *testing.T) { Config: config.FromModel(t, taskConfigModelWithoutWhen), Check: assert.AssertThat(t, resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). - HasEnabledString(r.BooleanTrue). + HasStartedString(r.BooleanTrue). HasWhenString(""), ), }, }, }) } + +func TestAcc_Task_UpdateFinalizerExternally(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithSchedule(schedule). + WithSqlStatement(statement) + + childTaskConfigModelWithoutFinalizer := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithSchedule(schedule). + WithComment("abc"). + WithSqlStatement(statement) + childTaskConfigModelWithoutFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + childTaskConfigModelWithFinalizer := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithFinalize(rootId.FullyQualifiedName()). + WithComment("abc"). 
+ WithSqlStatement(statement) + childTaskConfigModelWithFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutFinalizer), + }, + // Set finalizer externally + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true))) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSetFinalize(rootId)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true)) + }, + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(""), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // Set finalizer in config + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(rootId.FullyQualifiedName()), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{FinalizedRootTask: &rootId}), + ), + }, + // Unset finalizer externally + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithUnsetFinalize(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSet(*sdk.NewTaskSetRequest().WithSchedule(schedule))) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true)) + }, + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(rootId.FullyQualifiedName()), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasState(sdk.TaskStateStarted). 
+ HasTaskRelations(sdk.TaskRelations{FinalizedRootTask: &rootId}), + ), + }, + // Unset finalizer in config + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(""), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + }, + }) +} + +func TestAcc_Task_UpdateAfterExternally(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithSchedule(schedule). + WithSqlStatement(statement) + + childTaskConfigModelWithoutAfter := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithSchedule(schedule). + WithComment("abc"). + WithSqlStatement(statement) + childTaskConfigModelWithoutAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + childTaskConfigModelWithAfter := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithComment("abc"). + WithSqlStatement(statement) + childTaskConfigModelWithAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutAfter), + }, + // Set after externally + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true))) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithAddAfter([]sdk.SchemaObjectIdentifier{rootId})) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true)) + }, + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfterIds(), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasState(sdk.TaskStateStarted). 
+ HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // Set after in config + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfterIds(rootId), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithAfter.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{Predecessors: []sdk.SchemaObjectIdentifier{rootId}}), + ), + }, + // Unset after externally + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithRemoveAfter([]sdk.SchemaObjectIdentifier{rootId})) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSet(*sdk.NewTaskSetRequest().WithSchedule(schedule))) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true)) + }, + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfterIds(rootId), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithAfter.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{Predecessors: []sdk.SchemaObjectIdentifier{rootId}}), + ), + }, + // Unset after in config + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithoutAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfterIds(), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + }, + }) +} diff --git a/pkg/schemas/gen/README.md b/pkg/schemas/gen/README.md index 78420ff9e2..3476f6d06f 100644 --- a/pkg/schemas/gen/README.md +++ b/pkg/schemas/gen/README.md @@ -83,6 +83,7 @@ If you change the show output struct in the SDK: Functional improvements: - handle the missing types (TODOs in [schema_field_mapper.go](./schema_field_mapper.go)) + - handle nested structs with identifiers / slices of identifiers - parametrize the generation, e.g.: - (optional) parametrize the output directory - currently, it's always written to `schemas` package - discover a change and generate as part of a `make pre-push` diff --git a/pkg/schemas/task_gen.go b/pkg/schemas/task_gen.go index 97a80a0976..dec4850868 100644 --- a/pkg/schemas/task_gen.go +++ b/pkg/schemas/task_gen.go @@ -1,5 +1,3 @@ -// Code generated by sdk-to-schema generator; DO NOT EDIT. 
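The TestAcc_Task_UpdateFinalizerExternally and TestAcc_Task_UpdateAfterExternally tests above drive every external change through the same suspend β†’ alter β†’ resume sequence in their PreConfig blocks. Below is a minimal sketch of that sequence, assuming the acceptance-test package context those tests already use; the rewireChildAfterRoot helper itself is an illustration and is not part of this patch.

```go
package resources_test

import (
	"testing"

	acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance"
	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk"
)

// rewireChildAfterRoot reproduces the suspend -> alter -> resume sequence the
// external-change tests run in their PreConfig blocks: both tasks are
// suspended, the child's schedule is swapped for an AFTER dependency on the
// root, and the graph is resumed child-first. The helper name and its
// existence as a separate function are illustrative only.
func rewireChildAfterRoot(t *testing.T, rootId sdk.SchemaObjectIdentifier, childId sdk.SchemaObjectIdentifier) {
	t.Helper()

	// Suspend the root first, then the child, before touching relations.
	acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true))
	acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true))

	// Drop the child's own schedule and attach it to the root task instead.
	acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true)))
	acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithAddAfter([]sdk.SchemaObjectIdentifier{rootId}))

	// Resume in the opposite order: child first, root last.
	acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true))
	acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true))
}
```

The ordering mirrors the tests exactly: the root is suspended first and resumed last, so all relation changes happen while the whole graph is suspended.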
- package schemas import ( @@ -147,6 +145,7 @@ func TaskToSchema(task *sdk.Task) map[string]any { taskSchema["config"] = task.Config taskSchema["budget"] = task.Budget taskSchema["last_suspended_reason"] = task.LastSuspendedReason + // This is manually edited, please don't re-generate this file finalizer := "" if task.TaskRelations.FinalizerTask != nil { finalizer = task.TaskRelations.FinalizerTask.FullyQualifiedName() diff --git a/pkg/sdk/tasks_impl_gen.go b/pkg/sdk/tasks_impl_gen.go index d3e10bcc86..9a15a6c680 100644 --- a/pkg/sdk/tasks_impl_gen.go +++ b/pkg/sdk/tasks_impl_gen.go @@ -155,6 +155,7 @@ func GetRootTasks(v Tasks, ctx context.Context, id SchemaObjectIdentifier) ([]Ta if task.TaskRelations.FinalizedRootTask != nil { tasksToExamine.Push(*task.TaskRelations.FinalizedRootTask) + alreadyExaminedTasksNames = append(alreadyExaminedTasksNames, current.Name()) continue } From f0514bcb9d530e9f9afcb89bac43b56a59d69254 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Mon, 14 Oct 2024 16:13:49 +0200 Subject: [PATCH 09/12] docs re-generated --- docs/resources/task.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/resources/task.md b/docs/resources/task.md index 56540d3c95..c7d8965aa3 100644 --- a/docs/resources/task.md +++ b/docs/resources/task.md @@ -77,10 +77,10 @@ resource "snowflake_task" "test_task" { ### Required - `database` (String) The database in which to create the task. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` -- `enabled` (Boolean) Specifies if the task should be started or suspended. - `name` (String) Specifies the identifier for the task; must be unique for the database and schema in which the task is created. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` - `schema` (String) The schema in which to create the task. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `(`, `)`, `"` - `sql_statement` (String) Any single SQL statement, or a call to a stored procedure, executed when the task runs. +- `started` (Boolean) Specifies if the task should be started or suspended. ### Optional @@ -150,7 +150,7 @@ resource "snowflake_task" "test_task" { - `warehouse` (String) The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. Due to Snowflake limitations warehouse identifier can consist of only upper-cased letters. (Conflicts with user_task_managed_initial_warehouse_size) - `week_of_year_policy` (Number) Specifies how the weeks in a given year are computed. `0`: The semantics used are equivalent to the ISO semantics, in which a week belongs to a given year if at least 4 days of that week are in that year. `1`: January 1 is included in the first week of the year and December 31 is included in the last week of the year. 
For more information, check [WEEK_OF_YEAR_POLICY docs](https://docs.snowflake.com/en/sql-reference/parameters#week-of-year-policy). - `week_start` (Number) Specifies the first day of the week (used by week-related date functions). `0`: Legacy Snowflake behavior is used (i.e. ISO-like semantics). `1` (Monday) to `7` (Sunday): All the week-related functions use weeks that start on the specified day of the week. For more information, check [WEEK_START docs](https://docs.snowflake.com/en/sql-reference/parameters#week-start). -- `when` (String) Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported. +- `when` (String) Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported. When a task is triggered (based on its SCHEDULE or AFTER setting), it validates the conditions of the expression to determine whether to execute. If the conditions of the expression are not met, then the task skips the current run. Any tasks that identify this task as a predecessor also don’t run. ### Read-Only From ee9583dfa93da2a748b9003843edc1fa844742f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Mon, 14 Oct 2024 16:42:06 +0200 Subject: [PATCH 10/12] Added test that proves the issue --- pkg/resources/task_acceptance_test.go | 322 ++++++++++++++---------- v1-preparations/ESSENTIAL_GA_OBJECTS.MD | 2 +- 2 files changed, 193 insertions(+), 131 deletions(-) diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 6d09415520..58b29d5bf0 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -1,6 +1,9 @@ package resources_test import ( + "fmt" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "regexp" "strings" "testing" @@ -15,12 +18,10 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/testenvs" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" r "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" configvariable "github.com/hashicorp/terraform-plugin-testing/config" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - - "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/tfversion" ) @@ -1160,133 +1161,6 @@ func TestAcc_Task_WithFinalizer(t *testing.T) { }) } -func TestAcc_Task_issue2207(t *testing.T) { - _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) - acc.TestAccPreCheck(t) - - rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() - childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() - statement := "SELECT 1" - schedule := "5 MINUTES" - - rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). - WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithSchedule(schedule). - WithSqlStatement(statement) - - childTaskConfigModel := model.TaskWithId("child", childId, true, statement). - WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). - WithComment("abc"). 
- WithSqlStatement(statement) - childTaskConfigModel.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) - - childTaskConfigModelWithDifferentComment := model.TaskWithId("child", childId, true, statement). - WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). - WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). - WithComment("def"). - WithSqlStatement(statement) - childTaskConfigModelWithDifferentComment.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) - - resource.Test(t, resource.TestCase{ - ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, - PreCheck: func() { acc.TestAccPreCheck(t) }, - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.RequireAbove(tfversion.Version1_5_0), - }, - CheckDestroy: acc.CheckDestroy(t, resources.Task), - Steps: []resource.TestStep{ - { - Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModel), - Check: assert.AssertThat(t, - resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). - HasStartedString(r.BooleanTrue). - HasScheduleString(schedule), - resourceassert.TaskResource(t, childTaskConfigModel.ResourceReference()). - HasStartedString(r.BooleanTrue). - HasAfterIds(rootId). - HasCommentString("abc"), - ), - }, - // change comment - { - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(childTaskConfigModelWithDifferentComment.ResourceReference(), plancheck.ResourceActionUpdate), - }, - }, - Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithDifferentComment), - Check: assert.AssertThat(t, - resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). - HasStartedString(r.BooleanTrue). - HasScheduleString(schedule), - resourceassert.TaskResource(t, childTaskConfigModelWithDifferentComment.ResourceReference()). - HasStartedString(r.BooleanTrue). - HasAfterIds(rootId). - HasCommentString("def"), - ), - }, - }, - }) -} - -func TestAcc_Task_issue2036(t *testing.T) { - _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) - acc.TestAccPreCheck(t) - - id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() - statement := "SELECT 1" - schedule := "5 MINUTES" - when := "TRUE" - - taskConfigModelWithoutWhen := model.TaskWithId("test", id, true, statement). - WithSchedule(schedule). - WithSqlStatement(statement) - - taskConfigModelWithWhen := model.TaskWithId("test", id, true, statement). - WithSchedule(schedule). - WithSqlStatement(statement). - WithWhen(when) - - resource.Test(t, resource.TestCase{ - ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, - PreCheck: func() { acc.TestAccPreCheck(t) }, - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.RequireAbove(tfversion.Version1_5_0), - }, - CheckDestroy: acc.CheckDestroy(t, resources.Task), - Steps: []resource.TestStep{ - // create without when - { - Config: config.FromModel(t, taskConfigModelWithoutWhen), - Check: assert.AssertThat(t, - resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). - HasStartedString(r.BooleanTrue). - HasWhenString(""), - ), - }, - // add when - { - Config: config.FromModel(t, taskConfigModelWithWhen), - Check: assert.AssertThat(t, - resourceassert.TaskResource(t, taskConfigModelWithWhen.ResourceReference()). - HasStartedString(r.BooleanTrue). 
- HasWhenString("TRUE"), - ), - }, - // remove when - { - Config: config.FromModel(t, taskConfigModelWithoutWhen), - Check: assert.AssertThat(t, - resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). - HasStartedString(r.BooleanTrue). - HasWhenString(""), - ), - }, - }, - }) -} - func TestAcc_Task_UpdateFinalizerExternally(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) acc.TestAccPreCheck(t) @@ -1508,3 +1382,191 @@ func TestAcc_Task_UpdateAfterExternally(t *testing.T) { }, }) } + +func TestAcc_Task_issue2207(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithSchedule(schedule). + WithSqlStatement(statement) + + childTaskConfigModel := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithComment("abc"). + WithSqlStatement(statement) + childTaskConfigModel.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + childTaskConfigModelWithDifferentComment := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithComment("def"). + WithSqlStatement(statement) + childTaskConfigModelWithDifferentComment.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()}) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleString(schedule), + resourceassert.TaskResource(t, childTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfterIds(rootId). + HasCommentString("abc"), + ), + }, + // change comment + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(childTaskConfigModelWithDifferentComment.ResourceReference(), plancheck.ResourceActionUpdate), + }, + }, + Config: config.FromModel(t, rootTaskConfigModel) + config.FromModel(t, childTaskConfigModelWithDifferentComment), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleString(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithDifferentComment.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfterIds(rootId). 
+ HasCommentString("def"), + ), + }, + }, + }) +} + +func TestAcc_Task_issue2036(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + when := "TRUE" + + taskConfigModelWithoutWhen := model.TaskWithId("test", id, true, statement). + WithSchedule(schedule). + WithSqlStatement(statement) + + taskConfigModelWithWhen := model.TaskWithId("test", id, true, statement). + WithSchedule(schedule). + WithSqlStatement(statement). + WithWhen(when) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + // create without when + { + Config: config.FromModel(t, taskConfigModelWithoutWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasWhenString(""), + ), + }, + // add when + { + Config: config.FromModel(t, taskConfigModelWithWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithWhen.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasWhenString("TRUE"), + ), + }, + // remove when + { + Config: config.FromModel(t, taskConfigModelWithoutWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasWhenString(""), + ), + }, + }, + }) +} + +func TestAcc_Task_issue3113(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.Create(t) + t.Cleanup(errorNotificationIntegrationCleanup) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := "5 MINUTES" + configModel := model.TaskWithId("test", id, true, statement). + WithSchedule(schedule). + WithSqlStatement(statement). + WithErrorIntegration(errorNotificationIntegration.ID().Name()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.97.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: taskConfigWithErrorIntegration(id, errorNotificationIntegration.ID()), + ExpectError: regexp.MustCompile("error_integration: '' expected type 'string', got unconvertible type 'sdk.AccountObjectIdentifier'"), + }, + { + PreConfig: func() { + acc.TestClient().Task.DropFunc(t, id)() + }, + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). 
+ HasErrorIntegrationString(errorNotificationIntegration.ID().Name()), + ), + }, + }, + }) +} + +func taskConfigWithErrorIntegration(id sdk.SchemaObjectIdentifier, errorIntegrationId sdk.AccountObjectIdentifier) string { + return fmt.Sprintf(` +resource "snowflake_task" "test" { + database = "%[1]s" + schema = "%[2]s" + name = "%[3]s" + schedule = "5 MINUTES" + sql_statement = "SELECT 1" + enabled = true + error_integration = "%[4]s" +} +`, id.DatabaseName(), id.SchemaName(), id.Name(), errorIntegrationId.Name()) +} diff --git a/v1-preparations/ESSENTIAL_GA_OBJECTS.MD b/v1-preparations/ESSENTIAL_GA_OBJECTS.MD index 02789cecae..b1b2065e68 100644 --- a/v1-preparations/ESSENTIAL_GA_OBJECTS.MD +++ b/v1-preparations/ESSENTIAL_GA_OBJECTS.MD @@ -33,7 +33,7 @@ newer provider versions. We will address these while working on the given object | STREAMLIT | πŸš€ | - | | TABLE | ❌ | [#2997](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2997), [#2844](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2844), [#2839](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2839), [#2735](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2735), [#2733](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2733), [#2683](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2683), [#2676](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2676), [#2674](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2674), [#2629](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2629), [#2418](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2418), [#2415](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2415), [#2406](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2406), [#2236](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2236), [#2035](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2035), [#1823](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1823), [#1799](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1799), [#1764](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1764), [#1600](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1600), [#1387](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1387), [#1272](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1272), [#1271](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1271), [#1248](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1248), [#1241](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1241), [#1146](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1146), [#1032](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1032), [#420](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/420) | | TAG | πŸ‘¨β€πŸ’» | [#2943](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2902), [#2598](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2598), [#1910](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1910), [#1909](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1909), [#1862](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1862), 
[#1806](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1806), [#1657](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1657), [#1496](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1496), [#1443](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1443), [#1394](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1394), [#1372](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1372), [#1074](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1074) | -| TASK | πŸ‘¨β€πŸ’» | [#1419](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1419), [#1250](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1250), [#1194](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1194), [#1088](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1088) | +| TASK | πŸ‘¨β€πŸ’» | [#3136](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3136), [#1419](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1419), [#1250](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1250), [#1194](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1194), [#1088](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1088) | | VIEW | πŸš€ | issues in the older versions: [resources](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues?q=label%3Aresource%3Aview+) and [datasources](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues?q=label%3Adata_source%3Aviews+) | | snowflake_unsafe_execute | ❌ | [#2934](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2934) | From 012fb1835c3fceb166f9b81f159e7e09a9af61de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Mon, 4 Nov 2024 12:22:30 +0100 Subject: [PATCH 11/12] Resolve merge conflicts --- pkg/resources/task_acceptance_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 58b29d5bf0..144f7b70bd 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -2,11 +2,12 @@ package resources_test import ( "fmt" - "github.com/hashicorp/terraform-plugin-testing/plancheck" "regexp" "strings" "testing" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/objectparametersassert" From 90d70469b3b6fadb123d943875b92dac69d6c261 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Mon, 4 Nov 2024 12:46:23 +0100 Subject: [PATCH 12/12] Resolve merge conflicts and fix failing tests --- pkg/datasources/tasks.go | 2 +- pkg/datasources/tasks_acceptance_test.go | 6 +--- .../grant_ownership_acceptance_test.go | 2 +- pkg/resources/task.go | 2 +- pkg/resources/task_acceptance_test.go | 32 +++++++++---------- .../TestAcc_GrantOwnership/OnAllTasks/test.tf | 2 ++ .../TestAcc_GrantOwnership/OnTask/test.tf | 1 + .../OnTask_Discussion2877/1/test.tf | 1 + .../OnTask_Discussion2877/2/test.tf | 4 ++- .../OnTask_Discussion2877/3/test.tf | 1 + .../OnTask_Discussion2877/4/test.tf | 4 ++- 11 files changed, 31 insertions(+), 
26 deletions(-) diff --git a/pkg/datasources/tasks.go b/pkg/datasources/tasks.go index 5af820fa7c..ff62bc5e96 100644 --- a/pkg/datasources/tasks.go +++ b/pkg/datasources/tasks.go @@ -85,7 +85,7 @@ func ReadTasks(d *schema.ResourceData, meta interface{}) error { taskMap["database"] = task.DatabaseName taskMap["schema"] = task.SchemaName taskMap["comment"] = task.Comment - taskMap["warehouse"] = task.Warehouse + taskMap["warehouse"] = task.Warehouse.Name() tasks = append(tasks, taskMap) } diff --git a/pkg/datasources/tasks_acceptance_test.go b/pkg/datasources/tasks_acceptance_test.go index 83ef718791..79c6069365 100644 --- a/pkg/datasources/tasks_acceptance_test.go +++ b/pkg/datasources/tasks_acceptance_test.go @@ -38,7 +38,6 @@ func TestAcc_Tasks(t *testing.T) { func tasks(databaseName string, schemaName string, taskName string) string { return fmt.Sprintf(` - resource snowflake_database "test" { name = "%v" } @@ -60,11 +59,8 @@ func tasks(databaseName string, schemaName string, taskName string) string { schema = snowflake_schema.test.name warehouse = snowflake_warehouse.test.name sql_statement = "SHOW FUNCTIONS" - enabled = true + started = true schedule = "15 MINUTES" - lifecycle { - ignore_changes = [session_parameters] - } } data snowflake_tasks "t" { diff --git a/pkg/resources/grant_ownership_acceptance_test.go b/pkg/resources/grant_ownership_acceptance_test.go index 4980f1bf37..d9233fb1c7 100644 --- a/pkg/resources/grant_ownership_acceptance_test.go +++ b/pkg/resources/grant_ownership_acceptance_test.go @@ -1210,7 +1210,7 @@ func TestAcc_GrantOwnership_OnTask_Discussion2877(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_task.test", "name", taskId.Name()), resource.TestCheckResourceAttr("snowflake_task.child", "name", childId.Name()), - resource.TestCheckResourceAttr("snowflake_task.child", "after.0", taskId.Name()), + resource.TestCheckResourceAttr("snowflake_task.child", "after.0", taskId.FullyQualifiedName()), checkResourceOwnershipIsGranted(&sdk.ShowGrantOptions{ On: &sdk.ShowGrantsOn{ Object: &sdk.Object{ diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 708b4c6bdc..498d1a38ca 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -538,7 +538,7 @@ func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { if withExternalChangesMarking { if err = handleExternalChangesToObjectInShow(d, - showMapping{"allow_overlapping_execution", "allow_overlapping_execution", task.AllowOverlappingExecution, booleanStringFromBool(task.AllowOverlappingExecution), nil}, + outputMapping{"allow_overlapping_execution", "allow_overlapping_execution", task.AllowOverlappingExecution, booleanStringFromBool(task.AllowOverlappingExecution), nil}, ); err != nil { return diag.FromErr(err) } diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 144f7b70bd..d17a48b174 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -772,13 +772,13 @@ func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { WithSuspendTaskAfterNumFailures(2) childTaskModel := model.TaskWithId("second_task", id2, true, statement). WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(id.FullyQualifiedName()))) - childTaskModel.SetDependsOn([]string{rootTaskModel.ResourceReference()}) + childTaskModel.SetDependsOn(rootTaskModel.ResourceReference()) firstTaskStandaloneModelDisabled := model.TaskWithId("main_task", id, false, statement). 
 		WithSchedule(schedule)
 	secondTaskStandaloneModelDisabled := model.TaskWithId("second_task", id2, false, statement).
 		WithSchedule(schedule)
-	secondTaskStandaloneModelDisabled.SetDependsOn([]string{firstTaskStandaloneModelDisabled.ResourceReference()})
+	secondTaskStandaloneModelDisabled.SetDependsOn(firstTaskStandaloneModelDisabled.ResourceReference())

 	resource.Test(t, resource.TestCase{
 		ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
@@ -868,13 +868,13 @@ func TestAcc_Task_ConvertStandaloneTaskToFinalizer(t *testing.T) {
 		WithSuspendTaskAfterNumFailures(2)
 	childTaskModel := model.TaskWithId("second_task", finalizerTaskId, true, statement).
 		WithFinalize(rootTaskId.FullyQualifiedName())
-	childTaskModel.SetDependsOn([]string{rootTaskModel.ResourceReference()})
+	childTaskModel.SetDependsOn(rootTaskModel.ResourceReference())

 	firstTaskStandaloneModelDisabled := model.TaskWithId("main_task", rootTaskId, false, statement).
 		WithSchedule(schedule)
 	secondTaskStandaloneModelDisabled := model.TaskWithId("second_task", finalizerTaskId, false, statement).
 		WithSchedule(schedule)
-	secondTaskStandaloneModelDisabled.SetDependsOn([]string{firstTaskStandaloneModelDisabled.ResourceReference()})
+	secondTaskStandaloneModelDisabled.SetDependsOn(firstTaskStandaloneModelDisabled.ResourceReference())

 	resource.Test(t, resource.TestCase{
 		ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
@@ -966,13 +966,13 @@ func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) {
 		WithSuspendTaskAfterNumFailures(2)
 	childTaskConfigModelWithAfter := model.TaskWithId("child", childId, true, statement).
 		WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName())))
-	childTaskConfigModelWithAfter.SetDependsOn([]string{rootTaskConfigModelAfterSuspendFailuresUpdate.ResourceReference()})
+	childTaskConfigModelWithAfter.SetDependsOn(rootTaskConfigModelAfterSuspendFailuresUpdate.ResourceReference())

 	rootTaskConfigModelDisabled := model.TaskWithId("root", rootId, false, statement).
 		WithSchedule(schedule)
 	childTaskConfigModelDisabled := model.TaskWithId("child", childId, false, statement).
 		WithSchedule(schedule)
-	childTaskConfigModelDisabled.SetDependsOn([]string{rootTaskConfigModelDisabled.ResourceReference()})
+	childTaskConfigModelDisabled.SetDependsOn(rootTaskConfigModelDisabled.ResourceReference())

 	resource.Test(t, resource.TestCase{
 		ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
@@ -1060,13 +1060,13 @@ func TestAcc_Task_WithAfter(t *testing.T) {
 		WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()).
 		WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))).
 		WithSqlStatement(statement)
-	childTaskConfigModelWithAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithAfter.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	childTaskConfigModelWithoutAfter := model.TaskWithId("child", childId, true, statement).
 		WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()).
 		WithSchedule(schedule).
 		WithSqlStatement(statement)
-	childTaskConfigModelWithoutAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithoutAfter.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	resource.Test(t, resource.TestCase{
 		ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
@@ -1120,13 +1120,13 @@ func TestAcc_Task_WithFinalizer(t *testing.T) {
 		WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()).
 		WithFinalize(rootId.FullyQualifiedName()).
 		WithSqlStatement(statement)
-	childTaskConfigModelWithFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithFinalizer.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	childTaskConfigModelWithoutFinalizer := model.TaskWithId("child", childId, true, statement).
 		WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()).
 		WithSchedule(schedule).
 		WithSqlStatement(statement)
-	childTaskConfigModelWithoutFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithoutFinalizer.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	resource.Test(t, resource.TestCase{
 		ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
@@ -1181,14 +1181,14 @@ func TestAcc_Task_UpdateFinalizerExternally(t *testing.T) {
 		WithSchedule(schedule).
 		WithComment("abc").
 		WithSqlStatement(statement)
-	childTaskConfigModelWithoutFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithoutFinalizer.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	childTaskConfigModelWithFinalizer := model.TaskWithId("child", childId, true, statement).
 		WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()).
 		WithFinalize(rootId.FullyQualifiedName()).
 		WithComment("abc").
 		WithSqlStatement(statement)
-	childTaskConfigModelWithFinalizer.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithFinalizer.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	resource.Test(t, resource.TestCase{
 		ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
@@ -1292,14 +1292,14 @@ func TestAcc_Task_UpdateAfterExternally(t *testing.T) {
 		WithSchedule(schedule).
 		WithComment("abc").
 		WithSqlStatement(statement)
-	childTaskConfigModelWithoutAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithoutAfter.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	childTaskConfigModelWithAfter := model.TaskWithId("child", childId, true, statement).
 		WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()).
 		WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))).
 		WithComment("abc").
 		WithSqlStatement(statement)
-	childTaskConfigModelWithAfter.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithAfter.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	resource.Test(t, resource.TestCase{
 		ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
@@ -1403,14 +1403,14 @@ func TestAcc_Task_issue2207(t *testing.T) {
 		WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))).
 		WithComment("abc").
 		WithSqlStatement(statement)
-	childTaskConfigModel.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModel.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	childTaskConfigModelWithDifferentComment := model.TaskWithId("child", childId, true, statement).
 		WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()).
 		WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))).
 		WithComment("def").
 		WithSqlStatement(statement)
-	childTaskConfigModelWithDifferentComment.SetDependsOn([]string{rootTaskConfigModel.ResourceReference()})
+	childTaskConfigModelWithDifferentComment.SetDependsOn(rootTaskConfigModel.ResourceReference())

 	resource.Test(t, resource.TestCase{
 		ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnAllTasks/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnAllTasks/test.tf
index f4c901edaf..cc7d4094a5 100644
--- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnAllTasks/test.tf
+++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnAllTasks/test.tf
@@ -6,6 +6,7 @@ resource "snowflake_task" "test" {
   database      = var.database
   schema        = var.schema
   name          = var.task
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }

@@ -13,6 +14,7 @@ resource "snowflake_task" "second_test" {
   database      = var.database
   schema        = var.schema
   name          = var.second_task
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }

diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask/test.tf
index f7b80a6d9a..df66234f14 100644
--- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask/test.tf
+++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask/test.tf
@@ -7,6 +7,7 @@ resource "snowflake_task" "test" {
   schema        = var.schema
   name          = var.task
   warehouse     = var.warehouse
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }

diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/1/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/1/test.tf
index f7b80a6d9a..df66234f14 100644
--- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/1/test.tf
+++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/1/test.tf
@@ -7,6 +7,7 @@ resource "snowflake_task" "test" {
   schema        = var.schema
   name          = var.task
   warehouse     = var.warehouse
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }

diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/2/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/2/test.tf
index 5aa8c57b5f..9dab06be4e 100644
--- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/2/test.tf
+++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/2/test.tf
@@ -7,6 +7,7 @@ resource "snowflake_task" "test" {
   schema        = var.schema
   name          = var.task
   warehouse     = var.warehouse
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }

@@ -15,7 +16,8 @@ resource "snowflake_task" "child" {
   schema        = var.schema
   name          = var.child
   warehouse     = var.warehouse
-  after         = [snowflake_task.test.name]
+  after         = [snowflake_task.test.fully_qualified_name]
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }

diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/3/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/3/test.tf
index c8ef0f9c56..6acde0d353 100644
--- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/3/test.tf
+++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/3/test.tf
@@ -7,5 +7,6 @@ resource "snowflake_task" "test" {
   schema        = var.schema
   name          = var.task
   warehouse     = var.warehouse
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }
diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/4/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/4/test.tf
index d57869ed64..f653336ba3 100644
--- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/4/test.tf
+++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/4/test.tf
@@ -7,6 +7,7 @@ resource "snowflake_task" "test" {
   schema        = var.schema
   name          = var.task
   warehouse     = var.warehouse
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }

@@ -15,7 +16,8 @@ resource "snowflake_task" "child" {
   schema        = var.schema
   name          = var.child
   warehouse     = var.warehouse
-  after         = [snowflake_task.test.name]
+  after         = [snowflake_task.test.fully_qualified_name]
+  started       = false
   sql_statement = "SELECT CURRENT_TIMESTAMP"
 }